id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4953996 | <filename>mopidy_radio_pi/data/radiopidb.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3 as lite
import sys
# Open (or create) the SQLite database file in the current working directory.
con = lite.connect('radiopi.db')

# The connection context manager commits on success and rolls back on error.
with con:
    cur = con.cursor()
    # The commented-out statements below document the full schema; uncomment a
    # DROP/CREATE pair to (re)build that table.  Only TrackLikes is rebuilt by
    # this run -- note the DROP destroys any existing TrackLikes rows.
    #cur.execute("DROP TABLE IF EXISTS Tracklist")
    #cur.execute("CREATE TABLE Tracklist(Id INTEGER PRIMARY KEY, ThemeId INT, PlaylistUri TEXT, TrackListTitle TEXT, TrackListDescription TEXT, TrackListImage TEXT, TracklistUser TEXT, TrackListDate DATE, IsCurrent INT)")
    #cur.execute("DROP TABLE IF EXISTS TracklistTracks")
    #cur.execute("CREATE TABLE TracklistTracks(Id INTEGER PRIMARY KEY, TracklistId INT, PlaylistUri TEXT, UserProfileId INT, TrackTitle TEXT, TrackArtist TEXT, TrackAlbum TEXT, TrackUri TEXT, ChosenBy TEXT, DedicatedTo TEXT, Comments TEXT, DateAdded DATE, Username TEXT, BeenPlayed INT, OnHold INT)")
    #cur.execute("DROP TABLE IF EXISTS CurrentTrack")
    #cur.execute("CREATE TABLE CurrentTrack(Id INTEGER PRIMARY KEY, TrackId INT, TracklistId INT, TrackUri TEXT)")
    #cur.execute("DROP TABLE IF EXISTS TrackComments")
    #cur.execute("CREATE TABLE TrackComments(Id INTEGER PRIMARY KEY, TrackId INT, TrackUri TEXT, UserProfileId INT, Username TEXT, Comments TEXT, DateAdded DATE)")
    cur.execute("DROP TABLE IF EXISTS TrackLikes")
    cur.execute("CREATE TABLE TrackLikes(Id INTEGER PRIMARY KEY, TrackId INT, TrackUri TEXT, TrackTitle TEXT, TrackArtist TEXT, TrackAlbum TEXT, TracklistId INT, UserProfileId INT, Username TEXT, Comments TEXT, DateLiked DATE, HostAddress TEXT)")
    #cur.execute("DROP TABLE IF EXISTS TrackSkips")
    #cur.execute("CREATE TABLE TrackSkips(Id INTEGER PRIMARY KEY, TrackId INT, TrackUri TEXT, TracklistId INT, UserProfileId INT, Username TEXT, Comments TEXT, DateSkipped DATE, HostAddress TEXT)")
    #cur.execute("DROP TABLE IF EXISTS UserProfile")
    #cur.execute("CREATE TABLE UserProfile(Id INTEGER PRIMARY KEY, UserName TEXT, Password TEXT, FirstName TEXT, Surname TEXT, Email TEXT, NickName TEXT, Bio TEXT, ProfilePicture TEXT, Vibes INT, LevelId INT, TracksRequested INT, TracksSkipped INT, TracksSucessfullySkipped INT, TracksLiked INT, Enabled INT, PermissionLevel INT)")
    #cur.execute("DROP TABLE IF EXISTS RequestedTracks")
    #cur.execute("CREATE TABLE RequestedTracks(Id INTEGER PRIMARY KEY, TracklistId INT, TrackId INT, UserProfileId INT, Username TEXT, PlaylistUri TEXT, TrackTitle TEXT, TrackArtist TEXT, TrackUri TEXT, ChosenBy TEXT, DedicatedTo TEXT, Comments TEXT, DateAdded DATE, BeenPlayed INT, OnHold INT)")
    #cur.execute("DROP TABLE IF EXISTS FavouriteTracks")
    #cur.execute("CREATE TABLE FavouriteTracks(Id INTEGER PRIMARY KEY, UserProfileId INT, Username TEXT, TrackUri TEXT, TrackTitle TEXT, TrackArtist TEXT, DateAdded DATE)")
    #cur.execute("DROP TABLE IF EXISTS Level")
    #cur.execute("CREATE TABLE Level(Id INTEGER PRIMARY KEY, LevelName TEXT, LevelDescription TEXT, LevelIcon TEXT, VibesRequired INT, TracksAllowed INT, VotePower INT, SkipPower INT)")
    #cur.execute("DROP TABLE IF EXISTS Theme")
    #cur.execute("CREATE TABLE Theme(Id INTEGER PRIMARY KEY, ThemeTitle TEXT, ThemeDescription TEXT, ThemeImage TEXT)")
    #cur.execute("DROP TABLE IF EXISTS ThemeBanners")
    #cur.execute("CREATE TABLE ThemeBanners(Id INTEGER PRIMARY KEY, ThemeId INT, BannerImageFileName TEXT, BannerImagePath TEXT, DateAdded DATE)")
    #cur.execute("DROP TABLE IF EXISTS UserNotifications")
    #cur.execute("CREATE TABLE UserNotifications(Id INTEGER PRIMARY KEY, UserProfileId INT, UserName TEXT, NotificationText TEXT, Read INT, DateAdded DATE)")
| StarcoderdataPython |
4981208 | # -*- coding: utf-8 -*-
# @Author: <NAME>
# @E-mail: <EMAIL>
# @Date: 2020-04-21 13:39:27
# @Last Modified by: <NAME>
# @Last Modified time: 2020-06-01 17:49:46
import os
import argparse as ap
from MAESTRO.scATAC_H5Process import *
from MAESTRO.scRNA_QC import scrna_qc
def scrna_analysis_parser(subparsers):
    """
    Register the "scrna-analysis" sub-command on `subparsers`.

    Arguments are organised in three groups: input files, quality-control
    cutoffs and output location.  The sub-parser is registered in place;
    nothing is returned.
    """
    workflow = subparsers.add_parser("scrna-analysis",
        help = "Run MAESTRO analysis pipeline from scRNA-seq gene-cell count matrix. ")

    group_input = workflow.add_argument_group("Input files arguments")
    group_input.add_argument("--format", dest = "format", default = "",
        choices = ["h5", "mtx", "plain"],
        help = "Format of the count matrix file.")
    group_input.add_argument("--matrix", dest = "matrix", default = "",
        help = "Location of count matrix file. "
        "If the format is 'h5' or 'plain', users need to specify the name of the count matrix file. "
        "If the format is 'mtx', the 'matrix' should be the name of .mtx formatted matrix file, such as 'matrix.mtx'.")
    group_input.add_argument("--separator", dest = "separator", default = "tab",
        choices = ["tab", "space", "comma"],
        help = "The separating character (only for the format of 'plain'). "
        "Values on each line of the plain matrix file will be separated by the character. DEFAULT: tab.")
    group_input.add_argument("--feature", dest = "feature", default = "features.tsv",
        help = "Location of feature file (required for the format of 'mtx'). "
        "Features correspond to row indices of count matrix. DEFAULT: features.tsv.")
    group_input.add_argument("--gene-column", dest = "gene_column", default = 2, type = int,
        help = "If the format is 'mtx', please specify which column of the feature file to use for gene names. DEFAULT: 2.")
    group_input.add_argument("--gene-idtype", dest = "gene_idtype", default = "symbol",
        choices = ["symbol", "ensembl"],
        help = "Type of gene name, 'symbol' for gene symbol and 'ensembl' for ensembl id. DEFAULT: symbol.")
    group_input.add_argument("--barcode", dest = "barcode", default = "barcodes.tsv",
        help = "Location of barcode file (required for the format of 'mtx'). "
        "Cell barcodes correspond to column indices of count matrix. DEFAULT: barcodes.tsv. ")
    group_input.add_argument("--meta-file", dest = "meta_file", default = "",
        help = "Location of metadata file. "
        "The metadata file should be a table with cells as rows and meta information as columns. "
        "The first line of the metadata file should contain the names of the variables.")
    group_input.add_argument("--meta-sep", dest = "meta_sep", default = "tab",
        choices = ["tab", "space", "comma"],
        help = "The separating character of the metadata file. "
        "Values on each line of the metadata file will be separated by the character. DEFAULT: tab.")
    group_input.add_argument("--meta-cell-column", dest = "meta_cell", default = 1, type = int,
        help = "Please specify which column of the metadata file to use for cell ID. DEFAULT: 1.")
    group_input.add_argument("--assembly", dest = "assembly", default = "GRCh38",
        choices = ["GRCh38", "GRCm38", "GRCh37", "NCBIM37"], type = str,
        help = "Assembly (GRCh38/hg38 and GRCh37/hg19 for human, GRCm38/mm10 and NCBIM37/mm9 for mouse). DEFAULT: GRCh38.")

    # Quality control cutoff
    group_cutoff = workflow.add_argument_group("Quality control arguments")
    group_cutoff.add_argument("--count-cutoff", dest = "count_cutoff", default = 1000, type = int,
        help = "Cutoff for the number of count in each cell. DEFAULT: 1000.")
    group_cutoff.add_argument("--gene-cutoff", dest = "gene_cutoff", default = 500, type = int,
        help = "Cutoff for the number of genes included in each cell. DEFAULT: 500.")
    group_cutoff.add_argument("--cell-cutoff", dest = "cell_cutoff", default = 10, type = int,
        help = "Cutoff for the number of cells covered by each gene. DEFAULT: 10.")

    group_output = workflow.add_argument_group("Output arguments")
    group_output.add_argument("-d", "--directory", dest = "directory", default = "MAESTRO",
        help = "Path to the directory where the result file shall be stored. DEFAULT: MAESTRO.")
    group_output.add_argument("--outprefix", dest = "outprefix", default = "MAESTRO",
        help = "Prefix of output files. DEFAULT: MAESTRO.")
# Generate Rscript
def GenerateRscript(count_file, gene_idtype, gene_cutoff, cell_cutoff, meta_file, meta_sep, meta_cell, assembly, outprefix, directory):
    """Write the Seurat/MAESTRO analysis R script for one scRNA-seq run.

    Parameters:
        count_file: path of the filtered gene-count HDF5 matrix (Read10X_h5).
        gene_idtype: "symbol" or "ensembl"; ensembl ids get converted in R.
        gene_cutoff, cell_cutoff: QC cutoffs forwarded to RNARunSeurat
            (min genes per cell / min cells per gene).
        meta_file, meta_sep, meta_cell: optional per-cell metadata table, its
            separator keyword ("tab"/"space"/"comma") and 1-based cell column.
        assembly: genome assembly; GRCh37/NCBIM37 are lifted to GRCh38/GRCm38.
        outprefix, directory: output file prefix and destination directory.

    Returns:
        Absolute path of the generated "<outprefix>.R" script.
    """
    rfile = os.path.join(directory, "%s.R" %(outprefix))
    # Context manager closes the handle even on error (original leaked it).
    with open(rfile, "w") as outf:
        # load package
        script = '''# load package
library(MAESTRO)
library(Seurat)
library(ggplot2)
library(future)
plan("multiprocess", workers = 8)
options(future.globals.maxSize = 10*1024^3)\n
'''
        outf.write(script)
        # read data
        script = '''# read data
expr = Read10X_h5("%s")
''' %(count_file)
        outf.write(script)
        # assembly conversion and gene id conversion; `species` is the
        # post-conversion assembly passed to RNARunSeurat below.
        if assembly == "GRCh37":
            if gene_idtype == "symbol":
                script = '''
# assembly conversion
expr = RNAAssemblyConvert(expr, from = "GRCh37", to = "GRCh38", organism = "Human")
'''
            elif gene_idtype == "ensembl":
                script = '''
# gene id conversion
expr = RNAEnsemblToSymbol(expr, organism = "GRCh38")
'''
            outf.write(script)
            species = "GRCh38"
        elif assembly == "GRCh38":
            if gene_idtype == "ensembl":
                script = '''
# gene id conversion
expr = RNAEnsemblToSymbol(expr, organism = "GRCh38")
'''
                outf.write(script)
            species = "GRCh38"
        elif assembly == "NCBIM37":
            if gene_idtype == "symbol":
                script = '''
# assembly conversion
expr = RNAAssemblyConvert(expr, from = "NCBIM37", to = "GRCm38", organism = "Mouse")
'''
            elif gene_idtype == "ensembl":
                script = '''
# gene id conversion
expr = RNAEnsemblToSymbol(expr, organism = "GRCm38")
'''
            outf.write(script)
            species = "GRCm38"
        elif assembly == "GRCm38":
            if gene_idtype == "ensembl":
                script = '''
# gene id conversion
expr = RNAEnsemblToSymbol(expr, organism = "GRCm38")
'''
                outf.write(script)
            species = "GRCm38"
        # analysis
        script = '''
# choose optimal pc and resolution based on cell number
cells <- ncol(expr)
if (cells <= 5000) {
dims.use <- 1:15; cluster.res <- 0.6
} else if(cells <= 10000) {
dims.use <- 1:20; cluster.res <- 1
} else if(cells <= 40000) {
dims.use <- 1:30; cluster.res <- 1
} else if(cells <= 80000) {
dims.use <- 1:40; cluster.res <- 1
} else if(cells <= 150000) {
dims.use <- 1:50; cluster.res <- 1
} else {
dims.use <- 1:75; cluster.res <- 1
}\n
# choose npc
npc <- ifelse(max(dims.use) < 50, 50,
ifelse(max(dims.use) < 75, 75, 100))\n
# clustering
RNA.res = RNARunSeurat(inputMat = expr,
project = "%s",
min.c = %d,
min.g = %d,
runpca.agrs = list(npcs = npc),
dims.use = dims.use,
variable.genes = 2000,
organism = "%s",
cluster.res = 0.6,
genes.test.use = "presto",
only.pos = TRUE,
genes.cutoff = 1e-05)\n
# cell-type annotation
RNA.res$RNA = RNAAnnotateCelltype(RNA = RNA.res$RNA,
genes = RNA.res$genes,
signatures = "human.immune.CIBERSORT",
min.score = 0.1)
''' %(outprefix, cell_cutoff, gene_cutoff, species)
        outf.write(script)
        # read metadata
        if meta_file:
            if meta_sep == "tab":
                sep = "\\t"
            elif meta_sep == "space":
                sep = " "
            elif meta_sep == "comma":
                sep = ","
            # NOTE(review): the "<EMAIL>" tokens below look like redaction
            # artifacts in the recovered source (plausibly
            # "RNA.res$RNA@meta.data" etc.) -- confirm against upstream
            # MAESTRO before relying on this branch.
            script = '''
# add metadata
meta = read.delim("%s", header = T, row.names = %d, sep = "%s")
<EMAIL> = cbind(<EMAIL>, meta[colnames(RNA.res$RNA),, drop = FALSE])
for (i in colnames(meta)) {
p = DimPlot(object = RNA.res$RNA, group = i, label = FALSE, pt.size = 0.1)
if (length(unique(<EMAIL>[,i])) > 20) {
plot_width = 10
} else {
plot_width = 7
}
ggsave(file.path(paste0(<EMAIL>, "_", i, ".png")), p, width=plot_width, height=5)
}
''' %(os.path.abspath(meta_file), meta_cell, sep)
            outf.write(script)
        script = '''
# save object
saveRDS(RNA.res, "%s_res.rds")
''' %(outprefix)
        outf.write(script)
    return os.path.abspath(rfile)
def scrna_analysis(directory, outprefix, fileformat, matrix, separator, feature, gene_column, gene_idtype, barcode, meta_file, meta_sep, meta_cell, count_cutoff, gene_cutoff, cell_cutoff, assembly):
    """Run QC on the count matrix, generate the R analysis script and execute it.

    The QC'd matrix is written by scrna_qc into the hard-coded "Data"
    subdirectory; the generated R script goes to `directory` and is run
    with `Rscript` (assumed to be on PATH -- TODO confirm; os.system does
    not check the exit status).
    """
    try:
        os.makedirs(directory)
    except OSError:
        # either directory exists (then we can ignore) or it will fail in the
        # next step.
        pass
    scrna_qc("Data", outprefix, fileformat, matrix, separator, feature, gene_column, barcode, count_cutoff, gene_cutoff, cell_cutoff, assembly)
    count_file = os.path.abspath(os.path.join("Data", outprefix + "_filtered_gene_count.h5"))
    rscript = GenerateRscript(count_file, gene_idtype, gene_cutoff, cell_cutoff, meta_file, meta_sep, meta_cell, assembly, outprefix, directory)
    cmd = "Rscript %s" %(rscript)
    os.system(cmd)
| StarcoderdataPython |
3293973 | <filename>python/migration.py<gh_stars>0
import requests
from requests_oauthlib import OAuth1
import json
from QBO_request import api_call
# Loaded once at import time; migrate_tokens() reads the OAuth1 credentials
# and endpoint URLs from this dict.
with open('config.json', 'r') as f:
    config = json.load(f)


def migrate_tokens():
    '''Migrate tokens from OAuth1 to OAuth2.

    POSTs the OAuth2 client details to the configured migration endpoint,
    signed with the existing OAuth1 credentials, and returns the decoded
    JSON response (carrying the new OAuth2 tokens and realmId).
    '''
    headers = { 'Content-Type': 'application/json' }
    auth = OAuth1 (
        config['OAuth1']['consumer_key'],
        config['OAuth1']['consumer_secret'],
        config['OAuth1']['access_token'],
        config['OAuth1']['access_secret']
    )
    body = {
        'scope':'com.intuit.quickbooks.accounting',
        'redirect_uri': config['Redirect_url'],
        'client_id': config['OAuth2ClientId'],
        'client_secret': config['OAuth2ClientSecret']
    }
    # timeout added so a stalled endpoint cannot hang the script forever
    r = requests.post(config['Migration_url'], headers=headers, auth=auth,
                      data=json.dumps(body), timeout=30)
    print ('Migration API call:')
    print ('Status code: '+str(r.status_code))
    print (r.json())
    return r.json()


# NOTE(review): these run at import time (network side effects); consider
# guarding them with `if __name__ == "__main__":` if this module is imported.
oauth2_tokens = migrate_tokens()
# Optionally, make a sample QBO API request
company_info = api_call(oauth2_tokens['access_token'], oauth2_tokens['realmId'])
| StarcoderdataPython |
3387398 | <filename>SDK & Exmaples/examples/python/USB2GPIO/USB2GPIO_Test/USB2GPIO_Test.py<gh_stars>0
#-*- coding: utf-8 -*-
from ctypes import *
import platform
from time import sleep
from usb_device import *
from usb2gpio import *
if __name__ == '__main__':
    # Handle table for up to 20 attached adapters; handle 0 is used below.
    DevHandles = (c_int * 20)()
    DevHandle = 0
    # Scan device
    ret = USB_ScanDevice(byref(DevHandles))
    if(ret == 0):
        print("No device connected!")
        exit()
    else:
        print("Have %d device connected!"%ret)
    DevHandle = DevHandles[0]  # select device 0 (translated)
    # Open device
    ret = USB_OpenDevice(DevHandle)
    if(bool(ret)):
        print("Open device success!")
    else:
        print("Open device faild!")
        exit()
    # Get device infomation
    LuceroInfo = DEVICE_INFO()
    LuceroFunctionString = (c_char * 256)()
    ret = DEV_GetDeviceInfo(DevHandle,byref(LuceroInfo),byref(LuceroFunctionString))
    if(bool(ret)):
        print("Lucero device infomation:")
        print("--Firmware Name: %s"%bytes(LuceroInfo.FirmwareName).decode('ascii'))
        # Versions are packed as major.minor.patch in a 32-bit word.
        print("--Firmware Version: v%d.%d.%d"%((LuceroInfo.FirmwareVersion>>24)&0xFF,(LuceroInfo.FirmwareVersion>>16)&0xFF,LuceroInfo.FirmwareVersion&0xFFFF))
        print("--Hardware Version: v%d.%d.%d"%((LuceroInfo.HardwareVersion>>24)&0xFF,(LuceroInfo.HardwareVersion>>16)&0xFF,LuceroInfo.HardwareVersion&0xFFFF))
        print("--Build Date: %s"%bytes(LuceroInfo.BuildDate).decode('ascii'))
        print("--Serial Number: ",end=' ')
        for i in range(0, len(LuceroInfo.SerialNumber)):
            print("%08X"%LuceroInfo.SerialNumber[i],end='')
        print("")
        print("--Function String: %s"%bytes(LuceroFunctionString.value).decode('ascii'))
    else:
        print("Get device infomation faild!")
        exit()
    # Set the GPIO I/O voltage; only effective on adapters with adjustable
    # output voltage, other adapters are fixed at 3.3V. (translated)
    DEV_SetPowerLevel(DevHandle,POWER_LEVEL_3V3)
    # Output test -- mode 0 (no pull). (translated)
    GPIO_SetOutput(DevHandle,0xFFFF,0)
    for i in range(0,10):
        GPIO_Write(DevHandle,0xFFFF,0xAAAA)
        GPIO_Write(DevHandle,0xFFFF,0x5555)
    # Output test -- pull-up (mode 1). (translated)
    GPIO_SetOutput(DevHandle,0xFFFF,1)
    for i in range(0,10):
        GPIO_Write(DevHandle,0xFFFF,0xAAAA)
        GPIO_Write(DevHandle,0xFFFF,0x5555)
    # Output test -- pull-down (mode 2). (translated)
    GPIO_SetOutput(DevHandle,0xFFFF,2)
    for i in range(0,10):
        GPIO_Write(DevHandle,0xFFFF,0xAAAA)
        GPIO_Write(DevHandle,0xFFFF,0x5555)
    # Input test -- floating. (translated)
    GPIO_SetInput(DevHandle,0xFFFF,0)
    PinValue = c_uint(0)
    GPIO_Read(DevHandle,0xFFFF,byref(PinValue))
    print("READ DATA(Float):%04X"%PinValue.value)
    # Input test -- pull-up input. (translated)
    GPIO_SetInput(DevHandle,0xFFFF,1)
    GPIO_Read(DevHandle,0xFFFF,byref(PinValue))
    print("READ DATA(Pu):%04X"%PinValue.value)
    # Input test -- pull-down input. (translated)
    GPIO_SetInput(DevHandle,0xFFFF,2)
    GPIO_Read(DevHandle,0xFFFF,byref(PinValue))
    print("READ DATA(Pd):%04X"%PinValue.value)
    # Open-drain test -- floating. (translated)
    GPIO_SetOpenDrain(DevHandle,0xFFFF,0)
    GPIO_Read(DevHandle,0xFFFF,byref(PinValue))
    print("READ DATA(OD-Float):%04X"%PinValue.value)
    # Open-drain test -- pull-up input. (translated)
    GPIO_SetOpenDrain(DevHandle,0xFFFF,1)
    GPIO_Read(DevHandle,0xFFFF,byref(PinValue))
    print("READ DATA(OD-Pu):%04X"%PinValue.value)
    # Open-drain test -- pull-down input. (translated)
    GPIO_SetOpenDrain(DevHandle,0xFFFF,2)
    GPIO_Read(DevHandle,0xFFFF,byref(PinValue))
    print("READ DATA(OD-Pd):%04X"%PinValue.value)
    # Close device
    ret = USB_CloseDevice(DevHandle)
    if(bool(ret)):
        print("Close device success!")
    else:
        print("Close device faild!")
        exit()
| StarcoderdataPython |
4800076 | #!/usr/bin/env python3
import csv
import os
import time
import sys
from datetime import datetime
from sys import platform
try:
from bs4 import BeautifulSoup
import requests
except:
sys.exit(sys.argv[0] + "maybe 'pip install requests bs4' first, then do a 'pip install bs4' then try again.")
# Scrape source for the published exchange rates.
url = "http://www.bsp.com.pg/International/Exchange-Rates/Exchange-Rates.aspx"

# Per-platform home directory for the cached rates and country-code files.
# NOTE(review): any non-Linux platform (including macOS) takes the
# USERPROFILE branch, as in the original -- confirm that is intended.
if platform == "linux" or platform == "linux2":
    home = os.environ['HOME']
else:  # win32 and everything else
    home = os.environ['USERPROFILE']

data, country_code = {}, {}
# os.path.join avoids the invalid "\." escape sequence of the original
# Windows string literals (same resulting paths on both platforms).
csv_file = os.path.join(home, ".bsp_rates.csv")
csv_codes = os.path.join(home, ".cc.csv")
def get_fx_rates():
    """Scrape the BSP exchange-rate table into the module-level dicts.

    Populates `data[CODE] = rate` and `country_code[code_lower] = country`.
    Exits the process with a message if the request fails.
    """
    try:
        r = requests.get(url, timeout=15)
    # narrowed from a bare `except:` which also swallowed KeyboardInterrupt
    except requests.RequestException:
        sys.exit("Check Internet Connection")
    soup = BeautifulSoup(r.text, 'lxml')
    table = soup.find('table', attrs={'class': 'table-striped'}).find('tbody')
    for row in table.find_all('tr'):
        cells = row.text.strip().split('\n')
        # column layout observed on the page: [3]=country, [4]=code, [5]=rate
        ccode, country, value = cells[4], cells[3], cells[5]
        data[ccode] = float(value)
        country_code[ccode.lower()] = country
def save_csv_rates():
    """Persist the in-memory `data` rates to `csv_file` (country,rate rows)."""
    with open(csv_file, "w", newline="\n") as f:
        w = csv.DictWriter(f, fieldnames=['country', 'rate'])
        w.writeheader()
        # iterate items directly instead of the original enumerate() misuse
        for country, rate in data.items():
            w.writerow({'country': country.lower(), 'rate': rate})
def save_csv_country_codes():
    """Persist the `country_code` mapping to `csv_codes` (code,country rows)."""
    with open(csv_codes, "w", newline="\n") as f:
        w = csv.DictWriter(f, fieldnames=['code', 'country'])
        w.writeheader()
        # iterate items directly instead of the original enumerate() misuse
        for code, country in country_code.items():
            w.writerow({'code': code, 'country': country})
def read_csv_rate():
    """Load cached rates from `csv_file` into the module-level `data` dict."""
    with open(csv_file, "r") as f:
        for row in csv.DictReader(f):
            data[row['country']] = float(row['rate'])
def read_csv_country_codes():
    """Load cached country codes from `csv_codes` into `country_code`."""
    with open(csv_codes, "r") as f:
        for row in csv.DictReader(f):
            country_code[row['code']] = row['country']
def init():
    """Ensure a rates cache exists and is fresh (< 24h), then load it.

    Fetches from the network on first run or when the cache file is older
    than one day, then reads the rates into the module-level `data` dict.
    """
    if not os.path.isfile(csv_file):
        # plain string -- the original used an f-string with no placeholder
        print("Getting fresh rates...")
        get_fx_rates()
        save_csv_rates()
        save_csv_country_codes()
    now = time.time()
    # getmtime: last-modified time, i.e. when the cache was last refreshed
    mtime = os.path.getmtime(csv_file)
    stamp = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d %H:%M:%S')
    # 86400: the number of seconds in a day
    if now - mtime > 86400:
        print(f"Rates last updated: {stamp}... Updating...")
        get_fx_rates()
        save_csv_rates()
    read_csv_rate()
def convert(c_code, amt):
    """Print the PGK equivalent of `amt` units of currency `c_code`.

    Exits with a usage message when the code is unknown.
    """
    if not valid_c_code(c_code):
        sys.exit(f"\n\n** Invalid Country Code! **\n{usage}")
    print(f"{amt} {c_code.upper()} to PGK")
    # direct dict lookup replaces the original O(n) scan over all rates
    rate = data.get(c_code)
    if rate is not None:
        print(f"Rate today: {rate}")
        print("Converted: K{:.2f}".format(float(amt) / rate))
def show_codes():
    """Print the table of supported country codes, fetching if no cache exists."""
    if not os.path.isfile(csv_codes):
        get_fx_rates()
        save_csv_country_codes()
    read_csv_country_codes()
    print("\nCode:\tCountry:")
    print("================================")
    # iterate items directly; the original enumerate index was unused
    for code, country in country_code.items():
        print(f"{code}\t{country}")
def valid_c_code(code):
    """Return True if `code` is a known country code, refreshing the cache if empty."""
    read_csv_country_codes()
    if not country_code:
        get_fx_rates()
        save_csv_country_codes()
        read_csv_country_codes()
    # direct boolean expression replaces the original if/return True/False
    return code in country_code
# Help text shown for bad invocations (wording kept verbatim).
usage = f'''
USAGE:
======\n
> python {sys.argv[0]} <country code> <ammount in foreign currency>
eg:
> python {sys.argv[0]} usd 1999.19
NOTE:
=====\n
> To get valid country codes, just type:
> python {sys.argv[0]} codes
'''

if __name__ == '__main__':
    argc = len(sys.argv)
    if argc == 2:
        if sys.argv[1] == 'codes':
            show_codes()
            # sys.exit instead of the site-dependent exit() builtin
            sys.exit(0)
        else:
            print("\n** Need Help **")
            print(usage)
    elif argc == 3:
        country = sys.argv[1]
        # tolerate thousands separators, e.g. "1,999.19"
        pgk = float(sys.argv[2].replace(',', ''))
        init()
        convert(country, pgk)
        sys.exit(0)
    else:
        print(usage)
| StarcoderdataPython |
def strong_prefix_suffix(w, m):
    """Compute the strong prefix-suffix (strong border) table of w[1..m].

    `w` is 1-indexed: w[0] is an unused sentinel character.  Returns a
    list sB[0..m] with sB[0] == -1, where sB[i] is the length of the
    longest border of w[1..i] whose following character differs from
    w[i+1] (the KMP "strong" failure function).
    """
    strong = [-1] + [0] * m
    border = -1
    for i in range(1, m + 1):
        # invariant: border is the longest border length of w[1..i-1]
        while border >= 0 and w[border + 1] != w[i]:
            border = strong[border]
        border += 1
        if i < m and w[border + 1] == w[i + 1]:
            # extendable border: inherit the strong value instead
            strong[i] = strong[border]
        else:
            strong[i] = border
    return strong
9782108 | <gh_stars>1-10
#!/usr/bin/python3
# Generate svg rectangles from coordinate quadruples
# author: <NAME> <<EMAIL>>
# usage: python3 quad2svg.py [-s SHIFTX SHIFTY -d DELIMITER] RECTCOORDFILENAME > OUTPUTRECT.svgfrac
# svgfrac is incomplete svg file, they should be appended in the <g></g> tag in a svg file.
# SHIFTX & SHIFTY : shift position, default (0,0).
# DELIMITER : seperator of data, default ','.
# structure:
# box1_x_min,box1_y_min,box1_x_max,box1_y_max
# box2_x_min,box2_y_min,box2_x_max,box2_y_max
# ...
import csv
import codecs
import argparse
parser = argparse.ArgumentParser(description="Generate svg rectangles from coordinate quadruples.\nstructure:\n box1_x_min,box1_y_min,box1_x_max,box1_y_max\n box2_x_min,box2_y_min,box2_x_max,box2_y_max\n ...");
parser.add_argument("RECTCOORDFILENAME", help="the data file which stores coordinates of rectangles");
parser.add_argument("-s", "--shift", nargs=2, metavar=('SHIFTX', 'SHIFTY'), dest='shift', type=float, default=[0.,0.], help="SHIFTX & SHIFTY : shift position, default (0,0).");
parser.add_argument("-d", "--delimiter", metavar=('DELIMITER'), dest='inputdelimiter', default=',', help="delimiter to seperate coordinates, default ','.");
parser.add_argument("-i", "--idstart", type=int, metavar=('IDSTART'), dest='idstart', default=0, help="start id, default 0.");
args = parser.parse_args();
delimiter=codecs.escape_decode(bytes(args.inputdelimiter, "utf-8"))[0].decode("utf-8"); # http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python#answer-37059682
inputfileobj=open(args.RECTCOORDFILENAME,'r');
filereader=csv.reader(inputfileobj, delimiter=delimiter);
linenum=0;
for coordquad in filereader:
linenum+=1;
if (len(coordquad)<4):
raise(Exception("Error: incomplete quadruple detected at line %d." % linenum));
else:
x=float(coordquad[0])-args.shift[0];
y=1052.3622047-float(coordquad[1])+args.shift[1];
width=abs(float(coordquad[0])-float(coordquad[2]));
xmin=min(x,float(coordquad[2])-args.shift[0]);
height=abs(float(coordquad[1])-float(coordquad[3]));
ymin=min(y,1052.3622047-float(coordquad[3])+args.shift[1]);
print("<rect id=\"rectgen%d\" style=\"opacity:0.5;fill:#ff7f2a\" width=\"%.7f\" height=\"%.7f\" x=\"%.7f\" y=\"%.7f\" />" % (linenum+args.idstart-1,width,height,xmin,ymin));
| StarcoderdataPython |
246689 | <reponame>feilaoda/espider<gh_stars>1-10
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
import imp
import hashlib
class TestClass(object):
    """Minimal demo class that stores its single constructor argument."""

    def __init__(self, arg):
        # no parent state to initialize beyond object's defaults
        self.arg = arg
def md5str(url):
    """Return the hex MD5 digest of `url` (str or bytes).

    A str is UTF-8 encoded first; surrounding whitespace is stripped from
    the bytes before hashing, matching the original behavior.
    """
    # isinstance instead of the `type(x) == str` anti-pattern
    if isinstance(url, str):
        url = url.encode('utf-8')
    return hashlib.md5(url.strip()).hexdigest()
def split_cookie(cookies):
    """Parse a cookie header string into a dict of name -> value.

    Drops the `path` and `HttpOnly` attributes and any bare attribute
    without a value.  Uses split('=', 1) so cookie values that themselves
    contain '=' (e.g. base64 padding) are preserved -- the original
    split('=') silently dropped such pairs.
    """
    cookie_headers = {}
    for item in cookies.split(';'):
        parts = item.strip().split('=', 1)
        if len(parts) == 2:
            k, v = parts
            if k == 'path' or k == 'HttpOnly':
                continue
            cookie_headers[k] = v
    return cookie_headers


def merge_cookie(new_cookie, old_cookie):
    """Merge two cookie strings; pairs from `new_cookie` override `old_cookie`.

    Returns a "k=v; k=v" string.  `old_cookie` may be None.
    """
    merged = {} if old_cookie is None else split_cookie(old_cookie)
    merged.update(split_cookie(new_cookie))
    return "; ".join('%s=%s' % (key, value) for key, value in merged.items())
def import_object(name, arg=None):
    """Import and return the object at dotted path `name`.

    "pkg.mod.attr" imports "pkg.mod" and returns its attribute "attr";
    a bare module name returns the module itself.  `arg` is the fallback
    returned when the final attribute is missing.

    The original left several debug print() calls in place, one of which
    called the looked-up object (`o()`) -- instantiating it as a side
    effect on every lookup; those are removed.
    """
    if '.' not in name:
        return __import__(name)
    parts = name.split('.')
    obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
    return getattr(obj, parts[-1], arg)
def load_module(filepath):
    """Load a .py file and return its `Spider` class, or None if absent.

    Returns None when `filepath` does not end in ".py" or when the loaded
    module defines no `Spider` attribute.
    """
    expected_class = 'Spider'
    mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
    if file_ext.lower() != '.py':
        return None
    # importlib replaces the deprecated `imp` module (removed in Python 3.12)
    import importlib.util
    spec = importlib.util.spec_from_file_location(mod_name, filepath)
    if spec is None or spec.loader is None:
        return None
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, expected_class, None)
| StarcoderdataPython |
3262011 | <gh_stars>1-10
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .encoder import Encoder
from src.modules.nn_layers import *
from src.utils.args import args
class DensenetEncoder(Encoder):
    """DenseNet-based convolutional encoder producing a diagonal Gaussian.

    forward() maps an image batch to (mu, softplus-activated scale), each of
    size `output_dim`, split from the final Linear's 2*output_dim output.
    NOTE(review): DenseNet, Conv2d and Flatten come from the project's
    src.modules.nn_layers -- their exact behavior is assumed, not verified
    here (e.g. the 45+nc input channels presume DenseNet concatenates its
    input to its feature maps).
    """

    def __init__(self, output_dim, input_shape):
        # input_shape is assumed channels-first (C, H, W) -- TODO confirm.
        super().__init__()
        nc = input_shape[0]
        if nc == 3:
            # two stride-2 convs below downsample spatial dims by 4
            H_OUT, W_OUT = input_shape[-2] // 4, input_shape[-1] // 4
        else:
            H_OUT, W_OUT = 7, 7 # HACK only for 28x28 dimentions!
        self.main_nn = nn.Sequential(
            DenseNet(nc, 15, 3),
            Conv2d(45+nc, 48,
                kernel_size=3, stride=2, padding=1),
            nn.ELU(),
            DenseNet(48, 16, 3),
            Conv2d(96, 96,
                kernel_size=3, stride=2, padding=1),
            nn.ELU(),
            DenseNet(96, 16, 6),
            Conv2d(192, 96,
                kernel_size=1, stride=1, padding=0),
            nn.ELU(),
            Conv2d(96, 24,
                kernel_size=1, stride=1, padding=0),
            nn.ELU(),
            Flatten(),
            nn.Linear(24 * H_OUT * W_OUT, 2 * output_dim)
        )

    def forward(self, input):
        # split the 2*output_dim features into mean and pre-scale halves;
        # softplus keeps the returned scale strictly positive
        mu, logvar = self.main_nn(input).chunk(2, 1)
        return mu, F.softplus(logvar)
if __name__ == "__main__":
pass
| StarcoderdataPython |
11318257 | # python2.6
# -*- coding: utf-8 -*-
# RFC 3720 (iSCSI) protocol implementation for conformance testing.
"""
iSCSI header definitions and constructor interfaces.
"""
from struct import Struct
from collections import namedtuple
from pycopia.iscsi.constants import *
## Field objects ##
class FieldBase(object):
    """Base for field packers/unpackers.

    Each instance records the byte offset of one header field inside the
    basic header segment; subclasses supply a struct packer (`_packer`)
    and its byte `size`.
    """
    __slots__ = ("_offset",)
    _packer = None
    size = None

    def __init__(self, offset):
        self._offset = offset

    def pack_into(self, buf, value):
        """Pack `value` into mutable buffer `buf` at this field's offset."""
        packer = self.__class__._packer
        packer.pack_into(buf, self._offset, value)

    def unpack_from(self, buf):
        """Unpack and return this field's value from `buf`."""
        packer = self.__class__._packer
        return packer.unpack_from(buf, self._offset)[0]


class ByteField(FieldBase):
    """Unsigned 8-bit field, network byte order."""
    # empty __slots__ keeps subclass instances dict-free; without it each
    # instance regains a __dict__, defeating FieldBase's __slots__
    __slots__ = ()
    _packer = Struct("!B")
    size = _packer.size


class HalfField(FieldBase):
    """Unsigned 16-bit field, network byte order."""
    __slots__ = ()
    _packer = Struct("!H")
    size = _packer.size


class IntField(FieldBase):
    """Unsigned 32-bit field, network byte order."""
    __slots__ = ()
    _packer = Struct("!I")
    size = _packer.size
class ISIDField(FieldBase):
    """Packer for the 6-byte ISID (Initiator Session ID) field.

    Values are 4-tuples matching the "!BHBH" layout (1+2+1+2 = 6 bytes).
    """
    _packer = Struct("!BHBH")
    size = _packer.size

    def pack_into(self, buf, value):
        # slice assignment requires `buf` to be mutable (e.g. bytearray)
        offset = self._offset
        raw = ISIDField._packer.pack(*value)
        buf[offset:offset+ISIDField.size] = raw

    def unpack_from(self, buf):
        # TODO(review): not implemented -- currently returns None
        pass # XXX


class DataSegmentLength(FieldBase):
    """Special handler for DataSegmentLength, a 24 bit packed field."""
    _packer = Struct("!I")
    size = 3

    def pack_into(self, buf, value):
        # pack as big-endian 32-bit, then keep only the low-order 3 bytes
        offset = self._offset
        raw = DataSegmentLength._packer.pack(value)
        buf[offset:offset+3] = raw[1:]

    def unpack_from(self, buf):
        # TODO(review): not implemented -- currently returns None
        pass # XXX
# Some specialized field value types
class ISID(namedtuple('ISIDBase', "a, b, c, d")):
    """Value part of an ISID header field (4-tuple matching ISIDField's layout)."""
    # TODO: type and bit field access


class SSID(namedtuple('SSIDBase', "isid, tgpt")):
    """Session ID value: an (isid, tgpt) pair."""
    pass
# generates field instances with buffer offsets
def _field_generator(specs):
offset = 0
d = {}
for name, cls in specs:
d[name] = cls(offset)
offset += cls.size
return d
### Header objects ###
class ISCSIHeader(object):
    """Base class for iSCSI basic header segments.

    Subclasses define OP (the header opcode) and _FIELDS (a mapping of
    field name to a packer object that knows its own buffer offset).
    Field values live in a plain dict; unset fields read back as 0.
    """
    _FIELDS = None # dict mapping field name to its packer/offset object.
    OP = None # PDU header base opcode

    def __init__(self, **kwargs):
        self._field_values = {"opcode": self.__class__.OP}
        fields = self.__class__._FIELDS
        for kwname, kwvalue in kwargs.items():
            if kwname in fields:
                # BUG FIX: was `object.__setitem__(self, ...)` -- `object`
                # has no __setitem__, so every keyword argument raised
                # AttributeError instead of being stored.
                self._field_values[kwname] = kwvalue
            else:
                raise TypeError("Invalid field name: %r" % (kwname,))
        self.initialize()

    def initialize(self):
        """Override in subclasses for additional initialization."""
        pass

    def __getitem__(self, name):
        # unset but valid fields default to 0
        if name in self.__class__._FIELDS:
            return self._field_values.get(name, 0)
        else:
            raise KeyError("Invalid field name: %r" % (name,))

    def __setitem__(self, name, value):
        if name in self.__class__._FIELDS:
            self._field_values[name] = value
        else:
            raise KeyError("Invalid field name: %r" % (name,))

    def pack_into(self, buf):
        """Pack every set field into `buf` (a mutable buffer, e.g. bytearray)."""
        for name, value in self._field_values.items():
            field = self.__class__._FIELDS[name]
            field.pack_into(buf, value)
class AdditionalHeader(ISCSIHeader):
    """Base for Additional Header Segments (AHS)."""
    pass # TODO


class RequestHeader(ISCSIHeader):
    """Base class for initiator PDUs.
    Provides functionality common to all request PDU headers.
    """

    def _get_immediate(self):
        # OP_IMMEDIATE comes from pycopia.iscsi.constants; presumably the
        # immediate-delivery "I" bit of the opcode byte -- TODO confirm value.
        return self._field_values["opcode"] & OP_IMMEDIATE

    def _set_immediate(self, flag):
        opcode = self._field_values["opcode"]
        if flag:
            opcode = opcode | OP_IMMEDIATE
        else:
            opcode = opcode & ~OP_IMMEDIATE
        self._field_values["opcode"] = opcode

    def _clear_immediate(self):
        # used as the property's deleter: `del hdr.immediate` clears the bit
        self._field_values["opcode"] = self._field_values["opcode"] & ~OP_IMMEDIATE

    immediate = property(_get_immediate, _set_immediate, _clear_immediate)


class ResponseHeader(ISCSIHeader):
    """Base class for target PDUs.
    Provides functionality common to all response PDU headers.
    """

    def decode(self, buf):
        # TODO(review): target-side parsing of a raw header buffer is not
        # implemented yet
        pass
#### concrete PDU headers. ####
class LoginHeader(RequestHeader):
    """Login Request header.

    Field offsets are assigned sequentially by _field_generator in
    declaration order, so the list below mirrors the wire layout.
    """
    OP = OP_LOGIN
    _FIELDS = _field_generator([
        ('opcode', ByteField),
        ('flags', ByteField),
        ('VersionMax', ByteField),
        ('VersionMin', ByteField),
        ('totalAHSLength', ByteField),
        ('dataSegmentLength', DataSegmentLength),
        ('ISID', ISIDField),
        ('TSIH', HalfField),
        ('ITT', IntField),
        ('CID', HalfField),
        ('_rsvd', HalfField),
        ('CmdSN', IntField),
        ('ExpStatSN', IntField),
    ])

    def initialize(self):
        # advertise the draft-20 protocol version on both bounds
        self.__setitem__("VersionMax", DRAFT20_VERSION)
        self.__setitem__("VersionMin", DRAFT20_VERSION)
        self.__setitem__("ITT", 0)
        self.immediate = True # login requests are always immediate
# NOTE(review): apart from LoginResponseHeader's partial field list, the
# classes below define only their opcode -- without a _FIELDS table,
# constructing them with keyword fields or calling pack_into() will fail
# until their layouts are filled in.
class LoginResponseHeader(ResponseHeader):
    """Login Response Header """
    OP = OP_LOGIN_RSP
    _FIELDS = _field_generator([
        ('opcode', ByteField),
    ])


class LogoutHeader(RequestHeader):
    """Logout Header """
    OP = OP_LOGOUT


class LogoutResponseHeader(ResponseHeader):
    """Logout Response Header """
    OP = OP_LOGOUT_RSP


class SCSICommandHeader(RequestHeader):
    """SCSI Command header."""
    OP = OP_SCSI_CMD


class SCSICommandResponseHeader(ResponseHeader):
    """SCSI command response"""
    OP = OP_SCSI_CMD_RSP


class AsynchronousEventHeader(ResponseHeader):
    """Asynchronous Event Header """
    OP = OP_ASYNC_EVENT


# NOTE(review): the two NOP classes look crossed -- NOPOutHeader derives
# from ResponseHeader with OP_NOOP_IN while NOPInHeader derives from
# RequestHeader with OP_NOOP_OUT; confirm names/opcodes against the
# constants module before use.
class NOPOutHeader(ResponseHeader):
    """NOP-Out Message """
    OP = OP_NOOP_IN


class NOPInHeader(RequestHeader):
    """NOP-In Message """
    OP = OP_NOOP_OUT


class TaskManagementMessageHeader(RequestHeader):
    """SCSI Task Management Message Header """
    OP = OP_SCSI_TMFUNC


class TaskManagementResponseHeader(ResponseHeader):
    """SCSI Task Management Response Header """
    OP = OP_SCSI_TMFUNC_RSP


class R2THeader(ResponseHeader):
    """Ready To Transfer Header """
    OP = OP_R2T


class SCSIDataHeader(RequestHeader):
    """SCSI Data Hdr """
    OP = OP_SCSI_DATA_OUT


class SCSIDataResponseHeader(ResponseHeader):
    """SCSI Data Response Hdr """
    OP = OP_SCSI_DATA_IN


class TextHeader(RequestHeader):
    """Text Header """
    OP = OP_TEXT


class TextResponseHeader(ResponseHeader):
    """Text Response Header """
    OP = OP_TEXT_RSP


class SNACKHeader(RequestHeader):
    """SNACK Header """
    OP = OP_SNACK


class RejectMessageHeader(ResponseHeader):
    """Reject Message Header """
    OP = OP_REJECT


class RlengthHeader(AdditionalHeader):
    """Bidirectional read-length AHS (layout not yet defined)."""
    pass


class ExtendedCDBHeader(AdditionalHeader):
    """Extended CDB AHS """
#### Data segments ####
class KeyValueData(dict):
    """Dict of iSCSI text key/value pairs.

    encode() renders "key=value" entries separated and terminated by NUL
    bytes, the form used for login/text negotiation data segments.  An
    empty dict encodes to a single NUL.
    """

    def encode(self):
        pairs = ["%s=%s" % (key, value) for key, value in self.items()]
        return "\0".join(pairs) + "\0"
##### PDUs (whole messages) #####
class ISCSIPDU(object):
    """A complete iSCSI PDU: basic header plus optional AHS, digests and data.

    encode() assembles the segments in reverse wire order so segment
    lengths can be computed before the basic header is packed, then
    reverses the list for output.
    """

    def __init__(self):
        self._header = None
        self._additional_headers = []
        self._use_header_digest = False
        self._data = None
        self._use_data_digest = False

    def encode(self):
        """Serialize this PDU to a string (this module targets Python 2)."""
        # encode in reverse PDU order so lengths can be computed and
        # placed in header.
        pdulist = []
        ahslength = 0
        dlength = 0
        # add data, if present
        if self._data is not None:
            dbuf = self._data.encode()
            dlength = len(dbuf) # data length does not include padding
            # pad the data segment up to a PAD_LEN boundary
            r = dlength % PAD_LEN
            if r:
                dbuf += "\0" * (PAD_LEN - r)
            pdulist.append(dbuf)
            # add data digest if required, digest includes padding
            if self._use_data_digest:
                pdulist.insert(0, _data_digest(dbuf))
            self._header["dataSegmentLength"] = dlength
        # add header digest if required
        if self._use_header_digest:
            pdulist.append(None) # TODO: fix this ugly hack. None is replaced by CRC value later
        # do additional header segments first, so length can be computed
        for h in self._additional_headers:
            buf = h.encode()
            pdulist.append(buf)
            ahslength += len(buf)
        # NOTE(review): under Python 3 `/` would yield a float here; the
        # interpreter comment at the top of this file says python2.6.
        self._header["totalAHSLength"] = ahslength / 4
        # encode basic header
        hbuf = bytearray(48)
        self._header.pack_into(hbuf)
        pdulist.append(str(hbuf))
        # TODO: fix this ugly hack
        if self._use_header_digest:
            _header_digest(pdulist)
        pdulist.reverse()
        return "".join(pdulist)

    def _set_data(self, data):
        self._data = data

    def _del_data(self):
        self._data = None

    # data-segment accessor; deleting the attribute resets it to None
    data = property(lambda s: s._data, _set_data, _del_data)

    def add_additional_header(self, hdr):
        # TODO(review): AHS accumulation not implemented yet
        pass
def _header_digest(pdulist):
    """Stub: should replace the None placeholder in *pdulist* with the
    header CRC32C digest; currently a no-op (see TODO in ISCSIPDU.encode)."""
    return pdulist # TODO
def _data_digest(buf):
    """Stub: should return the CRC32C digest of the (padded) data segment."""
    return "" # TODO
class LoginPDU(ISCSIPDU):
    """Login Request PDU: a LoginHeader plus key=value negotiation data."""
    def __init__(self):
        super(LoginPDU, self).__init__()
        self._header = LoginHeader()
        self._data = KeyValueData()
    def _set_transit(self, flag):
        if flag:
            self._header["flags"] |= FLAG_LOGIN_TRANSIT
        else:
            self._header["flags"] &= ~FLAG_LOGIN_TRANSIT
    def _get_transit(self):
        return self._header["flags"] & FLAG_LOGIN_TRANSIT
    # Transit (T) bit: request to move to the stage given by next_stage
    transit = property(_get_transit, _set_transit)
    def _set_continue(self, flag):
        if flag:
            self._header["flags"] |= FLAG_LOGIN_CONTINUE
        else:
            self._header["flags"] &= ~FLAG_LOGIN_CONTINUE
    def _get_continue(self):
        return self._header["flags"] & FLAG_LOGIN_CONTINUE
    continue_ = property(_get_continue, _set_continue) # "continue" is a keyword
    def _get_isid(self):
        return self._header["ISID"]
    def _set_isid(self, value):
        # resolves to the module-level ISID class: the class attribute below
        # shadows the name only in the class namespace, not inside methods
        assert isinstance(value, ISID)
        self._header["ISID"] = value
    ISID = property(_get_isid, _set_isid)
    def _get_current_stage(self):
        # current-stage (CSG) field occupies bits 2-3 of the flags byte
        return (self._header["flags"] & FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2
    def _set_current_stage(self, value):
        value = (value << 2) & FLAG_LOGIN_CURRENT_STAGE_MASK
        self._header["flags"] &= ~FLAG_LOGIN_CURRENT_STAGE_MASK
        self._header["flags"] |= value
    current_stage = property(_get_current_stage, _set_current_stage)
    def _get_next_stage(self):
        # next-stage (NSG) field occupies the low bits of the flags byte
        return self._header["flags"] & FLAG_LOGIN_NEXT_STAGE_MASK
    def _set_next_stage(self, value):
        self._header["flags"] &= ~FLAG_LOGIN_NEXT_STAGE_MASK
        self._header["flags"] |= (value & FLAG_LOGIN_NEXT_STAGE_MASK)
    next_stage = property(_get_next_stage, _set_next_stage)
    # PDUs with key-value data use mapping interface
    def __setitem__(self, name, value):
        self._data[name] = value
    def __getitem__(self, name):
        return self._data[name]
    def __delitem__(self, name):
        del self._data[name]
if __name__ == "__main__":
    # Smoke test (Python 2): build a Login PDU and hex-dump its encoding.
    from pycopia import aid
    from pycopia import autodebug
    pdu = LoginPDU()
    pdu.ISID = ISID(0, 0x023d, 3, 0)
    buf = pdu.encode()
    print aid.str2hex(buf)
    pdu.transit = True
    # NOTE(review): next_stage is assigned twice and the first value is
    # immediately overwritten -- the FULL_FEATURE_PHASE line was presumably
    # meant to set current_stage instead; confirm against the asserts below.
    pdu.next_stage = FULL_FEATURE_PHASE
    pdu.next_stage = OP_PARMS_NEGOTIATION_STAGE
    pdu["SessionType"] = "Normal"
    pdu["AuthMethod"] = "Chap,None"
    buf = pdu.encode()
    print aid.str2hex(buf)
    # byte 0: opcode OP_LOGIN (0x03) with the immediate bit (0x40) set
    assert buf[0] == chr(0x03 + 0x40)
    # byte 1: transit bit plus the negotiated stage bits
    assert buf[1] == chr(0x81)
    # PDUs are always padded to a 4-byte boundary
    assert len(buf) % 4 == 0
| StarcoderdataPython |
1841529 | # private keys
# Toy Diffie-Hellman key exchange demo.
# Private keys, kept secret by each party.
x = 91
y = 71
# Public parameters: generator g and modulus n.
# NOTE(review): n = 27 is tiny and composite -- fine for a demo, insecure in practice.
g = 13
n = 27
# Each party publishes g^secret mod n.
my_public = pow(g,x,n)
your_public = pow(g,y,n)
# Each party combines the other's public value with its own secret;
# both arrive at the same shared key g^(x*y) mod n.
my_key = pow(your_public,x,n)
your_key = pow(my_public, y,n)
print(my_key, your_key)
| StarcoderdataPython |
5099974 | <gh_stars>0
"""
Java exception thrown for non-keyword argument following keyword
"""
def parrot(**args):
pass
parrot(voltage=5.0, 'dead')
| StarcoderdataPython |
9638203 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import etcd
# Walk-through of the python-etcd client API against a local etcd server.
# Bug fix at the bottom: `directory.children.next()` is the Python 2 iterator
# protocol; on Python 3 (this file's shebang) generators have no .next()
# method, so it is replaced with the builtin next().
# 1. Initialize the client
# The library default is (host='127.0.0.1', port=4001); a stock etcd deployment listens on 2379
# client = etcd.Client() # this will create a client against etcd server running on localhost on port 4001
# client = etcd.Client(port=4002)
client = etcd.Client(host='127.0.0.1', port=2379)
# client = etcd.Client(host=(('127.0.0.1', 4001), ('127.0.0.1', 4002), ('127.0.0.1', 4003)))
# client = etcd.Client(host='127.0.0.1', port=4003, allow_redirect=False) # wont let you run sensitive commands on non-leader machines, default is true
# If you have defined a SRV record for _etcd._tcp.example.com pointing to the clients
# client = etcd.Client(srv_domain='example.com', protocol="https")
# create a client against https://api.example.com:443/etcd
# client = etcd.Client(host='api.example.com', protocol='https', port=443, version_prefix='/etcd')
# 2. Write a key
client.write('/nodes/n1', 1)
# with ttl
client.write('/nodes/n2', 2, ttl=4) # sets the ttl to 4 seconds
client.set('/nodes/n2', 1) # Equivalent, for compatibility reasons.
# 3. Read a key
value1 = client.read('/nodes/n2').value
value2 = client.read('/nodes', recursive=True) # get all the values of a directory, recursively.
value3 = client.get('/nodes/n2').value
print("value1=", value1)
print("value2=", value2)
print("value3=", value3)
# raises etcd.EtcdKeyNotFound when key not found
try:
    client.read('/invalid/path')
except etcd.EtcdKeyNotFound:
    # do something
    print("error")
# 4. Delete a key
client.delete('/nodes/n1')
# 5. Atomic Compare and Swap
'''
client.write('/nodes/n2', 2, prevValue = 4) # will set /nodes/n2 's value to 2 only if its previous value was 4 and
client.write('/nodes/n2', 2, prevExist = False) # will set /nodes/n2 's value to 2 only if the key did not exist before
client.write('/nodes/n2', 2, prevIndex = 30) # will set /nodes/n2 's value to 2 only if the key was last modified at index 30
client.test_and_set('/nodes/n2', 2, 4) #equivalent to client.write('/nodes/n2', 2, prevValue = 4)
'''
# You can also atomically update a result:
result = client.read('/foo')
print(result.value) # bar
result.value += u'bar'
updated = client.update(result) # if any other client wrote '/foo' in the meantime this will fail
print(updated.value) # barbar
# 6. Watch a key
'''
client.read('/nodes/n1', wait = True) # will wait till the key is changed, and return once its changed
client.read('/nodes/n1', wait = True, timeout=30) # will wait till the key is changed, and return once its changed, or exit with an exception after 30 seconds.
client.read('/nodes/n1', wait = True, waitIndex = 10) # get all changes on this key starting from index 10
client.watch('/nodes/n1') #equivalent to client.read('/nodes/n1', wait = True)
client.watch('/nodes/n1', index = 10)
'''
# 7. Refreshing key TTL
client.write('/nodes/n1', 'value', ttl=30) # sets the ttl to 30 seconds
client.refresh('/nodes/n1', ttl=600) # refresh ttl to 600 seconds, without notifying current watchers
value1 = client.read('/nodes/n2').value
value2 = client.read('/nodes', recursive=True) # get all the values of a directory, recursively.
value3 = client.get('/nodes/n2').value
print("value1=", value1)
print("value2=", value2)
print("value3=", value3)
# 9. Cluster information
# Get machines in the cluster
print(client.machines)
# Get leader of the cluster
print(client.leader)
# Generate a sequential key in a directory
x = client.write("/Users/yangzl/mysoft/etcd-v3.3.12-darwin-amd64/test", "value", append=True)
print("generated key: " + x.key)
print("stored value: " + x.value)
# List contents of a directory
#stick a couple values in the directory
client.write("/dir/name", "value1", append=True)
client.write("/dir/name", "value2", append=True)
directory = client.get("/dir/name")
# loop through directory children
for result in directory.children:
    print(result.key + ": " + result.value)
# or just get the first child value (fixed: next(gen) instead of gen.next())
print(next(directory.children).value)
| StarcoderdataPython |
1977243 | <filename>GA.py
####### PART 1.A - EA #######
# Name : <NAME>
# Student ID : HW00281038
# Date : Oct. 1st 2017
##############################
import random
import math
import numpy as np
import itertools
import copy
import time
import pandas as pd
import matplotlib.pyplot as plt
import profile
import functools
import operator
import time
from random import shuffle
import heapq
from statistics import mean
from operator import methodcaller
# TSP_Cost function implemented in Cython. Note : Would have to recompile in order to rename...
import test_fast
def checkData(data):
    """Sanity-check a tour permutation (used inside asserts).

    NOTE(review): the early 'return True' disables all checks below -- the
    hard-coded length 29 matches an older 29-city dataset, not the
    Luxembourg set loaded in __main__ -- so the remaining code is
    unreachable and kept only as documentation of the original intent.
    """
    return True
    if len(data) != 29:
        return False
    if len(data) > len(set(data)):
        return False
    return True
def checkCityDistances():
    """Visual sanity check: plot a trivial (identity-order) tour on the map.

    The pairwise distance printing was disabled; only the geoPlot remains.
    NOTE(review): data is built from range(0, lenght) then index 0 removed,
    so the highest city id is never included -- acceptable for a visual
    check, but worth confirming.
    """
    trav = Traveler(range(0,Traveler.encoding['lenght']))
    # drop the leading 0: dataset index 0 is the "EMPTY" padding entry
    del(trav.data[0])
    #trav.data.append(trav.data[0])
    #for x in range(1,Traveler.encoding['lenght']-1):
    #    distance = test_fast.TSP_Cost(Traveler.encoding['dataset'][trav.data[x]][1], Traveler.encoding['dataset'][trav.data[x]][2], Traveler.encoding['dataset'][trav.data[x+1]][1], Traveler.encoding['dataset'][trav.data[x+1]][2])
    #    print(f"Distance between city {x} and city {x+1} : {distance}")
    geoPlot(trav)
def fitnessPlotFromFile():
    """Re-plot the full fitness/generation curve recorded by the last run.

    Reads logs/last_fitness_record, skipping the 'generation;fitness'
    header line; FileNotFoundError propagates when no run was recorded.

    Fix: the file is now opened through a context manager -- the original
    left the file object unreferenced and the handle open.
    """
    with open("logs/last_fitness_record", 'r') as record:
        data = [line.strip() for line in record][1:]
    lst = []
    for row in data:
        # NOTE(review): values were written with str(); int() will raise if
        # TSP_Cost ever returns floats -- confirm test_fast's return type.
        lst.append(list(map(int, row.split(';'))))
    fitnessPlot(lst, 0, True)
def fitnessPlot(fitness, last, new_figure = False): # Part of this should be moved to the init phase so that it is not executed multiple times unnecessarily
    """Plot best-individual fitness against generation count.

    fitness    -- list of (generation, fitness) tuples
    last       -- number of trailing samples to show; 0 shows the whole
                  history (list[-0:] is the full list)
    new_figure -- draw into a separate figure instead of reusing/clearing
                  the live one (used when re-plotting from file)
    """
    if new_figure:
        plt.figure(500)
    else:
        plt.figure(300)
        plt.clf()
    gen = [x[0] for x in fitness[-last:]]
    fit = [x[1] for x in fitness[-last:]]
    plt.plot(gen, fit)
    plt.xlabel('Generation count')
    plt.ylabel('Best individual fitness')
    plt.title('Fitness vs generations')
    #plt.text(gen[0]+10, fit[0], f'Current fitness : {fit[-1]}')
    plt.legend()
    plt.draw()
    plt.pause(0.01)
def geoPlot(best):
    """Draw *best*'s tour as a closed loop on the city-map figure.

    Bug fix: the original appended the first city to best.data and never
    removed it, so every call permanently grew the individual's tour and
    corrupted subsequent fitness evaluations. The loop is now closed on a
    local copy instead.
    """
    plt.figure(200)
    route = best.data + best.data[:1]  # close the tour without mutating best
    DATA = Traveler.encoding['dataset']
    for idx in range(len(route)-1):
        plt.plot((DATA[route[idx]][2],DATA[route[idx+1]][2]),(DATA[route[idx]][1],DATA[route[idx+1]][1]), marker = 'o')
    plt.draw()
    plt.pause(0.001)
class GA:
    """Genetic-algorithm driver for the TSP over a population of Travelers."""
    # actions available when the run stalls in a suspected local minimum
    stall_options = {'abort': 1, 'rm-dup':2, 'rm-dup-bts':3, 'ignore':4}
    # guards against re-enabling matplotlib interactive mode more than once
    not_init = True
    def __init__(self, config):
        """config -- a GA_configurator whose .settings dict is consumed here."""
        self.settings = config.settings
        # derive encoding from the dataset (index 0 is the "EMPTY" padding)
        self.settings['encoding']['lenght'] = len(self.settings['encoding']['dataset'])-1
        self.settings['encoding']['span'] = list(range(1,len(self.settings['encoding']['dataset'])))
        self.pop_size = self.settings['pop']['pop_size'] # Shorter alias
        Traveler.setSettings(self.settings)
        self.init_pop()
        # (generation, best fitness) samples recorded during start()
        self.fitness_record = []
def init_pop(self):
self.population = []
# Create a 10*$(pop_size) population
for x in range(0,self.pop_size*self.settings['pop']['init_factor']):
self.population.append(Traveler())
# Keep the best ones
self.sortPopulation()
self.population = self.population[:self.pop_size]
    def crossover(self, parents_ids):
        """Produce two children from the parents at indexes *parents_ids*.

        Only 'pmx' (partially-mapped crossover) is functional.
        NOTE(review): the 'one-point-co' branch references the undefined
        name `pop` and would raise NameError if ever selected.
        """
        algo_name = self.settings['algo']['crossover']
        #print(f"Using crossover {algo_name}")
        if algo_name == 'one-point-co':
            for x in pop:
                cross_indiv = self.population[random.randrange(0,self.pop_size)]
                x.crossover(cross_indiv)
        elif algo_name == 'pmx':
            # parents' fitness captured for the invariants asserted below
            p_fit = []
            p_fit.append(self.population[parents_ids[0]].getFitness())
            p_fit.append(self.population[parents_ids[1]].getFitness())
            x1_t = random.randrange(0,self.settings['encoding']['lenght'])
            x2_t = random.randrange(0,self.settings['encoding']['lenght'])
            x1 = min([x1_t,x2_t]) # x1 <= x2, otherwise list slices don't work
            x2 = max([x1_t,x2_t])
            # the two segments swapped between the parents
            chunk1 = self.population[parents_ids[0]].data[x1:x2+1]
            chunk2 = self.population[parents_ids[1]].data[x1:x2+1]
            # position-wise mappings between the segments (PMX repair maps)
            coor1 = {}
            coor2 = {}
            for idx, x in enumerate(chunk1):
                coor1[x] = chunk2[idx]
            for idx, x in enumerate(chunk2):
                coor2[x] = chunk1[idx]
            child1_data = [None] * self.settings['encoding']['lenght']
            child2_data = [None] * self.settings['encoding']['lenght']
            child1_data[x1:x2+1] = chunk2[:]
            child2_data[x1:x2+1] = chunk1[:]
            # fill remaining genes, chasing the mapping until the value is
            # no longer inside the swapped segment (keeps tours duplicate-free)
            for idx in range(0, self.settings['encoding']['lenght']):
                if idx < x1 or idx > x2:
                    p1_val = self.population[parents_ids[0]].data[idx]
                    if p1_val not in coor2:
                        child1_data[idx] = p1_val
                    else:
                        while p1_val in coor2:
                            p1_val = coor2[p1_val]
                        child1_data[idx] = p1_val
            for idx in range(0, self.settings['encoding']['lenght']):
                if idx < x1 or idx > x2:
                    p2_val = self.population[parents_ids[1]].data[idx]
                    if p2_val not in coor1:
                        child2_data[idx] = p2_val
                    else:
                        while p2_val in coor1:
                            p2_val = coor1[p2_val]
                        child2_data[idx] = p2_val
            assert(checkData(child2_data))
            assert(checkData(child1_data))
            children_arr = []
            children_arr.append(Traveler(child1_data))
            children_arr.append(Traveler(child2_data))
            return children_arr
    def select(self, nb, override_algo = None):
        """Select *nb* individuals and return their population *indexes*.

        Only binary tournament selection, configured as ('bts', percent),
        is implemented; with any other algorithm the returned list is
        simply empty. Indexes may repeat across the nb draws.
        """
        if override_algo == None:
            select_algo = self.settings['algo']['select']
        else:
            select_algo = override_algo
        ret_pop = []
        for _ in range(0,nb):
            if select_algo[0] == 'bts':
                #print(f"Using select {select_algo}")
                # Tournament population
                trm_ids = random.sample(range(0, len(self.population)), int(select_algo[1] * len(self.population) / 100)) # Can't use pop size if using elitsm, len(pop) != pop_size for now
                best_id = trm_ids[0]
                best_fitness = self.population[best_id].getFitness()
                # Get best individual from tournament
                for idx in trm_ids:
                    fitness = self.population[idx].getFitness() # Avoid recalculating fitness everytime
                    if fitness < best_fitness:
                        best_id = idx
                        best_fitness = fitness
                # Append selected individual to the list
                ret_pop.append(best_id)
        return ret_pop
    def roulette(self, nb, individuals):
        """Pick *nb* distinct individuals, strongly biased towards low
        (i.e. good) fitness via weights (sum - fitness)^2.

        Raises unless 0 < nb < len(individuals) and len(individuals) > 1.
        NOTE(review): the rejection loop enforcing distinct picks can spin
        for a long time when nb approaches len(individuals).
        """
        # roulette with high biais
        if(nb >= len(individuals)):
            raise Exception("Roulette must have more input individuals than output individuals : nb < len(individuals)")
        if(nb == 0 or len(individuals) <= 1):
            raise Exception("Roulette input count must be greater than 1 - output must be greater than 0")
        indiv_fitness = []
        for indiv in individuals:
            indiv_fitness.append(indiv.getFitness())
        # Product much faster than exponentiation. 6-7x
        sum_fitness = sum(indiv_fitness)
        real_fitness = [(sum_fitness-x)*(sum_fitness-x) for x in indiv_fitness]
        indiv_fitness_norm = [x/sum(real_fitness) for x in real_fitness]
        assert(round(sum(indiv_fitness_norm), 9) == 1.0) # Level to which numpy doesn't complain if sum != 1.0. Ex : p=[0.01,0.98999999] is fine
        idx = []
        for n in range(nb):
            new_id = np.random.choice(range(len(individuals)), p=indiv_fitness_norm)
            while new_id in idx: # Not optimized...
                new_id = np.random.choice(range(len(individuals)), p=indiv_fitness_norm)
            idx.append(new_id)
        return [individuals[id_] for id_ in idx]
    def nextGeneration(self):
        """Advance the GA one step using the configured update scheme:
        ('elitism', %) keeps the best and rebuilds the rest, while the
        replace-* schemes run (percent * pop_size / 100) update rounds of
        select -> crossover -> mutate -> replace."""
        update_algo = self.settings['algo']['update']
        co_algo = self.settings['algo']['crossover']
        if update_algo[0] == 'elitism':
            self.sortPopulation()
            # Current best individuals
            kept_index = math.floor(update_algo[1] * self.pop_size / 100)
            # Keep only the best ones !
            old_pop = self.population
            self.population = self.population[:kept_index]
            if co_algo != None:
                # Replenish population with children coming from crossover + mutated
                for _ in range(0,int((self.pop_size - kept_index)/2)):
                    children = self.crossover(self.select(2))
                    for child in children:
                        self.population.append(child)
                assert(self.population != old_pop)
            # Truncation algorithm
            else:
                # Replenish population with mutated copies of the best ones
                # NOTE(review): best aliases self.population (same list), so
                # the inner for iterates a list being appended to, and the
                # while tests != pop_size -- an overshoot would loop forever.
                while(len(self.population) != self.pop_size):
                    best = self.population # Temporary variable, can't append to the list being iterated over
                    for x in best:
                        new_indiv = Traveler(x.data)
                        new_indiv.mutate()
                        self.population.append(new_indiv)
        else:
            # Update rounds
            for _ in range(0, int(update_algo[1] * self.pop_size / 100)):
                # Select
                parents_ids = self.select(2)
                p_fit = []
                p_fit.append(self.population[parents_ids[0]].getFitness())
                p_fit.append(self.population[parents_ids[1]].getFitness())
                # Crossover
                if co_algo != None:
                    children = self.crossover(parents_ids)
                    assert(len(children) == 2)
                    assert(checkData(children[0].data))
                    assert(checkData(children[1].data))
                    assert(self.population[parents_ids[0]].getFitness() == p_fit[0])
                    assert(self.population[parents_ids[1]].getFitness() == p_fit[1])
                else:
                    children = [Traveler(self.population[x].data) for x in parents_ids]
                # Mutate
                for x in children:
                    x.mutate()
                # So that we replace optimally. Ex : p1 = 3, p2 = 7 must be replaced by ch1 = 9, ch2 = 5 in this order -> result : 7,9, otherwise 5,9
                children.sort(key=methodcaller('getFitness'), reverse=not self.settings['encoding']['maximize'])
                if self.population[parents_ids[0]].getFitness() > self.population[parents_ids[1]].getFitness():
                    parents_ids[0], parents_ids[1] = parents_ids[1], parents_ids[0]
                if update_algo[0] == 'proba-replace-parent':
                    indiv = children
                    indiv.extend([self.population[id_] for id_ in parents_ids])
                    replacement = self.roulette(2,indiv)
                    for idx in range(len(replacement)):
                        self.population[parents_ids[idx]] = replacement[idx]
                if update_algo[0] == 'replace-parent':
                    # Replace (parents)
                    # NOTE(review): p_fit is indexed in the pre-swap parent
                    # order while parents_ids may have been swapped above --
                    # children can be compared against the wrong parent.
                    for idx in range(0, 2):
                        ch_fit = children[idx].getFitness()
                        if ch_fit < p_fit[idx]:
                            self.population[parents_ids[idx]] = children[idx]
                            assert(ch_fit < p_fit[0] or ch_fit < p_fit[1])
                elif update_algo[0] == 'replace-worst':
                    #print(f"Using update {update_algo}")
                    self.sortPopulation()
                    for idx in range(0, 2):
                        ch_fit = children[idx].getFitness()
                        worst_fit = self.population[-2+idx].getFitness() # -2 + 0 = -2 : 2sd worst, replaced by best children, -2 + 1 = -1 : worst, replaced by worst child
                        if ch_fit < worst_fit:
                            self.population[-2+idx] = children[idx]
        # Used to check for any "population contamination" - ie. the data field of 2 individuals are pointing at the same memory space - they are linked -> reduced diversity.
        #for x in range(0, self.pop_size):
        #    for y in range(0,self.pop_size):
        #        if x != y:
        #            assert(self.population[x] is not self.population[y])
        #            assert(self.population[x].data is not self.population[y].data)
def fill(self):
while(len(self.population) < self.pop_size):
self.population.append(Traveler())
def sortPopulation(self):
self.population.sort(key=methodcaller('getFitness'), reverse=self.settings['encoding']['maximize'])
def getPopFitness(self, size=0):
if size == 0:
size = self.pop_size
return [x.getFitness() for x in self.population[0:size]]
# Returns a string containing information about the current generation population
def getPop(self, size = 0, pop_list = None):
if pop_list == None:
pop_list = self.population
if size == 0:
size = len(pop_list)
text = [str(x.id) + " - Fitness : " + str(x.getFitness()) for x in pop_list[:size]]
string = '\n'.join(str(x) for x in text)
return "Generation : {}\n".format(self.gen_count) + str(string) + "\nTraveler created count : {}".format(Traveler.created_count) + "\n"
# Starts the GA
    def start(self):
        """Run the GA until a stop goal is met (max generations, max time
        or target fitness, each disabled when 0).

        Returns (human-readable message, (gen_count, elapsed_seconds,
        best_fitness)). Also records the fitness curve to
        logs/last_fitness_record and drives the live plots.
        """
        self.gen_count = 0
        # Varibles used to stop the GA on specific goals
        max_gen = self.settings['stop']['max_gen']
        max_time = self.settings['stop']['max_time']
        min_perf = self.settings['stop']['aim']
        output = self.settings['output']
        stop_on_perf = (min_perf != 0)
        stop_on_time = (max_time != 0)
        stop_on_gen = (max_gen != 0)
        perf_stop = False
        time_stop = False
        gen_stop = False
        # Determines how often the output is made - every X generations
        calc_interval = output['out_interval']
        # Used to detect that the GA is stuck in a local minima
        datacheck_interval = 2*calc_interval
        top_count = int(self.pop_size/50) #Top 2%
        previous_top = []
        last_calc = 0
        last_check = 0
        start_time = time.time()
        # Prevents multiple or unnecessary matplotlib plot initialization
        # NOTE(review): precedence makes this (mode == 'plot') or
        # (mode == 'full' and not_init), and 'mode' is a *list* in __main__,
        # so both equality tests are normally False -- confirm intent.
        if output['mode'] == 'plot' or output['mode'] == 'full' and GA.not_init == True:
            plt.ion()
            GA.not_init = False
        # Main GA loop
        while time_stop == False and gen_stop == False and perf_stop == False:
            self.nextGeneration()
            # Whether or not it's time to display information to the user - and check if any goal is reached
            if last_calc > calc_interval:
                self.sortPopulation()
                pop_fitness = self.getPopFitness(5)
                self.fitness_record.append((self.gen_count, self.population[0].getFitness()))
                # Goals check
                # NOTE(review): time_elapsed is assigned at the bottom of the
                # loop, so it exists by the first time this branch runs; also
                # the >= min_perf test reads like a maximisation goal although
                # TSP minimises -- harmless while aim is 0, but confirm.
                if stop_on_perf and pop_fitness[0] >= min_perf:
                    perf_stop = True
                if stop_on_time and time_elapsed > max_time:
                    time_stop = True
                if stop_on_gen and self.gen_count > max_gen:
                    gen_stop = True
                # User output
                if any(x in ['text','full'] for x in output['mode']):
                    print(self.getPop(output['perf_ref']))
                # Displays a "map" of the cities - useless except if the GA is actually working well...
                if any(x in ['geoplot','plot','full'] for x in output['mode']):
                    geoPlot(self.population[0])
                # Displays partial fitness/generation curve
                if any(x in ['fitplot','plot','full'] for x in output['mode']):
                    fitnessPlot(self.fitness_record, 50)
                last_calc = 0
            else:
                last_calc +=1
            # Local minima detection
            if last_check >= datacheck_interval:
                new_top = [x.getFitness() for x in self.population[:top_count]]
                # Stall detected
                if new_top == previous_top :
                    if output['stall_action'] == 'manual':
                        print("Suspected local minimal detected - what do you want to do :")
                        print("1. Abort")
                        print("2. Remove all duplicates")
                        print("3. Remove duplicates and apply bts to the remainder")
                        print("4. Ignore")
                        choice = int(input())
                    else:
                        choice = GA.stall_options[output['stall_action']]
                    if choice == 1:
                        gen_stop = True
                    elif choice == 2:
                        self.cleanPop('rm-duplicates')
                        self.sortPopulation()
                        new_top = [x.getFitness() for x in self.population[:top_count]]
                    elif choice == 3:
                        self.cleanPop('rm-duplicates', 'bts')
                        self.sortPopulation()
                        new_top = [x.getFitness() for x in self.population[:top_count]]
                previous_top = new_top
                last_check = 0
            else:
                last_check +=1
            self.gen_count +=1
            time_elapsed = time.time() - start_time
        # Shows plots when GA is done and records the fitness/generation data to a file for further plotting
        if output['mode'] != 'none':
            geoPlot(self.population[0])
            fitnessPlot(self.fitness_record, 0)
            with open('logs/last_fitness_record', 'w') as f:
                f.write("generation;fitness\n")
                for x in self.fitness_record:
                    f.write(f"{x[0]};{x[1]}\n")
        # Shows the full fitness/generation curve
        # NOTE(review): this runs even when mode == 'none' (it sits outside
        # the if above) and will plot/raise based on a stale record file --
        # it probably belongs one indent level deeper.
        fitnessPlotFromFile()
        if perf_stop == True:
            return ("Desired fitness reached ! - {} generations and {} seconds".format(self.gen_count, time_elapsed), (self.gen_count, time_elapsed, self.getPopFitness(1)[0]))
        elif time_stop == True:
            return ("Excedeed max time ! - {} generations and {} seconds".format(self.gen_count, time_elapsed), (self.gen_count, time_elapsed, self.getPopFitness(1)[0]))
        elif gen_stop == True:
            return ("Excedeed max generation count ! - {} generations and {} seconds".format(self.gen_count, time_elapsed), (self.gen_count, time_elapsed, self.getPopFitness(1)[0]))
# Used when stuck in a local minima - removes duplicated and apply bts to the remaining population
def cleanPop(self, param, param2 = ''):
if param == 'rm-duplicates':
print("Removing duplicates from population - please wait...")
new_pop = []
for indiv in self.deduplicatePop():
new_pop.append(indiv)
# Non duplicated population
self.population = new_pop
if param2 == 'bts':
print("Applying BTS to the deduplicated population - please wait...")
# Keep half the remaining population
self.population = self.select(int(len(new_pop)/2), override_algo = 'bts')
print("Removing duplicates from bts population - please wait...")
# Re-deduplicate
for indiv in self.deduplicatePop():
new_pop.append(indiv)
print("Replacing missing individuals by new random - please wait...")
self.fill()
def deduplicatePop(self):
seen = set()
for indiv in self.population:
fit = indiv.getFitness()
if not fit in seen:
seen.add(fit)
yield indiv
# GA individual class - TSP cities visiting order
class Traveler:
    """One GA individual: a permutation of city ids forming a TSP tour."""
    # class-wide id generator and instance counter (diagnostics only)
    newid = itertools.count()
    created_count = 0
    def __init__(self, data = ""):
        """Build a random tour, or copy *data* (any city-id sequence)."""
        Traveler.created_count += 1
        self.id = next(Traveler.newid)
        self.mut_count = 0
        # has_mut marks the cached fitness as stale (True forces recompute)
        self.has_mut = True
        if data == "":
            self.data = list(Traveler.encoding_data)
            shuffle(self.data)
        else:
            self.data = list(data)
def setData(self, data):
self.data = data
    @classmethod
    def setSettings(cls, problem_settings):
        """Install the shared problem configuration on the class.

        Stores the encoding (dataset/span/lenght), mutation and crossover
        settings, and precomputes encoding_data: the full list of city ids
        that random tours are shuffled from.
        """
        Traveler.encoding = problem_settings['encoding']
        Traveler.mutation = problem_settings['algo']['mutate']
        Traveler.cross_over = problem_settings['algo']['crossover']
        Traveler.encoding_data = [_ for _ in range(min(Traveler.encoding['span']), max(Traveler.encoding['span'])+1)]
def getFitness(self):
if(self.has_mut):
total_len = 0
self.data.append(self.data[0]) # Go back to first city
for x in range(0,Traveler.encoding['lenght']-1):
total_len += test_fast.TSP_Cost(Traveler.encoding['dataset'][self.data[x]][1], Traveler.encoding['dataset'][self.data[x]][2], Traveler.encoding['dataset'][self.data[x+1]][1], Traveler.encoding['dataset'][self.data[x+1]][2])
del self.data[-1]
self.fitness = total_len
self.has_mut = False
return total_len
else:
return self.fitness
def mutate(self):
self.mut_count += 1
self.has_mut = True
# M-distinct-gene new-allele mutation (random or normal distribution) - not relevant with TSP
if type(Traveler.mutation[0]) == tuple and Traveler.mutation[0][0] == 'n-random':
for _ in range(0, self.mutation[0][1]):
rand_gene = random.randrange(0, len(self.data))
self.data[rand_gene] = self.getGeneVal(self.data[rand_gene])
# Genewise mutation - not relevant with TSP
elif Traveler.mutation[0] == 'genewise':
for x in range(0,Traveler.encoding['lenght']):
if random.choice('0000000000000000000000001'): # Better but slower : np.random.choice(2,1,p=[24/25,1/25]) 10-20x slow
self.data[x] = self.getGeneVal(self.data[x])
# Adjacent-swap
elif Traveler.mutation[0] == 'adj-swap':
rand_pos = random.randrange(1,Traveler.encoding['lenght'])
self.data[rand_pos-1], self.data[rand_pos] = self.data[rand_pos], self.data[rand_pos-1]
# Exchange mutation : random-swap
elif Traveler.mutation[0] == 'em':
rand_pos1 = random.randrange(0,Traveler.encoding['lenght'])
rand_pos2 = random.randrange(0,Traveler.encoding['lenght'])
self.data[rand_pos1], self.data[rand_pos2] = self.data[rand_pos2], self.data[rand_pos1]
# Inversion mutation : (1[23]4) -> (14[32])
elif Traveler.mutation[0] == 'ivm':
lenght = len(self.data)
x1_t = random.randrange(0,Traveler.encoding['lenght'])
x2_t = random.randrange(0,Traveler.encoding['lenght'])
x1 = min([x1_t,x2_t]) # x1 > x2 otherwise list slices don't work
x2 = max([x1_t,x2_t])
# Save and reverse chunk
chunk = self.data[x1:x2+1]
chunk = chunk[::-1] # Reverse chunk
count = 0
# Remove chunk
for _ in range(x1,x2+1):
count += 1
del self.data[x1] # Removing displaces elements... remove x time [x1] removes [x1..x1+x]
assert(count == len(chunk))
insert_pt = random.randrange(0,Traveler.encoding['lenght']-len(chunk)+1)
for x in range(0, len(chunk)):
self.data.insert(insert_pt+x, chunk[x])
assert(len(self.data) == lenght)
# Simple inversion mutation - Note : Wrongly typed... should be SIM - kept for consistency
elif Traveler.mutation[0] == 'ism':
start_data = self.data
x1_t = random.randrange(0,int(Traveler.encoding['lenght']/5)) ##### VARIBLE
x2_t = random.randrange(0,int(Traveler.encoding['lenght']/5))
x1 = min([x1_t,x2_t]) # x1 > x2 otherwise list slices don't work
x2 = max([x1_t,x2_t])
# Save and reverse chunk
chunk = self.data[x1:x2+1]
chunk = chunk[::-1] # Reverse chunk
self.data[x1:x2+1] = chunk
# Used to get a new random value - following a normal or uniform distribution
@classmethod
def getGeneVal(self, prev_val):
if Traveler.mutation[1] == 'random':
return random.randrange(min(Traveler.encoding['span'],max(Traveler.encoding['span'])))
elif Traveler.mutation[1] == 'normal':
new_value = abs(np.random.normal(prev_val, Traveler.mutation[2], 1)[0]) # Reverb value < min_encoding
max_encoding = max(Traveler.encoding['span'])
min_encoding = min(Traveler.encoding['span'])
new_value = int(round(new_value))
if new_value > max_encoding:
new_value = max_encoding - (new_value - max_encoding) # Reverb value > max_encoding
if new_value == 0:
new_value = min_encoding
return new_value
assert(work)
# Used to configure the GA algorithm - not necessary per se - but why not.
class GA_configurator:
    """Whitelist-based settings collector for the GA.

    Usage: conf(setting='pop', pop_size=500, ...). Unknown sections and
    unknown option names are silently dropped.
    """
    # Only these valid_settings will be kept
    valid_settings={'pop': ['pop_size', 'init_factor'], 'algo': ['mutate', 'select', 'update', 'crossover'], 'stop': ['max_gen', 'max_time', 'aim'], 'output': ['mode', 'perf_ref', 'stall_action', 'out_interval'], 'encoding': ['dataset', 'maximize']}
    def __init__(self):
        self.settings = {}
    def conf(self, **kwargs):
        """Store the whitelisted options of one settings section."""
        if 'setting' not in kwargs:
            return
        section = kwargs['setting']
        if section not in GA_configurator.valid_settings:
            return
        allowed = GA_configurator.valid_settings[section]
        self.settings[section] = {name: value for name, value in kwargs.items() if name in allowed}
# Used to benchmark a list of algorithm against each other - aka "the time eater".
class GA_benchmark:
    """Grid-searches GA hyper-parameter combinations and logs averaged
    results to a timestamped CSV under logs/."""
    # one GA run is launched per element of the cartesian product below
    settings_list = {'pop':[1000],
            'mutate':[('ivm', 'ism')],
            'select':[('bts',15), ('bts',40)],
            'update':[('replace-worst', 15), ('replace-parent',15), ('proba-replace-parent',15)],
            'crossover' : ['pmx', None] }
    def start(self):
        """Run every configuration once (300 s time budget each) and append
        '<pop>;<mutate>;<select>;<crossover>;<update>;<gen>;<fitness>' lines
        to the log file."""
        conf = GA_configurator()
        conf.conf(setting = 'stop', max_gen = 0, max_time = 300, aim = 0)
        conf.conf(setting = 'output', mode = 'none', perf_ref = 10, out_interval = 50, stall_action = 'ignore')
        bench = []
        run = 0
        # NOTE(review): ':' in the timestamp makes the filename invalid on Windows
        file_path = "logs/TSP_{}_benchmark.txt".format(time.strftime("%d-%m-%y_%H:%M:%S"))
        dataset = loadDataSet("data/Luxembourg_opti.txt")
        with open(file_path, 'w') as log:
            log.write("pop_size;mutate;select;crossover;update;gen_count;fitness\n")
            for pop_set in GA_benchmark.settings_list['pop']:
                for mut_set in GA_benchmark.settings_list['mutate']:
                    for sel_set in GA_benchmark.settings_list['select']:
                        for co_set in GA_benchmark.settings_list['crossover']:
                            for up_set in GA_benchmark.settings_list['update']:
                                conf.conf(setting = 'pop', pop_size = pop_set, init_factor = 3)
                                conf.conf(setting = 'algo', mutate = mut_set, select = sel_set, update = up_set, crossover = co_set)
                                conf.conf(setting = 'encoding', dataset = dataset, maximize = False)
                                print("Testing {} {} {} {} {}".format(pop_set, mut_set, sel_set, up_set, co_set))
                                bench.append(run)
                                bench[run] = {}
                                bench[run]['settings'] = []
                                bench[run]['results'] = []
                                # NOTE(review): up_set is not recorded here, only in the log line
                                bench[run]['settings'].append((pop_set, mut_set, sel_set, co_set))
                                print("Now running run {} / {}".format(run+1, functools.reduce(operator.mul,(len(x[1]) for x in GA_benchmark.settings_list.items()),1)))
                                # Run multiple times to average out - use 1 for maximum speed...
                                for subrun in range(0,1):
                                    a = GA(conf)
                                    # result is (gen_count, time_elapsed, best_fitness)
                                    result = a.start()[1]
                                    bench[run]['results'].append(result)
                                    print(".", end = "", flush = True)
                                print(bench[run]['results'])
                                print("")
                                log.write("{};{};{};{};{};{};{}\n".format(pop_set,
                                                                    mut_set,
                                                                    sel_set,
                                                                    co_set,
                                                                    up_set,
                                                                    mean([x[0] for x in bench[run]['results']]),
                                                                    mean([x[2] for x in bench[run]['results']])))
                                run += 1
# Plots a benchmark result - can be restricted to the X best combination
def plot(file_name,x_data,y_data, keep_best_nb = 0, print_best = False):
    """Plot averaged benchmark results from a ';'-separated CSV log.

    x_data       -- a logged column name, or 'all' to plot the whole
                    configuration string against fitness
    y_data       -- column to average/plot (typically 'fitness')
    keep_best_nb -- keep only the N lowest y_data rows before averaging
    print_best   -- also print the plotted (x, y) pairs
    """
    # Activate interactive mode for non-blocking plotting
    plt.ion()
    data = pd.read_csv(file_name, sep=";")
    if keep_best_nb:
        data = data.nsmallest(keep_best_nb, y_data)
    if x_data != 'all':
        xy_data = data[[x_data,y_data]]
        xy_data = xy_data.groupby([x_data], as_index=False).mean()
        xy_plot = xy_data.plot(kind='bar', x=x_data)
        for tick in xy_plot.get_xticklabels():
            tick.set_rotation(0)
    # Plots the whole run configuration against the fitness
    else:
        x_data = 'algorithms'
        y_data = 'fitness'
        data['algorithms'] = data['pop_size'].astype(str) + ";" + data['mutate'].astype(str) + ";" + data['select'].astype(str) + ";" + data['crossover'].astype(str) + ";" + data['update'].astype(str)
        xy_data = data[[x_data, y_data]]
        xy_data = xy_data.groupby([x_data], as_index=False).mean()
        xy_plot = xy_data.plot(kind='bar', x=x_data)
    if print_best:
        for x,y in zip(data[x_data], data[y_data]):
            print(f"{x} : {y}")
    plt.show()
# Load a optimized datased (as outputed by the "TSP_data_preprocessing.py" script)
def loadDataSet(file_path):
    """Parse a preprocessed TSP data file into a city table.

    Each line is 'id latitude longitude'. Index 0 of the returned list is
    the placeholder "EMPTY" so that city ids line up with list indexes.
    """
    cities = ["EMPTY"]  # pad index 0 so city ids match list indexes
    with open(file_path, "r") as source:
        for line in source:
            fields = line.strip().split(" ")
            cities.append((int(fields[0]), float(fields[1]), float(fields[2])))
    return cities
if __name__ == "__main__":
    # GA Configuration: population, operators, stop criteria, dataset, output.
    conf = GA_configurator()
    conf.conf(setting='pop', pop_size=500, init_factor=2)
    conf.conf(setting='algo', mutate=('ivm',), select=('bts', 15), crossover='pmx', update=('replace-worst', 15))
    conf.conf(setting='stop', max_gen=0, max_time=120, aim=0)  # max_gen=0: time-limited run
    conf.conf(setting='encoding', dataset=loadDataSet("data/Luxembourg_opti.txt"), maximize=False)
    conf.conf(setting='output', mode=['text', 'fitplot'], perf_ref=10, out_interval=25, stall_action='manual')

    # Standard GA mode - slow to show the first results - please wait...
    a = GA(conf)
    print(a.start()[0])

    # Profiler mode - SLOW
    #profile.run('a.start()[0]; print()')

    # Benchmark mode
    #bench = GA_benchmark()
    #bench.start()

    ##### PLOTTING ####
    # Print the last fitness/generation curve file - data file provided
    #fitnessPlotFromFile()
    # You can run that, the data files are provided in the archive
    #plot("logs/24_5min.txt", 'all', 'fitness',keep_best_nb=4, print_best=True)
    #plot("logs/24_5min.txt", 'pop_size', 'fitness')
    #plot("logs/24_5min.txt", 'mutate', 'fitness')
    #plot("logs/24_5min.txt", 'select', 'fitness')
    #plot("logs/24_5min.txt", 'crossover', 'fitness')
    #plot("logs/24_5min.txt", 'update', 'fitness')
    # Used to check city distances consistency (visual vs number)
    #checkCityDistances()

    # Wait for 'enter' key press so that we can interact with the graphs before exiting
    input("Press enter to exit\n")
| StarcoderdataPython |
3287583 | <filename>imagepy/menus/Plugins/Manager/toltree_wgt.py
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 16 21:13:16 2017
@author: yxl
"""
from imagepy.core.engine import Free
import wx,os
from imagepy import root_dir
from imagepy.core.app import loader
from wx.py.editor import EditorFrame
from sciapp import Source
#from imagepy.ui.mkdownwindow import HtmlPanel, md2html
from sciwx.text import MDPad
from glob import glob
class Plugin ( wx.Panel ):
    """Tool-tree browser: lists every ImagePy tool in a TreeCtrl, renders the
    selected tool's markdown document, runs a tool on double-click, and opens
    the tool's source module when [SourceCode] is clicked."""
    title = 'Tool Tree View'
    # Shared single-instance slot used by the plugin framework.
    single = None

    def __init__( self, parent, app=None):
        # NOTE(review): this class subclasses wx.Panel but initializes through
        # wx.Frame.__init__ with frame-only style flags — confirm whether this
        # is intentional before changing it.
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY,
            pos = wx.DefaultPosition, size = wx.Size( 500,300 ),
            style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.app = app
        bSizer1 = wx.BoxSizer( wx.HORIZONTAL )
        # Left pane: the tool tree.
        self.tre_plugins = wx.TreeCtrl( self, wx.ID_ANY, wx.DefaultPosition,
            wx.DefaultSize, wx.TR_DEFAULT_STYLE )
        self.tre_plugins.SetMinSize( wx.Size( 200,-1 ) )
        bSizer1.Add( self.tre_plugins, 0, wx.ALL|wx.EXPAND, 5 )
        # Right pane: header row plus the markdown document view.
        bSizer3 = wx.BoxSizer( wx.VERTICAL )
        bSizer4 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, "Tool Information",
            wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText2.Wrap( -1 )
        bSizer4.Add( self.m_staticText2, 0, wx.ALL, 5 )
        # Clickable "[SourceCode]" label, wired to on_source below.
        self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, "[SourceCode]",
            wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText3.Wrap( -1 )
        self.m_staticText3.SetForegroundColour(
            wx.SystemSettings.GetColour( wx.SYS_COLOUR_HIGHLIGHT ) )
        bSizer4.Add( self.m_staticText3, 0, wx.ALL, 5 )
        bSizer3.Add( bSizer4, 0, wx.EXPAND, 5 )
        self.txt_info = MDPad( self )
        bSizer3.Add( self.txt_info, 1, wx.ALL|wx.EXPAND, 5 )
        bSizer1.Add( bSizer3, 1, wx.EXPAND, 5 )
        self.SetSizer( bSizer1 )
        self.Layout()
        self.Centre( wx.BOTH )
        # Connect Events
        self.tre_plugins.Bind( wx.EVT_TREE_ITEM_ACTIVATED, self.on_run )
        self.tre_plugins.Bind( wx.EVT_TREE_SEL_CHANGED, self.on_select )
        self.m_staticText3.Bind( wx.EVT_LEFT_DOWN, self.on_source )
        self.plg = None  # currently selected plugin (set by on_select)
        self.load()

    def addnode(self, parent, data):
        """Recursively append plugin entries under `parent`.

        `data` items are either a plugin, or a (plugin, children) tuple;
        '-' entries are menu separators and are skipped."""
        for i in data:
            if i=='-':continue
            if isinstance(i, tuple):
                item = self.tre_plugins.AppendItem(parent, i[0].title)
                self.tre_plugins.SetItemData(item, i[0])
                self.addnode(item, i[1])
            else:
                item = self.tre_plugins.AppendItem(parent, i[0].title)
                self.tre_plugins.SetItemData(item, i[0])

    def load(self):
        """Populate the tree from the built-in 'tools' folder plus any
        'plugins/*/tools' extension folders (two levels: group > tool)."""
        datas = loader.build_tools('tools')
        extends = glob('plugins/*/tools')
        for i in extends:
            tols = loader.build_tools(i)
            if len(tols)!=0: datas[1].extend(tols[1])
        root = self.tre_plugins.AddRoot('Tools')
        for i in datas[1]:
            item = self.tre_plugins.AppendItem(root, i[0].title)
            self.tre_plugins.SetItemData(item, i[0])
            for j in i[1]:
                it = self.tre_plugins.AppendItem(item, j[0].title)
                self.tre_plugins.SetItemData(it, j[0])

    # Virtual event handlers, override them in your derived class
    def on_run( self, event ):
        """Double-click handler: instantiate the activated tool and start it."""
        plg = self.tre_plugins.GetItemData(event.GetItem())
        if hasattr(plg, 'start'):plg().start(self.app)

    def on_select( self, event ):
        """Selection handler: look up the tool's document in the configured
        language, falling back to English, then render it in the MDPad."""
        plg = self.tre_plugins.GetItemData(event.GetItem())
        if plg!=None:
            self.plg = plg
            name = self.tre_plugins.GetItemText(event.GetItem())
            lang = Source.manager('config').get('language')
            doc = Source.manager('document').get(name, tag=lang)
            doc = doc or Source.manager('document').get(name, tag='English')
            self.txt_info.set_cont(doc or 'No Document!')

    def on_source(self, event):
        """Open the selected tool's module in a Python editor frame."""
        ## TODO: should it be absolute path ?
        filename = self.plg.__module__.replace('.','/')+'.py'
        root = os.path.split(root_dir)[0]
        filename=os.path.join(root,filename)
        EditorFrame(filename=filename).Show()
6469277 | """
This module contains low-level APIs that are very generic and could be used in many
different applications.
""" | StarcoderdataPython |
6608986 | <gh_stars>1-10
"""create table for cm load insert queries
Revision ID: 053659c382dc
Revises: <KEY>
Create Date: 2018-12-01 12:46:48.608877
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '053659c382dc'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``cm_load_insert_queries`` table, rename its PK sequence,
    and enforce uniqueness of (file_format, mo)."""
    op.create_table(
        'cm_load_insert_queries',
        sa.Column('pk', sa.Integer, primary_key=True),
        sa.Column('file_format', sa.String(200), nullable=False),
        sa.Column('mo', sa.String(200)),
        sa.Column('format_mo', sa.String(200)),
        sa.Column('insert_query', sa.Text),
        sa.Column('modified_by', sa.Integer),
        sa.Column('added_by', sa.Integer),
        # NOTE(review): `onupdate` sits on date_added rather than
        # date_modified — looks swapped, but this migration has shipped,
        # so fix it in a follow-up migration if confirmed.
        sa.Column('date_added', sa.TIMESTAMP, default=sa.func.now(), onupdate=sa.func.now()),
        sa.Column('date_modified', sa.TIMESTAMP, default=sa.func.now())
    )

    # Raw SQL: assumes a PostgreSQL-style auto-generated sequence name.
    op.execute('ALTER SEQUENCE cm_load_insert_queries_pk_seq RENAME TO seq_cm_load_insert_queries_pk')
    op.create_unique_constraint('unique_cm_load_insert_queries', 'cm_load_insert_queries', ['file_format', 'mo'])
def downgrade():
    """Reverse upgrade(): drop the unique constraint, then the table."""
    op.drop_constraint("unique_cm_load_insert_queries", "cm_load_insert_queries")
    op.drop_table('cm_load_insert_queries')
| StarcoderdataPython |
4884559 | #!/usr/bin/env python3
# The Elves quickly load you into a spacecraft and prepare to launch.
#
# At the first Go / No Go poll, every Elf is Go until the Fuel Counter-Upper. They haven't determined the amount of fuel required yet.
#
# Fuel required to launch a given module is based on its mass. Specifically, to find the fuel required for a module, take its mass, divide by three, round down, and subtract 2.
#
# For example:
#
# For a mass of 12, divide by 3 and round down to get 4, then subtract 2 to get 2.
# For a mass of 14, dividing by 3 and rounding down still yields 4, so the fuel required is also 2.
# For a mass of 1969, the fuel required is 654.
# For a mass of 100756, the fuel required is 33583.
#
# The Fuel Counter-Upper needs to know the total fuel requirement. To find it, individually calculate the fuel needed for the mass of each module (your puzzle input), then add together all the fuel values.
#
# What is the sum of the fuel requirements for all of the modules on your spacecraft?
# ============================================================================================================
# Fuel itself requires fuel just like a module - take its mass, divide by three, round down, and subtract 2. However, that fuel also requires fuel, and that fuel requires fuel, and so on. Any mass that would require negative fuel should instead be treated as if it requires zero fuel; the remaining mass, if any, is instead handled by wishing really hard, which has no mass and is outside the scope of this calculation.
#
# So, for each module mass, calculate its fuel and add it to the total. Then, treat the fuel amount you just calculated as the input mass and repeat the process, continuing until a fuel requirement is zero or negative. For example:
#
# A module of mass 14 requires 2 fuel. This fuel requires no further fuel (2 divided by 3 and rounded down is 0, which would call for a negative fuel), so the total fuel required is still just 2.
# At first, a module of mass 1969 requires 654 fuel. Then, this fuel requires 216 more fuel (654 / 3 - 2). 216 then requires 70 more fuel, which requires 21 fuel, which requires 5 fuel, which requires no further fuel. So, the total fuel required for a module of mass 1969 is 654 + 216 + 70 + 21 + 5 = 966.
# The fuel required by a module of mass 100756 and its fuel is: 33583 + 11192 + 3728 + 1240 + 411 + 135 + 43 + 12 + 2 = 50346.
#
# What is the sum of the fuel requirements for all of the modules on your spacecraft when also taking into account the mass of the added fuel?
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from utils.decorators import *
@timeit('Part 1')
def part_one(mass_list):
    '''Given a list of integer values, computes the fuel requirements.

    Fuel for one module of mass m is m // 3 - 2, floored at zero.
    '''
    total = 0
    for mass in mass_list:
        total += max(0, mass // 3 - 2)
    return total
@timeit('Part 2')
def part_two(mass_list):
    '''Given a list of integer values, computes the fuel requirements considering the fuel itself.

    Fuel for a mass m is max(0, m // 3 - 2); the fuel itself needs fuel,
    iterated until the increment reaches zero.
    '''
    def _module_fuel(mass):
        # Total fuel for one module, including fuel-for-fuel.
        total = 0
        fuel = max(0, mass // 3 - 2)
        while fuel > 0:
            total += fuel
            fuel = max(0, fuel // 3 - 2)
        return total

    # The previous version accumulated every module's fuel chain into one
    # shared list guarded by `len(fuel) == 0 or temp_mass != 0`, which also
    # appended a spurious trailing 0 per module; the per-module helper gives
    # the same sums with straightforward control flow.
    return sum(_module_fuel(mass) for mass in mass_list)
def part_two_visualized(mass_list):
    '''Given a list of integer values, computes the fuel requirements considering the fuel itself.

    For each module, plots the cumulative mass carried as fuel-for-fuel is
    added step by step (x=0 is the module's own mass). Opens a blocking
    matplotlib window.
    '''
    import matplotlib.pyplot as plt
    import numpy as np  # NOTE(review): unused here — kept to avoid changing behavior
    mass_to_fuel = lambda x: max(0, x // 3 - 2)
    fuel = []
    plt.figure(1)
    for mass in mass_list:
        temp_mass = mass
        # Iterate fuel-of-fuel until the increment reaches zero.
        while len(fuel) == 0 or temp_mass != 0:
            amount_fuel = mass_to_fuel(temp_mass)
            fuel.append(amount_fuel)
            temp_mass = amount_fuel
        # Cumulative mass curve: module mass plus each fuel addition.
        y = [mass]
        temp = mass
        for entry in fuel:
            temp += entry
            y.append(temp)
        plt.plot(y)
        fuel = []  # reset the chain for the next module
    plt.ylabel('Mass carried (arb.)')
    plt.xlabel('Step # (0 = weight of component)')
    plt.show()
def test():
    '''Self-checks using the worked examples from the puzzle statement.'''
    masses_one = [12, 14, 1969, 100756]
    expected_one = [2, 2, 654, 33583]
    assert sum(expected_one) == part_one(masses_one)

    masses_two = [14, 1969, 100756]
    expected_two = [2, 966, 50346]
    assert sum(expected_two) == part_two(masses_two)
@timeit('Total')
def main(args):
    """Entry point: read one integer mass per line from the file named in
    args[1], print both puzzle answers, then show the visualization."""
    with open(args[1], 'r') as f:
        mass_list = [int(line.strip()) for line in f.readlines()]
    print('Part 1 Result: {}'.format(part_one(mass_list)))
    print('Part 2 Result: {}'.format(part_two(mass_list)))
    part_two_visualized(mass_list)
if __name__ == '__main__':
    # test()  # uncomment to run the built-in self-checks first
    main(sys.argv)
1909355 | from .job_click_message import JobClick
from .root_click_message import RootClick
# Public API of this package: the two click-message types re-exported above.
__all__ = ("JobClick", "RootClick")
| StarcoderdataPython |
1851617 | # -*- coding: utf-8 -*-
"""Top-level package for nuclei."""
# Package metadata (placeholder values filled in by the project template).
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| StarcoderdataPython |
6603330 | <reponame>jonasfreyr/Net-forritun
import socket
import urllib.request
HOST = '127.0.0.1'  # Standard loopback interface address (localhost)
PORT = 65432        # Port to listen on (non-privileged ports are > 1023)

# Single-client server: receives a hostname, fetches https://<host>/ and
# streams back up to ~3000 characters of the page, the total character
# count, and a "None" terminator.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen(2)
    print("Server Started")
    conn, addr = s.accept()
    while True:
        data = conn.recv(1024)
        if not data:
            # Client closed the connection: recv() returns b'' from now on,
            # which previously busy-looped on an empty URL.
            break
        url = data.decode()
        try:
            # Context manager closes the HTTP response (was leaked before).
            with urllib.request.urlopen("https://" + url + "/") as fhand:
                characters = 0
                for line in fhand:
                    words = line.decode()  # \n is considered a character
                    #amend to line.decode().rstrip() if need
                    characters = characters + len(words)
                    if characters < 3000:
                        conn.sendall(line.decode().strip().encode())
            conn.sendall(str(characters).encode())
            conn.sendall(b"None")
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt.
            conn.sendall(b"Url Doesn't Exist!")
    # conn.sendall(str(tel).encode())
    conn.close()
| StarcoderdataPython |
def square(x):
    '''
    Return x squared.

    x: int or float.
    '''
    return x * x
def fourthPower(x):
    '''
    Return x raised to the fourth power.

    x: int or float.
    '''
    # The previous version computed square(x)*square(x) but discarded the
    # result and returned x unchanged.
    return x ** 4
| StarcoderdataPython |
5194246 | <reponame>jakejhansen/minesweeper_solver<filename>q_learning/backup_output_net1_discount_0_batch_400_best/test_environments_step.py
from environments import *
# Just shows some game-play of an agent
#env = AtariGymEnvironment(display=True, game="BeamRider-v0")
#env.new_random_game_play() | StarcoderdataPython |
9651372 | from vint.linting.policy_loader import register_policy
@register_policy
class PolicyFixture2(object):
    """Empty policy registered via @register_policy for policy-loader tests."""
    pass
| StarcoderdataPython |
12806958 | import logging
import re
import sys
from jinja2 import Environment, FileSystemLoader
from jupyter_core.application import JupyterApp, NoStart
from tornado.log import LogFormatter
from tornado.web import RedirectHandler
from traitlets import Any, Bool, Dict, HasTraits, List, Unicode, default
from traitlets.config import Config
from jupyter_server.serverapp import ServerApp
from jupyter_server.transutils import _i18n
from jupyter_server.utils import is_namespace_package, url_path_join
from .handler import ExtensionHandlerMixin
# -----------------------------------------------------------------------------
# Util functions and classes.
# -----------------------------------------------------------------------------
def _preparse_for_subcommand(Application, argv):
"""Preparse command line to look for subcommands."""
# Read in arguments from command line.
if len(argv) == 0:
return
# Find any subcommands.
if Application.subcommands and len(argv) > 0:
# we have subcommands, and one may have been specified
subc, subargv = argv[0], argv[1:]
if re.match(r"^\w(\-?\w)*$", subc) and subc in Application.subcommands:
# it's a subcommand, and *not* a flag or class parameter
app = Application()
app.initialize_subcommand(subc, subargv)
return app.subapp
def _preparse_for_stopping_flags(Application, argv):
"""Looks for 'help', 'version', and 'generate-config; commands
in command line. If found, raises the help and version of
current Application.
This is useful for traitlets applications that have to parse
the command line multiple times, but want to control when
when 'help' and 'version' is raised.
"""
# Arguments after a '--' argument are for the script IPython may be
# about to run, not IPython iteslf. For arguments parsed here (help and
# version), we want to only search the arguments up to the first
# occurrence of '--', which we're calling interpreted_argv.
try:
interpreted_argv = argv[: argv.index("--")]
except ValueError:
interpreted_argv = argv
# Catch any help calls.
if any(x in interpreted_argv for x in ("-h", "--help-all", "--help")):
app = Application()
app.print_help("--help-all" in interpreted_argv)
app.exit(0)
# Catch version commands
if "--version" in interpreted_argv or "-V" in interpreted_argv:
app = Application()
app.print_version()
app.exit(0)
# Catch generate-config commands.
if "--generate-config" in interpreted_argv:
app = Application()
app.write_default_config()
app.exit(0)
class ExtensionAppJinjaMixin(HasTraits):
    """Use Jinja templates for HTML templates on top of an ExtensionApp."""

    # NOTE(review): this help string ends mid-sentence ("... for this") —
    # presumably "for this extension"; confirm before changing the literal.
    jinja2_options = Dict(
        help=_i18n(
            """Options to pass to the jinja2 environment for this
        """
        )
    ).tag(config=True)

    def _prepare_templates(self):
        """Build a Jinja2 environment over the extension's template paths and
        publish it in the Tornado settings as `<name>_jinja2_env`."""
        # Get templates defined in a subclass.
        self.initialize_templates()
        # Add templates to web app settings if extension has templates.
        if len(self.template_paths) > 0:
            self.settings.update({f"{self.name}_template_paths": self.template_paths})

        # Create a jinja environment for logging html templates.
        self.jinja2_env = Environment(
            loader=FileSystemLoader(self.template_paths),
            extensions=["jinja2.ext.i18n"],
            autoescape=True,
            **self.jinja2_options,
        )

        # Add the jinja2 environment for this extension to the tornado settings.
        self.settings.update({f"{self.name}_jinja2_env": self.jinja2_env})
# -----------------------------------------------------------------------------
# ExtensionApp
# -----------------------------------------------------------------------------
class JupyterServerExtensionException(Exception):
    """Exception class for raising for Server extensions errors.

    Raised, e.g., by ExtensionApp.initialize() when no serverapp is linked.
    """
# -----------------------------------------------------------------------------
# ExtensionApp
# -----------------------------------------------------------------------------
class ExtensionApp(JupyterApp):
    """Base class for configurable Jupyter Server Extension Applications.

    ExtensionApp subclasses can be initialized two ways:

    1. Extension is listed as a jpserver_extension, and ServerApp calls
       its load_jupyter_server_extension classmethod. This is the
       classic way of loading a server extension.

    2. Extension is launched directly by calling its `launch_instance`
       class method. This method can be set as an entry_point in
       the extension's setup.py.
    """

    # Subclasses should override this trait. Tells the server if
    # this extension allows other extensions to be loaded
    # side-by-side when launched directly.
    load_other_extensions = True

    # A useful class property that subclasses can override to
    # configure the underlying Jupyter Server when this extension
    # is launched directly (using its `launch_instance` method).
    serverapp_config = {}

    # Some subclasses will likely override this trait to flip
    # the default value to False if they don't offer a browser
    # based frontend.
    open_browser = Bool(
        help="""Whether to open in a browser after starting.
        The specific browser used is platform dependent and
        determined by the python standard library `webbrowser`
        module, unless it is overridden using the --browser
        (ServerApp.browser) configuration option.
        """
    ).tag(config=True)

    @default("open_browser")
    def _default_open_browser(self):
        # Inherit the server's setting when one was configured there.
        return self.serverapp.config["ServerApp"].get("open_browser", True)

    @property
    def config_file_paths(self):
        """Look on the same path as our parent for config files"""
        # rely on parent serverapp, which should control all config loading
        return self.serverapp.config_file_paths

    # The extension name used to name the jupyter config
    # file, jupyter_{name}_config.
    # This should also match the jupyter subcommand used to launch
    # this extension from the CLI, e.g. `jupyter {name}`.
    name = None

    @classmethod
    def get_extension_package(cls):
        """Return the Python package name providing this extension
        (handles namespace packages by keeping two path segments)."""
        parts = cls.__module__.split(".")
        if is_namespace_package(parts[0]):
            # in this case the package name is `<namespace>.<package>`.
            return ".".join(parts[0:2])
        return parts[0]

    @classmethod
    def get_extension_point(cls):
        """Return the module path used as this extension's extension point."""
        return cls.__module__

    # Extension URL sets the default landing page for this extension.
    extension_url = "/"

    default_url = Unicode().tag(config=True)

    @default("default_url")
    def _default_url(self):
        return self.extension_url

    file_url_prefix = Unicode("notebooks")

    # Is this linked to a serverapp yet?
    _linked = Bool(False)

    # Extension can configure the ServerApp from the command-line
    classes = [
        ServerApp,
    ]

    # A ServerApp is not defined yet, but will be initialized below.
    serverapp = Any()

    @default("serverapp")
    def _default_serverapp(self):
        # load the current global instance, if any
        if ServerApp.initialized():
            try:
                return ServerApp.instance()
            except Exception:
                # error retrieving instance, e.g. MultipleInstanceError
                pass

        # serverapp accessed before it was defined,
        # declare an empty one
        return ServerApp()

    _log_formatter_cls = LogFormatter

    @default("log_level")
    def _default_log_level(self):
        return logging.INFO

    @default("log_format")
    def _default_log_format(self):
        """override default log format to include date & time"""
        return (
            "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
        )

    static_url_prefix = Unicode(
        help="""Url where the static assets for the extension are served."""
    ).tag(config=True)

    @default("static_url_prefix")
    def _default_static_url_prefix(self):
        static_url = f"static/{self.name}/"
        return url_path_join(self.serverapp.base_url, static_url)

    static_paths = List(
        Unicode(),
        help="""paths to search for serving static files.
        This allows adding javascript/css to be available from the notebook server machine,
        or overriding individual files in the IPython
        """,
    ).tag(config=True)

    template_paths = List(
        Unicode(),
        help=_i18n(
            """Paths to search for serving jinja templates.
        Can be used to override templates from notebook.templates."""
        ),
    ).tag(config=True)

    settings = Dict(help=_i18n("""Settings that will passed to the server.""")).tag(config=True)

    handlers = List(help=_i18n("""Handlers appended to the server.""")).tag(config=True)

    def _config_file_name_default(self):
        """The default config file name."""
        if not self.name:
            return ""
        return "jupyter_{}_config".format(self.name.replace("-", "_"))

    def initialize_settings(self):
        """Override this method to add handling of settings."""
        pass

    def initialize_handlers(self):
        """Override this method to append handlers to a Jupyter Server."""
        pass

    def initialize_templates(self):
        """Override this method to add handling of template files."""
        pass

    def _prepare_config(self):
        """Builds a Config object from the extension's traits and passes
        the object to the webapp's settings as `<name>_config`.
        """
        traits = self.class_own_traits().keys()
        self.extension_config = Config({t: getattr(self, t) for t in traits})
        self.settings[f"{self.name}_config"] = self.extension_config

    def _prepare_settings(self):
        """Merge extension settings into the Tornado web app settings
        (and vice versa), then run the subclass's initialize_settings."""
        # Make webapp settings accessible to initialize_settings method
        webapp = self.serverapp.web_app
        self.settings.update(**webapp.settings)

        # Add static and template paths to settings.
        self.settings.update(
            {
                f"{self.name}_static_paths": self.static_paths,
                f"{self.name}": self,
            }
        )

        # Get setting defined by subclass using initialize_settings method.
        self.initialize_settings()

        # Update server settings with extension settings.
        webapp.settings.update(**self.settings)

    def _prepare_handlers(self):
        """Register the extension's handlers on the server, prefixed with
        base_url, plus a static-file route when static_paths is set."""
        webapp = self.serverapp.web_app

        # Get handlers defined by extension subclass.
        self.initialize_handlers()

        # prepend base_url onto the patterns that we match
        new_handlers = []
        for handler_items in self.handlers:
            # Build url pattern including base_url
            pattern = url_path_join(webapp.settings["base_url"], handler_items[0])
            handler = handler_items[1]

            # Get handler kwargs, if given
            kwargs = {}
            if issubclass(handler, ExtensionHandlerMixin):
                kwargs["name"] = self.name

            # Handler tuples may optionally carry a third kwargs element.
            try:
                kwargs.update(handler_items[2])
            except IndexError:
                pass

            new_handler = (pattern, handler, kwargs)
            new_handlers.append(new_handler)

        # Add static endpoint for this extension, if static paths are given.
        if len(self.static_paths) > 0:
            # Append the extension's static directory to server handlers.
            static_url = url_path_join(self.static_url_prefix, "(.*)")

            # Construct handler.
            handler = (
                static_url,
                webapp.settings["static_handler_class"],
                {"path": self.static_paths},
            )
            new_handlers.append(handler)

        webapp.add_handlers(".*$", new_handlers)

    def _prepare_templates(self):
        """Expose the extension's template paths in settings and let the
        subclass initialize its templates."""
        # Add templates to web app settings if extension has templates.
        if len(self.template_paths) > 0:
            self.settings.update({f"{self.name}_template_paths": self.template_paths})
        self.initialize_templates()

    def _jupyter_server_config(self):
        """Build the ServerApp config contributed by this extension,
        merged with the subclass's serverapp_config overrides."""
        base_config = {
            "ServerApp": {
                "default_url": self.default_url,
                "open_browser": self.open_browser,
                "file_url_prefix": self.file_url_prefix,
            }
        }
        base_config["ServerApp"].update(self.serverapp_config)
        return base_config

    def _link_jupyter_server_extension(self, serverapp):
        """Link the ExtensionApp to an initialized ServerApp.

        The ServerApp is stored as an attribute and config
        is exchanged between ServerApp and `self` in case
        the command line contains traits for the ExtensionApp
        or the ExtensionApp's config files have server
        settings.

        Note, the ServerApp has not initialized the Tornado
        Web Application yet, so do not try to affect the
        `web_app` attribute.
        """
        self.serverapp = serverapp
        # Load config from an ExtensionApp's config files.
        self.load_config_file()
        # ServerApp's config might have picked up
        # config for the ExtensionApp. We call
        # update_config to update ExtensionApp's
        # traits with these values found in ServerApp's
        # config.
        # ServerApp config ---> ExtensionApp traits
        self.update_config(self.serverapp.config)
        # Use ExtensionApp's CLI parser to find any extra
        # args that passed through ServerApp and
        # now belong to ExtensionApp.
        self.parse_command_line(self.serverapp.extra_args)
        # If any config should be passed upstream to the
        # ServerApp, do it here.
        # i.e. ServerApp traits <--- ExtensionApp config
        self.serverapp.update_config(self.config)
        # Acknowledge that this extension has been linked.
        self._linked = True

    def initialize(self):
        """Initialize the extension app. The
        corresponding server app and webapp should already
        be initialized by this step.

        1) Appends Handlers to the ServerApp,
        2) Passes config and settings from ExtensionApp
           to the Tornado web application
        3) Points Tornado Webapp to templates and
           static assets.
        """
        if not self.serverapp:
            msg = (
                "This extension has no attribute `serverapp`. "
                "Try calling `.link_to_serverapp()` before calling "
                "`.initialize()`."
            )
            raise JupyterServerExtensionException(msg)

        # Order matters: config and templates must be in settings before
        # handlers are registered against the web app.
        self._prepare_config()
        self._prepare_templates()
        self._prepare_settings()
        self._prepare_handlers()

    def start(self):
        """Start the underlying Jupyter server.

        Server should be started after extension is initialized.
        """
        super().start()
        # Start the server.
        self.serverapp.start()

    async def stop_extension(self):
        """Cleanup any resources managed by this extension."""

    def stop(self):
        """Stop the underlying Jupyter server."""
        self.serverapp.stop()
        self.serverapp.clear_instance()

    @classmethod
    def _load_jupyter_server_extension(cls, serverapp):
        """Initialize and configure this extension, then add the extension's
        settings and handlers to the server's web application.
        """
        extension_manager = serverapp.extension_manager
        try:
            # Get loaded extension from serverapp.
            point = extension_manager.extension_points[cls.name]
            extension = point.app
        except KeyError:
            # Not pre-registered: create and link a fresh instance.
            extension = cls()
            extension._link_jupyter_server_extension(serverapp)

        extension.initialize()
        return extension

    @classmethod
    def load_classic_server_extension(cls, serverapp):
        """Enables extension to be loaded as classic Notebook (jupyter/notebook) extension."""
        extension = cls()
        extension.serverapp = serverapp
        extension.load_config_file()
        extension.update_config(serverapp.config)
        extension.parse_command_line(serverapp.extra_args)
        # Add redirects to get favicons from old locations in the classic notebook server
        extension.handlers.extend(
            [
                (
                    r"/static/favicons/favicon.ico",
                    RedirectHandler,
                    {"url": url_path_join(serverapp.base_url, "static/base/images/favicon.ico")},
                ),
                (
                    r"/static/favicons/favicon-busy-1.ico",
                    RedirectHandler,
                    {
                        "url": url_path_join(
                            serverapp.base_url, "static/base/images/favicon-busy-1.ico"
                        )
                    },
                ),
                (
                    r"/static/favicons/favicon-busy-2.ico",
                    RedirectHandler,
                    {
                        "url": url_path_join(
                            serverapp.base_url, "static/base/images/favicon-busy-2.ico"
                        )
                    },
                ),
                (
                    r"/static/favicons/favicon-busy-3.ico",
                    RedirectHandler,
                    {
                        "url": url_path_join(
                            serverapp.base_url, "static/base/images/favicon-busy-3.ico"
                        )
                    },
                ),
                (
                    r"/static/favicons/favicon-file.ico",
                    RedirectHandler,
                    {
                        "url": url_path_join(
                            serverapp.base_url, "static/base/images/favicon-file.ico"
                        )
                    },
                ),
                (
                    r"/static/favicons/favicon-notebook.ico",
                    RedirectHandler,
                    {
                        "url": url_path_join(
                            serverapp.base_url,
                            "static/base/images/favicon-notebook.ico",
                        )
                    },
                ),
                (
                    r"/static/favicons/favicon-terminal.ico",
                    RedirectHandler,
                    {
                        "url": url_path_join(
                            serverapp.base_url,
                            "static/base/images/favicon-terminal.ico",
                        )
                    },
                ),
                (
                    r"/static/logo/logo.png",
                    RedirectHandler,
                    {"url": url_path_join(serverapp.base_url, "static/base/images/logo.png")},
                ),
            ]
        )
        extension.initialize()

    @classmethod
    def initialize_server(cls, argv=None, load_other_extensions=True, **kwargs):
        """Creates an instance of ServerApp and explicitly sets
        this extension to enabled=True (i.e. superceding disabling
        found in other config from files).

        The `launch_instance` method uses this method to initialize
        and start a server.
        """
        jpserver_extensions = {cls.get_extension_package(): True}
        find_extensions = cls.load_other_extensions
        if "jpserver_extensions" in cls.serverapp_config:
            # Subclass pinned an explicit extension set: honor it and skip
            # auto-discovery of other extensions.
            jpserver_extensions.update(cls.serverapp_config["jpserver_extensions"])
            cls.serverapp_config["jpserver_extensions"] = jpserver_extensions
            find_extensions = False
        serverapp = ServerApp.instance(jpserver_extensions=jpserver_extensions, **kwargs)
        serverapp.aliases.update(cls.aliases)
        serverapp.initialize(
            argv=argv or [],
            starter_extension=cls.name,
            find_extensions=find_extensions,
        )
        return serverapp

    @classmethod
    def launch_instance(cls, argv=None, **kwargs):
        """Launch the extension like an application. Initializes+configs a stock server
        and appends the extension to the server. Then starts the server and routes to
        extension's landing page.
        """
        # Handle arguments.
        if argv is None:
            args = sys.argv[1:]  # slice out extension config.
        else:
            args = argv

        # Handle all "stops" that could happen before
        # continuing to launch a server+extension.
        subapp = _preparse_for_subcommand(cls, args)
        if subapp:
            subapp.start()
            return

        # Check for help, version, and generate-config arguments
        # before initializing server to make sure these
        # arguments trigger actions from the extension not the server.
        _preparse_for_stopping_flags(cls, args)

        serverapp = cls.initialize_server(argv=args)

        # Log if extension is blocking other extensions from loading.
        if not cls.load_other_extensions:
            serverapp.log.info(
                "{ext_name} is running without loading "
                "other extensions.".format(ext_name=cls.name)
            )

        # Start the server.
        try:
            serverapp.start()
        except NoStart:
            pass
| StarcoderdataPython |
1865781 | <gh_stars>1-10
import json
import os
from collections import defaultdict
path = '/home/manuto/Documents/world_bank/bert_twitter_labor/twitter-labor-data/data/evaluation_metrics/US/diversity/threshold_calibrated_distance_with_seed'
json_list = os.listdir(path)

# Merge the three-level dicts from every JSON file in `path`; the first file
# to define a leaf value wins (later files never overwrite it).
final_dict = defaultdict(dict)  # was defaultdict(set): merged values are always dicts, never sets
for json_file in json_list:
    with open(os.path.join(path, json_file), 'r') as JSON:
        json_dict = json.load(JSON)
    for k, level2 in json_dict.items():
        merged2 = final_dict[k]  # defaultdict creates the nested dict on first access
        for k_2, level3 in level2.items():
            merged3 = merged2.setdefault(k_2, dict())
            for k_3, v_3 in level3.items():
                # setdefault keeps the first value seen, matching the
                # original "if key not in ..." first-wins behavior without
                # the redundant .keys() membership tests.
                merged3.setdefault(k_3, v_3)

print(final_dict['our_method'])
print('\n')
print(final_dict['adaptive'])
print('\n')
print(final_dict['uncertainty'])
5084788 | <reponame>MrMikeWolf/F16Dynamics<gh_stars>1-10
from trim_f16 import cost_trim_f16
from params_f16 import load_f16
from engine_f16 import tgear
from eqm import eqm
from scipy.optimize import minimize
import pandas as pd
from scipy.integrate import odeint
from numpy import arange, sin, cos
import matplotlib.pyplot as plot
# Trim configuration: straight-and-level flight (no turn, roll or pitch
# rates, wings level, zero flight-path angle) at 502 ft/s and sea level.
params = load_f16()
params.xcg = .35  # cg location parameter (presumably fraction of chord — confirm in params_f16)
params.coordinated_turn = 0
params.turn_rate_rps = 0.0
params.roll_rate_rps = 0.0
params.pitch_rate_rps = 0.0
params.phi_rad = 0.0
params.gamma_rad = 0.0
params.stability_axis_roll = 0
params.VT_ftps = 502
params.alt_ft = 0
def costf16(x):
    """Trim cost for the design vector x = [throttle, elev_deg, alpha_rad],
    closing over the module-level `params` so scipy.optimize.minimize can
    call it with x alone."""
    return cost_trim_f16(x, params)
# Initial guess for the trim solver: [throttle, elevator, alpha].
S0 = [
    .0,  #throttle 0-1
    0.0, #elev_deg
    0.0, #alpha_rad
    #0.0#ail_deg
    #0.0#rudder_deg
    #0.0#beta_rad
]
# Trimmed design vector minimizing the trim cost.
S = minimize(costf16, S0)['x']

# Full 13-element state vector at the trim point
# (theta = alpha for level flight, gamma = 0).
X0 = [
    params.VT_ftps, #VT_fps
    S[2], #alpha_rad
    0.0, #beta_rad
    0.0, #phi_rad
    S[2], #theta_rad
    0.0, #psi_rad
    0.0, #p_rps
    0.0, #q_rps
    0.0, #r_rps
    0.0, #north position ft
    0.0, #east position ft
    params.alt_ft, #alt_ft
    tgear(S[0]), #power_perc
]

# PYTHON simulation
# Controls held at the trim values; elev_deg is replaced by elev_step below.
controls = pd.Series()
controls.throttle = S[0]
controls.elev_deg = S[1]
controls.ail_deg = 0.0
controls.rudder_deg = 0.0
def f16_model(t, X):
    """ODE right-hand side for odeint (tfirst=True): returns the state
    derivative from the F-16 equations of motion, discarding the extra
    outputs that eqm also produces."""
    return eqm(t, X, controls, params)[0]
def elev_step(t, base=None, t_start=0.5, ramp_time=0.03, step_deg=1.0):
    """Elevator command: hold trim, then ramp down by ``step_deg`` over
    ``ramp_time`` seconds starting at ``t_start`` and hold.

    Parameters are keyword-generalized (originally hard-coded); ``base``
    defaults to the trimmed elevator deflection ``S[1]`` so existing
    single-argument calls behave exactly as before.
    """
    if base is None:
        base = S[1]  # trimmed elevator deflection, deg
    if t < t_start:
        return base
    if t_start <= t <= t_start + ramp_time:
        # Linear ramp of step_deg over ramp_time seconds.
        return base - step_deg / ramp_time * (t - t_start)
    return base - step_deg
# Integrate the equations of motion for 3 s at 1 ms steps, with the
# elevator doublet command substituted for the constant trim value.
t = arange(0,3,0.001)
controls.elev_deg = elev_step
print('Simulating...')
y = odeint(func=f16_model, y0=X0, t=t, tfirst=True)
print('Calculating further outputs...')
# Pre-size output traces as zero arrays matching the time vector.
nz_g = 0*t
nx_g = 0*t
nzs_g = 0*t
mach = 0*t
thrust_pound = 0*t
for i in range(0,len(t)):
    # NOTE(review): y[:][i] is just y[i] (a full-slice copy then row index).
    xd,outputs = eqm(t[i], y[:][i], controls, params)
    nz_g[i] = outputs.nz_g
    nx_g[i] = outputs.nx_g
    # Stability-axis normal load factor: rotate body-axis nx/nz by alpha (y[i,1]).
    nzs_g[i] = nx_g[i]*sin(y[i,1])+nz_g[i]*cos(y[i,1])
    mach[i] = outputs.mach
    # Vertical thrust component using theta (y[i,4]).
    thrust_pound[i] = outputs.thrust_pound*sin(y[i,4])
# Three stacked plots: elevator command, stability-axis Nz, altitude.
ax1=plot.subplot(311)
ax1.plot(t, [elev_step(ti) for ti in t]);
ax1.set_xlabel('Time(s)');
ax1.set_ylabel('Elevator(deg)')
ax2=plot.subplot(312)
ax2.plot(t, nzs_g);
ax2.set_xlabel('Time(s)')
ax2.set_ylabel('Nz(g)')
ax3=plot.subplot(313)
ax3.plot(t, y[:,11])
ax3.set_xlabel('Time(s)')
ax3.set_ylabel('H(ft)')
| StarcoderdataPython |
289824 | # Python script that parses an OpenJ9 javacore and displays
# the number of classes in SCC and outside SCC.
# Depending on the configuration it will also print
# all classes that were shared (in SCC) or not shared
# If `displayClassLoaderHierarchy` is `True` we also print all the unique (by name)
# class hierarchies that lead to a class being non shared (or shared)
# The goal is to understand why some methods are not AOT compiled, despite -Xaot:forceaot
# Usage: python3 findNonSCCClassesFromJavacore.py javacore
#
# Author: <NAME>
import re # for regular expressions
import sys # for accessing parameters and exit
# Output configuration toggles — edit before running:
displaySharedClasses = False       # print every class found in the shared class cache (SCC)
displayNonSharedClasss = True      # print every class NOT in the SCC (name typo kept: referenced below)
displayClassLoaderHierarchy = True  # additionally print loader hierarchies and a per-hierarchy summary
'''
1CLTEXTCLLSS 12345678: 1=primordial,2=extension,3=shareable,4=middleware,5=system,6=trusted,7=application,8=delegating
2CLTEXTCLLOADER -----t-- Loader org/jboss/modules/ModuleClassLoader(0x00000000B1701190), Parent jdk/internal/loader/ClassLoaders$AppClassLoader(0x00000000AED4F1C0)
3CLNMBRLOADEDLIB Number of loaded libraries 0
3CLNMBRLOADEDCL Number of loaded classes 15
2CLTEXTCLLOADER -x--st-- Loader jdk/internal/loader/ClassLoaders$PlatformClassLoader(0x00000000AED3DEF0), Parent *none*(0x0000000000000000)
3CLNMBRLOADEDLIB Number of loaded libraries 1
3CLNMBRLOADEDCL Number of loaded classes 77
3CLNMBRSHAREDCL Number of shared classes 72
2CLTEXTCLLOADER -----t-- Loader org/eclipse/osgi/internal/loader/EquinoxClassLoader(0x00000000F16DD068), Parent com/ibm/ws/kernel/internal/classloader/BootstrapChildFirstJarClassloader(0x00000000F014E948)
3CLNMBRLOADEDLIB Number of loaded libraries 0
3CLNMBRLOADEDCL Number of loaded classes 21
3CLNMBRSHAREDCL Number of shared classes 19
...
2CLTEXTCLLOAD Loader org/jboss/modules/ModuleClassLoader(0x00000000B1701190)
3CLTEXTCLASS com/fasterxml/jackson/databind/exc/UnrecognizedPropertyException(0x00000000030DFB00)
3CLTEXTCLASS com/fasterxml/jackson/databind/exc/PropertyBindingException(0x00000000030DF400)
3CLTEXTCLASS com/fasterxml/jackson/databind/exc/MismatchedInputException(0x00000000030DEE00)
3CLTEXTCLASS com/fasterxml/jackson/databind/JsonMappingException(0x00000000030DEA00)
3CLTEXTCLASS com/fasterxml/jackson/databind/SerializationFeature(0x00000000030DDD00)
3CLTEXTCLASS com/fasterxml/jackson/databind/DeserializationFeature(0x00000000030DD500)
...
2CLTEXTCLLOAD Loader jdk/internal/reflect/DelegatingClassLoader(0x00000000EBABE088)
3CLTEXTCLASS jdk/internal/reflect/GeneratedMethodAccessor3(0x0000000002B16400)
...
2CLTEXTCLLOAD Loader org/eclipse/osgi/internal/loader/EquinoxClassLoader(0x00000000F16DD068)
3CLTEXTCLASS com/ibm/ws/sib/jfapchannel/server/impl/JFapDiscriminator(0x0000000000AD5800 shared)
3CLTEXTCLASS com/ibm/ws/sib/jfapchannel/server/impl/JFapChannelInbound(0x0000000000AD5000 shared)
3CLTEXTCLASS com/ibm/ws/jfap/inbound/channel/JFAPInboundServiceContext(0x0000000000AD4A00 shared)
3CLTEXTCLASS com/ibm/ws/jfap/inbound/channel/CommsInboundChain$ChainConfiguration(0x0000000000AD4500 shared)
3CLTEXTCLASS com/ibm/ws/jfap/inbound/channel/JFAPServerInboundChannelFactory(0x0000000000AD3F00 shared)
3CLTEXTCLASS [Lcom/ibm/ws/sib/jfapchannel/server/impl/ServerConnectionManagerImpl$State;(0x0000000000AD3C00)
'''
def parseJavacore(javacore):
    """Parse an OpenJ9 javacore file object and print SCC statistics.

    Relies on `javacore` being a (file) iterator: the first loop consumes
    lines up to the class-loader legend, and the second loop continues
    from that point — do not pass a list.
    Reads the module-level display* flags to decide what to print.
    """
    classLoaderHash = {}   # loader address -> loader attributes
    classHash = {}         # class address -> class attributes
    foundLegend = False
    activeClassLoader = None
    totalClasses = 0
    totalSharedClasses = 0
    uniqueClassLoaderHierarchies = {}

    def printClassLoaderHierarchyForClass(classAddr, uniqueClassLoaderHierarchies):
        # Walk parent links from the class's loader up to the root, printing
        # each level indented, and tally the hierarchy (by loader names).
        indentLevel = 1
        CLAddr = classHash[classAddr]['classLoaderAddr']
        print("Class loader hierarchy:")
        CLName = classLoaderHash[CLAddr]['classLoaderName']
        print("\t{name} {addr:016X}".format(name=CLName, addr=CLAddr))
        classLoaderHierarchy = [CLName] # Use name instead of CLAddr to get uniqueness by name (to many loaders with same name but different address)
        parentCL = classLoaderHash[CLAddr]['parentCLAddr']
        while parentCL != 0:
            indentLevel += 1
            for i in range(indentLevel):
                print("\t", end='')
            parentName = classLoaderHash[parentCL]['classLoaderName']
            print("{name} {addr:016X}".format(name=parentName, addr=parentCL))
            classLoaderHierarchy.append(parentName)
            parentCL = classLoaderHash[parentCL]['parentCLAddr']
        # Convert the list into a tuple so that we can use as a key in a dictionary
        q = tuple(classLoaderHierarchy)
        uniqueClassLoaderHierarchies[q] = uniqueClassLoaderHierarchies.get(q, 0) + 1

    def printUniqueClassLoaderHierarchies(uniqueClassLoaderHierarchies):
        # Summary: each distinct loader-name chain with its class count.
        print("Unique class loader hierarchies:")
        for key, value in uniqueClassLoaderHierarchies.items(): # key is a tuple
            print("\nNum classes loaded by this hierarchy:", value)
            indentLevel = 0;
            for classLoader in key:
                for i in range(indentLevel):
                    print("\t", end='')
                #print(classLoaderHash[classLoader]['classLoaderName'])
                print(classLoader)
                indentLevel += 1

    # Phase 1: skip forward to the class-loader legend line.
    legendPattern = re.compile('1CLTEXTCLLSS\s+12345678: 1=primordial,2=extension,3=shareable,4=middleware,5=system,6=trusted,7=application,8=delegating')
    for line in javacore:
        if legendPattern.match(line):
            foundLegend = True
            break
    if not foundLegend:
        print("Cannot find classloader legend in the javacore")
        exit(-1)

    # Phase 2: parse loader descriptions, per-loader counters and class lists.
    # NOTE(review): patterns are non-raw strings with regex escapes; Python
    # treats unknown escapes like \s leniently, but raw strings would be safer.
    classLoaderPattern = re.compile('^2CLTEXTCLLOADER\s+([-\w]{8}) Loader (\S+)\(0x([0-9A-F]+)\), Parent (\S+)\(0x([0-9A-F]+)\)')
    numLibsLoadedPattern = re.compile('^3CLNMBRLOADEDLIB\s+Number of loaded libraries\s+(\d+)')
    numClassesLoadedPattern = re.compile('^3CLNMBRLOADEDCL\s+Number of loaded classes\s+(\d+)')
    numClassesSharedPattern = re.compile('^3CLNMBRSHAREDCL\s+Number of shared classes\s+(\d+)')
    classLoaderHeader = re.compile('^2CLTEXTCLLOAD\s+Loader (\S+)\(0x([0-9A-F]+)\)')
    classPattern = re.compile('^3CLTEXTCLASS\s+(\S+)\(0x([0-9A-F]+)(?: shared)?\)') # " shared" is optional
    for line in javacore:
        if line.startswith("2CLTEXTCLLOADER"):
            flags = ""
            m = classLoaderPattern.match(line)
            if m:
                flags = m.group(1)
                classLoaderName = m.group(2)
                classLoaderAddr = int(m.group(3), base=16)
                parentCLName = m.group(4)
                parentCLAddr = int(m.group(5), base=16)
                activeClassLoader = classLoaderAddr
                classLoaderHash[classLoaderAddr] = {"classLoaderName":classLoaderName, "flags":flags, "parentCLName":parentCLName, "parentCLAddr":parentCLAddr}
            else:
                # The system class loader has no parent specified
                clp = re.compile('^2CLTEXTCLLOADER\s+([-\w]{8}) Loader \*System\*\(0x([0-9A-F]+)\)')
                m = clp.match(line)
                if m:
                    flags = m.group(1)
                    classLoaderName = "System"
                    classLoaderAddr = int(m.group(2), base=16)
                    activeClassLoader = classLoaderAddr
                    classLoaderHash[classLoaderAddr] = {"classLoaderName":classLoaderName, "flags":flags, "parentCLName":"*none*", "parentCLAddr":0}
                else:
                    print("Unrecognized classLoaderPattern:", line)
                    exit(-1)
        elif line.startswith("3CLNMBRLOADEDLIB"):
            m = numLibsLoadedPattern.match(line)
            assert m, "Wrong line with 3CLNMBRLOADEDLIB heading: {l}".format(l=line)
            numLibs = int(m.group(1))
            classLoaderHash[activeClassLoader]['numLibsLoaded'] = numLibs
        elif line.startswith("3CLNMBRLOADEDCL"):
            m = numClassesLoadedPattern.match(line)
            assert m, "Wrong line with 3CLNMBRLOADEDCL heading: {l}".format(l=line)
            numClasses = int(m.group(1))
            classLoaderHash[activeClassLoader]['numClassesLoaded'] = numClasses
            totalClasses += numClasses
        elif line.startswith("3CLNMBRSHAREDCL"):
            m = numClassesSharedPattern.match(line)
            assert m, "Wrong line with 3CLNMBRSHAREDCL heading: {l}".format(l=line)
            numShared = int(m.group(1))
            classLoaderHash[activeClassLoader]['numClassesShared'] = numShared
            totalSharedClasses += numShared
        elif line.startswith("2CLTEXTCLLOAD"):
            m = classLoaderHeader.match(line)
            assert m, "Wrong line with 2CLTEXTCLLOAD heading: {l}".format(l=line)
            classLoaderName = m.group(1)
            classLoaderAddr = int(m.group(2), base=16)
            assert classLoaderAddr in classLoaderHash, "Class loader must have been seen before"
            activeClassLoader = classLoaderAddr
        elif line.startswith("3CLTEXTCLASS"):
            m = classPattern.match(line)
            if m:
                className = m.group(1)
                classAddr = int(m.group(2), base=16)
                shared = True if " shared" in line else False
                classHash[classAddr] = {'className':className, 'shared':shared, 'classLoaderAddr':activeClassLoader}

    # Phase 3: report totals and, depending on the module-level flags,
    # list shared / non-shared classes and their loader hierarchies.
    print("Total classes:", totalClasses)
    print("Total shared classes", totalSharedClasses)
    if displaySharedClasses:
        print("Displaying classes in SCC")
        numShared = 0
        for classAddr, attribs in classHash.items():
            if attribs['shared'] == True:
                print(attribs['className'])
                numShared += 1
                if displayClassLoaderHierarchy:
                    printClassLoaderHierarchyForClass(classAddr, uniqueClassLoaderHierarchies)
        print("Num shared classes in dictionary:", numShared)
        if displayClassLoaderHierarchy:
            printUniqueClassLoaderHierarchies(uniqueClassLoaderHierarchies)
    if displayNonSharedClasss:
        print("Displaying classes not in SCC")
        numNonShared = 0
        for classAddr, attribs in classHash.items():
            if attribs['shared'] == False:
                print(attribs['className'])
                numNonShared += 1
                if displayClassLoaderHierarchy:
                    printClassLoaderHierarchyForClass(classAddr, uniqueClassLoaderHierarchies)
        print("Num non shared classes in dictionary:", numNonShared)
        if displayClassLoaderHierarchy:
            printUniqueClassLoaderHierarchies(uniqueClassLoaderHierarchies)
# Entry point: validate the command line and parse the given javacore.
if len(sys.argv) < 2:
    print("Program must have an argument: the name of the javacore\n")
    sys.exit(-1)

javacoreFileName = str(sys.argv[1])
# Open in read-only mode with line buffering; 'with' guarantees the handle
# is closed even if parsing raises (the original leaked the file object).
with open(javacoreFileName, 'r', 1) as javacore:
    parseJavacore(javacore)
| StarcoderdataPython |
6595730 | <reponame>urso/clidec
import argparse
class _namespaced:
    """
    Mixin for objects that act as namespaces for sub-commands.

    Subclasses gain a registry of sub-commands plus decorator helpers
    (:meth:`command` / :meth:`rawcommand`) for registering functions.
    """

    def __init__(self):
        # name -> Command / Namespace / RawCommand
        self._subcommands = {}

    def namespace(self, name, *opts):
        """Create and register a sub-namespace without a runnable function.

        If users execute the namespace by name the help text will be printed.
        Returns the new :class:`Namespace` so callers can keep registering
        sub-commands on it (the original returned ``None``, discarding it).
        """
        ns = Namespace(name, *opts)
        self.add_subcommand(ns)
        return ns

    def add_subcommand(self, sub):
        """Add a single sub-command or namespace to the current namespace."""
        self._subcommands[sub.name] = sub

    def add_subcommands(self, *cmds):
        """Add a list of sub-commands and namespaces to the current namespace."""
        for cmd in cmds:
            self.add_subcommand(cmd)

    def command(self, *opts):
        """Decorator registering a function as a sub-command.

        Works both bare (``@ns.command``) and parameterized
        (``@ns.command(argument(...))``).
        """
        return self._make_command(Command, *opts)

    def rawcommand(self, *opts):
        """Decorator registering a raw sub-command.

        A RawCommand collects all arguments after the command name into a
        string list without interpreting them — useful for defining aliases
        to external scripts.
        """
        return self._make_command(RawCommand, *opts)

    def _make_command(self, cmdclass, *opts):
        # Shared implementation of command()/rawcommand(). Supports both
        # decorator call styles: bare (single callable argument) and
        # parameterized (cmdopt instances).
        def do(fn):
            cmd = cmdclass(fn.__name__, fn, *opts)
            for opt in opts:
                opt.init_cmd(cmd)
            self.add_subcommand(cmd)
            return cmd

        if len(opts) == 1 and callable(opts[0]):
            fn, opts = opts[0], []
            return do(fn)
        return do

    def _add_subparsers(self, parser):
        # Wire every registered sub-command into the given argparse parser
        # via a single positional "command" argument handled by _Subaction.
        if len(self._subcommands) == 0:
            return

        subparsers = _SubcommandList(parser.prog)
        for cmd in self._subcommands.values():
            cmd._add_subparser(subparsers)

        parser.add_argument("command",
                            action=_Subaction,
                            actions=subparsers.commands,
                            choices=list(subparsers.commands.keys()),
                            option_strings=[],
                            nargs=argparse.PARSER,
                            )
class Namespace(_namespaced):
    """Container namespace holding sub-commands and nested namespaces.

    Executing the namespace itself prints a help string listing the
    available sub-commands. Instances are callable: calling one parses the
    arguments and dispatches to the selected sub-command.
    """

    def __init__(self, name, *opts):
        super(Namespace, self).__init__()
        self.name = name
        self.doc = name
        self._opts = opts
        for opt in self._opts:
            opt.init_namespace(self)

    def __call__(self, *args, **kwargs):
        self.run(*args, **kwargs)

    def run(self, parser=None, args=None):
        """Parse ``args`` (default: sys.argv) and run the chosen command."""
        if not parser:
            parser = argparse.ArgumentParser()
        self._init_argparse(parser)
        parsed = parser.parse_args(args)
        parsed.func(parsed)

    def _add_subparser(self, action):
        sub = action.add_parser(self.name, description=self.doc)
        self._init_argparse(sub)

    def _init_argparse(self, parser):
        def show_help(args):
            # A bare namespace is not runnable: print usage and fail.
            parser.print_help()
            exit(1)

        for opt in self._opts:
            opt.init_args(parser)
        parser.set_defaults(func=show_help)
        self._add_subparsers(parser)
class Command(_namespaced):
    """Executable sub-command wrapping a plain function."""

    def __init__(self, name, fn, *opts):
        super(Command, self).__init__()
        self.name = name
        self.fn = fn
        self._opts = opts
        self.doc = ""

    def __call__(self, *args, **kwargs):
        self.fn(*args, **kwargs)

    def _add_subparser(self, action):
        # Fall back to the wrapped function's docstring when no explicit
        # description has been configured.
        description = self.doc or self.fn.__doc__
        sub = action.add_parser(self.name, description=description)
        self._init_argparse(sub)

    def _init_argparse(self, parser):
        for opt in self._opts:
            opt.init_args(parser)
        parser.set_defaults(func=self.fn)
        self._add_subparsers(parser)
class RawCommand:
    """Executable raw sub-command: its arguments are captured verbatim."""

    def __init__(self, name, fn, *args, **kwargs):
        self.name = name
        self.fn = fn
        self.doc = ""

    def _add_subparser(self, action):
        sub = action.add_rawparser(self.name, self.fn, description=self.doc)
        sub.set_defaults(func=self.fn)

    def add_subcommand(self, cmd):
        # Raw commands consume everything after their name; nesting makes no sense.
        raise Exception("Can not add subcommands to raw commands")
class _SubcommandList:
    """Collects per-command parsers, prefixing their prog names."""

    def __init__(self, prog):
        self.commands = {}
        self.prog_prefix = prog

    def add_parser(self, name, *args, **kwargs):
        """Create and register a regular argparse parser for ``name``."""
        if kwargs.get('prog') is None:
            kwargs['prog'] = '{} {}'.format(self.prog_prefix, name)
        parser = argparse.ArgumentParser(*args, **kwargs)
        self.commands[name] = parser
        return parser

    def add_rawparser(self, name, fn, *args, **kwargs):
        """Create and register a pass-through parser for a raw command."""
        if kwargs.get('prog') is None:
            kwargs['prog'] = '{} {}'.format(self.prog_prefix, name)
        parser = _RawParser("raw", *args, **kwargs)
        self.commands[name] = parser
        return parser
class _RawParser:
    """Minimal parser stand-in that stores all arguments unparsed."""

    def __init__(self, dest, *args, **kwargs):
        self.dest = dest
        self._defaults = {}

    def parse_args(self, args, namespace=None):
        """Attach defaults and the raw argument list onto the namespace."""
        target = argparse.Namespace() if namespace is None else namespace
        for key, value in self._defaults.items():
            setattr(target, key, value)
        setattr(target, self.dest, args)
        return target

    def set_defaults(self, **kwargs):
        self._defaults.update(kwargs)
class _Subaction(argparse.Action):
    """argparse Action dispatching the remaining argv to a named sub-parser.

    ``values`` arrives as the full trailing argv (nargs=PARSER): the first
    element selects the parser, the rest is handed to it, and the resulting
    attributes are copied back onto the outer namespace.
    """

    def __init__(self, actions, *args, **kwargs):
        super(_Subaction, self).__init__(*args, **kwargs)
        # Mapping of sub-command name -> parser, built by _SubcommandList.
        self.actions = actions

    def __call__(self, parser, namespace, values, option_string=None):
        if not values:
            return

        name = values[0]
        args = values[1:]
        setattr(namespace, self.dest, name)

        try:
            parser = self.actions[name]
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # catching KeyError would suffice here.
        except:
            choices = ", ".join(self.actions.keys())
            raise argparse.ArgumentError(self,
                                         f"unknown parser {name}, ({choices})")

        # parse all the remaining options into the namespace
        subnamespace = parser.parse_args(args)
        for k, v in vars(subnamespace).items():
            setattr(namespace, k, v)
class cmdopt:
    """Base class for command and namespace options.

    Implementations may customise namespaces, commands and the underlying
    argparse parser; the default hooks do nothing.
    """

    def init_namespace(self, namespace):
        pass

    def init_cmd(self, cmd):
        pass

    def init_args(self, parser):
        pass
class command_name(cmdopt):
    """Option that overrides the name of a command or namespace."""

    def __init__(self, name):
        self._name = name

    def init_namespace(self, ns):
        ns.name = self._name

    def init_cmd(self, cmd):
        cmd.name = self._name
class argument(cmdopt):
    """Declares a command-line argument for a namespace or command.

    Accepts exactly the same arguments as ``ArgumentParser.add_argument``
    and forwards them when the parser is initialised.
    """

    def __init__(self, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs

    def init_args(self, parser):
        parser.add_argument(*self._args, **self._kwargs)
class with_commands(cmdopt):
    """Option that attaches pre-built commands/namespaces to its target."""

    def __init__(self, *commands):
        self._commands = commands

    def init_namespace(self, ns):
        self._add_commands(ns)

    def init_cmd(self, cmd):
        self._add_commands(cmd)

    def _add_commands(self, target):
        for sub in self._commands:
            target.add_subcommand(sub)
def root(*opts):
    """Create a standalone anonymous root namespace.

    The root namespace is the entry point of a command-line application;
    existing namespaces can be attached via ``with_commands``::

        main = root(
            with_commands(
                ...,
            )
        )

        if __name__ == "__main__":
            main()
    """
    return Namespace("", *opts)
def namespace(name, *opts):
    """Declare a new standalone namespace.

    Standalone namespaces can be attached to other namespaces via
    ``with_commands`` or executed directly.
    """
    return Namespace(name, *opts)
| StarcoderdataPython |
3463721 | <reponame>ysidharthjr/CalculaThor
#!/usr/bin/env python3
from tkinter import *
from tkinter import messagebox
import math
# Build the main window: a 5x7 resizable grid, title/credit labels and the
# expression entry field spanning the top row.
root=Tk()
for x in range(5):
    Grid.rowconfigure(root, x, weight=1)
for y in range(7):
    Grid.columnconfigure(root, y, weight=1)
root.title("Calcula-Thor")
l1=Label(root,text="Calcula-Thor",fg="green")
l1.config(font=("Courier", 15))
l1.grid(row=6,columnspan=7,sticky=N+S+E+W)
l1=Label(root,text="By: <NAME>",fg="red")
l1.grid(row=7,columnspan=7,sticky=N+S+E+W)
# Entry widget holding the expression being typed and the computed result.
f1=Entry(root,bg="lightyellow")
f1.grid(row=0,columnspan=6,sticky=N+S+E+W)
def calcu():
    """Evaluate the expression in the entry field and display the result."""
    expression = f1.get()
    f1.delete(0, END)
    try:
        # SECURITY NOTE: eval() runs whatever the user typed; acceptable for
        # a local toy calculator, never for untrusted input.
        result = eval(expression)
        f1.insert(0, result)
    except:
        messagebox.showerror("Seriously?", "I am Calcula-Thor son of Py-Din and you caused an error !")
# Digit buttons 1-9 laid out as a 3x3 keypad (yellow).
b1=Button(root,text="1",command=lambda:f1.insert(END,"1"),bg="yellow")
b1.grid(row=1,column=0,sticky=N+S+E+W)
b1=Button(root,text="2",command=lambda:f1.insert(END,"2"),bg="yellow")
b1.grid(row=1,column=1,sticky=N+S+E+W)
b1=Button(root,text="3",command=lambda:f1.insert(END,"3"),bg="yellow")
b1.grid(row=1,column=2,sticky=N+S+E+W)
b1=Button(root,text="4",command=lambda:f1.insert(END,"4"),bg="yellow")
b1.grid(row=2,column=0,sticky=N+S+E+W)
b1=Button(root,text="5",command=lambda:f1.insert(END,"5"),bg="yellow")
b1.grid(row=2,column=1,sticky=N+S+E+W)
b1=Button(root,text="6",command=lambda:f1.insert(END,"6"),bg="yellow")
b1.grid(row=2,column=2,sticky=N+S+E+W)
b1=Button(root,text="7",command=lambda:f1.insert(END,"7"),bg="yellow")
b1.grid(row=3,column=0,sticky=N+S+E+W)
b1=Button(root,text="8",command=lambda:f1.insert(END,"8"),bg="yellow")
b1.grid(row=3,column=1,sticky=N+S+E+W)
b1=Button(root,text="9",command=lambda:f1.insert(END,"9"),bg="yellow")
b1.grid(row=3,column=2,sticky=N+S+E+W)
# Bottom row: decimal point, zero and the evaluate button.
b1=Button(root,text=".",command=lambda:f1.insert(END,"."),bg="cyan")
b1.grid(row=4,column=0,sticky=N+S+E+W)
b1=Button(root,text="0",command=lambda:f1.insert(END,"0"),bg="yellow")
b1.grid(row=4,column=1,sticky=N+S+E+W)
b1=Button(root,text="=",command=calcu,bg="lime")
b1.grid(row=4,column=2,sticky=N+S+E+W)
# Arithmetic operator column (light blue).
b1=Button(root,text="+",command=lambda:f1.insert(END,"+"),bg="lightblue")
b1.grid(row=1,column=3,sticky=N+S+E+W)
b1=Button(root,text="-",command=lambda:f1.insert(END,"-"),bg="lightblue")
b1.grid(row=2,column=3,sticky=N+S+E+W)
b1=Button(root,text="*",command=lambda:f1.insert(END,"*"),bg="lightblue")
b1.grid(row=3,column=3,sticky=N+S+E+W)
b1=Button(root,text="/",command=lambda:f1.insert(END,"/"),bg="lightblue")
b1.grid(row=4,column=3,sticky=N+S+E+W)
# Parentheses, exponent (Python's **) and modulo.
b1=Button(root,text="(",command=lambda:f1.insert(END,"("),bg="blue")
b1.grid(row=1,column=4,sticky=N+S+E+W)
b1=Button(root,text=")",command=lambda:f1.insert(END,")"),bg="blue")
b1.grid(row=1,column=5,sticky=N+S+E+W)
b1=Button(root,text="^",command=lambda:f1.insert(END,"**"),bg="lightgreen")
b1.grid(row=2,column=4,sticky=N+S+E+W)
b1=Button(root,text="%",command=lambda:f1.insert(END,"%"),bg="lightgreen")
b1.grid(row=3,column=4,sticky=N+S+E+W)
def sqroot():
    """Replace the entry's expression with its square root."""
    expression = f1.get()
    f1.delete(0, END)
    try:
        # eval() runs user-typed code; safe only for this local toy app.
        result = math.sqrt(eval(expression))
        f1.insert(0, result)
    except:
        messagebox.showerror("Seriously?", "I am Calcula-Thor son of Py-Din and you caused an error !")
def logg():
    """Replace the entry's expression with its base-10 logarithm."""
    expression = f1.get()
    f1.delete(0, END)
    try:
        # eval() runs user-typed code; safe only for this local toy app.
        result = math.log10(eval(expression))
        f1.insert(0, result)
    except:
        messagebox.showerror("Seriously?", "I am Calcula-Thor son of Py-Din and you caused an error !")
# Scientific helpers: square root, log10 and the pi/e constants.
b1=Button(root,text="Sqrt",command=sqroot,bg="lightgreen")
b1.grid(row=4,column=4,sticky=N+S+E+W)
b1=Button(root,text="log",command=logg,bg="lightgreen")
b1.grid(row=2,column=5,sticky=N+S+E+W)
b1=Button(root,text="pi",command=lambda:f1.insert(END,math.pi),bg="lightgreen")
b1.grid(row=3,column=5,sticky=N+S+E+W)
b1=Button(root,text="e",command=lambda:f1.insert(END,math.e),bg="lightgreen")
b1.grid(row=4,column=5,sticky=N+S+E+W)
def logn():
    """Replace the entry's expression with its natural logarithm."""
    expression = f1.get()
    f1.delete(0, END)
    try:
        # eval() runs user-typed code; safe only for this local toy app.
        result = math.log(eval(expression))
        f1.insert(0, result)
    except:
        messagebox.showerror("Seriously?", "I am Calcula-Thor son of Py-Din and you caused an error !")
def sine():
    """Replace the entry's expression (radians) with its sine."""
    expression = f1.get()
    f1.delete(0, END)
    try:
        # eval() runs user-typed code; safe only for this local toy app.
        result = math.sin(eval(expression))
        f1.insert(0, result)
    except:
        messagebox.showerror("Seriously?", "I am Calcula-Thor son of Py-Din and you caused an error !")
def cosine():
    """Replace the entry's expression (radians) with its cosine."""
    expression = f1.get()
    f1.delete(0, END)
    try:
        # eval() runs user-typed code; safe only for this local toy app.
        result = math.cos(eval(expression))
        f1.insert(0, result)
    except:
        messagebox.showerror("Seriously?", "I am Calcula-Thor son of Py-Din and you caused an error !")
def tangent():
    """Replace the entry's expression (radians) with its tangent."""
    expression = f1.get()
    f1.delete(0, END)
    try:
        # eval() runs user-typed code; safe only for this local toy app.
        result = math.tan(eval(expression))
        f1.insert(0, result)
    except:
        messagebox.showerror("Seriously?", "I am Calcula-Thor son of Py-Din and you caused an error !")
# Natural log, trig buttons (radian input), the clear button, then start
# the Tk event loop.
b1=Button(root,text="ln",command=logn,bg="lightgreen")
b1.grid(row=1,column=6,sticky=N+S+E+W)
b1=Button(root,text="sin",command=sine,bg="magenta")
b1.grid(row=2,column=6,sticky=N+S+E+W)
b1=Button(root,text="cos",command=cosine,bg="magenta")
b1.grid(row=3,column=6,sticky=N+S+E+W)
b1=Button(root,text="tan",command=tangent,bg="magenta")
b1.grid(row=4,column=6,sticky=N+S+E+W)
b1=Button(root,text="C",command=lambda:f1.delete(0,END),bg="red")
b1.grid(row=0,column=6,sticky=N+S+E+W)
root.mainloop()
| StarcoderdataPython |
8008917 | <gh_stars>1-10
import hashlib
class Image:
    """Immutable description of a servable image derived from a base tag.

    Transformations (resizing, format conversion) return new ``Image``
    instances; the receiver is never mutated. Transformed images get a
    qualified tag embedding a hash of the transformation attributes.
    """

    def __init__(self, tag, size_within=None, image_format=None):
        self._tag = tag
        self._size_within = size_within
        self._image_format = image_format

    @property
    def tag(self):
        """
        The tag of this image.

        :rtype : str
        """
        return self._tag

    @property
    def size_within(self):
        # Bounding box as "WIDTHxHEIGHT", or None for the original size.
        return self._size_within

    @property
    def image_format(self):
        # Target format descriptor, e.g. "png" or "jpeg(0.95)"; None = original.
        return self._image_format

    def formatted_to_jpeg(self, quality=0.95):
        """Return a copy converted to JPEG at the given quality (0-1)."""
        image_format_str = 'jpeg({})'.format(quality)
        return Image(tag=self.tag, size_within=self.size_within, image_format=image_format_str)

    def formatted_to_png(self):
        """Return a copy converted to PNG."""
        return Image(tag=self.tag, size_within=self.size_within, image_format='png')

    def sized_within(self, width, height):
        """Return a copy constrained to fit inside width x height."""
        return Image(tag=self.tag, size_within="{}x{}".format(width, height), image_format=self.image_format)

    def servable_url(self, hostname='localhost', port=9806):
        """URL under which this (possibly transformed) image is served."""
        return "http://{}:{}/{}".format(hostname, port, self._qualified_tag())

    def aws_s3_servable_url(self, bucket, prefix):
        """URL of the image inside an S3 bucket under the given key prefix."""
        return "https://{}.s3.amazonaws.com/{}{}".format(bucket, prefix, self._qualified_tag())

    def _is_original(self):
        # True when no resize and no format conversion was requested.
        return self.size_within is None and self.image_format is None

    def _qualified_tag(self):
        # Originals keep their tag; transformed images get a tag embedding an
        # MD5 of the transformation attributes so each distinct transformation
        # maps to a distinct, cacheable file name.
        if self._is_original():
            return self.tag

        def hashed_attributes():
            size_attribute = 'size-within=' + self.size_within.lower() if self.size_within else ''
            format_attribute = self.image_format.upper() if self.image_format else ''
            return hashlib.md5((format_attribute + size_attribute).encode('utf-8')).hexdigest()

        # rsplit keeps dots inside the base name intact; the original
        # `self.tag.split('.')` raised ValueError for tags like "a.b.png".
        extensionless_tag = self.tag.rsplit('.', 1)[0]
        return "{}-{}.{}".format(extensionless_tag, hashed_attributes(), self._target_file_name_extension())

    def _target_file_name_extension(self):
        # File extension the transformed image will be written with.
        if not self.image_format:
            return self.tag[self.tag.rfind('.')+1:]
        elif self.image_format.startswith('jpeg'):
            return 'jpg'
        elif self.image_format.startswith('png'):
            return 'png'
        else:
            raise RuntimeError("Image format unaccounted for!")

    def __repr__(self):
        return "graphiqueclient.Image(tag='{}')".format(self.tag)
def image_from_location_url(location_url):
    """
    Parses and creates an Image instance out of its URL.

    :type location_url: str
    :rtype : Image
    """
    # Last path segment is the tag; a URL without '/' is used as-is.
    tag = location_url.rsplit('/', 1)[-1]
    return Image(tag=tag)
| StarcoderdataPython |
9663956 | #!/usr/bin/env python
import sys
from deep_learning_service import DeepLearningService
# NOTE(review): sys.path is extended *after* the import above — confirm the
# './inference' entry is only needed by load_all_models(), not the import.
sys.path.append('./inference')
# Instantiate the service and eagerly load every model it knows about.
dl_service = DeepLearningService()
dl_service.load_all_models()
| StarcoderdataPython |
6538091 | <gh_stars>0
import unittest # Importing the unittest module
from user import User
from credential import Credential
import pyperclip
class TestUser(unittest.TestCase):
    """Test cases for the behaviour of the User class."""

    def setUp(self):
        """Create a fresh User instance before every test."""
        self.new_user = User('SophieCee', '<PASSWORD>!')

    def tearDown(self):
        """Reset the shared class-level user list after every test."""
        User.user_list = []

    def test_init(self):
        """A new User stores the username and password it was given."""
        self.assertEqual(self.new_user.username, 'SophieCee')
        self.assertEqual(self.new_user.password, '<PASSWORD>!')

    def test_save_user(self):
        """save_user appends the instance to User.user_list."""
        self.new_user.save_user()
        self.assertEqual(len(User.user_list), 1)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
128710 | <reponame>stvgt/interfaces
SQL_INIT_TABLES_AND_TRIGGERS = '''
CREATE TABLE consumers
(
component TEXT NOT NULL,
subcomponent TEXT NOT NULL,
host TEXT NOT NULL,
itype TEXT NOT NULL,
iprimary TEXT NOT NULL,
isecondary TEXT NOT NULL,
itertiary TEXT NOT NULL,
optional BOOLEAN NOT NULL,
unique (component, subcomponent, host, itype, iprimary, isecondary, itertiary)
);
CREATE TABLE producers
(
component TEXT NOT NULL,
subcomponent TEXT NOT NULL,
host TEXT NOT NULL,
itype TEXT NOT NULL,
iprimary TEXT NOT NULL,
isecondary TEXT NOT NULL,
itertiary TEXT NOT NULL,
deprecated BOOLEAN NOT NULL,
unique (component, subcomponent, host, itype, iprimary, isecondary, itertiary)
);
CREATE OR REPLACE FUNCTION ensure_producer_exists()
RETURNS TRIGGER AS $producers_check$
BEGIN
IF (
NEW.optional
OR
EXISTS(
SELECT 1
FROM producers as other
WHERE other.host = NEW.host
AND other.itype = NEW.itype
AND other.iprimary = NEW.iprimary
AND other.isecondary = NEW.isecondary
AND other.itertiary = NEW.itertiary
)
) THEN
RETURN NEW;
END IF;
RAISE EXCEPTION 'no producer for interface "%" "%" "%" "%" "%"',
NEW.host, NEW.itype, NEW.iprimary, NEW.isecondary, NEW.itertiary;
END;
$producers_check$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION ensure_no_consumer_exists()
RETURNS TRIGGER AS $consumers_check$
BEGIN
IF (
EXISTS(
SELECT 1
FROM consumers as other
WHERE other.host = OLD.host
AND other.itype = OLD.itype
AND other.iprimary = OLD.iprimary
AND other.isecondary = OLD.isecondary
AND other.itertiary = OLD.itertiary
AND other.optional = FALSE
)
) THEN
IF (
NOT EXISTS(
SELECT 1
FROM producers as other
WHERE other.host = OLD.host
AND other.itype = OLD.itype
AND other.iprimary = OLD.iprimary
AND other.isecondary = OLD.isecondary
AND other.itertiary = OLD.itertiary
AND other.component <> OLD.component
AND other.subcomponent <> OLD.subcomponent
)
) THEN
RAISE EXCEPTION 'no other producer for used interface "%" "%" "%" "%" "%"',
OLD.host, OLD.itype, OLD.iprimary, OLD.isecondary, OLD.itertiary;
END IF;
END IF;
RETURN OLD;
END;
$consumers_check$ LANGUAGE plpgsql;
CREATE TRIGGER producers_check BEFORE INSERT ON consumers
FOR each row execute procedure ensure_producer_exists();
CREATE TRIGGER consumers_check BEFORE DELETE ON producers
FOR each row execute procedure ensure_no_consumer_exists();
CREATE INDEX consumers_component on consumers (component);
CREATE INDEX producers_component on producers (component);
'''
SQL_DROP_ALL = '''
DROP INDEX IF EXISTS consumers_component;
DROP INDEX IF EXISTS producers_component;
DROP TRIGGER IF EXISTS consumers_check ON producers;
DROP TRIGGER IF EXISTS producers_check ON consumers;
DROP FUNCTION If EXISTS ensure_no_consumer_exists();
DROP FUNCTION IF EXISTS ensure_producer_exists();
DROP TABLE IF EXISTS consumers;
DROP TABLE IF EXISTS producers;
'''
| StarcoderdataPython |
5004677 | # The list of candies to print to the screen
candyList = ["Snickers", "Kit Kat", "Sour Patch Kids", "Juicy Fruit", "Sweedish Fish", "Skittles", "Hershey Bar", "Skittles", "Starbursts", "M&Ms"]
# The amount of candy the user will be allowed to choose
allowance = 5
# The list used to store all of the candies selected inside of
candyCart = []
# Print all of the candies to the screen and their index in brackets
for candy in candyList:
print("[" + str(candyList.index(candy)) + "] " + candy)
# Run through a loop which allows the user to choose which candies to take home with them
for x in range(allowance):
selected = input("Which candy would you like to bring home? ")
# Add the candy at the index chosen to the candyCart list
candyCart.append(candyList[int(selected)])
# Loop through the candyCart to say what candies were brought home
print("I brought home with me...")
for candy in candyCart:
print(candy)
# A For loop moves through a given range of numbers
# If only one number is provided it will loop from 0 to that number
# for x in range(10):
# print(x)
# If two numbers are provided then a For loop will loop from the first number up until it reaches the second number
# for x in range(20,30):
# print(x)
# If a list is provided, then the For loop will loop through each element within the list
# for x in ["Peanut","Butter","Jelly","Time","Is","Now"]:
# print(x)
# A While Loop repeats its body until the condition becomes false —
# here, until the user answers anything other than "Yes".
x = "Yes"
while x == "Yes":
    print("Whee! Merry-Go-Rounds are great!")
    x = input("Would you like to go on the Merry-Go-Round again? ")
| StarcoderdataPython |
4889742 | <reponame>fabiommendes/sidekick<filename>sidekick-functions/sidekick/functions/fn_interfaces.py
import copy
from collections import ChainMap
from functools import reduce, partial, singledispatch
from itertools import chain
from .core_functions import to_callable
from .fn import fn
from ..typing import T, MutableMapping, Callable
#
# Utility functions and types
#
def _raise_key_error(x):
    # Default dispatch target for IndexedFunc: always fails with KeyError.
    raise KeyError(x)
class UnitFactory:
    """
    Descriptor for the unit attribute of monoids: accessed on the class it
    returns the class itself; accessed on an instance it returns the result
    of calling that instance with no arguments.
    """

    __slots__ = ("_func",)

    def __get__(self, obj, cls=None):
        return cls if obj is None else obj()
class IndexedFunc(type(fn)):
    """
    A fn-function that accepts indexing.

    Indexing is used to register instances of the function specialized to
    specific types or contexts. Non-type keys live in a plain registry;
    type keys additionally feed a singledispatch dispatcher so lookups of
    unregistered subclasses resolve to the best registered base class.
    """

    def __init__(cls, name, typ, ns, **kwargs):
        # We want to use singledispatch algorithm to find best implementations
        # to type-based missing keys
        cls._type_dispatcher = singledispatch(
            lambda x, *args: _raise_key_error(type(x))
        )

        # This dict holds the non-type based keys
        cls._registry = {}

        # A cache for both types
        cls._cache = ChainMap({}, cls._registry)

    def __contains__(cls, item):
        return item in cls._registry

    def __getitem__(cls, key):
        try:
            return cls._cache[key]
        except KeyError:
            # Only type keys get a singledispatch fallback; cache the result.
            if not isinstance(key, type):
                raise
            cls._cache[key] = value = cls._type_dispatcher.dispatch(key)
            return value

    def __delitem__(cls, key):
        raise KeyError(f"cannot delete implementations from {cls.__name__}")

    def __setitem__(cls, key, value):
        if key in cls:
            raise KeyError(f"cannot override {key} implementation in {cls.__name__}")

        cls._registry[key] = cls._cache[key] = value
        if isinstance(key, type):
            # Registering a type invalidates cached dispatch results.
            cls._type_dispatcher.register(key, value)
            cls._cache.clear()
            cls._cache.update(cls._registry)

        # Propagate the registration to every IndexedFunc base class.
        for base in cls.__bases__:
            if isinstance(base, IndexedFunc):
                base[key] = value

    def __len__(cls):
        return len(cls._registry)

    def __iter__(cls):
        yield from cls._registry

    # This metaclass interacts poorly with ABCMeta. We just copy the relevant
    # methods instead of inheriting from MutableMapping.
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    get = MutableMapping.get
class semigroup(fn, metaclass=IndexedFunc):
    """
    A function that implements a semigroup structure.
    In sidekick, semigroups are implemented as a variadic function with the
    following signatures:
    * fn(xs) = fn(*xs)
    * fn(x, y) = op(x, y)
    * fn(x, y, z) = op(op(x, y), z) = op(x, op(y, z))
    * and so on for more arguments...
    ``op`` is the associative binary operator that defines the particular
    semigroup structure. Calling a semigroup function with more arguments simply
    combines all elements using the semigroup definition.
    """
    @property
    def description(self):
        # Human-readable description; stored on __doc__.
        return self.__doc__
    @description.setter
    def description(self, value):
        self.__doc__ = value
    @classmethod
    def from_operator(cls, op, description=None):
        """
        Creates a new semigroup function from the given binary operator.
        """
        def semigroup_fn(x_or_seq, /, *xs):
            # Single argument means "a sequence of elements" (fn(xs) == fn(*xs)).
            if xs:
                return reduce(op, xs, x_or_seq)
            return reduce(op, x_or_seq)
        return cls(semigroup_fn, description)
    @classmethod
    def from_reducer(cls, func, description=None):
        """
        Creates a new semigroup from a function that reduces a
        non-empty sequence of arguments.
        """
        def semigroup_fn(*args):
            if (n := len(args)) == 1:
                # Single argument is already a sequence; reduce it directly.
                return func(args[0])
            elif n == 0:
                raise TypeError("semigroup requires at least one argument")
            return func(args)
        return cls(semigroup_fn, description)
    @property
    def unit(self):
        # Plain semigroups have no identity element; monoid overrides this.
        raise TypeError("semigroup does not have a unit element.")
    def __init__(self, func, description=None):
        super().__init__(func)
        self.__doc__ = description
    def reduce(self, iterable):
        """
        Reduces iterable by sequence of elements.
        Similar to calling fn(*iterable), but do not put all elements into
        memory at once. This can be slower in many situations, but might have a
        better memory footprint.
        """
        return reduce(self._func, iterable)
    def accumulate(self, iterable):
        """
        Accumulate iterable using binary reduction of all of its elements.

        Yields the running partial reductions (like itertools.accumulate).
        Yields nothing for an empty iterable.
        """
        op = self._func
        iterable = iter(iterable)
        try:
            x = next(iterable)
        except StopIteration:
            return
        yield x
        for y in iterable:
            yield (x := op(x, y))
    def times(self, x: T, n: int) -> T:
        """
        Execute binary operation n times in x.

        Uses exponentiation-by-squaring, so only O(log n) applications of the
        operator are performed. n == 0 delegates to ``self.unit`` (raises for
        plain semigroups).
        """
        if n == 0:
            return self.unit
        elif n == 1:
            return x
        res = self.times(x, n // 2)
        res = self(res, res)
        return res if n % 2 == 0 else self(res, x)
    def dual(self):
        """
        Dual algebraic structure produced by flipping the binary operation.
        Commutative operations are self-dual, i.e., the dual is equal to the group
        itself.
        """
        fn = to_callable(self)
        new = copy.copy(self)
        new._func = lambda *xs: fn(*xs[::-1])
        return new
class monoid(semigroup):
    """
    Monoid is a semigroup with a unit element.

    In sidekick, monoids are implemented as variadic functions similar to
    semigroups. The main difference is that calling a monoid with no arguments
    returns the unit element, instead of raising an error.
    """

    @classmethod
    def from_semigroup(cls, semigroup, unit=None, unit_factory=None):
        """
        Creates a monoid from semigroup, supplying the unit element or the unit
        factory.
        """
        semigroup_fn = to_callable(semigroup)
        if unit is not None:
            return cls(lambda *args: semigroup_fn(*args) if args else unit)
        elif unit_factory is not None:
            return cls(lambda *args: semigroup_fn(*args) if args else unit_factory())
        else:
            raise TypeError("unit or unit_factory must be given")

    @classmethod
    def from_operator(cls, op, unit=None, unit_factory=None):
        """
        Creates monoid from binary operator.

        Either ``unit`` (a constant identity element) or ``unit_factory``
        (a zero-argument callable producing a fresh identity, useful for
        mutable units) must be supplied.
        """
        if unit is not None:
            # With a constant unit, reduce() with an explicit initial value
            # handles 0, 1 or more arguments uniformly (the previous code had
            # two identical branches here).
            def monoid_fn(*args):
                return reduce(op, args, unit)
        elif unit_factory is not None:
            def monoid_fn(*args):
                if (n := len(args)) == 0:
                    return unit_factory()
                elif n == 1:
                    # Combine with a fresh unit so mutable units are not shared.
                    return reduce(op, args, unit_factory())
                else:
                    return reduce(op, args)
        else:
            raise TypeError("unit or unit_factory must be given")
        return cls(monoid_fn)

    @classmethod
    def from_reducer(cls, func, description=None):
        """
        Creates a new monoid from a function that reduces a sequence of arguments.
        """
        def monoid_fn(*args):
            return func(args[0]) if len(args) == 1 else func(args)
        return cls(monoid_fn, description)

    # Descriptor: instance access calls self() and returns the unit element.
    unit = UnitFactory()

    def reduce(self, iterable):
        """
        Reduces iterable, returning the unit element when it is empty.
        """
        # BUG FIX: functools.reduce raises TypeError (not IndexError) on an
        # empty iterable, so the previous ``except IndexError`` never fired
        # and empty iterables crashed. Test for emptiness explicitly instead.
        iterator = iter(iterable)
        try:
            first = next(iterator)
        except StopIteration:
            return self.unit
        return reduce(self._func, iterator, first)

    def accumulate(self, iterable, unit=False):
        """
        Accumulate iterable; if ``unit`` is True, prepend the unit element.
        """
        if unit:
            return super().accumulate(chain([self.unit], iterable))
        return super().accumulate(iterable)
class group(monoid):
    """
    A monoid with an inverse operation.
    This behaves similarly to a monoid, but also has an inverse method named
    inv.
    """
    @classmethod
    def from_monoid(cls, monoid, *, inv):
        """
        Creates group from a monoid and an inverse function.
        """
        return cls(to_callable(monoid), inv, description=monoid.description)
    # noinspection PyMethodOverriding
    @classmethod
    def from_semigroup(cls, semigroup, unit=None, unit_factory=None, *, inv):
        """
        Creates a group from semigroup, supplying the inverse function and the
        unit element or the unit factory.
        """
        # Build the monoid first, then attach the inverse.
        mono = monoid.from_semigroup(semigroup, unit, unit_factory)
        return cls.from_monoid(mono, inv=inv)
    # noinspection PyMethodOverriding
    @classmethod
    def from_operator(cls, op, unit=None, unit_factory=None, *, inv):
        """
        Creates group from binary operator.
        """
        mono = monoid.from_operator(op, unit, unit_factory)
        return cls.from_monoid(mono, inv=inv)
    # noinspection PyMethodOverriding
    @classmethod
    def from_reducer(cls, func, *, inv, description=None):
        """
        Creates a group from a reducer function, supplying the inverse
        function.
        """
        mono = monoid.from_reducer(func, description)
        return cls.from_monoid(mono, inv=inv)
    def __init__(self, func, inv, description=None):
        super().__init__(func, description)
        # inv: unary callable returning the inverse of its argument.
        self.inv = inv
def mtimes(value, n):
    """
    Apply monoidal or group operation n times in value.
    This function infers the monoid from value.

    Negative n requires a group (to invert), n == 0 requires a monoid (to
    produce the unit), and positive n only needs a semigroup.
    """
    if n < 0:
        algebra = group
    elif n == 0:
        algebra = monoid
    else:
        algebra = semigroup
    return algebra[type(value)].times(value, n)
def mconcat(*args):
    """
    Apply monoidal concat operation in arguments.
    This function infers the monoid from value, hence it requires at least
    one argument to operate.
    """
    # A single argument is interpreted as a sequence of values.
    if len(args) == 1:
        values = args[0]
    else:
        values = args
    return semigroup[type(values[0])](*values)
#
# Functor, applicative and monad
#
class ApplyMixin:
    """
    Shared behavior for ``apply`` and ``apply_flat``: variadic application
    with auto-currying and an optional wrap function.
    """
    # Underlying application function; set by the fn base class.
    _func: Callable
    def __init__(self, func, wrap=None, description=None):
        super().__init__(func)
        # Optional function that lifts a plain value into the functor.
        self._wrap = wrap
        self.__doc__ = description
    def __call__(self, fn, /, *args, **kwargs):
        if args:
            # Keywords are bound to fn before application.
            if kwargs:
                fn = partial(fn, **kwargs)
            return self._func(fn, *args)
        # No positional functor arguments: curry and wait for them.
        return partial(self, fn, **kwargs)
    def wrap(self, x):
        """
        Wrap value into the functor.
        """
        if self._wrap is None:
            raise ValueError("Functor does not implements a wrap function.")
        else:
            return self._wrap(x)
class apply(ApplyMixin, fn, metaclass=IndexedFunc):
    """
    A function that implements a functor application.
    In sidekick, single and applicative functors are implemented as a variadic
    function with the following signatures:
    * apply(f) - convert ``f(a) -> b`` to ``F[a] -> F[b]``
    * apply(f, xs) - apply f(x) to xs.
    * apply(f, xs, ys) - apply f(x, y) to xs and ys.
    * So on...
    """
    # Reuse the description property defined on semigroup.
    description = semigroup.description
    def from_binary(self, op):
        """
        Create a generic applicative from a function that can apply functions of
        one or two arguments.
        """
        # NOTE(review): not implemented; defined as an instance method but
        # presumably intended to be a classmethod like the other factories —
        # confirm before implementing.
        raise NotImplementedError
    @classmethod
    def with_wrap(cls, wrap, description=None):
        """
        Decorator that defines an apply with a given wrap function.
        """
        return lambda func: cls(func, wrap, description)
class apply_flat(ApplyMixin, fn, metaclass=IndexedFunc):
    """
    A function that implements monadic bind.
    In sidekick, single and applicative functors are implemented as a variadic
    function with the following signatures:
    * apply_flat(f) - convert ``f(a) -> F[b]`` to ``F[a] -> F[b]``
    * apply_flat(f, xs) - apply f(x) to xs, flattening results.
    * apply_flat(f, xs, ys) - apply f(x, y) to xs and ys, flattening results.
    * So on...
    """
    @classmethod
    def from_single(cls, rbind, wrap=None, description=None):
        """
        Build an n-ary bind from a single-argument bind ``rbind(f, m)``.
        """
        def apply_flat(f, *args):
            if len(args) == 1:
                return rbind(f, args[0])
            # Peel one monadic argument and recurse with f partially applied.
            xs, *args = args
            return rbind(lambda x: rbind(partial(f, x), *args), xs)
        return cls(apply_flat, wrap, description)
    def call_simple(self, f, *args, **kwargs):
        """
        Calls a simple function that returns a non-wrapped result.
        The result is lifted back into the monad via ``wrap``.
        """
        return self(lambda x: self.wrap(f(x, **kwargs)), *args)
    def flatten(self, m):
        """
        Flatten monad.
        """
        return self(lambda x: x, m)
| StarcoderdataPython |
3558408 | import unittest
import stabpoly.polynomials as polynomials
import numpy
from sympy import Poly
_EPSILON = 1e-10
class TestPolynomials(unittest.TestCase):
    """Unit tests for the polynomial helpers in ``stabpoly.polynomials``."""

    def test_product_polynomial(self):
        """product_polynomial of a 2x2 matrix matches the expanded quadratic form."""
        matrix = numpy.array([[2, 1], [1, 2]])
        polynomial = Poly(polynomials.product_polynomial(matrix))
        syms = polynomials.getvars(count=2)
        true_polynomial = Poly(2 * syms[0] * syms[0] + 5 * syms[0] * syms[1] + 2 * syms[1] * syms[1])
        self.assertEqual(polynomial, true_polynomial)

    def test_matching_polynomial(self):
        """Matching polynomial of the 4-cycle has the known coefficients."""
        matrix = numpy.array([[1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1]])
        polynomial = polynomials.matching_polynomial(matrix)[0]
        coeffs = polynomial.coeffs()
        coeffs_true = [1, -8, 20, -16, 2]
        mse = sum((x - y) ** 2 for x, y in zip(coeffs, coeffs_true))
        # BUG FIX: the third positional argument of assertAlmostEqual is
        # ``places`` and must be an int; passing 1e-10 there raises TypeError
        # whenever mse != 0. Use the ``delta`` keyword for a float tolerance.
        self.assertAlmostEqual(mse, 0, delta=_EPSILON)

    def test_uniform_polynomial_coefficients(self):
        m = 3
        d = 3
        # scaled up: [1, 3, 2, 2/9]
        coeffs_true = [27, 81, 54, 6]
        coeffs = polynomials.get_uniform_polynomial_coefficients(m, d)
        for c, c_true in zip(coeffs, coeffs_true):
            self.assertEqual(c, c_true)
# Allow running this test module directly with ``python``.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
8120090 | <reponame>jamshaidsohail5/stattests
from typing import Tuple, Dict, Optional, Set, List
import imageio
import numpy as np
import scipy.stats
import seaborn as sns
from IPython.core.display import HTML
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from stattests.data import rpv
from stattests.generation import generate_data
# Shared palette used for all test curves.
colors = sns.color_palette("deep")
# Maps an internal test codename to its (human-readable title, plot color).
codenames2titles = {
    'ttest_successes_count': ('T-test, clicks', colors[0]),
    'mannwhitney_successes_count': ('Mann-Whitney test, clicks', colors[1]),
    'delta': ('Delta-method, global CTR', colors[2]),
    'bootstrap': ('Bootstrap, global CTR', colors[3]),
    'permutation_test': ('Permutation, global CTR', colors[8]),
    'linearization': ('Linearization, clicks', colors[4]),
    'buckets_ctrs': ('Bucketization, global CTR', colors[5]),
    't_test_ctrs': ('T-test, user CTR', colors[6]),
    'weighted_bootstrap': ('Weighted bootstrap, global CTR', colors[7]),
    'weighted_linearization': ('Weighted linearization, global CTR', colors[4]),
    'weighted_buckets': ('Weighted bucketization, global CTR', colors[5]),
    'weighted_t_test_ctrs': ('Weighted t-test, user CTR', colors[6]),
    'weighted_sqr_bootstrap': ('Weighted sqr bootstrap, global CTR', colors[3]),
    'weighted_sqr_linearization': ('Weighted sqr linearization, global CTR', colors[4]),
    'weighted_sqr_buckets': ('Weighted sqr bucketization, global CTR', colors[5]),
    'weighted_sqr_t_test_ctrs': ('Weighted sqr t-test, user CTR', colors[6]),
    'ttest_smoothed': ('T-test smoothed user CTR', colors[0]),
    'binomial_test': ('Binomial z-test', colors[7]),
}
# Axis titles for the alternative-hypothesis and null-hypothesis CDF panels.
cdf_h1_title = 'Simulated p-value CDFs under H1 (Sensitivity)'
cdf_h0_title = 'Simulated p-value CDFs under H0 (FPR)'
def save_gif_and_show(path: str, frames: List[np.ndarray]):
    """Save *frames* as a forward-then-backward ("bounce") GIF and embed it inline."""
    bounce_frames = frames + frames[::-1]
    imageio.mimsave(path, bounce_frames, fps=2, format='GIF-PIL')
    return HTML(f'<img src="{path}" width="1000px">')
def plot_cdf(data: np.ndarray, label: str, ax: Axes, color: str = colors[0], linewidth: float = 3):
    """Plot the empirical CDF of *data* on *ax*, extending the curve to the point (1, 1)."""
    xs = np.sort(data)
    ranks = scipy.stats.rankdata(xs, method='ordinal')
    ys = ranks / data.shape[0]
    # Pad so every CDF curve ends at x = 1, y = 1.
    xs = np.hstack((xs, 1))
    ys = np.hstack((ys, 1))
    return ax.plot(xs, ys, color=color, linestyle='solid', label=label, linewidth=linewidth)
def plot_summary(dict2plot: Dict[str, Tuple[np.ndarray, np.ndarray, str]],
                 views_0: np.ndarray,
                 ground_truth_success_rates: np.ndarray):
    """
    Build the 3x3 summary figure: H1/H0 p-value CDFs, test powers, and the
    views / ground-truth-CTR distributions.

    dict2plot maps a test title to (p-values under H1, p-values under H0, color).
    """
    fig = plt.figure(constrained_layout=False, figsize=(4 * 3, 3 * 3), dpi=100)
    gs = fig.add_gridspec(3, 3)
    # Large H1 panel on the left; smaller diagnostic panels on the right/bottom.
    ax_h1 = fig.add_subplot(gs[:2, :2])
    ax_h0 = fig.add_subplot(gs[0, 2])
    ax_views = fig.add_subplot(gs[1, 2])
    ax_clicks = fig.add_subplot(gs[2, 2])
    ax_powers = fig.add_subplot(gs[2, :2])
    fig.subplots_adjust(left=0.2, wspace=0.3, hspace=0.4)
    # Faint diagonal: the CDF of a uniform p-value distribution (ideal H0).
    ax_h1.plot(np.linspace(0, 1, 10000), np.linspace(0, 1, 10000), 'k', alpha=0.1)
    ax_h0.plot(np.linspace(0, 1, 10000), np.linspace(0, 1, 10000), 'k', alpha=0.1)
    # ax_h1.set_xlabel('p-value')
    # ax_h0.set_xlabel('p-value')
    ax_h1.set_title(cdf_h1_title)
    ax_h0.set_title(cdf_h0_title)
    # ax_h1.set_ylabel('Sensitivity')
    # ax_h0.set_ylabel('FPR')
    # Mark the conventional alpha = 0.05 threshold.
    ax_h1.axvline(0.05, color='k', alpha=0.5)
    # ax_h1.set_xticks(list(ax_h1.get_xticks()) + [0.05])
    for title, (ab_pvals, aa_pvals, color) in dict2plot.items():
        plot_cdf(ab_pvals, title, ax_h1, color, linewidth=3)
        plot_cdf(aa_pvals, title, ax_h0, color, linewidth=1.5)
    ax_powers.set_title('Test Power')
    tests_powers = []
    tests_labels = []
    tests_colours = []
    for title, (ab_pvals, _, color) in dict2plot.items():
        tests_labels.append(title)
        tests_colours.append(color)
        # Power = fraction of H1 p-values below alpha = 0.05.
        tests_powers.append(np.mean(ab_pvals < 0.05))
    ax_powers.barh(np.array(tests_labels), np.array(tests_powers), color=np.array(tests_colours))
    sns.distplot(views_0.ravel(),
                 bins=range(0, 20),
                 ax=ax_views,
                 kde=False,
                 norm_hist=True)
    ax_views.set_xlim((0, 20))
    views_99_percentile = np.percentile(views_0.ravel(), 99)
    ax_views.set_title(f'Views, 99%-ile = {views_99_percentile:<7.1f}')
    sns.distplot(ground_truth_success_rates.ravel(),
                 bins=np.linspace(0, 0.2, 100),
                 ax=ax_clicks,
                 kde=False,
                 norm_hist=True)
    ax_clicks.set_xlim((0, 0.1))
    success_rate_std = ground_truth_success_rates[:10].flatten().std()
    ax_clicks.set_title(f'Ground truth user CTR, std = {success_rate_std:2.3f}')
    return fig
def plot_from_params(data_dir: str,
                     params: Dict,
                     codenames: Optional[Set[str]] = None):
    """
    Load stored p-values for the given simulation parameters and build the
    summary figure. If *codenames* is given, only those tests are plotted.
    """
    gen_params = dict(params)
    # Only a small sample (NN=10) is needed for the diagnostic histograms.
    gen_params['NN'] = 10
    (views_0, _), _, ground_truth_success_rates = generate_data(**gen_params)
    required_codenames2titles = {}
    if codenames is not None:
        for k, v in codenames2titles.items():
            if k in codenames:
                required_codenames2titles[k] = v
    else:
        required_codenames2titles.update(codenames2titles)
    dict2plot = {}
    for codename, (title, color) in required_codenames2titles.items():
        # rpv returns the stored (H1, H0) p-value arrays for this test.
        ab_data, aa_data = rpv(data_dir, codename, **params)
        dict2plot[title] = (ab_data, aa_data, color)
    fig = plot_summary(dict2plot, views_0, ground_truth_success_rates)
    return fig
def frame_from_params(data_dir: str,
                      param: Dict,
                      codenames: Optional[Set[str]] = None):
    """
    Render the summary figure for *param* into an RGB numpy array
    (height, width, 3) suitable for GIF frames.
    """
    fig = plot_from_params(data_dir, param, codenames)
    fig.canvas.draw()  # draw the canvas, cache the renderer
    # NOTE(review): tostring_rgb is deprecated in recent matplotlib releases —
    # confirm the pinned matplotlib version still provides it.
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
    image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # Close the figure to avoid accumulating open figures across frames.
    plt.close(fig)
    return image
| StarcoderdataPython |
8078936 | import tensorflow as tf
# Variance-scaling truncated-normal initializer with configurable scale.
var_init = lambda x: tf.variance_scaling_initializer(scale=x, distribution='truncated_normal')
# Initializer scale shared by all cells in this module.
scale = 0.1
class NACCell(object):
    """
    Neural Accumulator cell: y = x @ (tanh(W) * sigmoid(M)), which biases the
    effective weights towards {-1, 0, 1}.
    """
    def __init__(self, in_dim, out_dim):
        # TF1 graph mode: variables are created under a named scope.
        with tf.variable_scope('nac-cell') as vs:
            self.w = tf.get_variable(name = 'w', shape = [in_dim, out_dim], initializer=var_init(scale))
            self.m = tf.get_variable(name = 'm', shape = [in_dim, out_dim], initializer=var_init(scale))
        self.var_list = [self.w, self.m]
    def __call__(self, x):
        # Element-wise gate pushes weights towards -1, 0 or 1.
        w = tf.multiply(tf.tanh(self.w), tf.sigmoid(self.m))
        y = tf.matmul(x, w)
        return y
class NAC(object):
    """Stack of ``n_stacks`` NACCells applied sequentially."""
    def __init__(self, in_dim=2, out_dim=1, hidden_dim=2, n_stacks=2):
        self.layers = []
        self.var_list = []
        for i in range(n_stacks):
            with tf.variable_scope('nac_' + str(i)) as vs:
                # First layer consumes in_dim; last produces out_dim; the rest
                # use hidden_dim on both sides.
                self.layers.append(
                    NACCell(
                        in_dim if i == 0 else hidden_dim,
                        out_dim if i == n_stacks-1 else hidden_dim
                    )
                )
        # Collect all trainable variables for external optimizers/savers.
        for layer in self.layers:
            self.var_list += layer.var_list
    def __call__(self, x):
        y = x
        for layer in self.layers:
            y = layer(y)
        return y
class NALUCell(object):
    """
    Neural Arithmetic Logic Unit cell: gated mix of an additive NAC path and a
    multiplicative (log-space) NAC path.
    """
    def __init__(self, in_dim, out_dim, eps=1e-5):
        with tf.variable_scope("nalu-cell-add") as vs:
            self.add_cell = NACCell(in_dim, out_dim)
        with tf.variable_scope("nalu-cell-mul") as vs:
            self.mul_cell = NACCell(in_dim, out_dim)
        with tf.variable_scope("nalu-cell-g") as vs:
            self.G = tf.get_variable('g', shape = [in_dim, out_dim], initializer=var_init(scale))
        # eps keeps log(|x| + eps) finite when x is zero.
        self.eps = eps
        self.var_list = self.add_cell.var_list + self.mul_cell.var_list + [self.G]
    def __call__(self, x):
        a = self.add_cell(x)
        # Multiplicative path: exp(NAC(log|x|)) realizes products/quotients.
        m = self.mul_cell(tf.log(tf.abs(x) + self.eps))
        m = tf.exp(m)
        # Learned sigmoid gate interpolates between the two paths.
        g = tf.sigmoid(tf.matmul(x, self.G))
        y = tf.multiply(g, a) + tf.multiply(1-g, m)
        return y
class NALU(object):
    """Stack of ``n_stacks`` NALUCells applied sequentially."""
    def __init__(self, in_dim, out_dim, hidden_dim, n_stacks, eps):
        self.layers = []
        self.var_list = []
        for i in range(n_stacks):
            with tf.variable_scope('nalu_' + str(i)) as vs:
                # First layer consumes in_dim; last produces out_dim; the rest
                # use hidden_dim on both sides.
                self.layers.append(
                    NALUCell(
                        in_dim if i == 0 else hidden_dim,
                        out_dim if i == n_stacks-1 else hidden_dim,
                        # BUG FIX: eps was accepted by the constructor but never
                        # forwarded, so NALUCell always used its 1e-5 default.
                        eps
                    )
                )
        # Collect all trainable variables for external optimizers/savers.
        for layer in self.layers:
            self.var_list += layer.var_list
    def __call__(self, x):
        y = x
        for layer in self.layers:
            y = layer(y)
        return y
| StarcoderdataPython |
244480 | <reponame>fidsusj/HateSpeechDetection
""" Module runs all classifiers in this directory and returns a dataframe with performance metrices """
import multiprocessing
from datetime import datetime
from multiprocessing import Pool
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from classifiers.hyperparameters import hyperparameter_search_space
from classifiers.lstm import LSTMClassifier
from imblearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
f1_score,
precision_score,
recall_score,
)
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
class ClassifierExecutor:
    """
    Runs every configured classifier (classical ML and neural-network) over
    every sampling variant of the datasets, in parallel, and collects one
    evaluation row per (classifier, dataset variant) pair in ``self.results``.
    """
    def __init__(self, datasets):
        # (name, estimator) pairs for the scikit-learn classifiers.
        classical_ml_methods = [
            ["random_forest", RandomForestClassifier()],
            ["decision_tree", DecisionTreeClassifier()],
            ["svm", SVC()],
            ["logistic_regression", LogisticRegression()],
        ]
        neural_network_methods = [["lstm", LSTMClassifier()]]
        # Classical methods run on all three sampling variants; the LSTM skips
        # the oversampled variant.
        classical_ml_methods_run_params = self._create_run_parameters(
            classical_ml_methods, ["unchanged", "undersampled", "oversampled"], datasets
        )
        neural_network_methods_run_params = self._create_run_parameters(
            neural_network_methods, ["unchanged", "undersampled"], datasets
        )
        start_time = datetime.now()
        # Fan out each (classifier, variant) run to its own worker process.
        with Pool(multiprocessing.cpu_count()) as p:
            neural_network_methods_results = pd.concat(
                p.starmap(self.run_nn, neural_network_methods_run_params)
            )
            classical_ml_methods_results = pd.concat(
                p.starmap(self.run_classical_ml, classical_ml_methods_run_params)
            )
        self.results = pd.concat(
            [neural_network_methods_results, classical_ml_methods_results]
        )
        print(
            "Started computation: {}; Ended computation: {}".format(
                start_time, datetime.now()
            )
        )
        print("Results: \n{}".format(self.results))
    def _create_run_parameters(self, classifier_list, dataset_type_list, datasets):
        """Build the (classifier, dataset_type, datasets) argument tuples for starmap."""
        run_parameters = []
        for dataset_type in dataset_type_list:
            for classifier in classifier_list:
                run_parameters.append((classifier, dataset_type, datasets))
        return run_parameters
    def get_results(self):
        """ Getter for the results """
        return self.results
    def run_classical_ml(self, classifier, dataset_type, datasets):
        """Runs passed classifier (classical ML methods) with passed dataset
        Parameters:
            classifier: list containing [classifier_name, classifier_class]
            dataset_type: string ("unchanged"|"undersampled"|"oversampled")
            datasets: dict with datasets (as in InputData)
        Return:
            df_evaluation_results: dataframe with the evaluation results
        """
        X_train, y_train, X_test, y_test = self._extract_train_and_test(
            datasets, "extracted_datasets", dataset_type
        )
        classifier_name = classifier[0]
        classifier_class = classifier[1]
        print("OPTIMIZE {}".format(classifier_name))
        classifier_pipeline = Pipeline([("classifier", classifier_class)])
        # Randomized hyperparameter search with 5-fold cross-validation.
        gridsearch = RandomizedSearchCV(
            classifier_pipeline,
            hyperparameter_search_space[classifier_name],
            cv=5,
            verbose=0,
            n_jobs=-1,
        )
        gridsearch.fit(X_train, y_train)
        best_model = gridsearch.best_estimator_
        classifier_evaluation = self._evaluate_classifier_on_test_set(
            best_model, dataset_type, X_test, y_test, classifier_name
        )
        df_evaluation_results = pd.DataFrame(
            data=[classifier_evaluation],
            columns=["classifier", "dataset", "precision", "recall", "accuracy", "f1"],
        )
        return df_evaluation_results
    def run_nn(self, classifier, dataset_type, datasets):
        """Runs passed classifier (nn approach) with passed dataset
        Parameters:
            classifier: list containing [classifier_name, classifier_class]
            dataset_type: string ("unchanged"|"undersampled")
            datasets: dict with datasets (as in InputData)
        Return:
            df_evaluation_results: dataframe with the evaluation results
        """
        X_train, y_train, X_test, y_test = self._extract_train_and_test(
            datasets, "raw_datasets", dataset_type
        )
        classifier_name = classifier[0]
        classifier_class = classifier[1]
        # Hold out 20% of the training data as a validation set for the NN.
        X_train, X_val, y_train, y_val = train_test_split(
            X_train,
            y_train,
            test_size=0.2,
            random_state=42,
        )
        classifier_class.fit(X_train, y_train, X_val, y_val)
        classifier_evaluation = self._evaluate_classifier_on_test_set(
            classifier_class, dataset_type, X_test, y_test, classifier_name
        )
        df_evaluation_results = pd.DataFrame(
            data=[classifier_evaluation],
            columns=["classifier", "dataset", "precision", "recall", "accuracy", "f1"],
        )
        return df_evaluation_results
    def _extract_train_and_test(self, datasets, which_dataset, dataset_type):
        """Pull train/test splits for a dataset family and sampling variant."""
        X_train = datasets[which_dataset][dataset_type]["X_train"]
        y_train = datasets[which_dataset][dataset_type]["y_train"]
        X_test = datasets[which_dataset][dataset_type]["X_test"]
        y_test = datasets[which_dataset][dataset_type]["y_test"]
        return X_train, y_train, X_test, y_test
    def _evaluate_classifier_on_test_set(
        self, best_model, dataset_type, X_test, y_test, classifier_name
    ):
        """Evaluates model on test set
        Parameters:
            best_model: model to be tested
            dataset_type: sampling variant the model was trained on
            X_test: test dataset features
            y_test: test dataset labels
            classifier_name: string of the classifier name
        Return:
            array of [classifier_name, dataset_type, precision, recall, accuracy, f1]
        """
        y_predicted = best_model.predict(X_test)
        precision, recall, accuracy, f1 = self._calculate_performance_metrices(
            y_test, y_predicted, classifier_name
        )
        return [classifier_name, dataset_type, precision, recall, accuracy, f1]
    def _calculate_performance_metrices(self, y, y_hat, classifier_name):
        """Calculates performance metrics of the model
        Parameters:
            y: test dataset labels
            y_hat: predicted labels
            classifier_name: string of the classifier name
        Return:
            precision, recall, accuracy, f1
        """
        cm = confusion_matrix(y, y_hat)
        precision = precision_score(y, y_hat)
        recall = recall_score(y, y_hat)
        accuracy = accuracy_score(y, y_hat)
        f1 = f1_score(y, y_hat)
        # Draw (but do not display) the confusion matrix for this classifier.
        plt.figure()
        sns.heatmap(cm, cmap="PuBu", annot=True, fmt="g", annot_kws={"size": 20})
        plt.xlabel("predicted", fontsize=18)
        plt.ylabel("actual", fontsize=18)
        title = "Confusion Matrix for " + classifier_name
        plt.title(title, fontsize=18)
        # plt.show()
        return precision, recall, accuracy, f1
| StarcoderdataPython |
1854969 | <reponame>yishayv/lyacorr
import numpy as np
from scipy import signal
class MeanTransmittance:
    """
    Accumulates weighted flux per redshift bin to estimate mean transmittance.

    Bins are defined by the ``ar_z`` grid; ``add_flux_pre_binned`` folds new
    pre-binned spectra into the running totals, and the ``get_*`` methods
    derive weighted means (optionally low-pass filtered).
    """

    def __init__(self, ar_z):
        self.ar_z = np.copy(ar_z)
        # Running totals, one entry per redshift bin.
        self.ar_total_flux = np.zeros_like(self.ar_z)
        self.ar_count = np.zeros_like(self.ar_z)
        self.ar_weights = np.zeros_like(self.ar_z)

    def add_flux_pre_binned(self, ar_flux, ar_mask, ar_weights):
        """Accumulate one pre-binned spectrum; only bins in *ar_mask* are touched."""
        self.ar_total_flux[ar_mask] += ar_flux[ar_mask] * ar_weights[ar_mask]
        self.ar_count[ar_mask] += 1
        self.ar_weights[ar_mask] += ar_weights[ar_mask]

    def merge(self, mean_flux2):
        """
        :type mean_flux2: MeanTransmittance
        """
        self.ar_total_flux += mean_flux2.ar_total_flux
        self.ar_count += mean_flux2.ar_count
        self.ar_weights += mean_flux2.ar_weights

    def get_weighted_mean(self):
        """Weighted mean flux per bin; bins with zero weight become NaN."""
        safe_weights = np.copy(self.ar_weights)
        safe_weights[self.ar_weights == 0] = np.nan
        return self.ar_total_flux / safe_weights

    def get_weighted_mean_with_minimum_count(self, minimum_count):
        """Return (z values, weighted means) restricted to well-sampled bins."""
        ar_z = self.get_z_with_minimum_count(minimum_count)
        mean = self.get_weighted_mean()[self.ar_count >= minimum_count]
        return ar_z, mean

    def get_z_with_minimum_count(self, n):
        """Redshift values of bins with at least *n* contributing spectra."""
        return self.ar_z[self.ar_count >= n]

    def get_low_pass_mean(self, minimum_count=1):
        """Weighted mean smoothed with a zero-phase 3rd-order Butterworth filter."""
        assert minimum_count > 0
        ar_z, mean = self.get_weighted_mean_with_minimum_count(minimum_count)
        # noinspection PyTupleAssignmentBalance,PyTypeChecker
        b, a = signal.butter(N=3, Wn=0.05, analog=False)
        return ar_z, signal.filtfilt(b=b, a=a, x=mean)

    def as_np_array(self):
        """Pack the accumulator state into a single (4, n) array."""
        rows = (self.ar_z, self.ar_total_flux, self.ar_count, self.ar_weights)
        return np.vstack(rows)

    # noinspection PyMethodMayBeStatic
    def as_object(self):
        """
        Return data that cannot be easily represented in an array.
        """
        pass

    @classmethod
    def from_np_array(cls, np_array):
        """Rebuild an accumulator from the rows produced by ``as_np_array``."""
        new_obj = cls(np.empty(1))
        new_obj.ar_z, new_obj.ar_total_flux, new_obj.ar_count, new_obj.ar_weights = (
            np_array[0], np_array[1], np_array[2], np_array[3])
        return new_obj

    def save(self, filename):
        np.save(filename, self.as_np_array())

    @classmethod
    def load(cls, filename):
        return cls.from_np_array(np.load(filename))

    @classmethod
    def from_file(cls, filename):
        """
        :rtype : MeanTransmittance
        """
        return cls.load(filename)
| StarcoderdataPython |
9672087 | """Multiplication."""
from __future__ import absolute_import, print_function
import nengo
from nengo.dists import Choice
from nengo.processes import Piecewise
import matplotlib.pyplot as plt
# model
# Build a network that multiplies two scalar signals via a 2D "combined"
# ensemble and a decoded product function.
model = nengo.Network(label="Multiplication")
with model:
    # Input ensembles; radius 10 covers the expected input range.
    A = nengo.Ensemble(100, dimensions=1, radius=10)
    B = nengo.Ensemble(100, dimensions=1, radius=10)
    combined = nengo.Ensemble(
        220, dimensions=2, radius=15)
    # Product magnitudes can reach 10 * 10 / sqrt(2) scale; radius 20 is safe.
    prod = nengo.Ensemble(100, dimensions=1, radius=20)
    # Diagonal encoders improve the representation of products.
    combined.encoders = Choice([[1, 1], [-1, 1], [1, -1], [-1, -1]])
with model:
    # Piecewise-constant test inputs and the analytically expected product.
    input_A = nengo.Node(Piecewise({0: 0, 2.5: 10, 4: -10}))
    input_B = nengo.Node(Piecewise({0: 10, 1.5: 2, 3: 0, 4.5: 2}))
    correct_ans = Piecewise({0: 0, 1.5: 0, 2.5: 20, 3: 0, 4: 0, 4.5: -20})
with model:
    nengo.Connection(input_A, A)
    nengo.Connection(input_B, B)
    # Stack A and B into the 2D combined ensemble.
    nengo.Connection(A, combined[0])
    nengo.Connection(B, combined[1])
    def product(x):
        # Decoded function computed on the connection out of `combined`.
        return x[0] * x[1]
    nengo.Connection(combined, prod, function=product)
with model:
    # Probes record the decoded signals (10 ms synaptic filtering).
    inputA_probe = nengo.Probe(input_A)
    inputB_probe = nengo.Probe(input_B)
    A_probe = nengo.Probe(A, synapse=0.01)
    B_probe = nengo.Probe(B, synapse=0.01)
    combined_probe = nengo.Probe(combined, synapse=0.01)
    prod_probe = nengo.Probe(prod, synapse=0.01)
with nengo.Simulator(model) as sim:
    sim.run(5)
# Compare the decoded product against the analytic answer.
plt.figure()
plt.plot(sim.trange(), sim.data[A_probe], label="Decoded A")
plt.plot(sim.trange(), sim.data[B_probe], label="Decoded B")
plt.plot(sim.trange(), sim.data[prod_probe], label="Decoded product")
plt.plot(sim.trange(), correct_ans.run(sim.time, dt=sim.dt),
         c="k", label="Actual product")
plt.legend(loc="best")
plt.show()
| StarcoderdataPython |
345658 | # Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for utils_tf.py in Tensorflow 2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from graph_nets import graphs
from graph_nets import utils_np
from graph_nets import utils_tf
from graph_nets.tests_tf2 import test_utils
import networkx as nx
import numpy as np
from six.moves import range
import tensorflow as tf
import tree
class RepeatTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for `repeat`."""
    @parameterized.named_parameters(
        ("base", (3,), [2, 3, 4], 0),
        ("empty_group_first", (3,), [0, 3, 4], 0),
        ("empty_group_middle", (3,), [2, 0, 4], 0),
        ("double_empty_group_middle", (4,), [2, 0, 0, 4], 0),
        ("empty_group_last", (3,), [2, 3, 0], 0),
        ("just_one_group", (1,), [2], 0),
        ("zero_groups", (0,), [], 0),
        ("axis 0", (2, 3, 4), [2, 3], 0),
        ("axis 1", (3, 2, 4), [2, 3], 1),
        ("axis 2", (4, 3, 2), [2, 3], 2),
        ("zero_groups_with_shape", (2, 0, 4), [], 1),
    )
    def test_repeat(self, shape, repeats, axis):
        """utils_tf.repeat matches np.repeat for each (shape, repeats, axis)."""
        num_elements = np.prod(shape)
        t = np.arange(num_elements).reshape(*shape)
        # np.repeat provides the reference result.
        expected = np.repeat(t, repeats, axis=axis)
        tensor = tf.constant(t)
        repeats = tf.constant(repeats, dtype=tf.int32)
        actual = utils_tf.repeat(tensor, repeats, axis=axis)
        self.assertAllEqual(expected, actual)
    @parameterized.named_parameters(("default", "custom_name", None),
                                    ("custom", None, "repeat"))
    def test_name_scope(self, name, expected_name):
        # Skipped in TF2: relies on graph-mode get_default_graph.
        self.skipTest("Uses get_default_graph.")
        kwargs = {"name": name} if name else {}
        expected_name = expected_name if expected_name else name
        t = tf.zeros([3, 2, 4])
        indices = tf.constant([2, 3])
        with test_utils.assert_new_op_prefixes(self, expected_name + "/"):
            utils_tf.repeat(t, indices, axis=1, **kwargs)
def _generate_graph(batch_index, n_nodes=4, add_edges=True):
    """Build a small test digraph whose node/edge/global features encode
    their ids together with *batch_index*."""
    graph = nx.DiGraph()
    for node_id in range(n_nodes):
        graph.add_node(
            node_id,
            features=np.array([node_id, batch_index], dtype=np.float32),
        )
    if add_edges:
        # Fixed topology: 1->0, 2->0, 3->1 (edges to missing nodes are skipped).
        for edge_id, (receiver, sender) in enumerate(zip([0, 0, 1], [1, 2, 3])):
            if sender < n_nodes and receiver < n_nodes:
                graph.add_edge(
                    sender,
                    receiver,
                    features=np.array(
                        [edge_id, edge_id + 1, batch_index], dtype=np.float64),
                    index=edge_id,
                )
    graph.graph["features"] = np.array([batch_index], dtype=np.float32)
    return graph
class ConcatTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for `concat`, along various axis."""
    @parameterized.named_parameters(
        ("no nones", []), ("stateless graph", ["nodes", "edges", "globals"]),
        ("no edges", ["edges", "receivers", "senders"]))
    def test_concat_first_axis(self, none_fields):
        """Concatenating along axis 0 stacks graphs and offsets edge indices."""
        graph_0 = utils_np.networkxs_to_graphs_tuple(
            [_generate_graph(0, 3), _generate_graph(1, 2)])
        graph_1 = utils_np.networkxs_to_graphs_tuple([_generate_graph(2, 2)])
        graph_2 = utils_np.networkxs_to_graphs_tuple([_generate_graph(3, 3)])
        graphs_ = [
            gr.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
            for gr in [graph_0, graph_1, graph_2]
        ]
        # Null out the requested fields to test partial graphs.
        graphs_ = [gr.map(lambda _: None, none_fields) for gr in graphs_]
        concat_graph = utils_tf.concat(graphs_, axis=0)
        for none_field in none_fields:
            self.assertEqual(None, getattr(concat_graph, none_field))
        concat_graph = concat_graph.map(tf.no_op, none_fields)
        if "nodes" not in none_fields:
            # Node features are [node_id, batch_index]; check both columns.
            self.assertAllEqual(
                np.array([0, 1, 2, 0, 1, 0, 1, 0, 1, 2]),
                [x[0] for x in concat_graph.nodes])
            self.assertAllEqual(
                np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3]),
                [x[1] for x in concat_graph.nodes])
        if "edges" not in none_fields:
            self.assertAllEqual(
                np.array([0, 1, 0, 0, 0, 1]), [x[0] for x in concat_graph.edges])
            self.assertAllEqual(
                np.array([0, 0, 1, 2, 3, 3]), [x[2] for x in concat_graph.edges])
        self.assertAllEqual(np.array([3, 2, 2, 3]), concat_graph.n_node)
        self.assertAllEqual(np.array([2, 1, 1, 2]), concat_graph.n_edge)
        if "senders" not in none_fields:
            # [1, 2], [1], [1], [1, 2] and 3, 2, 2, 3 nodes
            # So we are summing [1, 2, 1, 1, 2] with [0, 0, 3, 5, 7, 7]
            self.assertAllEqual(np.array([1, 2, 4, 6, 8, 9]), concat_graph.senders)
        if "receivers" not in none_fields:
            # [0, 0], [0], [0], [0, 0] and 3, 2, 2, 3 nodes
            # So we are summing [0, 0, 0, 0, 0, 0] with [0, 0, 3, 5, 7, 7]
            self.assertAllEqual(np.array([0, 0, 3, 5, 7, 7]), concat_graph.receivers)
        if "globals" not in none_fields:
            self.assertAllEqual(np.array([[0], [1], [2], [3]]), concat_graph.globals)
    def test_concat_last_axis(self):
        """Concatenating along axis -1 joins per-element feature vectors."""
        graph0 = utils_np.networkxs_to_graphs_tuple(
            [_generate_graph(0, 3), _generate_graph(1, 2)])
        graph1 = utils_np.networkxs_to_graphs_tuple(
            [_generate_graph(2, 3), _generate_graph(3, 2)])
        graph0 = graph0.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
        graph1 = graph1.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
        concat_graph = utils_tf.concat([graph0, graph1], axis=-1)
        self.assertAllEqual(
            np.array([[0, 0, 0, 2], [1, 0, 1, 2], [2, 0, 2, 2], [0, 1, 0, 3],
                      [1, 1, 1, 3]]), concat_graph.nodes)
        self.assertAllEqual(
            np.array([[0, 1, 0, 0, 1, 2], [1, 2, 0, 1, 2, 2], [0, 1, 1, 0, 1, 3]]),
            concat_graph.edges)
        # Structure (counts, senders, receivers) is unchanged by axis=-1 concat.
        self.assertAllEqual(np.array([3, 2]), concat_graph.n_node)
        self.assertAllEqual(np.array([2, 1]), concat_graph.n_edge)
        self.assertAllEqual(np.array([1, 2, 4]), concat_graph.senders)
        self.assertAllEqual(np.array([0, 0, 3]), concat_graph.receivers)
        self.assertAllEqual(np.array([[0, 2], [1, 3]]), concat_graph.globals)
class StopGradientsGraphTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `utils_tf.stop_gradient` on the fields of a `GraphsTuple`."""

  def setUp(self):
    super(StopGradientsGraphTest, self).setUp()
    # A single-graph GraphsTuple whose nodes/edges/globals are float tensors,
    # so gradients can flow through them.
    self._graph = utils_tf.data_dicts_to_graphs_tuple([{
        "senders": tf.zeros([10], dtype=tf.int32),
        "receivers": tf.zeros([10], dtype=tf.int32),
        "nodes": tf.ones([5, 7]),
        "edges": tf.zeros([10, 6]),
        "globals": tf.zeros([1, 8])
    }])

  def _check_if_gradients_exist(self, stopped_gradients_graph):
    """Returns, for globals/nodes/edges, whether a gradient exists.

    Args:
      stopped_gradients_graph: output of `utils_tf.stop_gradient` applied to
        `self._graph`.

    Returns:
      A list of three booleans, ordered as [globals, nodes, edges], each True
      iff a gradient flows back from the stopped graph to `self._graph`.
    """
    gradients = []
    for field in ["globals", "nodes", "edges"]:
      with tf.GradientTape() as tape:
        xs = getattr(self._graph, field)
        ys = getattr(stopped_gradients_graph, field)
        gradient = tape.gradient(ys, xs) if ys is not None else ys
      gradients.append(gradient)
    # A gradient of `None` means the gradient was stopped for that field.
    return [grad is not None for grad in gradients]

  @parameterized.named_parameters(
      ("stop_all_fields", True, True, True),
      ("stop_globals", True, False, False), ("stop_nodes", False, True, False),
      ("stop_edges", False, False, True), ("stop_none", False, False, False))
  def test_stop_gradients_outputs(self, stop_globals, stop_nodes, stop_edges):
    """Gradients exist exactly for the fields that were not stopped."""
    stopped_gradients_graph = utils_tf.stop_gradient(
        self._graph,
        stop_globals=stop_globals,
        stop_nodes=stop_nodes,
        stop_edges=stop_edges)
    gradients_exist = self._check_if_gradients_exist(stopped_gradients_graph)
    expected_gradients_exist = [
        not stop_globals, not stop_nodes, not stop_edges
    ]
    self.assertAllEqual(expected_gradients_exist, gradients_exist)

  @parameterized.named_parameters(("no_nodes", "nodes"), ("no_edges", "edges"),
                                  ("no_globals", "globals"))
  def test_stop_gradients_with_missing_field_raises(self, none_field):
    """Stopping gradients on a graph missing a field raises a ValueError."""
    self._graph = self._graph.map(lambda _: None, [none_field])
    with self.assertRaisesRegexp(ValueError, none_field):
      utils_tf.stop_gradient(self._graph)

  def test_stop_gradients_default_params(self):
    """Tests for the default params of `utils_tf.stop_gradient`."""
    # By default all three fields should have their gradients stopped.
    stopped_gradients_graph = utils_tf.stop_gradient(self._graph)
    gradients_exist = self._check_if_gradients_exist(stopped_gradients_graph)
    expected_gradients_exist = [False, False, False]
    self.assertAllEqual(expected_gradients_exist, gradients_exist)
class IdentityTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the `identity` method."""

  def setUp(self):
    super(IdentityTest, self).setUp()
    # A single random graph with 5 nodes and 10 edges.
    self._graph = utils_tf.data_dicts_to_graphs_tuple([{
        "senders": tf.random.uniform([10], maxval=10, dtype=tf.int32),
        "receivers": tf.random.uniform([10], maxval=10, dtype=tf.int32),
        "nodes": tf.random.uniform([5, 7]),
        "edges": tf.random.uniform([10, 6]),
        "globals": tf.random.uniform([1, 8])
    }])

  def test_name_scope(self):
    """Tests that the name scope are correctly pushed through this function."""
    self.skipTest("Tensor.name is meaningless when eager execution is enabled")

  @parameterized.named_parameters(
      ("all fields defined", []), ("no node features", ["nodes"]),
      ("no edge features", ["edges"]), ("no global features", ["globals"]),
      ("no edges", ["edges", "receivers", "senders"]))
  def test_output(self, none_fields):
    """Tests that this function produces the identity."""
    graph = self._graph.map(lambda _: None, none_fields)
    with tf.name_scope("test"):
      graph_id = utils_tf.identity(graph)
    expected_out = utils_tf.nest_to_numpy(graph)
    actual_out = utils_tf.nest_to_numpy(graph_id)
    # `None` fields must stay `None`; every other field must be numerically
    # unchanged.
    for field in [
        "nodes", "edges", "globals", "receivers", "senders", "n_node", "n_edge"
    ]:
      if field in none_fields:
        self.assertEqual(None, getattr(actual_out, field))
      else:
        self.assertNDArrayNear(
            getattr(expected_out, field), getattr(actual_out, field), err=1e-4)
class RunGraphWithNoneTest(tf.test.TestCase, parameterized.TestCase):
  """Checks `GraphsTuple`s with `None` fields keep their remaining fields."""

  def setUp(self):
    super(RunGraphWithNoneTest, self).setUp()
    data_dict = {
        "senders": tf.random.uniform([10], maxval=10, dtype=tf.int32),
        "receivers": tf.random.uniform([10], maxval=10, dtype=tf.int32),
        "nodes": tf.random.uniform([5, 7]),
        "edges": tf.random.uniform([10, 6]),
        "globals": tf.random.uniform([1, 8])
    }
    self._graph = utils_tf.data_dicts_to_graphs_tuple([data_dict])

  @parameterized.named_parameters(
      ("all fields defined", []), ("no node features", ["nodes"]),
      ("no edge features", ["edges"]), ("no global features", ["globals"]),
      ("no edges", ["edges", "receivers", "senders"]))
  def test_output(self, none_fields):
    """Tests that this function produces the identity."""
    with_nones = self._graph.map(lambda _: None, none_fields)
    reference = with_nones.map(tf.no_op, none_fields)
    all_fields = (
        "nodes", "edges", "globals", "receivers", "senders", "n_node", "n_edge")
    for field_name in all_fields:
      if field_name in none_fields:
        # Blanked-out fields must be exactly `None`.
        self.assertEqual(None, getattr(with_nones, field_name))
        continue
      # Remaining fields must be numerically identical to the reference.
      self.assertNDArrayNear(
          getattr(reference, field_name),
          getattr(with_nones, field_name), err=1e-4)
class ComputeOffsetTest(tf.test.TestCase):
  """Tests for the `compute_stacked_offsets` method."""

  def setUp(self):
    super(ComputeOffsetTest, self).setUp()
    self.sizes = [5, 4, 3, 1, 2, 0, 3, 0, 4, 7]
    self.repeats = [2, 2, 0, 2, 1, 3, 2, 0, 3, 2]
    # Expected result: cumulative sizes, each repeated `repeats[i]` times.
    self.offset = [
        0, 0, 5, 5, 12, 12, 13, 15, 15, 15, 15, 15, 18, 18, 18, 22, 22
    ]

  def test_compute_stacked_offsets(self):
    # The helper must accept plain lists, numpy arrays and tf tensors alike
    # and produce the same offsets for each input form.
    input_variants = [
        (self.sizes, self.repeats),
        (np.array(self.sizes), np.array(self.repeats)),
        (tf.constant(self.sizes, dtype=tf.int32),
         tf.constant(self.repeats, dtype=tf.int32)),
    ]
    for sizes, repeats in input_variants:
      offsets = utils_tf._compute_stacked_offsets(sizes, repeats)
      self.assertAllEqual(self.offset, offsets.numpy().tolist())
class DataDictsCompletionTests(test_utils.GraphsTest, parameterized.TestCase):
  """Tests for the methods creating complete graphs from partial graphs."""

  def _assert_indices_sizes(self, dict_, n_relation):
    # Both index fields must hold exactly one entry per relation (edge).
    for key in ["receivers", "senders"]:
      self.assertAllEqual((n_relation,), dict_[key].get_shape().as_list())

  @parameterized.named_parameters(
      ("static", utils_tf._create_complete_edges_from_nodes_static),
      ("dynamic", utils_tf._create_complete_edges_from_nodes_dynamic),
  )
  def test_create_complete_edges_from_nodes_include_self_edges(self, method):
    for graph_dict in self.graphs_dicts_in:
      n_node = graph_dict["nodes"].shape[0]
      edges_dict = method(n_node, exclude_self_edges=False)
      # With self-edges, a complete graph has n_node**2 edges.
      self._assert_indices_sizes(edges_dict, n_node**2)

  @parameterized.named_parameters(
      ("static", utils_tf._create_complete_edges_from_nodes_static),
      ("dynamic", utils_tf._create_complete_edges_from_nodes_dynamic),
  )
  def test_create_complete_edges_from_nodes_exclude_self_edges(self, method):
    for graph_dict in self.graphs_dicts_in:
      n_node = graph_dict["nodes"].shape[0]
      edges_dict = method(n_node, exclude_self_edges=True)
      # Without self-edges, a complete graph has n_node * (n_node - 1) edges.
      self._assert_indices_sizes(edges_dict, n_node * (n_node - 1))

  def test_create_complete_edges_from_nodes_dynamic_number_of_nodes(self):
    # Same as above, but with `n_node` only known at runtime (a tensor), so
    # shapes are checked on the evaluated numpy results instead.
    for graph_dict in self.graphs_dicts_in:
      n_node = tf.shape(tf.constant(graph_dict["nodes"]))[0]
      edges_dict = utils_tf._create_complete_edges_from_nodes_dynamic(
          n_node, exclude_self_edges=False)
      n_relation = n_node**2
      receivers = edges_dict["receivers"].numpy()
      senders = edges_dict["senders"].numpy()
      n_edge = edges_dict["n_edge"].numpy()
      self.assertAllEqual((n_relation,), receivers.shape)
      self.assertAllEqual((n_relation,), senders.shape)
      self.assertEqual(n_relation, n_edge)
class GraphsCompletionTests(test_utils.GraphsTest, parameterized.TestCase):
  """Tests for completing partial GraphsTuple."""

  def _assert_indices_sizes(self, graph, n_relation):
    # Both index fields must hold exactly one entry per relation (edge).
    for key in ["receivers", "senders"]:
      self.assertAllEqual((n_relation,),
                          getattr(graph, key).get_shape().as_list())

  @parameterized.named_parameters(("edge size 0", 0), ("edge size 1", 1))
  def test_fill_edge_state(self, edge_size):
    """Tests for filling the edge state with a constant content."""
    # Drop the edge features from the inputs; they will be recreated as
    # zeros of width `edge_size`.
    for g in self.graphs_dicts_in:
      g.pop("edges")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    n_edges = np.sum(self.reference_graph.n_edge)
    graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size)
    self.assertAllEqual((n_edges, edge_size),
                        graphs_tuple.edges.get_shape().as_list())

  @parameterized.named_parameters(("edge size 0", 0), ("edge size 1", 1))
  def test_fill_edge_state_dynamic(self, edge_size):
    """Tests for filling the edge state with a constant content."""
    for g in self.graphs_dicts_in:
      g.pop("edges")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    # Rebuild `n_edge` as a constant tensor to exercise the dynamic path.
    graphs_tuple = graphs_tuple._replace(
        n_edge=tf.constant(
            graphs_tuple.n_edge, shape=graphs_tuple.n_edge.get_shape()))
    n_edges = np.sum(self.reference_graph.n_edge)
    graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size)
    actual_edges = graphs_tuple.edges
    # The created features must all be zeros.
    self.assertNDArrayNear(
        np.zeros((n_edges, edge_size)), actual_edges, err=1e-4)

  @parameterized.named_parameters(("global size 0", 0), ("global size 1", 1))
  def test_fill_global_state(self, global_size):
    """Tests for filling the global state with a constant content."""
    for g in self.graphs_dicts_in:
      g.pop("globals")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    # One global feature vector per graph in the batch.
    n_graphs = self.reference_graph.n_edge.shape[0]
    graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, global_size)
    self.assertAllEqual((n_graphs, global_size),
                        graphs_tuple.globals.get_shape().as_list())

  @parameterized.named_parameters(("global size 0", 0), ("global size 1", 1))
  def test_fill_global_state_dynamic(self, global_size):
    """Tests for filling the global state with a constant content."""
    for g in self.graphs_dicts_in:
      g.pop("globals")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    # Hide global shape information
    # NOTE(review): this rebuilds `n_node` using `n_edge`'s shape; the two
    # have the same length (one entry per graph), but confirm this is
    # intentional rather than a copy-paste slip.
    graphs_tuple = graphs_tuple._replace(
        n_node=tf.constant(
            graphs_tuple.n_node, shape=graphs_tuple.n_edge.get_shape()))
    n_graphs = self.reference_graph.n_edge.shape[0]
    graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, global_size)
    actual_globals = graphs_tuple.globals.numpy()
    self.assertNDArrayNear(
        np.zeros((n_graphs, global_size)), actual_globals, err=1e-4)

  @parameterized.named_parameters(("node size 0", 0), ("node size 1", 1))
  def test_fill_node_state(self, node_size):
    """Tests for filling the node state with a constant content."""
    for g in self.graphs_dicts_in:
      # Record the node count before dropping the node features, since the
      # count can no longer be inferred afterwards.
      g["n_node"] = g["nodes"].shape[0]
      g.pop("nodes")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    n_nodes = np.sum(self.reference_graph.n_node)
    graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size)
    self.assertAllEqual((n_nodes, node_size),
                        graphs_tuple.nodes.get_shape().as_list())

  @parameterized.named_parameters(("node size 0", 0), ("node size 1", 1))
  def test_fill_node_state_dynamic(self, node_size):
    """Tests for filling the node state with a constant content."""
    for g in self.graphs_dicts_in:
      g["n_node"] = g["nodes"].shape[0]
      g.pop("nodes")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    # Rebuild `n_node` as a constant tensor to exercise the dynamic path.
    graphs_tuple = graphs_tuple._replace(
        n_node=tf.constant(
            graphs_tuple.n_node, shape=graphs_tuple.n_node.get_shape()))
    n_nodes = np.sum(self.reference_graph.n_node)
    graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size)
    actual_nodes = graphs_tuple.nodes.numpy()
    self.assertNDArrayNear(
        np.zeros((n_nodes, node_size)), actual_nodes, err=1e-4)

  def test_fill_edge_state_with_missing_fields_raises(self):
    """Edge field cannot be filled if receivers or senders are missing."""
    for g in self.graphs_dicts_in:
      g.pop("receivers")
      g.pop("senders")
      g.pop("edges")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    with self.assertRaisesRegexp(ValueError, "receivers"):
      graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size=1)

  def test_fill_state_default_types(self):
    """Tests that the features are created with the correct default type."""
    for g in self.graphs_dicts_in:
      g.pop("nodes")
      g.pop("globals")
      g.pop("edges")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size=1)
    graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size=1)
    graphs_tuple = utils_tf.set_zero_global_features(
        graphs_tuple, global_size=1)
    # All filled features default to float32.
    self.assertEqual(tf.float32, graphs_tuple.edges.dtype)
    self.assertEqual(tf.float32, graphs_tuple.nodes.dtype)
    self.assertEqual(tf.float32, graphs_tuple.globals.dtype)

  @parameterized.parameters(
      (tf.float64,),
      (tf.int32,),
  )
  def test_fill_state_user_specified_types(self, dtype):
    """Tests that the features are created with the correct default type."""
    for g in self.graphs_dicts_in:
      g.pop("nodes")
      g.pop("globals")
      g.pop("edges")
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, 1, dtype)
    graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, 1, dtype)
    graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, 1, dtype)
    self.assertEqual(dtype, graphs_tuple.edges.dtype)
    self.assertEqual(dtype, graphs_tuple.nodes.dtype)
    self.assertEqual(dtype, graphs_tuple.globals.dtype)

  @parameterized.named_parameters(
      ("no self edges", False),
      ("self edges", True),
  )
  def test_fully_connect_graph_dynamic(self, exclude_self_edges):
    # Drop all connectivity, then rebuild it as a complete graph.
    for g in self.graphs_dicts_in:
      g.pop("edges")
      g.pop("receivers")
      g.pop("senders")
    # Expected total number of edges across the batch once fully connected.
    n_relation = 0
    for g in self.graphs_dicts_in:
      n_node = g["nodes"].shape[0]
      if exclude_self_edges:
        n_relation += n_node * (n_node - 1)
      else:
        n_relation += n_node * n_node
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    graphs_tuple = utils_tf.fully_connect_graph_dynamic(graphs_tuple,
                                                        exclude_self_edges)
    actual_receivers = graphs_tuple.receivers.numpy()
    actual_senders = graphs_tuple.senders.numpy()
    self.assertAllEqual((n_relation,), actual_receivers.shape)
    self.assertAllEqual((n_relation,), actual_senders.shape)
    self.assertAllEqual((len(self.graphs_dicts_in),),
                        graphs_tuple.n_edge.get_shape().as_list())

  @parameterized.named_parameters(
      ("no self edges", False),
      ("self edges", True),
  )
  def test_fully_connect_graph_dynamic_with_dynamic_sizes(
      self, exclude_self_edges):
    for g in self.graphs_dicts_in:
      g.pop("edges")
      g.pop("receivers")
      g.pop("senders")
    n_relation = 0
    for g in self.graphs_dicts_in:
      n_node = g["nodes"].shape[0]
      if exclude_self_edges:
        n_relation += n_node * (n_node - 1)
      else:
        n_relation += n_node * n_node
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    # Hide the static leading dimensions to force the dynamic code path.
    graphs_tuple = graphs_tuple.map(test_utils.mask_leading_dimension,
                                    ["nodes", "globals", "n_node", "n_edge"])
    graphs_tuple = utils_tf.fully_connect_graph_dynamic(graphs_tuple,
                                                        exclude_self_edges)
    actual_receivers = graphs_tuple.receivers.numpy()
    actual_senders = graphs_tuple.senders.numpy()
    actual_n_edge = graphs_tuple.n_edge.numpy()
    self.assertAllEqual((n_relation,), actual_receivers.shape)
    self.assertAllEqual((n_relation,), actual_senders.shape)
    self.assertAllEqual((len(self.graphs_dicts_in),), actual_n_edge.shape)
    # Build the expected (receiver, sender) pairs graph by graph, offsetting
    # node indices by the node counts of the preceding graphs.
    expected_edges = []
    offset = 0
    for graph in self.graphs_dicts_in:
      n_node = graph["nodes"].shape[0]
      for e1 in range(n_node):
        for e2 in range(n_node):
          if not exclude_self_edges or e1 != e2:
            expected_edges.append((e1 + offset, e2 + offset))
      offset += n_node
    actual_edges = zip(actual_receivers, actual_senders)
    self.assertSetEqual(set(actual_edges), set(expected_edges))
class GraphsTupleConversionTests(test_utils.GraphsTest, parameterized.TestCase):
  """Tests for the method converting between data dicts and GraphsTuple."""

  @parameterized.named_parameters(("all fields defined", []), (
      "no edge features",
      ["edges"],
  ), (
      "no node features",
      ["nodes"],
  ), (
      "no globals",
      ["globals"],
  ), (
      "no edges",
      ["edges", "receivers", "senders"],
  ))
  def test_data_dicts_to_graphs_tuple(self, none_fields):
    """Fields in `none_fields` will be cleared out."""
    # Blank the fields in the inputs and update the reference graph
    # accordingly so the final comparison stays valid.
    for field in none_fields:
      for graph_dict in self.graphs_dicts_in:
        if field in graph_dict:
          if field == "nodes":
            # Without node features, the node count must be given explicitly.
            graph_dict["n_node"] = graph_dict["nodes"].shape[0]
          graph_dict[field] = None
      self.reference_graph = self.reference_graph._replace(**{field: None})
      if field == "senders":
        # With connectivity removed, the reference edge counts drop to zero.
        self.reference_graph = self.reference_graph._replace(
            n_edge=np.zeros_like(self.reference_graph.n_edge))
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    for field in none_fields:
      self.assertEqual(None, getattr(graphs_tuple, field))
    graphs_tuple = graphs_tuple.map(tf.no_op, none_fields)
    self._assert_graph_equals_np(self.reference_graph, graphs_tuple)

  @parameterized.parameters(("receivers",), ("senders",))
  def test_data_dicts_to_graphs_tuple_raises(self, none_field):
    """Fields that cannot be missing."""
    for graph_dict in self.graphs_dicts_in:
      graph_dict[none_field] = None
    with self.assertRaisesRegexp(ValueError, none_field):
      utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)

  def test_data_dicts_to_graphs_tuple_no_raise(self):
    """Not having nodes is fine, if the number of nodes is provided."""
    for graph_dict in self.graphs_dicts_in:
      graph_dict["n_node"] = graph_dict["nodes"].shape[0]
      graph_dict["nodes"] = None
    utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)

  def test_data_dicts_to_graphs_tuple_cast_types(self):
    """Index and number fields should be cast to tensors of the right type."""
    # Feed deliberately mismatched dtypes for the structural fields and
    # non-default float dtypes for the features.
    for graph_dict in self.graphs_dicts_in:
      graph_dict["n_node"] = np.array(
          graph_dict["nodes"].shape[0], dtype=np.int64)
      graph_dict["receivers"] = graph_dict["receivers"].astype(np.int16)
      graph_dict["senders"] = graph_dict["senders"].astype(np.float64)
      graph_dict["nodes"] = graph_dict["nodes"].astype(np.float64)
      graph_dict["edges"] = tf.constant(graph_dict["edges"], dtype=tf.float64)
    out = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    # Structural fields are normalized to int32...
    for key in ["n_node", "n_edge", "receivers", "senders"]:
      self.assertEqual(tf.int32, getattr(out, key).dtype)
      self.assertEqual(type(tf.int32), type(getattr(out, key).dtype))
    # ...while feature dtypes are preserved.
    for key in ["nodes", "edges"]:
      self.assertEqual(type(tf.float64), type(getattr(out, key).dtype))
      self.assertEqual(tf.float64, getattr(out, key).dtype)
class GraphsIndexingTests(test_utils.GraphsTest, parameterized.TestCase):
  """Tests for the `get_graph` method."""

  @parameterized.named_parameters(("int_index", False),
                                  ("tensor_index", True))
  def test_getitem_one(self, use_tensor_index):
    # Extract the third graph of the batch, indexing with either a Python
    # int or a scalar tensor.
    index = 2
    expected = self.graphs_dicts_out[index]
    if use_tensor_index:
      index = tf.constant(index)
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    graph = utils_tf.get_graph(graphs_tuple, index)
    graph = utils_tf.nest_to_numpy(graph)
    actual, = utils_np.graphs_tuple_to_data_dicts(graph)
    for k, v in expected.items():
      self.assertAllClose(v, actual[k])
    # Counts must match the shapes of the expected feature arrays.
    self.assertEqual(expected["nodes"].shape[0], actual["n_node"])
    self.assertEqual(expected["edges"].shape[0], actual["n_edge"])

  @parameterized.named_parameters(("int_slice", False),
                                  ("tensor_slice", True))
  def test_getitem(self, use_tensor_slice):
    # Extract graphs 1 and 2, slicing with either Python ints or tensors.
    index = slice(1, 3)
    expected = self.graphs_dicts_out[index]
    if use_tensor_slice:
      index = slice(tf.constant(index.start), tf.constant(index.stop))
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    graphs2 = utils_tf.get_graph(graphs_tuple, index)
    graphs2 = utils_tf.nest_to_numpy(graphs2)
    actual = utils_np.graphs_tuple_to_data_dicts(graphs2)
    for ex, ac in zip(expected, actual):
      for k, v in ex.items():
        self.assertAllClose(v, ac[k])
      self.assertEqual(ex["nodes"].shape[0], ac["n_node"])
      self.assertEqual(ex["edges"].shape[0], ac["n_edge"])

  @parameterized.named_parameters(
      ("index_bad_type", 1.,
       TypeError, "Index must be a valid scalar integer", False, False),
      ("index_bad_shape", [0, 1],
       TypeError, "Valid tensor indices must be scalars", True, False),
      ("index_bad_dtype", 1.,
       TypeError, "Valid tensor indices must have types", True, False),
      ("slice_bad_type_stop", 1.,
       TypeError, "Valid tensor indices must be integers", False, True),
      ("slice_bad_shape_stop", [0, 1],
       TypeError, "Valid tensor indices must be scalars", True, True),
      ("slice_bad_dtype_stop", 1.,
       TypeError, "Valid tensor indices must have types", True, True),
      ("slice_bad_type_start", slice(0., 1),
       TypeError, "Valid tensor indices must be integers", False, False),
      ("slice_with_step", slice(0, 1, 1),
       ValueError, "slices with step/stride are not supported", False, False),
  )
  def test_raises(self, index, error_type, message, use_constant, use_slice):
    # Invalid indices/slices must be rejected with a descriptive error.
    if use_constant:
      index = tf.constant(index)
    if use_slice:
      index = slice(index)
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    with self.assertRaisesRegexp(error_type, message):
      utils_tf.get_graph(graphs_tuple, index)
class TestNumGraphs(test_utils.GraphsTest):
  """Tests for the `get_num_graphs` function."""

  def setUp(self):
    super(TestNumGraphs, self).setUp()
    full_graph = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
    # Strip every data field, leaving only the structural metadata.
    self.empty_graph = full_graph.map(
        lambda _: None, graphs.GRAPH_DATA_FIELDS)

  def test_num_graphs(self):
    # The number of graphs is the length of `n_node`, here 3.
    three_graphs = self.empty_graph.replace(
        n_node=tf.zeros([3], dtype=tf.int32))
    self.assertEqual(3, utils_tf.get_num_graphs(three_graphs))
class TestNestToNumpy(test_utils.GraphsTest):
  """Test that graph with tf.Tensor fields get converted to numpy."""

  def setUp(self):
    super(TestNestToNumpy, self).setUp()
    # A single random graph with 5 nodes and 10 edges.
    self._graph = utils_tf.data_dicts_to_graphs_tuple([{
        "senders": tf.random.uniform([10], maxval=10, dtype=tf.int32),
        "receivers": tf.random.uniform([10], maxval=10, dtype=tf.int32),
        "nodes": tf.random.uniform([5, 7]),
        "edges": tf.random.uniform([10, 6]),
        "globals": tf.random.uniform([1, 8])
    }])

  def test_single_graph(self):
    # Every field of the converted graph is an ndarray with the same values.
    numpy_graph = utils_tf.nest_to_numpy(self._graph)
    for field in graphs.ALL_FIELDS:
      self.assertIsInstance(getattr(numpy_graph, field), np.ndarray)
      self.assertNDArrayNear(
          getattr(self._graph, field).numpy(),
          getattr(numpy_graph, field), 1e-8)

  def test_mixed_graph_conversion(self):
    # `None` fields stay `None`, and fields that are already numpy arrays
    # pass through unchanged.
    graph = self._graph.replace(nodes=None)
    graph = graph.map(lambda x: x.numpy(), ["edges"])
    converted_graph = utils_tf.nest_to_numpy(graph)
    self.assertIsNone(converted_graph.nodes)
    self.assertIsInstance(converted_graph.edges, np.ndarray)

  def test_nested_structure(self):
    # Conversion preserves arbitrary nesting (lists, tuples, dicts, None).
    regular_graph = self._graph
    graph_with_nested_fields = regular_graph.map(
        lambda x: {"a": x, "b": tf.random.uniform([4, 6])})
    nested_structure = [
        None,
        regular_graph,
        (graph_with_nested_fields,),
        tf.random.uniform([10, 6])]
    nested_structure_numpy = utils_tf.nest_to_numpy(nested_structure)
    tree.assert_same_structure(nested_structure, nested_structure_numpy)
    # Compare leaf by leaf; `None` leaves must map to `None`.
    for tensor_or_none, array_or_none in zip(
        tree.flatten(nested_structure),
        tree.flatten(nested_structure_numpy)):
      if tensor_or_none is None:
        self.assertIsNone(array_or_none)
        continue
      self.assertIsNotNone(array_or_none)
      self.assertNDArrayNear(
          tensor_or_none.numpy(),
          array_or_none, 1e-8)
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
  tf.test.main()
| StarcoderdataPython |
def main():
    """Solve a*x + b*sin(x) = c for x in [0, c] by bisection, per test case.

    Input format: first line is the number of test cases; each following
    line holds three integers "a b c". Prints each root with six decimal
    places. Assumes f(x) = a*x + b*sin(x) is non-decreasing on [0, c]
    (e.g. a >= 1 and b, c >= 0) -- TODO confirm against the problem
    statement.
    """
    from sys import stdin
    from math import sin

    num_cases = int(stdin.readline())
    for _ in range(num_cases):
        a, b, c = map(int, stdin.readline().split())
        lo, hi = 0.0, float(c)
        # Shrink the bracket until it is tighter than the output precision.
        while hi - lo > 0.000009:
            mid = (lo + hi) / 2
            if a * mid + b * sin(mid) > c:
                hi = mid
            else:
                lo = mid
        # Print the bracket midpoint; this is well defined even when the
        # loop body never runs (the original printed an unbound variable
        # for c close to 0).
        print("%.6f" % ((lo + hi) / 2))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
# Odoo/OpenERP addon manifest for the "Product Image" module.
# (A stray dataset identifier previously prefixed this dict and made the
# file unparseable; it has been removed.)
{
    "name": "Product Image",
    "version": "0.1",
    "author": "<NAME>, Rove.design GmbH",
    "website": "http://www.rove.de/",
    "description": """
This Module overwrites openerp.web.list.Binary field to show the product image in the listview. A new column with product image is added.
""",
    # Modules that must be installed before this one.
    "depends": [
        "sale",
        "sale_stock",
        "stock"
    ],
    # View definitions loaded on installation.
    "data": [
        'views/product_view.xml',
        'views/sale_view.xml',
        'views/stock_view.xml'
    ],
    'installable': True,
    "active": False,
}
| StarcoderdataPython |
'''Create charts for viewing on Raspberry Pi Web Server.'''
# Raspi-sump, a sump pump monitoring system.
# <NAME>
# http://www.linuxnorth.org/raspi-sump/
#
# All configuration changes should be done in raspisump.conf
# MIT License -- http://www.linuxnorth.org/raspi-sump/license.htmlimport os
import os
import subprocess
import time
from raspisump import todaychart
from datetime import date, timedelta
def create_folders(year, month, homedir):
    '''Create the charts/<year>/<month> folder hierarchy if it is missing.

    Args:
        year: Year folder name, e.g. '2018'.
        month: Month folder name, e.g. '07'.
        homedir: Raspi-sump home directory path, including trailing slash.
    '''
    # os.makedirs creates both the year and month levels in one call and,
    # unlike the previous "mkdir" subprocess built with str.split(' '),
    # works even when homedir contains spaces.
    month_folder = '{}charts/{}/{}'.format(homedir, year, month)
    if not os.path.isdir(month_folder):
        os.makedirs(month_folder)
def create_chart(homedir):
    '''Render today's sump pit activity chart into the web charts folder.'''
    date_stamp = time.strftime('%Y%m%d')
    source_csv = '{0}charts/csv/waterlevel-{1}.csv'.format(homedir, date_stamp)
    target_png = '{0}charts/today.png'.format(homedir)
    # The formatter turns the CSV's byte timestamps into HH:MM:SS labels.
    time_formatter = todaychart.bytesdate2str('%H:%M:%S')
    todaychart.graph(source_csv, target_png, time_formatter)
def copy_chart(year, month, today, homedir):
    '''Copy chart images into place for web viewing.

    Copies today's chart into the charts/<year>/<month>/ archive, then
    refreshes the yesterday/dbf/dbf2 images (1, 2 and 3 days before) in the
    charts root from that archive.

    Args:
        year: Year folder name, e.g. '2018'.
        month: Month folder name, e.g. '07'.
        today: Basename (without extension) for today's archived chart.
        homedir: Raspi-sump home directory path, including trailing slash.
    '''
    def _copy(source, destination):
        # Mirrors the original best-effort "cp": a missing source file only
        # prints an error, it does not raise. The list form is safe for
        # paths containing spaces, unlike the old str.split(' ') approach.
        subprocess.call(['cp', source, destination])

    month_dir = '{}charts/{}/{}/'.format(homedir, year, month)
    _copy('{}charts/today.png'.format(homedir),
          '{}{}.png'.format(month_dir, today))
    # Refresh the rolling "N days before" images. The original computed
    # `yesterday` twice; the duplicate has been removed.
    # NOTE(review): like the original, archived files are looked up in the
    # *current* year/month folder, which misses files across a month
    # boundary -- confirm whether that is acceptable.
    for days_back, target_name in ((1, 'yesterday'), (2, 'dbf'), (3, 'dbf2')):
        day = (date.today() - timedelta(days_back)).strftime('%d')
        _copy('{}{}{}{}.png'.format(month_dir, year, month, day),
              '{}charts/{}.png'.format(homedir, target_name))
373026 | <reponame>NicolasLM/python-runabove
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2014, OVH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Except as contained in this notice, the name of OVH and or its trademarks
# (and among others RunAbove) shall not be used in advertising or otherwise to
# promote the sale, use or other dealings in this Software without prior
# written authorization from OVH.
"""RunAbove instance service library."""
from base import Resource, BaseManagerWithList
class InstanceManager(BaseManagerWithList):
    """Manage instances for a RunAbove account."""

    basepath = '/instance'

    def get_by_id(self, instance_id):
        """Get one instance from a RunAbove account.

        :param instance_id: ID of the instance to retrieve
        """
        url = self.basepath + '/' + self._api.encode_for_api(instance_id)
        instance = self._api.get(url)
        return self._en_dict_to_obj(instance)

    def _load_vnc(self, instance):
        """Load the VNC link to an instance.

        :param instance: Instance object or instance ID to get VNC
                         console from
        """
        # Accept either an Instance object or a raw instance ID.
        try:
            instance_id = instance.id
        except AttributeError:
            instance_id = instance
        # Encode the ID like the other endpoints do, for consistency.
        url = (self.basepath + '/' +
               self._api.encode_for_api(instance_id) + '/vnc')
        vnc = self._api.get(url)
        return vnc['url']

    def _dict_to_obj(self, ins):
        """Converts a dict (from an instance listing) to an Instance."""
        region = self._handler.regions._name_to_obj(ins['region'])
        return Instance(self,
                        ins.get('instanceId'),
                        ins.get('name'),
                        ins.get('ip'),
                        region,
                        ins.get('flavorId'),
                        ins.get('imageId'),
                        ins.get('keyName'),
                        ins.get('status'),
                        ins.get('created'))

    def _en_dict_to_obj(self, ins):
        """Converts an enhanced dict to an instance object.

        The enhanced dict got with the GET of one instance allows
        to build the flavor, image and SSH key objects directly
        without making a call for each of them. However SSH key
        is not mandatory so can be None.
        """
        # When `sshKey` is None, subscripting it raises TypeError and both
        # the name and the related object are left unset.
        try:
            ssh_key_name = ins['sshKey']['name']
            ssh_key = self._handler.ssh_keys._dict_to_obj(ins['sshKey'])
        except TypeError:
            ssh_key_name = None
            ssh_key = None
        region = self._handler.regions._name_to_obj(ins['region'])
        flavor = self._handler.flavors._dict_to_obj(ins['flavor'])
        image = self._handler.images._dict_to_obj(ins['image'])
        return Instance(self,
                        ins['instanceId'],
                        ins.get('name'),
                        ins.get('ipv4'),
                        region,
                        ins['flavor']['id'],
                        ins['image']['id'],
                        ssh_key_name,
                        ins.get('status'),
                        ins.get('created'),
                        ips=ins.get('ips'),
                        flavor=flavor,
                        image=image,
                        ssh_key=ssh_key)

    def create(self, region, name, flavor, image, ssh_key=None):
        """Launch a new instance inside a region with a public key.

        :param region: Name or object region of the new instance
        :param name: Name of the new instance
        :param flavor: ID or object flavor used for this Instance
        :param image: ID or object image for the instance
        :param ssh_key: Name or object SSH key to install
        """
        # Each parameter may be given either as an object or as its raw
        # identifier/name.
        try:
            region_name = region.name
        except AttributeError:
            region_name = region
        try:
            flavor_id = flavor.id
        except AttributeError:
            flavor_id = flavor
        try:
            image_id = image.id
        except AttributeError:
            image_id = image
        content = {
            'flavorId': flavor_id,
            'imageId': image_id,
            'name': name,
            'region': region_name
        }
        if ssh_key:
            try:
                content['sshKeyName'] = ssh_key.name
            except AttributeError:
                content['sshKeyName'] = ssh_key
        instance_id = self._api.post(self.basepath, content)['instanceId']
        return self.get_by_id(instance_id)

    def rename(self, instance, new_name):
        """Rename an existing instance.

        :param instance: instance_id or Instance object to be renamed
        :param new_name: new name of instance
        """
        content = {
            'name': new_name
        }
        # Local renamed from `id` to avoid shadowing the builtin and to
        # match `get_by_id`.
        try:
            instance_id = instance.id
        except AttributeError:
            instance_id = instance
        url = self.basepath + '/' + self._api.encode_for_api(instance_id)
        self._api.put(url, content)

    def delete(self, instance):
        """Delete an instance from an account.

        :param instance: instance_id or Instance object to be deleted
        """
        try:
            instance_id = instance.id
        except AttributeError:
            instance_id = instance
        url = self.basepath + '/' + self._api.encode_for_api(instance_id)
        self._api.delete(url)
class Instance(Resource):
    """A single RunAbove instance, with lazily loaded related objects."""

    def __init__(self, manager, id, name, ip, region, flavor_id, image_id,
                 ssh_key_name, status, created, ips=None,
                 flavor=None, image=None, ssh_key=None):
        self._manager = manager
        self.id = id
        self.name = name
        self.ip = ip
        self.created = created
        self.status = status
        self.region = region
        # Related objects may be supplied up front; otherwise only their
        # identifiers are kept and the objects are fetched on first access.
        self._flavor_id = flavor_id
        self._flavor = flavor
        self._image_id = image_id
        self._image = image
        self._ssh_key_name = ssh_key_name
        self._ssh_key = ssh_key
        self._vnc = None
        self._ips = ips

    @property
    def flavor(self):
        """Flavor of this instance (lazily loaded)."""
        if not self._flavor:
            handler = self._manager._handler
            self._flavor = handler.flavors.get_by_id(self._flavor_id)
        return self._flavor

    @property
    def image(self):
        """Image of this instance (lazily loaded)."""
        if not self._image:
            handler = self._manager._handler
            self._image = handler.images.get_by_id(self._image_id)
        return self._image

    @property
    def ssh_key(self):
        """SSH key of this instance, or None if it has none (lazily loaded)."""
        if not self._ssh_key_name:
            return None
        if not self._ssh_key:
            handler = self._manager._handler
            self._ssh_key = handler.ssh_keys.get_by_name(
                self.region, self._ssh_key_name)
        return self._ssh_key

    @property
    def ips(self):
        """List of IPs of this instance (lazily loaded)."""
        if self._ips is None:
            self._ips = self._manager.get_by_id(self.id)._ips
        return self._ips

    @property
    def vnc(self):
        """URL of the VNC console of this instance (lazily loaded)."""
        if not self._vnc:
            self._vnc = self._manager._load_vnc(self)
        return self._vnc

    def delete(self):
        """Delete the instance represented by this object from the account."""
        self._manager.delete(self)

    def rename(self, new_name):
        """Rename the instance represented by this object."""
        self._manager.rename(self, new_name)
        self.name = new_name
| StarcoderdataPython |
28197 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os.path
import uuid
from yamlfred.utils import remove_default, merge_dicts
from yamlfred.utils import Include
# Per-type default properties for Alfred workflow objects.  These are
# merged underneath each AlfredObject's own dict (see merge_dicts in
# AlfredObject.__init__) and stripped back out on serialization (see
# remove_default in AlfredObject.dump).
defaults = {
    'alfred.workflow.output.notification': {
        'config': {'removeextension': False, 'output': 0, 'lastpathcomponent': False, 'onlyshowifquerypopulated': False, 'sticky': False},
        'version': 0,
    },
    'alfred.workflow.trigger.hotkey': {
        'config': {'leftcursor': False, 'argument': 0, 'relatedAppsMode': 0, 'action': 0, 'hotkey': 0, 'hotstring': '', 'hotmod': 0, 'modsmode': 0},
        'version': 1,
    },
    'alfred.workflow.action.openfile': {
        'config': {},
        'version': 1,
    },
    'alfred.workflow.input.keyword': {
        'config': {'argumenttype': 0, 'withspace': True},
        'version': 0,
    },
    'alfred.workflow.trigger.external': {
        'config': {},
        'version': 0,
    },
    'alfred.workflow.output.largetype': {
        'version': 0,
    },
    'alfred.workflow.action.revealfile': {
        'version': 0,
    },
    'alfred.workflow.input.filefilter': {
        'config': {'scopes': [], 'includesystem': False, 'withspace': True, 'anchorfields': True, 'daterange': 0, 'types': []},
        'version': 0,
    },
    'alfred.workflow.input.scriptfilter': {
        'config': {'withspace': True, 'escaping': 102, 'script': '', 'argumenttype': 0, 'type': 0,
                   'queuedelaycustom': 3, 'queuedelayimmediatelyinitially': True, 'queuedelaymode': 0, 'queuemode': 1},
        'version': 0,
    },
    'alfred.workflow.action.browseinalfred': {
        'config': {},
        'version': 0,
    },
    'alfred.workflow.trigger.action': {
        'config': {'filetypes': [], 'acceptsmulti': False},
        'version': 0,
    },
    'alfred.workflow.output.clipboard': {
        'config': {'clipboardtext': '', 'autopaste': False},
        'version': 0,
    },
    'alfred.workflow.output.script': {
        'config': {'escaping': 102, 'type': 0, 'script': '', 'concurrently': False},
        'version': 0,
    },
    'alfred.workflow.action.launchfiles': {
        'config': {'paths': [], 'toggle': False},
        'version': 0,
    },
    'alfred.workflow.trigger.contact': {
        'config': {},
        'version': 0,
    },
    'alfred.workflow.action.systemwebsearch': {
        'config': {},
        'version': 0,
    },
    'alfred.workflow.trigger.fallback': {
        'config': {},
        'version': 0,
    },
    'alfred.workflow.action.openurl': {
        'config': {'utf8': True, 'plusspaces': False},
        'version': 0,
    },
    'alfred.workflow.action.systemcommand': {
        'config': {'command': 0, 'confirm': False},
        'version': 1,
    },
    'alfred.workflow.action.itunescommand': {
        'config': {'command': 0},
        'version': 0,
    },
    'alfred.workflow.action.script': {
        'config': {'escaping': 102, 'type': 0, 'script': '', 'concurrently': False},
        'version': 0,
    },
    'alfred.workflow.action.applescript': {
        'config': {'cachescript': False, 'applescript': ''},
        'version': 0,
    },
    'alfred.workflow.action.terminalcommand': {
        'config': {'escaping': 0},
        'version': 0,
    },
    'alfred.workflow.trigger.remote': {
        'config': {'argumenttype': 0, 'workflowonly': False},
        'version': 0,
    },
}
class AlfredObject(object):
    """One node of an Alfred workflow, merged over type-specific defaults.

    :param dic: raw object dict; must carry a ``'type'`` key matching one
        of the entries in ``defaults`` (unknown types get no defaults).
    """

    def __init__(self, dic):
        self.type = dic['type']
        default = defaults[self.type] if self.type in defaults else {}
        self.prop = merge_dicts(default, dic)
        if 'uid' not in self.prop:
            # BUG FIX: store the uid as a string.  A raw uuid.UUID object
            # breaks os.path.join() in dump(), which uses the uid as a
            # file-name component.
            self.prop['uid'] = str(uuid.uuid4())
        # script_type names the config key whose value is an embedded
        # script body; None for object types that carry no script.
        self.script_type = None
        if self.type == 'alfred.workflow.action.applescript':
            self.script_type = 'applescript'
        elif self.type in ('alfred.workflow.input.scriptfilter',
                           'alfred.workflow.output.script',
                           'alfred.workflow.action.script'):
            self.script_type = 'script'

    def dump(self, script_dir='.'):
        """Return this object's non-default properties.

        Any embedded script is written out to ``script_dir/<uid>`` and
        replaced in the returned dict by an ``Include`` reference.

        :param script_dir: directory receiving extracted script files
        :return: dict of properties that differ from the type defaults
        """
        default = defaults[self.type] if self.type in defaults else {}
        prop = remove_default(self.prop, default)
        if self.script_type:
            path = os.path.join(script_dir, self.prop['uid'])
            with open(path, 'w') as f:
                # Guard against a missing script body: write an empty
                # file instead of crashing on f.write(None).
                script = self.prop['config'].get(self.script_type) or ''
                f.write(script)
            prop['config'][self.script_type] = Include(path)
        return prop
| StarcoderdataPython |
6601550 | <gh_stars>1-10
# -*- coding: utf-8 -*-
'''
Provide instance of analytics to track events and timings.
'''
from mlab_api.analytics.google_analytics import GoogleAnalyticsClient
from mlab_api.app import app
# The GA tracking id comes from the Flask app configuration; ANALYTICS is
# the shared module-level client instance importing code is expected to use.
TRACKING_ID = app.config['GA_TRACKING_ID']
ANALYTICS = GoogleAnalyticsClient(TRACKING_ID)
| StarcoderdataPython |
3203421 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import yaml
import necbaas as baas
def load_config():
    """
    Load test config from ~/.baas/python_test_config.yaml

    Returns:
        dict: Loaded config
    """
    path = os.path.expanduser("~/.baas/python_test_config.yaml")
    # 'with' guarantees the handle is closed even if parsing raises
    # (the original leaked it on error); safe_load avoids executing
    # arbitrary Python object tags from the YAML file.
    with open(path, 'r') as f:
        return yaml.safe_load(f)
def create_service(master=False, key="service"):
    """
    Create service from config file.

    Args:
        master (bool): use master key
        key (str): service key in config

    Returns:
        Service: service"
    """
    conf = load_config()[key]
    if master:
        # Promote the master key so privileged operations are allowed.
        conf["appKey"] = conf["masterKey"]
    return baas.Service(conf)
def remove_all_users(key="service"):
    """Delete every user in the service selected by *key* (master access)."""
    service = create_service(True, key=key)
    for user in baas.User.query(service):
        print(user)
        baas.User.remove(service, user["_id"])
def remove_all_groups():
    """Delete every group in the default service (master access)."""
    service = create_service(True)
    for entry in baas.Group.query(service):
        print(entry)
        baas.Group(service, entry["name"]).remove()
def setup_user(service):
    """Wipe all existing users, register a fixture user and log it in.

    Args:
        service: target service (user-level key is sufficient here;
            the wipe uses its own master-key service internally)

    Returns:
        User: the registered, logged-in fixture user
    """
    remove_all_users()
    # Register user
    user = baas.User(service)
    user.username = "user1"
    user.email = "<EMAIL>"
    user.password = "<PASSWORD>"
    user.register()
    # Login
    baas.User.login(service, username=user.username, password=user.password)
    return user
| StarcoderdataPython |
369870 | #!/usr/bin/env python3
import json
import os
from armor import Armor
def convert_armor(armor):
    """Print a tab-separated table of the given armor pieces.

    Entries are sorted by numeric name, descending; highlighted
    (unique) items are skipped. The header row is emitted once,
    before the first entry is examined.
    """
    row = "{:<32}\t{:<16}\t{:>2}\t{:>5}\t{:>5}\t{:<16}\t{}"
    armor.sort(key=lambda item: int(item.name), reverse=True)
    header_done = False
    for item in armor:
        if not header_done:
            print(row.format(
                "Name",
                "Category",
                "DR",
                "Weight",
                "Value",
                "Faction",
                "Effect"))
            header_done = True
        if item.is_highlighted:
            # Unique items are excluded from the table.
            continue
        print(row.format(
            item.name,
            item.category,
            item.dr,
            item.weight,
            item.value,
            item.faction,
            item.effect))
def load_armor():
    """Read every JSON file in ../out/armor and print the converted table."""
    armor_dir = "../out/armor"
    armor = []
    for entry in os.listdir(armor_dir):
        if not entry.endswith(".json"):
            continue
        path = os.path.join(armor_dir, entry)
        with open(path, 'r') as handle:
            # Each file holds a JSON array of armor records.
            armor.extend(Armor(path, obj) for obj in json.load(handle))
    convert_armor(armor)
def main():
    """Script entry point: load and print the armor table."""
    load_armor()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
5060168 | <filename>chap9/ages.py
# Exercise 9.9
# I think the child is 57, and they're 17-18 years apart
# I really shouldn't rewrite functions, but it's a short one and it's more convenient now.
def is_reverse(word1, word2):
    """Return True when word2 is word1 spelled backwards."""
    return word1 == word2[::-1]


def find_num_reversals(diff):
    """Find all two digit reversed numbers with a certain difference, e.g. all
    palindromes separated by 11.
    """
    count = 0
    for age in range(100 - diff):
        younger = str(age).zfill(2)
        older = str(age + diff).zfill(2)
        older_alt = str(age + diff + 1).zfill(2)
        # People without the same birthday n+ years apart have ages
        # separated by n and n+1 during the year, so accept either gap.
        if is_reverse(younger, older) or is_reverse(younger, older_alt):
            count += 1
            if count == 6:
                # Candidate could also refer to age and age+diff+1, but
                # that is clear from the printout.
                print("Candidate:", age, "and", age + diff)
    return count
def find_age_diff():
    """Find an age difference with 8 reversals in it
    """
    # Try every possible two-digit age difference and report the ones
    # with at least 6 reversed pairs.
    for gap in range(1, 100):
        hits = find_num_reversals(gap)
        if hits >= 6:
            print("Age difference of", gap, "has", hits, "reversals.")

find_age_diff()
| StarcoderdataPython |
8194142 | <reponame>Hvedrug/NLP-project-chatbot_writing_web_page
import data
import htmlObject
def generateHTML():
    """Render the page: walk data.listID in order and emit one HTML
    element per entry of data.arrayHTML, then close any dangling divs."""
    out = htmlObject.createHTML()
    for key in data.listID:
        # key is a single-element list holding the element id.
        item = data.arrayHTML[key[0]]
        kind = item[0]
        if kind == "p":
            out += "<p id=\"" + item[1] + "\" class=\"" + item[2].strip() + "\">" + item[3] + "</p>\n"
        elif kind == "div":
            out += "<div id=\"" + item[1] + "\" class=\"" + item[2].strip() + "\">\n"
        elif kind == "cldiv":
            out += "</div>\n"
        elif kind == "h":
            out += "<h" + item[3] + " id=\"" + item[1] + "\" class=\"" + item[2].strip() + "\">" + item[4] + "</h" + item[3] + ">\n"
        elif kind == "err":
            out += "<!-- " + item[1] + "-->\n"
        elif kind == "a":
            out += "<a id=\"" + item[1] + "\" class=\"" + item[2].strip() + "\" href=\"" + item[3] + "\">" + item[4] + "</a>\n"
        elif kind == "img":
            out += "<img id=\"" + item[1] + "\" class=\"" + item[2].strip() + "\" src=\"" + item[3] + "\" alt=\"" + item[4] + "\">\n"
    # Balance any <div> elements that were never explicitly closed.
    for _ in range(data.countOpenDiv):
        out += "</div>\n"
    return out + htmlObject.endHTML()
def addItemToDict(myItem):
    """Register a single-entry dict in data.arrayHTML, rejecting any id
    that is already present in data.listID."""
    keys = list(myItem.keys())
    if keys in data.listID:
        data.displayText("you tried to add item but it already exist")
    else:
        data.arrayHTML.update(myItem)
        data.listID.append(keys)
def updateItemInDict(myItem):
    """Overwrite the entry in data.arrayHTML matching myItem's key."""
    data.displayText("newobj : " + str(myItem))
    data.arrayHTML.update(myItem)


def deleteItemInDict(myItemKey):
    """Remove myItemKey from data.arrayHTML; unknown keys are ignored."""
    data.arrayHTML.pop(myItemKey, None)

# NOTE: a dead triple-quoted string holding an older variant of
# updateItemInDict was removed here; it duplicated the function above.
6454806 | <filename>src/gameview.py
from config import bg_color, resolution, font_size, steps_per_cell
from events import BoardBuiltEvent, BoardUpdatedEvent, RecipeMatchEvent, \
GameBuiltEvent, VTickEvent, FruitKilledEvent, FruitSpeedEvent, FruitPlacedEvent, \
QuitEvent
from fruitspr import FruitSpr
from pygame.rect import Rect
from pygame.sprite import LayeredDirty
from pygame.surface import Surface
from widgets import TextLabelWidget, RecipesWidget, CPUDisplayWidget
import pygame
class GameView:
    """Pygame view for the in-play mode: game board on the left, score /
    recipe / CPU widgets on the right.  Fruit sprite positions are
    interpolated between model updates for smooth movement."""

    def __init__(self, em, ev):
        """ Score and recipe widgets on the right, game board on the left.
        em is the mode's event manager,
        ev is an event containing data from the previous mode (e.g. menu
        or level transition). ev contains the level number.
        """
        pygame.display.init()  # OK to init multiple times
        pygame.font.init()
        self._em = em
        window = pygame.display.set_mode(resolution)
        self.window = window
        pygame.display.set_caption('Smoothie Factory - In Play')
        # blit the bg screen: all black
        bg = Surface(window.get_size())
        bg.fill((0, 0, 0))
        bg = bg.convert()
        self.window_bg = bg
        self.window.blit(bg, (0, 0))
        # fruit sprites
        self.fruit_to_spr = {}  # map a fruit to its sprite
        self.fruit_sprites = LayeredDirty()  # only reblit when dirty=1
        self.interp_steps = 0  # 2 interpolation steps between 2 model updates
        # build GUI
        self.gui = self._build_gui()  # return a sprite group
        em.subscribe(BoardBuiltEvent, self.on_board_built)
        em.subscribe(GameBuiltEvent, self.on_game_built)
        em.subscribe(FruitKilledEvent, self.on_fruit_killed)
        em.subscribe(FruitPlacedEvent, self.on_fruit_spawned)
        em.subscribe(FruitSpeedEvent, self.on_speed_change)
        em.subscribe(QuitEvent, self.on_quit)

    def _build_gui(self):
        """ Add a score widget on the right """
        gui = LayeredDirty()  # only reblit when dirty=1
        # score at top-right of the window
        rec = Rect(600 + 10, 0,
                   100, font_size * 1.5)
        # map event class -> attribute holding the text to display
        evt_txt_dict = {RecipeMatchEvent: 'current_score'}
        score_widget = TextLabelWidget(self._em, '0',
                                       events_attrs=evt_txt_dict,
                                       rect=rec,
                                       txtcolor=(0, 0, 0),
                                       bgcolor=(222, 222, 222))
        gui.add(score_widget)
        # CPU at bottom-right of the window
        rec = Rect(600 + 10, 600 - font_size * 1.5,
                   100, font_size * 1.5)
        cpu_widget = CPUDisplayWidget(self._em, '0',
                                      rect=rec,
                                      txtcolor=(0, 0, 0),
                                      bgcolor=(222, 222, 222))
        gui.add(cpu_widget)
        # the recipe widget added when the game is built
        return gui

    def on_game_built(self, ev):
        """ Build the recipe GUI, and set the spr movement timer. """
        # recipe widget
        evt_recipe_dict = {RecipeMatchEvent: 'recipe'}
        rec = Rect(600, font_size * 1.5, 150, 400)
        # ev.recipes maps tuples of fruit type to score
        rwid = RecipesWidget(self._em,
                             ev.recipes,
                             evt_recipe_dict,
                             rect=rec,
                             txtcolor=(222, 222, 222),
                             bgcolor=(0, 0, 0))
        self.gui.add(rwid)
        # spr movement timer: one model move takes model_mvt_timer ms,
        # split into steps_per_cell interpolation steps for the sprites
        model_mvt_timer = 1000 / ev.fruit_speed
        self.base_spr_timer = model_mvt_timer / steps_per_cell
        self.spr_timer = self.base_spr_timer
        self._em.subscribe(VTickEvent, self.on_tick)

    def on_board_built(self, ev):
        """ Build the board background. """
        width, height = ev.width, ev.height
        board = ev.board  # to obtain cells from coords
        win_height = self.window.get_height()
        bg = Surface((win_height, win_height))
        bg = bg.convert()
        bg.fill(bg_color)
        for left in range(width):
            for top in range(height):
                cell = board.get_cell(left, top)
                bg.blit(cell.image, cell.rect)
        # blit the board bg onto the window's bg
        self.window_bg.blit(bg, (0, 0))
        self._em.subscribe(BoardUpdatedEvent, self.on_board_update)

    def on_fruit_spawned(self, ev):
        """ When a fruit appears, add it to the sprite group """
        fruit = ev.fruit
        fruit_spr = FruitSpr(fruit, self.interp_steps)
        self.fruit_to_spr[fruit] = fruit_spr
        self.fruit_sprites.add(fruit_spr)

    def on_fruit_killed(self, ev):
        """ When a fruit is killed, remove the spr """
        fruit = ev.fruit
        fruit_spr = self.fruit_to_spr[fruit]
        fruit_spr.kill()  # remove fruit_spr from self.fruit_sprites
        del self.fruit_to_spr[fruit]

    def on_board_update(self, ev):
        """ Store the new fruits' positions.
        The actual display happens at clock tick.
        """
        # prepare spr interpolation timer and step counter
        self.spr_timer = self.base_spr_timer
        self.interp_steps = 0  # restart interpolating fruit positions
        for fruit_spr in self.fruit_sprites:
            fruit_spr.resync(self.interp_steps)

    def on_tick(self, ev):
        """ Blit the active board elements and the GUI on the screen. """
        if not pygame.display.get_init():  # skip if the display was shut down
            return
        # spr positions: advance interpolation when the step timer expires
        duration = ev.loopduration
        self.spr_timer -= duration
        if self.spr_timer <= 0:
            self.spr_timer = self.base_spr_timer
            self.interp_steps += 1
            # interpolate 3 positions,
            # but the last one is done when board is updated (so only 2)
            if self.interp_steps < steps_per_cell:
                for fruit in self.fruit_sprites:
                    fruit.resync(self.interp_steps)
        # display
        gui = self.gui
        fruits = self.fruit_sprites
        screen = self.window
        bg = self.window_bg
        # clear the window from all the sprites, replacing them with the bg
        gui.clear(screen, bg)
        fruits.clear(screen, bg)
        gui.update(duration)  # call update() on each sprite of the group
        fruits.update(duration)  # reset the dirty flag to 0
        # collect the display areas that need to be redrawn
        dirty_gui = gui.draw(screen)
        dirty_fruits = fruits.draw(screen)
        dirty_rects = dirty_gui + dirty_fruits
        pygame.display.update(dirty_rects)  # redisplay those areas only
        # flip the screen
        pygame.display.flip()

    def on_speed_change(self, ev):
        """ When the fruit speed changes, update the speed of fruit sprites. """
        model_mvt_timer = 1000 / ev.speed
        self.base_spr_timer = model_mvt_timer / steps_per_cell

    def on_quit(self, ev):
        """ Shut down the display """
        pygame.display.quit()
| StarcoderdataPython |
1868851 | from pyzzle import datasource, etl, recon
from .base_job import JobConfigException | StarcoderdataPython |
1774372 | <reponame>mathieui/twisted
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.disttrial}.
"""
import os
import sys
from twisted.internet.protocol import Protocol, ProcessProtocol
from twisted.internet.defer import fail, gatherResults, maybeDeferred, succeed
from twisted.internet.task import Cooperator, deferLater
from twisted.internet.main import CONNECTION_DONE
from twisted.internet import reactor, interfaces, error
from twisted.python.compat import NativeStringIO as StringIO
from twisted.python.failure import Failure
from twisted.python.lockfile import FilesystemLock
from twisted.test.test_cooperator import FakeScheduler
from twisted.test.proto_helpers import MemoryReactorClock
from twisted.trial.unittest import SynchronousTestCase, TestCase
from twisted.trial.reporter import Reporter, TreeReporter
from twisted.trial.reporter import UncleanWarningsReporterWrapper
from twisted.trial.runner import TrialSuite, ErrorHolder
from twisted.trial._dist.disttrial import DistTrialRunner
from twisted.trial._dist.distreporter import DistReporter
from twisted.trial._dist.worker import LocalWorker
from zope.interface import implementer, verify
class FakeTransport(object):
    """
    A minimal stand-in for a process transport that discards all writes.
    """

    def writeToChild(self, fd, data):
        """
        Accept and silently drop data destined for a child descriptor.
        """
@implementer(interfaces.IReactorProcess)
class CountingReactor(MemoryReactorClock):
    """
    A fake reactor that counts the calls to L{IReactorCore.run},
    L{IReactorCore.stop}, and L{IReactorProcess.spawnProcess}.
    """
    # Class-level counter defaults; the += in the methods below rebinds
    # them as instance attributes on first increment.
    spawnCount = 0
    stopCount = 0
    runCount = 0

    def __init__(self, workers):
        MemoryReactorClock.__init__(self)
        # Spawned process protocols are appended here so the test that
        # owns the list can reap them during cleanup.
        self._workers = workers

    def spawnProcess(self, worker, *args, **kwargs):
        """
        See L{IReactorProcess.spawnProcess}.

        @param worker: See L{IReactorProcess.spawnProcess}.
        @param args: See L{IReactorProcess.spawnProcess}.
        @param kwargs: See L{IReactorProcess.spawnProcess}.
        """
        self._workers.append(worker)
        worker.makeConnection(FakeTransport())
        self.spawnCount += 1

    def stop(self):
        """
        See L{IReactorCore.stop}.
        """
        MemoryReactorClock.stop(self)
        self.stopCount += 1

    def run(self):
        """
        See L{IReactorCore.run}.
        """
        self.runCount += 1
        # The same as IReactorCore.run, except no stop.
        self.running = True
        self.hasRun = True
        for f, args, kwargs in self.whenRunningHooks:
            f(*args, **kwargs)
class CountingReactorTests(SynchronousTestCase):
    """
    Tests for L{CountingReactor}.
    """

    def setUp(self):
        # Fresh worker list and reactor per test; counters start at 0.
        self.workers = []
        self.reactor = CountingReactor(self.workers)

    def test_providesIReactorProcess(self):
        """
        L{CountingReactor} instances provide L{IReactorProcess}.
        """
        verify.verifyObject(interfaces.IReactorProcess, self.reactor)

    def test_spawnProcess(self):
        """
        The process protocol for a spawned process is connected to a
        transport and appended onto the provided C{workers} list, and
        the reactor's C{spawnCount} increased.
        """
        self.assertFalse(self.reactor.spawnCount)

        proto = Protocol()
        for count in [1, 2]:
            self.reactor.spawnProcess(proto, sys.executable,
                                      arg=[sys.executable])
            self.assertTrue(proto.transport)
            self.assertEqual(self.workers, [proto] * count)
            self.assertEqual(self.reactor.spawnCount, count)

    def test_stop(self):
        """
        Stopping the reactor increments its C{stopCount}
        """
        self.assertFalse(self.reactor.stopCount)
        for count in [1, 2]:
            self.reactor.stop()
            self.assertEqual(self.reactor.stopCount, count)

    def test_run(self):
        """
        Running the reactor increments its C{runCount}, does not imply
        C{stop}, and calls L{IReactorCore.callWhenRunning} hooks.
        """
        self.assertFalse(self.reactor.runCount)

        whenRunningCalls = []
        self.reactor.callWhenRunning(whenRunningCalls.append, None)

        for count in [1, 2]:
            self.reactor.run()
            self.assertEqual(self.reactor.runCount, count)
            self.assertEqual(self.reactor.stopCount, 0)
            # each run() replays the whenRunning hooks once more
            self.assertEqual(len(whenRunningCalls), count)
class EternalTerminationPredicateFactory(object):
    """
    A rigged terminationPredicateFactory whose clock never advances, so a
    cooperator task is never told to terminate.
    """

    def __call__(self):
        """
        See: L{task._Timer} -- always report that no time has passed.
        """
        return False
class DistTrialRunnerTests(TestCase):
    """
    Tests for L{DistTrialRunner}.
    """

    def setUp(self):
        """
        Create a runner for testing.
        """
        self.runner = DistTrialRunner(TreeReporter, 4, [],
                                      workingDirectory=self.mktemp())
        # Capture the runner's output in memory instead of stdout.
        self.runner._stream = StringIO()

    def reap(self, workers):
        """
        Reap the workers and trap L{ConnectionDone} failures on their
        C{endDeferred}s.

        @param workers: The workers to reap.
        @type workers: An iterable of L{LocalWorker}
        """
        for worker in workers:
            worker.endDeferred.addErrback(Failure.trap, error.ConnectionDone)
            worker.processEnded(Failure(CONNECTION_DONE))

    def getFakeSchedulerAndEternalCooperator(self):
        """
        Helper to create fake scheduler and cooperator in tests.

        The cooperator has a termination timer which will never inform
        the scheduler that the task needs to be terminated.

        @return: L{tuple} of (scheduler, cooperator)
        """
        scheduler = FakeScheduler()
        cooperator = Cooperator(
            scheduler=scheduler,
            terminationPredicateFactory=EternalTerminationPredicateFactory,
        )
        return scheduler, cooperator

    def test_writeResults(self):
        """
        L{DistTrialRunner.writeResults} writes to the stream specified in the
        init.
        """
        stringIO = StringIO()
        result = DistReporter(Reporter(stringIO))
        self.runner.writeResults(result)
        # Any output at all means the stream was used.
        self.assertTrue(stringIO.tell() > 0)

    def test_createLocalWorkers(self):
        """
        C{createLocalWorkers} iterates the list of protocols and create one
        L{LocalWorker} for each.
        """
        protocols = [object() for x in range(4)]
        workers = self.runner.createLocalWorkers(protocols, "path")
        for s in workers:
            self.assertIsInstance(s, LocalWorker)
        self.assertEqual(4, len(workers))

    def test_launchWorkerProcesses(self):
        """
        Given a C{spawnProcess} function, C{launchWorkerProcess} launches a
        python process with an existing path as its argument.
        """
        protocols = [ProcessProtocol() for i in range(4)]
        arguments = []
        environment = {}

        def fakeSpawnProcess(processProtocol, executable, args=(), env={},
                             path=None, uid=None, gid=None, usePTY=0,
                             childFDs=None):
            # Record what the runner asked to spawn instead of spawning.
            arguments.append(executable)
            arguments.extend(args)
            environment.update(env)

        self.runner.launchWorkerProcesses(
            fakeSpawnProcess, protocols, ["foo"])
        # argv[0] is the executable itself, repeated as first argument
        self.assertEqual(arguments[0], arguments[1])
        self.assertTrue(os.path.exists(arguments[2]))
        self.assertEqual("foo", arguments[3])
        self.assertEqual(os.pathsep.join(sys.path),
                         environment["TRIAL_PYTHONPATH"])

    def test_run(self):
        """
        C{run} starts the reactor exactly once and spawns each of the workers
        exactly once.
        """
        workers = []
        fakeReactor = CountingReactor(workers)
        self.addCleanup(self.reap, workers)

        suite = TrialSuite()
        for i in range(10):
            suite.addTest(TestCase())

        self.runner.run(suite, fakeReactor)
        self.assertEqual(fakeReactor.runCount, 1)
        self.assertEqual(fakeReactor.spawnCount, self.runner._workerNumber)

    def test_runUsedDirectory(self):
        """
        L{DistTrialRunner} checks if the test directory is already locked, and
        if it is generates a name based on it.
        """

        class CountingReactorWithLock(CountingReactor):

            def spawnProcess(oself, worker, *args, **kwargs):
                # 'oself' is the reactor; 'self' (closure) is the test case.
                oself._workers.append(worker)
                self.assertEqual(os.path.abspath(worker._logDirectory),
                                 os.path.abspath(
                                     os.path.join(workingDirectory + "-1",
                                                  str(oself.spawnCount))))
                localLock = FilesystemLock(workingDirectory + "-1.lock")
                self.assertFalse(localLock.lock())
                oself.spawnCount += 1
                worker.makeConnection(FakeTransport())
                worker._ampProtocol.run = lambda *args: succeed(None)

        newDirectory = self.mktemp()
        os.mkdir(newDirectory)
        workingDirectory = os.path.join(newDirectory, "_trial_temp")
        # Hold the lock on the preferred directory so the runner must
        # fall back to the "-1" suffixed one.
        lock = FilesystemLock(workingDirectory + ".lock")
        lock.lock()
        self.addCleanup(lock.unlock)
        self.runner._workingDirectory = workingDirectory

        workers = []
        fakeReactor = CountingReactorWithLock(workers)
        self.addCleanup(self.reap, workers)

        suite = TrialSuite()
        for i in range(10):
            suite.addTest(TestCase())

        self.runner.run(suite, fakeReactor)

    def test_minimalWorker(self):
        """
        L{DistTrialRunner} doesn't try to start more workers than the number of
        tests.
        """
        workers = []
        fakeReactor = CountingReactor(workers)
        self.addCleanup(self.reap, workers)

        self.runner.run(TestCase(), fakeReactor)
        self.assertEqual(fakeReactor.runCount, 1)
        self.assertEqual(fakeReactor.spawnCount, 1)

    def test_runUncleanWarnings(self):
        """
        Running with the C{unclean-warnings} option makes L{DistTrialRunner}
        uses the L{UncleanWarningsReporterWrapper}.
        """
        workers = []
        fakeReactor = CountingReactor(workers)
        self.addCleanup(self.reap, workers)

        self.runner._uncleanWarnings = True
        result = self.runner.run(TestCase(), fakeReactor)
        self.assertIsInstance(result, DistReporter)
        self.assertIsInstance(result.original,
                              UncleanWarningsReporterWrapper)

    def test_runWithoutTest(self):
        """
        When the suite contains no test, L{DistTrialRunner} takes a shortcut
        path without launching any process or starting the reactor.
        """
        # A bare object suffices: the reactor must never be touched.
        fakeReactor = object()
        suite = TrialSuite()
        result = self.runner.run(suite, fakeReactor)
        self.assertIsInstance(result, DistReporter)
        output = self.runner._stream.getvalue()
        self.assertIn("Running 0 test", output)
        self.assertIn("PASSED", output)

    def test_runWithoutTestButWithAnError(self):
        """
        Even if there is no test, the suite can contain an error (most likely,
        an import error): this should make the run fail, and the error should
        be printed.
        """
        fakeReactor = object()
        error = ErrorHolder("an error", Failure(RuntimeError("foo bar")))
        result = self.runner.run(error, fakeReactor)
        self.assertIsInstance(result, DistReporter)
        output = self.runner._stream.getvalue()
        self.assertIn("Running 0 test", output)
        self.assertIn("foo bar", output)
        self.assertIn("an error", output)
        self.assertIn("errors=1", output)
        self.assertIn("FAILED", output)

    def test_runUnexpectedError(self):
        """
        If for some reasons we can't connect to the worker process, the test
        suite catches and fails.
        """

        class CountingReactorWithFail(CountingReactor):

            def spawnProcess(self, worker, *args, **kwargs):
                self._workers.append(worker)
                worker.makeConnection(FakeTransport())
                self.spawnCount += 1
                worker._ampProtocol.run = self.failingRun

            def failingRun(self, case, result):
                return fail(RuntimeError("oops"))

        scheduler, cooperator = self.getFakeSchedulerAndEternalCooperator()

        workers = []
        fakeReactor = CountingReactorWithFail(workers)
        self.addCleanup(self.reap, workers)

        result = self.runner.run(TestCase(), fakeReactor,
                                 cooperator.cooperate)
        self.assertEqual(fakeReactor.runCount, 1)
        self.assertEqual(fakeReactor.spawnCount, 1)
        # Drive the cooperator manually so the failure propagates.
        scheduler.pump()
        self.assertEqual(1, len(result.original.failures))

    def test_runStopAfterTests(self):
        """
        L{DistTrialRunner} calls C{reactor.stop} and unlocks the test directory
        once the tests have run.
        """

        class CountingReactorWithSuccess(CountingReactor):

            def spawnProcess(self, worker, *args, **kwargs):
                self._workers.append(worker)
                worker.makeConnection(FakeTransport())
                self.spawnCount += 1
                worker._ampProtocol.run = self.succeedingRun

            def succeedingRun(self, case, result):
                return succeed(None)

        workingDirectory = self.runner._workingDirectory

        workers = []
        fakeReactor = CountingReactorWithSuccess(workers)

        self.runner.run(TestCase(), fakeReactor)

        def check():
            # Being able to take the lock proves the runner released it.
            localLock = FilesystemLock(workingDirectory + ".lock")
            self.assertTrue(localLock.lock())
            self.assertEqual(1, fakeReactor.stopCount)
            self.assertEqual(list(fakeReactor.triggers.keys()), ["before"])
            self.assertEqual(list(fakeReactor.triggers["before"]), ["shutdown"])
            self.reap(workers)

        return deferLater(reactor, 0, check)

    def test_runWaitForProcessesDeferreds(self):
        """
        L{DistTrialRunner} waits for the worker processes to stop when the
        reactor is stopping, and then unlocks the test directory, not trying to
        stop the reactor again.
        """
        workers = []
        workingDirectory = self.runner._workingDirectory

        fakeReactor = CountingReactor(workers)
        self.runner.run(TestCase(), fakeReactor)

        def check(ign):
            # Let the AMP deferreds fire
            return deferLater(reactor, 0, realCheck)

        def realCheck():
            localLock = FilesystemLock(workingDirectory + ".lock")
            self.assertTrue(localLock.lock())
            # Stop is not called, as it ought to have been called before
            self.assertEqual(0, fakeReactor.stopCount)
            self.assertEqual(list(fakeReactor.triggers.keys()), ["before"])
            self.assertEqual(list(fakeReactor.triggers["before"]), ["shutdown"])

        self.reap(workers)

        # Fire all registered before-shutdown hooks, then verify.
        return gatherResults([
            maybeDeferred(f, *a, **kw)
            for f, a, kw in fakeReactor.triggers["before"]["shutdown"]
        ]).addCallback(check)

    def test_runUntilFailure(self):
        """
        L{DistTrialRunner} can run in C{untilFailure} mode where it will run
        the given tests until they fail.
        """
        called = []

        class CountingReactorWithSuccess(CountingReactor):

            def spawnProcess(self, worker, *args, **kwargs):
                self._workers.append(worker)
                worker.makeConnection(FakeTransport())
                self.spawnCount += 1
                worker._ampProtocol.run = self.succeedingRun

            def succeedingRun(self, case, result):
                # Succeed four times, then fail on the fifth run.
                called.append(None)
                if len(called) == 5:
                    return fail(RuntimeError("oops"))
                return succeed(None)

        workers = []

        fakeReactor = CountingReactorWithSuccess(workers)
        self.addCleanup(self.reap, workers)

        scheduler, cooperator = self.getFakeSchedulerAndEternalCooperator()

        result = self.runner.run(
            TestCase(), fakeReactor, cooperate=cooperator.cooperate,
            untilFailure=True)
        scheduler.pump()
        self.assertEqual(5, len(called))
        self.assertFalse(result.wasSuccessful())
        output = self.runner._stream.getvalue()
        self.assertIn("PASSED", output)
        self.assertIn("FAIL", output)
| StarcoderdataPython |
321912 | import collections
from xml.sax.saxutils import escape
from census.helpers import domain_from_url, is_chaff_domain, is_known
from census.html_writer import HtmlOutlineWriter
from census.report_helpers import get_known_domains, hash_sites_together, sort_sites
CSS = """\
html {
font-family: sans-serif;
}
pre {
font-family: Consolas, monospace;
}
.url {
font-weight: bold;
}
.hash {
color: #aaa;
font-size: 75%;
}
.strategy {
font-style: italic;
}
.tag {
display: inline-block;
background: #cccccc;
font-size: 75%;
margin: 0 .1em .1em;
padding: 0 .3em;
border-radius: 3px;
}
.tag.slow {
background: #d5efa7;
}
.tag.none {
background: #f09393;
}
.tag.drastic {
background: #ebe377;
}
.tag.new {
background: yellow;
border: 1px solid #ddd;
margin: -1px 0;
}
.tag.ssl {
background: #ff8080;
}
.tag.bad {
background: red;
border: 2px solid black;
color: white;
padding: 0 .5em;
}
.tag.version {
background: lime;
border-radius: 1em;
padding: 0 .5em;
}
.info {
margin: 0 0 0 1.5em;
}
"""
def html_report(out_file, sites, old, new, all_courses=None, all_orgs=None, only_new=False):
    """Write the full census HTML report to `out_file`.
    Sections emitted in order: all sites, course-id groups, shared orgs,
    then sites grouped by page hash (per version, per tag, and overall).
    Args:
        out_file: writable file object receiving the HTML.
        sites: iterable of site records to report on.
        old, new: aggregate course counts for the report header.
        all_courses: optional mapping of course id -> sites using it.
        all_orgs: optional mapping of org -> sites using it.
        only_new: forwarded to hash_sites_together.
    """
    sites = sort_sites(sites)
    known_domains = get_known_domains()
    writer = HtmlOutlineWriter(out_file, css=CSS, title=f"Census: {len(sites)} sites")
    header = f"{len(sites)} sites: {old}"
    if new != old:
        header += f" → {new}"
    writer.start_section(header)
    for site in sites:
        write_site(site, writer, known_domains)
    writer.end_section()
    if all_courses:
        total_course_ids = sum(len(sites) for sites in all_courses.values())
        writer.start_section(f"<p>Course IDs: {total_course_ids}</p>")
        # Two stable sorts: first alphabetical by course id, then by
        # descending group size, so equal-sized groups stay alphabetized.
        all_courses_items = sorted(all_courses.items())
        all_courses_items = sorted(all_courses_items, key=lambda item: len(item[1]), reverse=True)
        for course_id, cid_sites in all_courses_items:
            writer.start_section(f"{course_id}: {len(cid_sites)}")
            for site in sorted(cid_sites, key=lambda s: s.url):
                writer.write(f"<p><a class='url' href='{site.url}'>{site.url}</a></p>")
            writer.end_section()
        writer.end_section()
    if all_orgs:
        # Only orgs that appear on more than one site are interesting here.
        shared_orgs = [(org, sites) for org, sites in all_orgs.items() if len(sites) > 1]
        writer.start_section(f"<p>Shared orgs: {len(shared_orgs)}</p>")
        for org, org_sites in sorted(shared_orgs):
            writer.start_section(f"{org}: {len(org_sites)}")
            for site in sorted(org_sites, key=lambda s: s.url):
                writer.write(f"<p><a class='url' href='{site.url}'>{site.url}</a></p>")
            writer.end_section()
        writer.end_section()
    hashed_sites = hash_sites_together(sites, known_domains, only_new)
    # Index the hashed groups by version and by tag for the next sections.
    versions = collections.defaultdict(list)
    tags = collections.defaultdict(list)
    for hashed_site in hashed_sites:
        versions[hashed_site.version or "none"].append(hashed_site)
        for tag in hashed_site.tags():
            tags[tag].append(hashed_site)
    writer.start_section(f"<p>Versions</p>")
    for version in sorted(versions.keys()):
        hsites = versions[version]
        writer.start_section(f"<p>{version}: {len(hsites)}</p>")
        for hashed_site in hsites:
            write_hashed_site(hashed_site, writer, known_domains)
        writer.end_section()
    writer.end_section()
    writer.start_section(f"<p>Tags</p>")
    for tag in sorted(tags.keys()):
        hsites = tags[tag]
        writer.start_section(f"<p>{tag}: {len(hsites)}</p>")
        for hashed_site in hsites:
            write_hashed_site(hashed_site, writer, known_domains)
        writer.end_section()
    writer.end_section()
    writer.start_section(f"<p>Hashed: {len(hashed_sites)}</p>")
    for hashed_site in hashed_sites:
        write_hashed_site(hashed_site, writer, known_domains)
    writer.end_section()
def write_hashed_site(hashed_site, writer, known_domains):
    """Render one hashed-site group: header line, member sites, extra info."""
    badges = Tags()
    # Evaluate all three flags (each is a cheap accessor) and add badges.
    for flagged, label in (
        (hashed_site.all_chaff(), "Chaff"),
        (hashed_site.is_new, "New"),
        (hashed_site.all_ssl_err(), "SSL"),
    ):
        if flagged:
            badges.add(label)
    url = hashed_site.best_url()
    course_count = hashed_site.current_courses()
    site_count = len(hashed_site.sites)
    header = (
        f"<a class='url' href='{url}'>{url}</a> "
        f"<b>{course_count}</b> {pluralize(course_count, 'course')}, "
        f"{site_count} {pluralize(site_count, 'site')} {badges.html()}"
    )
    writer.start_section(header)
    for member in hashed_site.sites:
        write_site(member, writer, known_domains)
    extra = hashed_site.other_info()
    if extra:
        joined = '; '.join(sorted(extra))
        writer.write(f"<p class='info'><b>Info:</b> {joined}</p>")
    writer.end_section()
def write_site(site, writer, known_domains):
    """Render one site's report section: course counts, tags, and attempts.
    Args:
        site: census site record with .url, .latest_courses,
            .current_courses, .styled_tags() and .tried attributes.
        writer: HtmlOutlineWriter used to emit nested sections.
        known_domains: collection of domains considered already known.
    """
    old, new = site.latest_courses, site.current_courses
    tags = Tags()
    new_text = ""
    if new is None:
        tags.add("None")
    else:
        if new != old:
            new_text = f"<b> → {new}</b>"
            # Flag "Drastic" swings: changed by more than 10 courses AND the
            # old/new ratio falls outside the [0.5, 1.5] band.
            # BUGFIX: the previous chained comparison `0.5 >= x >= 1.5` could
            # never be true, so `not (...)` was always true and any >10 change
            # was flagged regardless of ratio.
            if old != 0 and new != 0 and abs(new - old) > 10 and not (0.5 <= old/new <= 1.5):
                tags.add("Drastic")
    if is_chaff_domain(domain_from_url(site.url)):
        tags.add("Chaff")
    elif not is_known(site, known_domains):
        tags.add("New")
    # Times are not right now that we limit requests, not sites.
    #if site.time > 5:
    #    tags.add(f"{site.time:.1f}s", "slow")
    for tag, style in sorted(site.styled_tags()):
        tags.add(tag, style)
    writer.start_section(f"<a class='url' href='{site.url}'>{site.url}</a>: {old}{new_text} {tags.html()}")
    for attempt in site.tried:
        strategy = attempt.strategy
        tb = attempt.error
        if tb is not None:
            lines = tb.splitlines()
            if len(lines) > 1:
                # Last traceback line (truncated) is the most informative.
                line = lines[-1][:100]
                writer.start_section(f"<span class='strategy'>{strategy}:</span> {escape(line)}")
                writer.write("""<pre class="stdout">""")
                writer.write(escape(tb))
                writer.write("""</pre>""")
                writer.end_section()
            else:
                # Guard against an empty error string, escape like the
                # multi-line branch, and close the <p> tag (was left open).
                first = escape(lines[0]) if lines else ""
                writer.write(f"<p>{strategy}: {first}</p>")
        else:
            writer.write(f"<p>{strategy}: counted {attempt.courses} courses</p>")
    writer.end_section()
class Tags:
    """Accumulates styled HTML badge spans and renders them as one string."""
    def __init__(self):
        self.tags = []
    def add(self, text, tag_name=None):
        """Append a badge; CSS class defaults to the lowercased text."""
        css_class = tag_name or text.lower()
        badge = f"<span class='tag {css_class}'>{text}</span>"
        self.tags.append(badge)
    def html(self):
        """Return all badges concatenated into a single HTML fragment."""
        return ''.join(self.tags)
def pluralize(n, s, ss=None):
    """Make a word plural (in English).
    Args:
        n: the count deciding singular vs plural.
        s: the singular form.
        ss: optional explicit plural form; defaults to s + 's'.
    """
    if ss is None:
        ss = s + "s"
    return s if n == 1 else ss
| StarcoderdataPython |
3462535 | <reponame>chaurwu/DunkIt
import RPi.GPIO as GPIO
import time
import requests
# HC-SR04-style ultrasonic sensor pins (BCM numbering).
TRIG = 23
ECHO = 24
GPIO.setmode(GPIO.BCM)
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)
print("Start detecting basketballs...")
while True:
    GPIO.output(TRIG, False)
    # time.sleep(0.01)
    # Emit a ~10us trigger pulse to start a distance measurement.
    GPIO.output(TRIG, True)
    time.sleep(0.00001)
    GPIO.output(TRIG, False)
    # Time the echo pulse; its duration encodes the round-trip distance.
    # NOTE(review): if ECHO never toggles these loops spin forever, and if
    # ECHO is already high the first loop body never runs, leaving
    # pulse_start unbound -- confirm sensor behavior on hardware.
    while GPIO.input(ECHO) == 0:
        pulse_start = time.time()
    while GPIO.input(ECHO) == 1:
        pulse_end = time.time()
    # 17150 cm/s is half the speed of sound (round trip -> one way).
    distance = (pulse_end - pulse_start) * 17150
    distance = round(distance, 2)
    if distance < 16:
        print("Distance:", distance, "cm")
        # NOTE(review): no debounce/delay here -- a ball that stays in range
        # will increment the score repeatedly; confirm that is intended.
        requests.get("http://localhost:5000/api/score/increment")
print("Stop detecting basketballs.") | StarcoderdataPython |
5131568 | <filename>dev.py<gh_stars>1-10
# Development configuration constants.
# NOTE(review): debug/xheaders/static_path look like Tornado application
# settings -- confirm against the server setup code that consumes this module.
debug = True
xheaders = False
static_path = 'static'
# Font asset used by the iokit rendering components.
font_file = 'src/cloudplayer/iokit/font/RobotoMono-Regular.ttf'
cookie_file = 'tok_v1.cookie'
# Origins allowed for cross-origin requests: local dev servers plus the
# deployed radio front-ends.
allowed_origins = [
    'http://localhost:4200',
    'http://localhost:8050',
    'http://radio.cloud-player.io',
    'https://radio.cloud-player.io'
]
api_base_url = 'https://api.cloud-player.io'
3330764 | <gh_stars>0
import sys
sys.path.insert(0, '..')
from ast_node import ASTNode
class InsertInto(ASTNode):
    """AST node for an INSERT INTO statement targeting a named table."""
    def __init__(self, table_name, insert_list, line, column):
        ASTNode.__init__(self, line, column)
        self.table_name = table_name  # name of the table being inserted into
        self.insert_list = insert_list  # list of InsertItem-style entries
    def execute(self, table, tree):
        """Delegate to the base node's execute; currently always succeeds."""
        super().execute(table, tree)
        return True
class InsertItem(ASTNode):
    """AST node holding one (columns, values) pair of an INSERT statement."""
    def __init__(self, column_list, values_list, line, column):
        ASTNode.__init__(self, line, column)
        self.column_list = column_list  # column names targeted by the insert
        self.values_list = values_list  # values to insert, matching columns
    def execute(self, table, tree):
        """Delegate to the base node's execute; currently always succeeds."""
        super().execute(table, tree)
        return True | StarcoderdataPython |
3472633 | <filename>gehomesdk/erd/values/ac/__init__.py
from .common_enums import *
from .wac_enums import *
from .sac_enums import * | StarcoderdataPython |
6633444 | <gh_stars>0
"""
File: blur.py
Name: 黃稚程 mike
-------------------------------
This file shows the original image, smiley-face.png,
first, and then shows a blurred copy for comparison.
The blur algorithm replaces each pixel with the
average RGB values of its nearest neighbors.
"""
from simpleimage import SimpleImage
def blur(img):
    """
    :param img: image, the original image
    :return: image, a blurred copy in which every pixel is the average of
             its in-bounds 3x3 neighborhood in the original
    """
    result = SimpleImage.blank(img.width, img.height)
    for x in range(img.width):
        for y in range(img.height):
            r_sum = g_sum = b_sum = 0
            neighbors = 0
            # Clamp the 3x3 window to the image borders.
            for nx in range(max(0, x - 1), min(img.width, x + 2)):
                for ny in range(max(0, y - 1), min(img.height, y + 2)):
                    neighbor = img.get_pixel(nx, ny)
                    r_sum += neighbor.red
                    g_sum += neighbor.green
                    b_sum += neighbor.blue
                    neighbors += 1
            target = result.get_pixel(x, y)
            target.red = r_sum / neighbors
            target.green = g_sum / neighbors
            target.blue = b_sum / neighbors
    return result
def main():
    """
    Show the original smiley-face image, then show a copy that has been
    blurred sixteen times.
    """
    original = SimpleImage("images/smiley-face.png")
    original.show()
    result = blur(original)
    for _ in range(15):
        result = blur(result)
    result.show()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8011994 | <gh_stars>0
from datetime import date
from django.shortcuts import redirect
class BootstrapFormMixin:
    """Form mixin that tags every field widget with the 'form-control' class."""
    # Populated by the form machinery; empty fallback for safety.
    fields = {}
    def _init_bootstrap_form_controls(self):
        """Ensure each widget has an attrs dict and append 'form-control'."""
        for field in self.fields.values():
            widget = field.widget
            if not hasattr(widget, 'attrs'):
                widget.attrs = {}
            widget.attrs.setdefault('class', '')
            widget.attrs['class'] += ' form-control'
class UserAndProfileData:
    """Constant payloads shared by tests for a user and their profile."""
    # Credential payload -- presumably fed to the login/registration form;
    # confirm against the test modules that consume it.
    VALID_USER_CREDENTIALS = {
        'email': '<EMAIL>',
        'password': '<PASSWORD>',
    }
    # Profile field values matching the profile model's fields.
    VALID_PROFILE_DATA = {
        'first_name': 'Petko',
        'last_name': 'Stankov',
        'picture': 'http://petko.com',
        'date_of_birth': date(2000, 4, 28),
        'gender': 'male',
        'account_balance': 100,
    }
class RedirectToDashboard:
    """View mixin: already-authenticated users are redirected to 'index'."""
    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return super().dispatch(request, *args, **kwargs)
        return redirect('index')
| StarcoderdataPython |
5181970 | <gh_stars>1-10
from zabbix_enums.common import _ZabbixEnum
class ItemAllowTraps(_ZabbixEnum):
    """Yes/no values for the item allow_traps field."""
    NO = 0
    YES = 1
class ItemFollowRedirects(_ZabbixEnum):
    """Yes/no values for the item follow_redirects field."""
    NO = 0
    YES = 1
class ItemVerifyHost(_ZabbixEnum):
    """Yes/no values for the item verify_host field."""
    NO = 0
    YES = 1
class ItemVerifyPeer(_ZabbixEnum):
    """Yes/no values for the item verify_peer field."""
    NO = 0
    YES = 1
class ItemAuthTypeHTTP(_ZabbixEnum):
    """HTTP authentication scheme values for an item."""
    NONE = 0
    BASIC = 1
    NTLM = 2
    KERBEROS = 3
class ItemOutputFormat(_ZabbixEnum):
    """Output format values for an item."""
    RAW = 0
    JSON = 1
class ItemPostType(_ZabbixEnum):
    """Post body type values for an item (note: value 1 is unused here)."""
    RAW = 0
    JSON = 2
    XML = 3
class ItemRequestMethod(_ZabbixEnum):
    """HTTP request method values for an item."""
    GET = 0
    POST = 1
    PUT = 2
    HEAD = 3
class ItemRetrieveMode(_ZabbixEnum):
    """Which part of the HTTP response an item retrieves."""
    BODY = 0
    HEADERS = 1
    BODY_AND_HEADERS = 2
    # BOTH is an alias of BODY_AND_HEADERS (same value).
    BOTH = 2
| StarcoderdataPython |
48803 | import shared_module
from shared_module import module_function as my_function, ModuleClass
class NewParent(object):
    # NOTE(review): this module reads like a refactoring-test fixture; the
    # misspelled MODULE_CONTANT presumably mirrors shared_module on purpose
    # -- confirm before "fixing" it.
    def do_useful_stuff(self):
        """Touch the shared module's constant, function alias, and class."""
        i = shared_module.MODULE_CONTANT
        my_function()
        ModuleClass() | StarcoderdataPython |
244314 | """
Purpose: To simulate expected educational attainment gains from embryo selection between families.
Date: 10/09/2019
"""
import numpy as np
import pandas as pd
from scipy.stats import norm
from between_family_ea_simulation import (
get_random_index,
get_max_pgs_index,
select_embryos_by_index,
calc_phenotype_diffs
)
from scipy.stats import norm
import argparse
def calc_within_family_values(n, num_embryos, heritability, correlation_mz, rsquared):
    """
    Purpose: Simulate polygenic scores (ghat_i) and phenotype liabilities
    (liability_i) for each of num_embryos embryos per family, i={1,...,num_embryos}.
    Arguments:
        n: integer number of parent pairs
        num_embryos: integer number of embryos for each parent pair
        heritability: heritability of clinical trait
        correlation_mz: twin correlation of clinical trait
        rsquared: Ancestry-specific R^2 value for PGS prediction of trait
    Returns:
        {'pgs': df_pgs, 'liability': df_liability}; each DataFrame has
        shape (n x num_embryos) and holds polygenic scores and phenotype
        liability values, respectively.
    """
    n_pairs = int(n)
    sib_sd = (heritability / 2) ** 0.5
    pgs_noise_sd = (heritability ** 2 / rsquared - heritability) ** 0.5
    env_sd = (1 - correlation_mz) ** 0.5
    # Mid-parent additive genetic component, shared by all embryos of a pair.
    a_mean = np.random.normal(loc=0, scale=sib_sd, size=n_pairs)
    # Shared environmental component (constant within-family).
    c = np.random.normal(loc=0, scale=(correlation_mz - heritability) ** 0.5, size=n_pairs)
    df_a = pd.DataFrame()
    df_pgs = pd.DataFrame()
    df_liability = pd.DataFrame()
    for i in range(num_embryos):
        a_col = 'a_{0}'.format(i)
        # Embryo-specific additive component drawn around the parental mean.
        df_a[a_col] = np.random.normal(loc=a_mean, scale=sib_sd, size=n_pairs)
        # Noisy polygenic-score estimate of the additive component.
        df_pgs['ghat_{0}'.format(i)] = np.random.normal(
            loc=df_a[a_col], scale=pgs_noise_sd, size=n_pairs)
        # Liability = additive + shared environment + unique environment.
        df_liability['liability_{0}'.format(i)] = np.random.normal(
            loc=(df_a[a_col] + c), scale=env_sd, size=n_pairs)
    return {'pgs': df_pgs, 'liability': df_liability}
def process_arguments():
    """
    Parses command line arguments.
    Args:
    -----
    None
    Returns:
    --------
    args: :class:`argparse.Namespace`
        parsed arguments from the command line
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--n', default=1000000, type=int,
                        help='Number of parent pairs to simulate. Defaults to 1e6.')
    parser.add_argument('--embryos', default=10, type=int,
                        help='Number of embryos from which to choose. Defaults to 10.')
    parser.add_argument('--ancestry', required=True, type=str,
                        help='Ancestry of interest.')
    # The four nargs='+' lists below are parallel arrays: index i of each
    # describes the same condition.
    parser.add_argument('--heritability', required=True, type=float, nargs='+',
                        help='List of heritabilities on the liability scale for conditions. \
                        Index of heritabilities must match index of rsquared, correlation_mz, and prevalence.')
    parser.add_argument('--rsquared', required=True, type=float, nargs='+',
                        help='List of r2 for conditions. Index of r2 must match index of \
                        heritability, correlation_mz, and prevalence.')
    parser.add_argument('--correlation_mz', required=True, type=float, nargs='+',
                        help='List of monozygotic twin correlations on the liability scale for conditions. \
                        Index of correlations must match index of rsquared, heritabilities, and prevalence.')
    parser.add_argument('--prevalence', required=True, type=float, nargs='+',
                        help='List of prevalences for conditions. \
                        Index of prevalences must match index of rsquared, heritabilities, and correlations.')
    parser.add_argument('--condition', required=True, type=str, nargs='+',
                        help='Name of conditions. \
                        Index must match index of rsquared, heritabilities, and correlations.')
    return parser.parse_args()
if __name__ == "__main__":
    # import arguments
    args = process_arguments()
    N = args.n
    NUM_EMBRYOS = args.embryos
    HERITABILITY = args.heritability # list
    RSQUARED = args.rsquared # list
    CORR_MZ = args.correlation_mz # list
    PREVALENCE = args.prevalence # list
    CONDITION = args.condition # list
    # check lengths -- all per-condition lists must be parallel arrays
    assert len(HERITABILITY) == len(RSQUARED), 'Your lists aren\'t the same length!'
    assert len(HERITABILITY) == len(CORR_MZ), 'Your lists aren\'t the same length!'
    assert len(HERITABILITY) == len(PREVALENCE), 'Your lists aren\'t the same length!'
    assert len(HERITABILITY) == len(CONDITION), 'Your lists aren\'t the same length!'
    # print intro
    print('This is a simulation for within-family selection of embryos.')
    print('This analysis is for parents of ' + args.ancestry + ' ancestry who are choosing from ' + str(NUM_EMBRYOS) + ' embryos.')
    #### begin simulation
    for i in range(len(args.condition)):
        # calculate values
        values = calc_within_family_values(n=N, num_embryos=NUM_EMBRYOS,
            heritability=HERITABILITY[i], correlation_mz=CORR_MZ[i], rsquared=RSQUARED[i])
        # generate indices: random pick vs highest polygenic score
        rand_index = get_random_index(NUM_EMBRYOS, N)
        max_index = get_max_pgs_index(values['pgs'])
        # get max/random liability values
        max_liability = select_embryos_by_index(values['liability'], max_index)
        rand_liability = select_embryos_by_index(values['liability'], rand_index)
        # get fraction of individuals who will have disease (convert liability to binary trait)
        # Disease iff liability falls below the quantile matching prevalence.
        max_frac = (max_liability <= norm.ppf(PREVALENCE[i])).astype(int).mean()
        rand_frac = (rand_liability <= norm.ppf(PREVALENCE[i])).astype(int).mean()
        # print summary
        print('For ' + CONDITION[i] + ', the within-family prevalence is ' + str(rand_frac['selected_values']) + \
            ' in random embryos and ' + str(max_frac['selected_values']) + ' in selected embryos.')
9614295 | <gh_stars>1-10
#!/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
def parse_requirements(requirements):
    """Read a pip requirements file and return its non-comment, non-blank
    lines (newlines stripped)."""
    with open(requirements) as handle:
        entries = [raw.strip("\n") for raw in handle
                   if not raw.startswith("#")]
    return [entry for entry in entries if entry.strip()]
# Distribution metadata for the hachiko-bapu pitch-timeline tool.
setup(
    name="hachiko-bapu", version="0.1.8",
    python_requires=">=3.5",
    author="duangsuse", author_email="<EMAIL>",
    url="https://github.com/duangsuse-valid-projects/Hachiko",
    description="Simple pygame GUI tool for creating pitch timeline",
    long_description="""
Simple tool for creating pitch timeline, this program divides midi creation into pitches and timeline part.
When creating timeline, press A to give position/duration information, and use S to split different notes directly when holding A.
This program requires system FluidSynth library to run, this package also provides command utility srt2mid and lrc_merge.
""",
    classifiers=[
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Multimedia",
        "Topic :: Multimedia :: Sound/Audio",
        "Topic :: Utilities"
    ],
    packages=find_packages(),
    # Bundle SoundFont assets shipped inside the packages.
    package_data={ "": ["*.sf2"] },
    install_requires=parse_requirements("requirements.txt"),
    # Optional feature groups installable via extras.
    extras_require={
        "synthesize buffer": ["numpy>=1.0"],
        "funutils codegen": ["pyparsing>=2.4"]
    },
    # Console entry points: the main GUIs plus helper CLI converters.
    entry_points={
        "console_scripts": [
            "hachiko = hachiko_bapu.hachi:main",
            "hachiko-groups = hachiko_bapu.hachi_groups:main",
            "srt2mid = hachiko_bapu.cli_tools.srt2mid:main",
            "lrc_merge = hachiko_bapu.cli_tools.lrc_merge:main"
        ]
    })
| StarcoderdataPython |
321651 | <reponame>fisabiliyusri/proxy
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import NamedTuple
# SOCKS4 request command codes as an immutable named record.
class Socks4Operations(NamedTuple):
    CONNECT: int
    BIND: int
socks4Operations = Socks4Operations(1, 2)
| StarcoderdataPython |
12803735 | import copy
import collections
import inspect
import itertools
import sys
import time
import types
import uuid
from pyevents.event import Event
from pyevents.manager import EventDispatcher
from specter.util import (
get_real_last_traceback, convert_camelcase, find_by_metadata,
extract_metadata, children_with_tests_with_metadata,
remove_empty_entries_from_dict, find_by_names, children_with_tests_named,
)
import six
class TimedObject(object):
    """Records wall-clock start/stop times and exposes the elapsed span."""
    def __init__(self):
        super(TimedObject, self).__init__()
        self.start_time = 0
        self.end_time = 0
    def start(self):
        """Mark the beginning of the timed span."""
        self.start_time = time.time()
    def stop(self):
        """Mark the end of the timed span."""
        self.end_time = time.time()
    @property
    def elapsed_time(self):
        """Seconds between start and stop; clamped so it is never negative."""
        return max(self.end_time - self.start_time, 0)
class CaseWrapper(TimedObject):
    """Runtime wrapper around a single test-case function.
    Tracks timing (via TimedObject), expectation results, skip/incomplete
    state and any captured error, so the case can later be serialized.
    """
    def __init__(self, case_func, parent, execute_kwargs=None, metadata=None):
        """
        Args:
            case_func: the unbound test function to execute.
            parent: the owning Describe instance.
            execute_kwargs: optional kwargs passed to the case when run
                (used by data-driven specs).
            metadata: optional dict of user metadata attached to the case.
        """
        super(CaseWrapper, self).__init__()
        self.id = str(uuid.uuid4())
        self.case_func = case_func
        self.expects = []
        self.parent = parent
        self.failed = None
        self.error = None
        self.skipped = False
        self.incomplete = False
        self.skip_reason = None
        self.execute_kwargs = execute_kwargs
        # BUGFIX: the previous signature used `metadata={}`, a single dict
        # shared by every CaseWrapper created without explicit metadata, so
        # one case's metadata mutations leaked into all the others.
        self.metadata = {} if metadata is None else metadata
    def serialize(self):
        """ Serializes the CaseWrapper object for collection.
        Warning, this will only grab the available information.
        It is strongly recommended that you only call this once all specs
        and tests have completed.
        """
        expects = [exp.serialize() for exp in self.expects]
        converted_dict = {
            'id': self.id,
            'name': self.pretty_name,
            'raw_name': self.name,
            'doc': self.doc,
            'error': self.error,
            'skipped': self.skipped,
            'skip_reason': self.skip_reason,
            'execute_kwargs': self.safe_execute_kwargs,
            'metadata': self.metadata,
            'start': self.start_time,
            'end': self.end_time,
            'expects': expects,
            'success': self.success
        }
        return remove_empty_entries_from_dict(converted_dict)
    def execute(self, context=None):
        """Run the wrapped case function, recording timing and outcome.
        Args:
            context: object to bind as `self` for the case function;
                defaults to this wrapper.
        """
        kwargs = {}
        if self.execute_kwargs:
            kwargs.update(self.execute_kwargs)
        self.start()
        try:
            types.MethodType(self.case_func, context or self)(**kwargs)
        except TestIncompleteException as e:
            self.incomplete = True
            # If thrown during decorators
            if e.real_func:
                self.case_func = e.real_func
        except TestSkippedException as e:
            self.skipped = True
            self.skip_reason = e.reason if type(e.reason) is str else ''
            # If thrown during decorators
            if e.real_func:
                self.case_func = e.real_func
        except FailedRequireException:
            # A failed `require` aborts the case; the expectation itself
            # carries the failure details.
            pass
        except Exception as e:
            self.error = get_real_last_traceback(e)
        self.stop()
    @property
    def name(self):
        """Snake-cased case name derived from the function name."""
        return convert_camelcase(self.case_func.__name__)
    @property
    def pretty_name(self):
        """Human-readable case name (underscores become spaces)."""
        return self.case_func.__name__.replace('_', ' ')
    @property
    def doc(self):
        """The case function's docstring."""
        return self.case_func.__doc__
    @property
    def success(self):
        """True when the case finished with no failure, no error, and no
        unsuccessful expectation."""
        return (self.complete and not self.failed and not self.error and
                len([exp for exp in self.expects if not exp.success]) == 0)
    @property
    def complete(self):
        """True once execute() has recorded a stop time."""
        return self.end_time > 0.0
    @property
    def safe_execute_kwargs(self):
        """Copy of execute_kwargs with non-serializable values stringified."""
        safe_kwargs = copy.deepcopy(self.execute_kwargs)
        if not safe_kwargs:
            return
        for k, v in six.iteritems(safe_kwargs):
            if type(v) not in [str, int, list, bool, dict]:
                safe_kwargs[k] = str(v)
        return safe_kwargs
    def __getstate__(self):
        # Replace unpicklable/cyclic members with their ids for pickling.
        altered = dict(self.__dict__)
        if 'case_func' in altered:
            altered['case_func'] = self.id
        if 'parent' in altered:
            altered['parent'] = self.parent.id
        return altered
    def __eq__(self, other):
        if isinstance(other, CaseWrapper):
            return self.id == other.id
        return False
    def __ne__(self, other):
        return not self == other
# A test suite: collects case functions and nested Describe subclasses,
# executes them, and aggregates timing/success for reporting.  (No class
# docstring on purpose: the `doc` property exposes __doc__ to serialize().)
class Describe(EventDispatcher):
    __FIXTURE__ = False
    #: List of methods to be called after every test
    hooks = ()
    def __init__(self, parent=None):
        super(Describe, self).__init__()
        self.id = str(uuid.uuid4())
        wrappers = self.__wrappers__
        self.parent = parent
        self.cases = wrappers
        self.describes = [desc_type(parent=self)
                          for desc_type in self.describe_types]
        self._num_completed_cases = 0
        self._state = self.__create_state_obj__()
    @property
    def name(self):
        """Snake-cased suite name derived from the class name."""
        return convert_camelcase(self.__class__.__name__)
    @property
    def complete(self):
        """True once every case and every child describe has finished."""
        cases_completed = self._num_completed_cases == len(self.cases)
        descs_not_completed = [desc for desc in self.describes
                               if not desc.complete]
        return cases_completed and len(descs_not_completed) == 0
    @property
    def real_class_path(self):
        """Dotted module path through all ancestor describes to this class."""
        ancestors = [self.__class__.__name__]
        parent = self.parent
        while parent:
            ancestors.insert(0, parent.__class__.__name__)
            parent = parent.parent
        real_path = '.'.join(ancestors)
        return '{base}.{path}'.format(base=self.__module__, path=real_path)
    @property
    def doc(self):
        """The suite class's docstring."""
        return type(self).__doc__
    @property
    def total_time(self):
        """Sum of elapsed times of all cases plus nested describes."""
        total = 0.0
        for key, case in six.iteritems(self.cases):
            total += case.elapsed_time
        for describe in self.describes:
            total += describe.total_time
        return total
    @property
    def success(self):
        """True unless any case or any child describe reports failure."""
        ok = True
        case_successes = [case.success
                          for key, case in six.iteritems(self.cases)]
        spec_successes = [spec.success for spec in self.describes]
        if case_successes and False in case_successes:
            ok = False
        if spec_successes and False in spec_successes:
            ok = False
        return ok
    @property
    def __wrappers__(self):
        """Fresh {id: CaseWrapper} mapping for this suite's case functions."""
        wrappers = {}
        for case_func in self.case_funcs:
            case_func, metadata = extract_metadata(case_func)
            wrapper = CaseWrapper(case_func, parent=self, metadata=metadata)
            wrappers[wrapper.id] = wrapper
        return wrappers
    @classmethod
    def __cls_members__(cls):
        """Merged attribute dict of this class and its direct bases."""
        all_members = {}
        classes = list(cls.__bases__) + [cls]
        for klass in classes:
            pairs = dict((key, val) for key, val in vars(klass).items())
            all_members.update(pairs)
        return all_members
    @classmethod
    def __get_all_child_describes__(cls):
        """Recursively collect all nested Describe subclasses plus cls."""
        members = cls.__cls_members__()
        child_describes = [val for key, val in members.items()
                           if Describe.plugin_filter(val)]
        all_children = child_describes + [cls]
        for child in child_describes:
            all_children.extend(child.__get_all_child_describes__())
        return set(all_children)
    @property
    def __members__(self):
        return type(self).__cls_members__()
    @property
    def describe_types(self):
        """Nested Describe subclasses declared on this suite."""
        return [val for key, val in self.__members__.items()
                if Describe.plugin_filter(val)]
    @property
    def case_funcs(self):
        """Plain functions on this suite that count as test cases."""
        return [val for key, val in self.__members__.items()
                if Describe.case_filter(val)]
    @property
    def top_parent(self):
        """The root Describe of this suite tree (self when unparented)."""
        parent_above = last_parent = self.parent or self
        while parent_above is not None:
            last_parent = parent_above
            parent_above = parent_above.parent
        return last_parent
    @classmethod
    def is_fixture(cls):
        """True only when __FIXTURE__ is set directly on this class."""
        return vars(cls).get('__FIXTURE__') is True
    def serialize(self):
        """ Serializes the Spec/Describe object for collection.
        Warning, this will only grab the available information.
        It is strongly recommended that you only call this once all specs
        and tests have completed.
        """
        cases = [case.serialize() for key, case in six.iteritems(self.cases)]
        specs = [spec.serialize() for spec in self.describes]
        converted_dict = {
            'id': self.id,
            'name': self.name,
            'class_path': self.real_class_path,
            'doc': self.doc,
            'cases': cases,
            'specs': specs
        }
        return converted_dict
    def _run_hooks(self):
        """Calls any registered hooks providing the current state."""
        for hook in self.hooks:
            getattr(self, hook)(self._state)
    def __create_state_obj__(self):
        """ Generates the clean state object magic. Here be dragons! """
        # Rebuild the user's inheritance chain (minus framework classes) on
        # top of a generic object so cases run against clean per-suite state.
        stops = [Describe, Spec, DataDescribe, EventDispatcher]
        mros = [mro for mro in inspect.getmro(type(self)) if mro not in stops]
        mros.reverse()
        # Create generic object
        class GenericStateObj(object):
            def before_all(self):
                pass
            def after_all(self):
                pass
            def before_each(self):
                pass
            def after_each(self):
                pass
        # Duplicate inheritance chain
        chain = [GenericStateObj]
        for mro in mros:
            cls_name = '{0}StateObj'.format(mro.__name__)
            cls = type(cls_name, (chain[-1:][0],), dict(mro.__dict__))
            cls.__spec__ = self
            chain.append(cls)
        # Removing fallback
        chain.pop(0)
        state_cls = chain[-1:][0]
        return state_cls()
    def _sort_cases(self, cases):
        """Order cases by function name for a stable execution order."""
        sorted_cases = sorted(
            cases.items(), key=lambda case: case[1].case_func.__name__)
        return collections.OrderedDict(sorted_cases)
    def parallel_execution(self, manager, select_metadata=None,
                           select_tests=None):
        """Queue cases on the parallel manager instead of running inline."""
        self.top_parent.dispatch(DescribeEvent(DescribeEvent.START, self))
        self._state.before_all()
        for key, case in six.iteritems(self.cases):
            manager.add_to_queue(case)
        for describe in self.describes:
            describe.execute(select_metadata, select_tests, manager)
    def standard_execution(self, select_metadata=None, select_tests=None):
        """Run cases inline with before/after hooks, then child describes."""
        self.top_parent.dispatch(DescribeEvent(DescribeEvent.START, self))
        self._state.before_all()
        # Execute Cases
        for key, case in six.iteritems(self.cases):
            self._state.before_each()
            case.execute(context=self._state)
            self._state.after_each()
            self._run_hooks()
            self._num_completed_cases += 1
            self.top_parent.dispatch(TestEvent(case))
        # Execute Suites
        for describe in self.describes:
            describe.execute(
                select_metadata=select_metadata,
                select_tests=select_tests
            )
        self._state.after_all()
        self.top_parent.dispatch(DescribeEvent(DescribeEvent.COMPLETE, self))
    def execute(self, select_metadata=None, select_tests=None,
                parallel_manager=None):
        """Filter cases/describes by metadata or names, then run them."""
        if select_metadata:
            self.cases = find_by_metadata(select_metadata, self.cases)
            self.describes = children_with_tests_with_metadata(
                select_metadata, self)
        if select_tests:
            self.cases = find_by_names(select_tests, self.cases)
            self.describes = children_with_tests_named(select_tests, self)
        # If it doesn't have tests or describes don't run it
        if len(self.cases) <= 0 and len(self.describes) <= 0:
            return
        # Sort suite case funcs to ensure stable order of execution
        self.cases = self._sort_cases(self.cases)
        if parallel_manager:
            self.parallel_execution(
                parallel_manager,
                select_metadata,
                select_tests
            )
        else:
            self.standard_execution(select_metadata, select_tests)
    @classmethod
    def plugin_filter(cls, other):
        """True for Describe subclasses that are real, non-fixture suites."""
        if not isinstance(other, type):
            return False
        if hasattr(other, 'is_fixture') and other.is_fixture():
            return False
        return (issubclass(other, Describe) and
                other is not cls and
                other is not Spec and
                other is not DataSpec and
                other is not DataDescribe)
    @classmethod
    def case_filter(cls, obj):
        """True for plain functions that should be treated as test cases."""
        if not isinstance(obj, types.FunctionType):
            return False
        reserved = [
            'execute', 'standard_execution', 'parallel_execution', 'serialize',
            'before_each', 'after_each', 'before_all', 'after_all'
        ]
        func_name = obj.__name__
        return (not func_name.startswith('_') and
                func_name not in reserved)
# BDD-style alias of Describe with no behavior of its own.  (No docstring on
# purpose: the inherited `doc` property exposes __doc__ to serialize().)
class Spec(Describe):
    pass
# Data-driven suite: for every entry in DATASET, each case function is
# duplicated under a derived name and bound to that entry's arguments.
# (No class docstring on purpose: `doc` exposes __doc__ to serialize().)
class DataDescribe(Describe):
    # Mapping of dataset-item name -> args dict, or -> {'args':..., 'meta':...}
    DATASET = {}
    def __init__(self, parent=None):
        super(DataDescribe, self).__init__(parent=parent)
        self.cases = {}
        # Generate new functions and monkey-patch
        for case_func in self.case_funcs:
            extracted_func, base_metadata = extract_metadata(case_func)
            for name, data in self.DATASET.items():
                args, meta = data, dict(base_metadata)
                # Handle complex dataset item
                if 'args' in data and 'meta' in data:
                    args = data.get('args', {})
                    meta.update(data.get('meta', {}))
                # Extract name, args and duplicate function
                func_name = '{0}_{1}'.format(extracted_func.__name__, name)
                new_func = copy_function(extracted_func, func_name)
                kwargs = get_function_kwargs(extracted_func, args)
                # Monkey-patch and add to cases list
                setattr(self, func_name, new_func)
                wrapper = CaseWrapper(new_func, parent=self,
                                      execute_kwargs=kwargs, metadata=meta)
                self.cases[wrapper.id] = wrapper
# BDD-style alias of DataDescribe with no behavior of its own.  (No
# docstring on purpose: `doc` exposes __doc__ to serialize().)
class DataSpec(DataDescribe):
    pass
def convert_to_hashable(obj):
    """Recursively convert dicts and lists into hashable tuples; other
    values are returned unchanged."""
    if isinstance(obj, dict):
        return tuple((key, convert_to_hashable(value))
                     for key, value in six.iteritems(obj))
    if isinstance(obj, list):
        return tuple(convert_to_hashable(element) for element in obj)
    return obj
def fixture(cls):
    """Class decorator marking *cls* as a fixture (skipped by suite
    discovery) via its __FIXTURE__ flag."""
    setattr(cls, '__FIXTURE__', True)
    return cls
def copy_function(func, name):
    """Return a new function object sharing *func*'s code and globals,
    registered under *name* (works on both Python 2 and 3)."""
    if sys.version_info < (3, 0, 0):
        code, func_globals = func.func_code, func.func_globals
    else:
        code, func_globals = func.__code__, func.__globals__
    return types.FunctionType(code, func_globals, name)
def get_function_kwargs(old_func, new_args):
    """Build the kwargs dict for a generated data-driven case.
    Maps old_func's named parameters (minus 'self') to their declared
    defaults -- None for parameters without one -- then overrides them
    with new_args.
    Args:
        old_func: the original case function being duplicated.
        new_args: dict of dataset-supplied argument values.
    Returns:
        dict mapping parameter name -> value.
    """
    # BUGFIX: inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec when available and fall back for legacy Python 2.
    spec_func = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = spec_func(old_func)
    args = [arg for arg in spec.args if arg != 'self']
    defaults = list(spec.defaults or ())
    # Defaults align with the TRAILING parameters, so pair the reversed
    # lists; zip_longest fills default-less parameters with None.
    izip = (itertools.izip_longest
            if sys.version_info < (3, 0, 0) else itertools.zip_longest)
    kwargs = dict(
        izip(args[::-1], defaults[::-1], fillvalue=None))
    kwargs.update(new_args)
    return kwargs
class DescribeEvent(Event):
    """Event dispatched when a Describe starts or completes execution."""
    START = 'start'
    COMPLETE = 'complete'
class TestEvent(Event):
    """Event dispatched after each test case finishes; payload is the case."""
    COMPLETE = 'test_complete'
    def __init__(self, payload):
        super(TestEvent, self).__init__(TestEvent.COMPLETE, payload=payload)
class FailedRequireException(Exception):
    """Raised when a `require` precondition fails; CaseWrapper.execute
    swallows it, aborting the rest of the case silently."""
    pass
class TestSkippedException(Exception):
    """Raised to mark a test case as skipped.
    Attributes:
        func: the (possibly wrapped) case function that was skipped.
        reason: optional human-readable skip reason.
        other_data: extra decorator context; may contain 'real_func'.
        real_func: the unwrapped case function, when a decorator supplied it.
    """
    def __init__(self, func, reason=None, other_data=None):
        # BUGFIX: the previous `other_data={}` default was one shared dict
        # across all instances; use a fresh dict per instance instead.
        other_data = {} if other_data is None else other_data
        self.func = func
        self.reason = reason
        self.other_data = other_data
        self.real_func = other_data.get('real_func')
class TestIncompleteException(Exception):
    """Raised to mark a test case as incomplete (not yet implemented).
    Attributes:
        func: the (possibly wrapped) case function that is incomplete.
        reason: optional human-readable reason.
        other_data: extra decorator context; may contain 'real_func'.
        real_func: the unwrapped case function, when a decorator supplied it.
    """
    def __init__(self, func, reason=None, other_data=None):
        # BUGFIX: the previous `other_data={}` default was one shared dict
        # across all instances; use a fresh dict per instance instead.
        other_data = {} if other_data is None else other_data
        self.func = func
        self.reason = reason
        self.other_data = other_data
        self.real_func = other_data.get('real_func')
| StarcoderdataPython |
249553 | import collections
import inspect
import os.path
import shutil
import sys
from functools import wraps
# inspired by: http://stackoverflow.com/a/6618825
def flo(string):
    '''Return the string given by param formatted with the callers locals.'''
    frame = inspect.currentframe()
    try:
        # One frame up is the caller; format against its local namespace.
        caller_locals = frame.f_back.f_locals
    finally:
        # Break the frame reference cycle promptly.
        del frame
    return string.format(**caller_locals)
# does not work if called from another package (with other globals)
def doc1():
    '''Return the first line of the (callers) docstring.'''
    # inspect.stack()[1][3] is the caller's function name; it is looked up
    # in THIS module's globals, hence the limitation noted above.
    return globals()[inspect.stack()[1][3]].__doc__.splitlines()[0]
def _wrap_with(color_code):
    '''Build an ANSI color wrapper for the given color code.
    Example:
    >>> blue = _wrap_with('34')
    >>> print(blue('text'))
    \033[34mtext\033[0m
    '''
    def inner(text, bold=False):
        '''Wrap text in ANSI escape sequences for this color.'''
        code = '1;{0}'.format(color_code) if bold else color_code
        return '\033[{0}m{1}\033[0m'.format(code, text)
    return inner
# Ready-made ANSI color wrappers, e.g. print(red('failed', bold=True)).
black = _wrap_with('30')
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
default_color = _wrap_with('0')
def first_paragraph(multiline_str, without_trailing_dot=True, maxlength=None):
    '''Return first paragraph of multiline_str as a oneliner.
    When without_trailing_dot is True, the last char of the first paragraph
    will be removed, if it is a dot ('.').
    Examples:
    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str))
    first line second line
    >>> multiline_str = 'first \\n second \\n \\n next paragraph '
    >>> print(first_paragraph(multiline_str))
    first second
    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str, maxlength=3))
    fir
    >>> multiline_str = 'first line\\nsecond line\\n\\nnext paragraph'
    >>> print(first_paragraph(multiline_str, maxlength=78))
    first line second line
    >>> multiline_str = 'first line.'
    >>> print(first_paragraph(multiline_str))
    first line
    >>> multiline_str = 'first line.'
    >>> print(first_paragraph(multiline_str, without_trailing_dot=False))
    first line.
    >>> multiline_str = ''
    >>> print(first_paragraph(multiline_str))
    <BLANKLINE>
    '''
    stripped = '\n'.join([line.strip() for line in multiline_str.splitlines()])
    paragraph = stripped.split('\n\n')[0]
    res = paragraph.replace('\n', ' ')
    if without_trailing_dot:
        # BUGFIX: only strip a literal trailing dot.  The previous
        # `res.rsplit('.', 1)[0]` cut everything after the LAST dot anywhere
        # in the text (e.g. 'version 2.0 out' -> 'version 2'), contradicting
        # the documented behavior.
        if res.endswith('.'):
            res = res[:-1]
    if maxlength:
        res = res[0:maxlength]
    return res
# for decorator with arguments see: http://stackoverflow.com/a/5929165
def print_doc1(*args, **kwargs):
    '''Print the first paragraph of the docstring of the decorated function.
    The paragraph will be printed as a oneliner.
    May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
    or with named arguments ``color``, ``bold``, ``prefix`` or ``tail``
    (eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
    Examples:
        >>> @print_doc1
        ... def foo():
        ...     """First line of docstring.
        ...
        ...     another line.
        ...     """
        ...     pass
        ...
        >>> foo()
        \033[34mFirst line of docstring\033[0m
        >>> @print_doc1
        ... def foo():
        ...     """First paragraph of docstring which contains more than one
        ...     line.
        ...
        ...     Another paragraph.
        ...     """
        ...     pass
        ...
        >>> foo()
        \033[34mFirst paragraph of docstring which contains more than one line\033[0m
    '''
    # Output settings; defaults: blue, not bold, no prefix, newline tail.
    color = kwargs.get('color', blue)
    bold = kwargs.get('bold', False)
    prefix = kwargs.get('prefix', '')
    tail = kwargs.get('tail', '\n')

    def real_decorator(func):
        '''Wrap ``func`` so its first docstring paragraph is printed first.'''
        @wraps(func)
        def wrapper(*fargs, **fkwargs):
            '''Print the doc paragraph, then delegate to ``func``.'''
            try:
                prgf = first_paragraph(func.__doc__)
                print(color(prefix + prgf + tail, bold))
            except AttributeError as exc:
                # func.__doc__ is None -> there is no docstring to print.
                # NOTE: flo() interpolates the local 'name' -- keep the name.
                name = func.__name__
                print(red(flo('{name}() has no docstring')))
                raise(exc)
            return func(*fargs, **fkwargs)
        return wrapper

    # Bare '@print_doc1' passes the function as the only positional argument
    # with no keywords; any keyword usage means configured invocation.
    if args and not kwargs:
        return real_decorator(func=args[0])
    return real_decorator
def print_full_name(*args, **kwargs):
'''Decorator, print the full name of the decorated function.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, or ``prefix``
(eg. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
'''
color = kwargs.get('color', default_color)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
first_line = ''
try:
first_line = func.__module__ + '.' + func.__qualname__
except AttributeError as exc:
first_line = func.__name__
print(color(prefix + first_line + tail, bold))
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator
# taken from: http://stackoverflow.com/a/3041990
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return the answer as bool.
    ``question`` is presented to the user; ``default`` ("yes", "no" or
    None) is assumed when the user just hits <Enter>.  None forces an
    explicit answer.  Accepted inputs: y/ye/yes/1 and n/no/0 (any case).
    """
    valid = {"yes": True, "y": True, "ye": True, '1': True,
             "no": False, "n": False, '0': False, }
    if default not in (None, "yes", "no"):
        raise ValueError("invalid default answer: '%s'" % default)
    # The capitalized letter in the prompt marks the default answer.
    prompt = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = raw_input().lower()
        if choice == '' and default is not None:
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def query_input(question, default=None, color=default_color):
    """Ask for free-form input via raw_input() and return the answer.
    ``question`` is presented to the user; ``default`` is returned when
    the user just hits <Enter>.  The return value is a str.
    """
    if default is None or default == '':
        prompt = ' '
    elif type(default) == str:
        # NOTE: flo() interpolates the local 'default' -- keep the name.
        prompt = flo(' [{default}] ')
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(color(question + prompt))
        choice = raw_input()
        if choice != '':
            return choice
        if default is not None:
            return default
def filled_out_template_str(template, **substitutions):
    '''Return str template with applied substitutions.
    Placeholders are written ``{{name}}``; single/odd braces pass through
    untouched; ``[[[name]]]`` survives as a literal ``{{name}}``.
    Example:
        >>> template = 'Asyl for {{name}} {{surname}}!'
        >>> filled_out_template_str(template, name='Edward', surname='Snowden')
        'Asyl for Edward Snowden!'
        >>> template = '[[[foo]]] was substituted by {{foo}}'
        >>> filled_out_template_str(template, foo='bar')
        '{{foo}} was substituted by bar'
        >>> template = 'names wrapped by {single} {curly} {braces} {{curly}}'
        >>> filled_out_template_str(template, curly='remains unchanged')
        'names wrapped by {single} {curly} {braces} remains unchanged'
    '''
    # First doctest's expected output restored: it had been corrupted to
    # 'Asyl for <NAME>!' (anonymization artifact), which made
    # doctest.testmod() in the __main__ block fail.
    #
    # Escape every literal brace, then collapse the quadrupled braces that
    # came from '{{name}}' placeholders so str.format() substitutes them.
    for old, new in (('{', '{{'), ('}', '}}'), ('{{{{', '{'), ('}}}}', '}')):
        template = template.replace(old, new)
    template = template.format(**substitutions)
    # str.format() already unescaped doubled braces in the template; this
    # pass only affects braces introduced by the substitution values, then
    # turns the [[[...]]] escape back into literal double braces.
    for old, new in (('{{', '{'), ('}}', '}'), ('[[[', '{{'), (']]]', '}}')):
        template = template.replace(old, new)
    return template
def filled_out_template(filename, **substitutions):
    '''Return the content of file ``filename`` with substitutions applied.
    Delegates the actual templating to ``filled_out_template_str()``.
    '''
    with open(filename, 'r') as fp:
        return filled_out_template_str(fp.read(), **substitutions)
# cf. http://stackoverflow.com/a/126389
def update_or_append_line(filename, prefix, new_line, keep_backup=True,
                          append=True):
    '''Search ``filename`` for a line starting with ``prefix`` and replace
    that line by ``new_line``.
    If no line starts with ``prefix``, ``new_line`` is appended (when
    ``append`` is true).  A missing file is created.  A backup copy
    ``filename~`` is kept unless ``keep_backup`` is false.
    Return False if ``new_line`` was appended, else True (i.e. the prefix
    or the exact line was found within the file).
    '''
    filename = os.path.expanduser(filename)
    if not os.path.isfile(filename):
        # No file yet: create it with new_line as its only content.
        with open(filename, 'w') as dest:
            dest.write(new_line + '\n')
        return False
    same_line_exists = line_updated = False
    backup = filename + '~'
    # Move the original aside and rewrite it line by line.
    shutil.move(filename, backup)
    with open(filename, 'w') as dest:
        with open(backup, 'r') as source:
            for line in source:
                if line == new_line:
                    same_line_exists = True
                if line.startswith(prefix):
                    dest.write(new_line + '\n')
                    line_updated = True
                else:
                    dest.write(line)
        # Nothing matched: fall back to appending (if allowed).
        if append and not (same_line_exists or line_updated):
            dest.write(new_line + '\n')
    if not keep_backup:
        os.remove(backup)
    return same_line_exists or line_updated
def comment_out_line(filename, line, comment='#',
                     update_or_append_line=update_or_append_line):
    '''Deactivate ``line`` in ``filename`` by prefixing it with ``comment``.
    If the file does not contain ``line`` its content stays unchanged,
    though the file is rewritten (touched) in every case.
    '''
    update_or_append_line(filename, prefix=line,
                          new_line=comment + line, append=False)
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
                                       keep_backup=True,
                                       update_or_append_line=update_or_append_line):
    '''Remove the comment sign of a commented-out line, making it "active".
    If no such commented-out line exists, fall back to updating an active
    line starting with ``prefix``, or appending ``new_line``.
    '''
    # First try to replace a commented-out variant of the line ...
    found_commented = update_or_append_line(
        filename, prefix=comment + prefix, new_line=new_line,
        keep_backup=keep_backup, append=False)
    # ... otherwise update-or-append the active line.
    if not found_commented:
        update_or_append_line(filename, prefix, new_line,
                              keep_backup=keep_backup, append=True)
# namedtuple with defaults
def namedtuple(typename, field_names, **kwargs):
    '''Like ``collections.namedtuple`` but with support for default values.
    Field specs may carry defaults, e.g. ``"url, name=None, basedir='~'"``;
    defaulted fields must come last, mirroring Python's argument rules.
    '''
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    plain_names = []
    defaults = []
    for spec in map(str, field_names):
        name, sep, default_src = spec.partition('=')
        if sep:
            # NOTE: default expressions are eval'd -- trusted specs only.
            defaults.append(eval(default_src))
        elif defaults:
            raise ValueError('non-keyword arg after keyword arg')
        plain_names.append(name)
    result = collections.namedtuple(typename, plain_names, **kwargs)
    # Attach the collected defaults to the generated constructor.
    result.__new__.__defaults__ = tuple(defaults)
    return result
if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
    # Smoke-test the defaults-aware namedtuple replacement defined above.
    # Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    Repo = namedtuple('Repo', "url, name=None, basedir='~/repos'")
    assert Repo.__new__.__defaults__ == (None, '~/repos')
    r = Repo(url='https://github.com/theno/fabsetup.git')
    # The repr must reflect both the given url and the filled-in defaults.
    assert r.__repr__() == 'Repo(' \
        'url=\'https://github.com/theno/fabsetup.git\', ' \
        'name=None, basedir=\'~/repos\')'
| StarcoderdataPython |
6405322 | <filename>python/pySimE/space/tests/sapce_elevator.py
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 19:47:14 2013
@author: asiJa

Space-elevator tether taper study (Python 2 script: uses print statements).
"""
# ==============================================
print 'space elevator'
from sympy import *
import pylab
# Symbols for the tether mass ODE: GM = G*M_earth, v a tip speed, h0/r0
# reference radii, x the radial coordinate, s the material's specific
# strength, omega the planet's angular velocity.
GM,v,h0,r0,x, s, omega = symbols('GM v h0 r0 x s omega')
m = Function('m')(x)
#print dsolve(Derivative(m, x) - m(x)*((GM/((r0+x)**2))+(v**2/(h0-x)))/s , m(x) )
#print dsolve(Derivative(m, x) - m(x)*( (GM/(x**2)) + (v**2/(r0-x)) )/s , m(x) )
# Solve symbolically for the taper profile m(x) with gravity minus the
# centrifugal term, normalized by specific strength s.
print dsolve(Derivative(m, x) - m(x)*( (GM/(x**2)) - ((omega**2)*x) )/s , m(x) )
G = 6.67384e-11 # N/kg^2/m^2 -- gravitational constant
M = 5.9736e+24 # kg -- Earth mass
s = 3766e+3 # N*m/kg -- specific strength (Zylon); reassigned per material below
r0 = 6378100+1000e+3 # m -- Earth radius + 1000 km (set but not used below)
v = 6.89e+3 # m/s -- set but not used below
omega = 7.2921150e-5  # rad/s -- Earth's sidereal angular velocity
x0 = 6378100  # m -- Earth's equatorial radius (tether anchor radius)
def constG(x):
    '''Taper ratio for a constant-gravity (9.81 m/s^2) approximation.'''
    exponent = 9.81 * x / s
    return pylab.exp(exponent)
def justGravity(x):
    '''Taper ratio from the gravitational potential only: exp(-G*M/(x*s)).'''
    potential = G * M / x
    return pylab.exp(-potential / s)
def Full(x):
    '''Taper ratio including gravity and the centrifugal potential.
    Evaluates exp(-(G*M/x + (x*omega)**2 / 2) / s); works element-wise
    when ``x`` is an array.
    '''
    # The previous per-element debug loop (comparing gravity vs. the
    # centrifugal term with a commented-out print) had no effect on the
    # result and has been removed.
    return pylab.exp( - ( G*M/x + ((x*omega)**2)/2 ) / s)
# Heights above the anchor, from ~0 up to GEO altitude, in 1000 km steps.
xs = pylab.arange( 1,36000e+3,1000e+3 )
# Zylon
s = 3766e+3 # N*m/kg
# NOTE(review): msConst is computed but never plotted (its plot line is
# commented out below) -- dead computation in each material section.
msConst = constG (xs )
# Taper ratios normalized to the value at the anchor radius x0.
msGrav = justGravity(xs+x0)/justGravity(x0)
msFull = Full (xs+x0)/Full (x0)
#pylab.plot(xs,msConst,'r-x')
pylab.plot(xs/1000,msGrav,'b-o') #
pylab.plot(xs/1000,msFull,'b-x') #
# Carbon nanotube
s = 46268e+3 # N*m/kg
msConst = constG (xs )
msGrav = justGravity(xs+x0)/justGravity(x0)
msFull = Full (xs+x0)/Full (x0)
pylab.plot(xs/1000,msGrav,'r-o') #
pylab.plot(xs/1000,msFull,'r-x') #
# Carbon nanotube / 2 theoretical
s = 46268e+3/2.0 # N*m/kg
msConst = constG (xs )
msGrav = justGravity(xs+x0)/justGravity(x0)
msFull = Full (xs+x0)/Full (x0)
pylab.plot(xs/1000,msGrav,'m-o') #
pylab.plot(xs/1000,msFull,'m-x') #
# Logarithmic y-axis: taper ratios span orders of magnitude.
pylab.yscale('log')
pylab.show() | StarcoderdataPython |
9701414 | <reponame>Avik32223/gala-iam-api
from pydantic.error_wrappers import ValidationError
from db.database import Database
from models.base_record_manager import BaseRecordManager
from models.resource.resource_manager import ResourceManager
from models.resource_action.resource_action_manager import \
ResourceActionManager
from models.role.role_model import (ROLE_MODEL_NAME, Role, RoleCreate,
RolePartial)
class RoleManager(BaseRecordManager):
    """RoleManager to handle CRUD functionality"""
    # Model binding used by the BaseRecordManager CRUD plumbing.
    model = Role
    model_name = ROLE_MODEL_NAME
    @classmethod
    def validate_role(cls, db: Database, record: RoleCreate):
        """Validates role record
        Arguments:
            db {Database} -- Database connection
            record {RoleCreate} -- New Role data
        Raises:
            ValidationError: Raises ValidationError if subject kind is not supported
        """
        # NOTE(review): pydantic's ValidationError is normally constructed
        # from a list of errors plus a model, not a plain message string --
        # confirm these raise sites actually produce usable errors.
        new_role = record
        for rule in new_role.rules:
            if rule.resource:
                # The rule names a concrete resource: it must exist with
                # the given name and kind.
                resources = ResourceManager.find(db, filter_params={
                    "metadata.name": rule.resource,
                    "metadata.resource_kind": rule.resource_kind,
                })
                if not resources:
                    message = f"Kind [{rule.resource_kind}] doesn't exists."
                    # NOTE(review): rule.resource is always truthy inside
                    # this branch, so this inner check is redundant.
                    if rule.resource:
                        message = f"Resource [{rule.resource}] of {message}"
                    raise ValidationError(message)
            for resource_action in rule.resource_actions:
                resource_kind = rule.resource_kind
                resource = rule.resource
                filter_params = {
                    "metadata.name": resource_action,
                    "metadata.resource_kind": resource_kind,
                    "metadata.resource": resource
                }
                # Accept an action bound to the specific resource ...
                actions_on_resource = ResourceActionManager.find(
                    db, filter_params=filter_params)
                if not actions_on_resource:
                    # ... or, failing that, one defined kind-wide
                    # (metadata.resource is None).
                    filter_params["metadata.resource"] = None
                    actions_on_resource_kind = ResourceActionManager.find(
                        db, filter_params=filter_params)
                    if not actions_on_resource_kind:
                        raise ValidationError(
                            f"ResourceAction with [{filter_params}] constraint doesn't exist.")
    @classmethod
    def create(cls, db: Database, record: RoleCreate) -> Role:
        """Creates a new Role after validating subjects.
        Arguments:
            db {Database} -- Database connection
            record {RoleCreate} -- New Role data
        Returns:
            Role -- newly created role
        """
        # Enforce unique role names before any rule validation.
        existing_role = RoleManager.find_by_name(db, record.metadata.name)
        if existing_role:
            raise ValidationError(
                "Role with name [%s] already exists" % record.metadata.name)
        cls.validate_role(db, record)
        return super(RoleManager, cls).create(db, record)
    @classmethod
    def update(cls, db: Database, record_uuid: str, record: RolePartial) -> Role:
        """Updates the existing Role after validating data
        Arguments:
            db {Database} -- Database connection
            record_uuid {str} -- unique record uuid
            record {BaseModel} -- updating record
        Returns:
            BaseRecord -- Updated record
        """
        existing_role = cls.find_by_uuid(db, record_uuid)
        updated_record = cls.model(**record.dict(), uuid=record_uuid)
        if updated_record.metadata.name != existing_role.metadata.name:
            # Renamed role: the new name must still be unique.
            if RoleManager.find_by_name(db, updated_record.metadata.name):
                raise ValidationError(
                    "Role with name [%s] already exists" % record.metadata.name)
            # NOTE(review): validate_role only runs when the name changed,
            # so rule edits under an unchanged name skip validation --
            # confirm whether that is intended.
            cls.validate_role(db, updated_record)
        return super(RoleManager, cls).update(db, record_uuid, record)
| StarcoderdataPython |
3580151 | #!/usr/bin/env python
"""
Data Object
A very generic object intended to be used to store and transmit simple data.
This is similar to using a dict. Hoever, fields are accessed using
attributes, and this is intended to be extended as future needs warrant.
The key to using this module correctly is defining the fields necessary for
your object in your subclass' constructor. Once defined there, this class
eliminates dual-maintenance issues.
"""
import json
#=============================================================================
class Data( object ):
    """
    Data Object Base Class

    Generic container whose members are set from keyword arguments and
    accessed as attributes.  Written for Python 2: ``__dict__.keys()`` is
    relied on to return a list snapshot, and iteration uses ``next()``
    (not the Python 3 ``__next__``).
    """
    #=========================================================================
    def __init__( self, **kwargs ):
        """
        Constructor.
        @param **kwargs Anything or everything, but not nothing
        """
        # check for secret handshake: super_init()/super_pairs() pass the
        # caller's vars() under the '_vars' key; drop the bound 'self'
        if '_vars' in kwargs:
            if 'self' in kwargs[ '_vars' ]:
                del kwargs[ '_vars' ][ 'self' ]
            self.__dict__.update( **kwargs[ '_vars' ] )
            del kwargs[ '_vars' ]
        # load keyword arguments into object state
        self.__dict__.update( **kwargs )
        # record a separate list of keys in case the subclass adds more later
        # (Python 2: keys() returns a list snapshot taken before _keys/_iter
        # are set, so the bookkeeping members are excluded by construction)
        self._keys = self.__dict__.keys()
        self._iter = 0
    #=========================================================================
    def __contains__( self, key ):
        """
        Object "contains" magic method for "in" queries.
        @param key Member name to check
        @return If this object has that member
        """
        return hasattr( self, key )
    #=========================================================================
    def __delitem__( self, key ):
        """
        Deletes a member from the object when using "del" operator.
        @param key Member name to delete
        """
        # NOTE(review): _keys is not updated here, so keys()/len()/iteration
        # still report a deleted member -- confirm whether that is intended.
        delattr( self, key )
    #=========================================================================
    def __iter__( self ):
        """
        Iterator protocol support.
        @return The iterable object
        """
        # reset the single shared cursor; nested iteration over the same
        # instance would interfere with itself
        self._iter = 0
        return self
    #=========================================================================
    def __len__( self ):
        """
        Support "len" built-in function.
        @return Number of members in object
        """
        return len( self._keys )
    #=========================================================================
    def __getitem__( self, key ):
        """
        Support array-notation retrieval.
        @param key Member name to retrieve
        @return The value of the requested member
        """
        return getattr( self, key )
    #=========================================================================
    def __getstate__( self ):
        """
        Support pickle protocol to store an instance.
        @return A dictionary containing all member data
        """
        # only the recorded data members are serialized; the _keys/_iter
        # bookkeeping members are deliberately left out
        return dict( ( k, self.__dict__[ k ] ) for k in self._keys )
    #=========================================================================
    def __setitem__( self, key, value ):
        """
        Support array-notation mutation.
        @param key Member name to mutate
        @param value The value to store in the requested member
        """
        # NOTE(review): a brand-new key is not added to _keys, so it will
        # not show up in keys()/len()/iteration -- confirm this matches the
        # "constructor defines the fields" design stated in the module doc.
        setattr( self, key, value )
    #=========================================================================
    def __str__( self ):
        """
        Convert object data to a string (JSON).
        @return String representation of object data
        """
        return json.dumps( self.__getstate__(), separators = ( ', ', ':' ) )
    #=========================================================================
    def __setstate__( self, data ):
        """
        Support pickle protocol to restore an instance.
        @param data A dictionary containing all member data
        """
        self.__dict__.update( data )
        # NOTE(review): _iter is not restored here; next() would fail until
        # __iter__() is called on the unpickled instance -- verify.
        self._keys = data.keys()
    #=========================================================================
    def keys( self ):
        """
        Support a dictionary-style request for a list of all members.
        @return A list of object member names
        """
        return self._keys
    #=========================================================================
    def next( self ):
        """
        Iterator protocol support (Python 2 name; Python 3 uses __next__).
        @return Next member in object
        """
        if self._iter >= len( self._keys ):
            raise StopIteration
        key = self._keys[ self._iter ]
        self._iter += 1
        return getattr( self, key )
    #=========================================================================
    def super_init( self, data ):
        """
        Magical superclass initializer alias.
        @param data A dictionary of member data to load into the object
        """
        # NOTE(review): super(self.__class__, ...) is only safe for direct
        # subclasses of Data; a grandchild class would recurse infinitely.
        super( self.__class__, self ).__init__( _vars = data )
    #=========================================================================
    def super_pairs( self, pairs ):
        """
        Magical superclass initializer alias using pair-wise data.
        @param pairs A list of key-value pairs of member data.
        """
        super( self.__class__, self ).__init__( _vars = dict( pairs ) )
#=============================================================================
class _Test( Data ):
    """
    Example Data subclass: the constructor arguments become object members.
    """
    #=========================================================================
    def __init__( self, a, b = 1, c = '2', d = None ):
        # vars() snapshots the constructor locals (self, a, b, c, d);
        # super_init() strips 'self' and loads the rest as members
        self.super_init( vars() )
#=============================================================================
class _Test2( Data ):
    """
    Example Data subclass exercising super_pairs() with key/value lists.
    """
    #=========================================================================
    def __init__( self, a, b = 1, c = '2', d = None ):
        # rebind 'd' to the locals dict; the dict's 'd' entry still holds
        # the parameter's value captured when vars() was evaluated
        d = vars()
        self.super_pairs( zip( d.keys(), d.values() ) )
#=============================================================================
def main( argv ):
    """
    Script execution entry point
    @param argv Arguments passed to the script
    @return Exit code (0 = success)
    """
    # smoke-test both example subclasses; __str__ renders members as JSON
    # (Python 2 print statements)
    t = _Test( a = 8 )
    print t
    t = _Test2( a = 9 )
    print t
    # return success
    return 0
#=============================================================================
if __name__ == "__main__":
    import sys
    sys.exit( main( sys.argv ) )
| StarcoderdataPython |
9609039 | from dagster_graphql.client.query import START_PIPELINE_EXECUTION_MUTATION, SUBSCRIPTION_QUERY
from dagster_graphql.implementation.context import DagsterGraphQLContext
from dagster_graphql.implementation.pipeline_execution_manager import SubprocessExecutionManager
from dagster_graphql.schema import create_schema
from dagster_graphql.test.utils import execute_dagster_graphql
from graphql import graphql
from graphql.execution.executors.sync import SyncExecutor
from dagster import ExecutionTargetHandle
from dagster.core.instance import DagsterInstance
from dagster.utils import file_relative_path
def test_execute_hammer_through_dagit():
    '''Start the toy "hammer" pipeline through the dagit GraphQL layer and
    verify that start/success events show up on the run-log subscription.'''
    # Handle pointing at the example pipeline's source file.
    handle = ExecutionTargetHandle.for_pipeline_python_file(
        file_relative_path(__file__, '../../../../examples/dagster_examples/toys/hammer.py'),
        'hammer_pipeline',
    )
    instance = DagsterInstance.local_temp()
    execution_manager = SubprocessExecutionManager(instance)
    context = DagsterGraphQLContext(
        handle=handle, execution_manager=execution_manager, instance=instance
    )
    exec_variables = {
        'executionParams': {
            'environmentConfigData': {'storage': {'filesystem': {}}, 'execution': {'dask': {}}},
            'selector': {'name': handle.build_pipeline_definition().name},
            'mode': 'default',
        }
    }
    # Kick off the pipeline via the raw GraphQL mutation.
    start_result = graphql(
        request_string=START_PIPELINE_EXECUTION_MUTATION,
        schema=create_schema(),
        context=context,
        variables=exec_variables,
        executor=SyncExecutor(),
    )
    run_id = start_result.data['startPipelineExecution']['run']['runId']
    # Block until the subprocess execution has finished.
    context.execution_manager.join()
    subscription = execute_dagster_graphql(
        context, SUBSCRIPTION_QUERY, variables={'runId': run_id}
    )
    received = []
    subscription.subscribe(received.append)
    event_types = [
        event['__typename']
        for event in received[0].data['pipelineRunLogs']['messages']
    ]
    assert 'PipelineStartEvent' in event_types
    assert 'PipelineSuccessEvent' in event_types
| StarcoderdataPython |
6502232 | <filename>SimpleNeuralNets/Layers/__init__.py
__author__ = 'jotterbach'
| StarcoderdataPython |
3419515 | import logging
LOG = logging.getLogger(__name__)
class ProfileVariantMixin():
    """Mixin exposing CRUD helpers for the ``profile_variant`` collection.

    Expects the host class to provide a ``db`` attribute with a
    ``profile_variant`` collection (pymongo-style interface).
    """

    def add_profile_variants(self, profile_variants):
        """Insert several variants into the profile_variant collection.

        Args:
            profile_variants(list(models.ProfileVariant))
        """
        collection = self.db.profile_variant
        return collection.insert_many(profile_variants)

    def profile_variants(self):
        """Return all profile variants stored in the database.

        Returns:
            profile_variants (Iterable(ProfileVariant))
        """
        return self.db.profile_variant.find()
1884258 | import re, os
from warnings import warn
class Mutation:
"""
Stores the mutation. Not to be confused with the namedtuple Variant, which stores gnomAD mutations.
>>> Mutation('p.M12D')
>>> Mutation('M12D')
>>> Mutation(gnomAD_variant_instance)
This class does not do analyses with Protein, but ProteinAnalysis do. Here however, wordy conversions happen.
"""
# the following variable was made in apriori_effect.py
_apriori_data = { 'A>A': 'identical',
'A>C': 'bigger|more polar',
'A>D': 'differently charged|bigger',
'A>E': 'differently charged|bigger',
'A>F': 'bigger',
'A>G': 'more polar|smaller|more flexible',
'A>H': 'differently charged|bigger|from a non-aromatic to an aromatic',
'A>I': 'bigger',
'A>K': 'differently charged|bigger',
'A>L': 'bigger',
'A>M': 'bigger',
'A>N': 'bigger|more polar',
'A>P': 'bigger|from a non-aromatic to an aromatic|more flexible|more polar',
'A>Q': 'bigger|more polar',
'A>R': 'differently charged|bigger',
'A>S': 'bigger|more flexible|more polar',
'A>T': 'bigger|more polar',
'A>V': 'bigger',
'A>W': 'bigger|from a non-aromatic to an aromatic',
'A>Y': 'bigger|from a non-aromatic to an aromatic',
'C>A': 'smaller|more hydrophobic',
'C>C': 'identical',
'C>D': 'differently charged|bigger',
'C>E': 'differently charged|bigger',
'C>F': 'bigger|more hydrophobic',
'C>G': 'smaller|more flexible',
'C>H': 'differently charged|bigger|from a non-aromatic to an aromatic',
'C>I': 'bigger|more hydrophobic',
'C>K': 'differently charged|bigger',
'C>L': 'bigger|more hydrophobic',
'C>M': 'bigger|more hydrophobic',
'C>N': 'bigger',
'C>P': 'from a non-aromatic to an aromatic|smaller|more flexible',
'C>Q': 'bigger',
'C>R': 'differently charged|bigger',
'C>S': 'equally sized|more flexible',
'C>T': 'bigger',
'C>V': 'bigger|more hydrophobic',
'C>W': 'bigger|from a non-aromatic to an aromatic|more hydrophobic',
'C>Y': 'bigger|from a non-aromatic to an aromatic|more hydrophobic',
'D>A': 'differently charged|smaller',
'D>C': 'differently charged|smaller',
'D>D': 'identical',
'D>E': 'bigger',
'D>F': 'differently charged|bigger',
'D>G': 'differently charged|smaller|more flexible',
'D>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'D>I': 'differently charged|differently shaped',
'D>K': 'differently charged|bigger',
'D>L': 'differently charged|equally sized',
'D>M': 'differently charged|differently shaped',
'D>N': 'differently charged|equally sized',
'D>P': 'differently charged|from a non-aromatic to an aromatic|smaller|more flexible',
'D>Q': 'differently charged|bigger',
'D>R': 'differently charged|bigger',
'D>S': 'differently charged|smaller|more flexible',
'D>T': 'differently charged|differently shaped',
'D>V': 'differently charged|differently shaped',
'D>W': 'differently charged|bigger|from a non-aromatic to an aromatic',
'D>Y': 'differently charged|bigger|from a non-aromatic to an aromatic',
'E>A': 'differently charged|smaller',
'E>C': 'differently charged|smaller',
'E>D': 'smaller',
'E>E': 'identical',
'E>F': 'differently charged|bigger',
'E>G': 'differently charged|smaller|more flexible',
'E>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'E>I': 'differently charged|differently shaped',
'E>K': 'differently charged|bigger',
'E>L': 'differently charged|differently shaped',
'E>M': 'differently charged|smaller',
'E>N': 'differently charged|smaller',
'E>P': 'differently charged|from a non-aromatic to an aromatic|smaller|more flexible',
'E>Q': 'differently charged|equally sized',
'E>R': 'differently charged|bigger',
'E>S': 'differently charged|smaller|more flexible',
'E>T': 'differently charged|differently shaped',
'E>V': 'differently charged|differently shaped',
'E>W': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'E>Y': 'differently charged|bigger|from a non-aromatic to an aromatic',
'F>A': 'smaller',
'F>C': 'more polar|smaller',
'F>D': 'differently charged|smaller',
'F>E': 'differently charged|smaller',
'F>F': 'identical',
'F>G': 'more polar|smaller|more flexible',
'F>H': 'differently charged|from a non-aromatic to an aromatic|smaller',
'F>I': 'differently shaped',
'F>K': 'differently charged|differently shaped',
'F>L': 'smaller',
'F>M': 'smaller',
'F>N': 'more polar|differently shaped',
'F>P': 'from a non-aromatic to an aromatic|smaller|more flexible|more polar',
'F>Q': 'more polar|differently shaped',
'F>R': 'differently charged|differently shaped',
'F>S': 'more polar|smaller|more flexible',
'F>T': 'more polar|smaller',
'F>V': 'smaller',
'F>W': 'from a non-aromatic to an aromatic|differently shaped',
'F>Y': 'bigger|from a non-aromatic to an aromatic',
'G>A': 'bigger|more rigid|more hydrophobic',
'G>C': 'bigger|more rigid',
'G>D': 'differently charged|bigger|more rigid',
'G>E': 'differently charged|bigger|more rigid',
'G>F': 'bigger|more rigid|more hydrophobic',
'G>G': 'identical',
'G>H': 'differently charged|bigger|from a non-aromatic to an aromatic|more rigid',
'G>I': 'bigger|more rigid|more hydrophobic',
'G>K': 'differently charged|bigger|more rigid',
'G>L': 'bigger|more rigid|more hydrophobic',
'G>M': 'bigger|more rigid|more hydrophobic',
'G>N': 'bigger|more rigid',
'G>P': 'bigger|from a non-aromatic to an aromatic|more flexible',
'G>Q': 'bigger|more rigid',
'G>R': 'differently charged|bigger|more rigid',
'G>S': 'bigger|more rigid',
'G>T': 'bigger|more rigid',
'G>V': 'bigger|more rigid|more hydrophobic',
'G>W': 'bigger|from a non-aromatic to an aromatic|more rigid|more hydrophobic',
'G>Y': 'bigger|from a non-aromatic to an aromatic|more rigid|more hydrophobic',
'H>A': 'differently charged|from an aromatic to a non-aromatic|smaller',
'H>C': 'differently charged|from an aromatic to a non-aromatic|smaller',
'H>D': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>E': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>F': 'differently charged|bigger|from an aromatic to a non-aromatic',
'H>G': 'differently charged|from an aromatic to a non-aromatic|smaller|more flexible',
'H>H': 'identical',
'H>I': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>K': 'from an aromatic to a non-aromatic|differently shaped',
'H>L': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>M': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>N': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>P': 'differently charged|smaller|more flexible',
'H>Q': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>R': 'from an aromatic to a non-aromatic|differently shaped',
'H>S': 'differently charged|from an aromatic to a non-aromatic|smaller|more flexible',
'H>T': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>V': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'H>W': 'differently charged|bigger',
'H>Y': 'differently charged|bigger',
'I>A': 'smaller',
'I>C': 'more polar|smaller',
'I>D': 'differently charged|differently shaped',
'I>E': 'differently charged|differently shaped',
'I>F': 'differently shaped',
'I>G': 'more polar|smaller|more flexible',
'I>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'I>I': 'identical',
'I>K': 'differently charged|differently shaped',
'I>L': 'differently shaped',
'I>M': 'differently shaped',
'I>N': 'more polar|differently shaped',
'I>P': 'from a non-aromatic to an aromatic|smaller|more flexible|more polar',
'I>Q': 'more polar|differently shaped',
'I>R': 'differently charged|differently shaped',
'I>S': 'more polar|smaller|more flexible',
'I>T': 'more polar|differently shaped',
'I>V': 'smaller',
'I>W': 'from a non-aromatic to an aromatic|differently shaped',
'I>Y': 'from a non-aromatic to an aromatic|differently shaped',
'K>A': 'differently charged|smaller',
'K>C': 'differently charged|smaller',
'K>D': 'differently charged|smaller',
'K>E': 'differently charged|smaller',
'K>F': 'differently charged|differently shaped',
'K>G': 'differently charged|smaller|more flexible',
'K>H': 'from a non-aromatic to an aromatic|differently shaped',
'K>I': 'differently charged|differently shaped',
'K>K': 'identical',
'K>L': 'differently charged|differently shaped',
'K>M': 'differently charged|smaller',
'K>N': 'differently charged|differently shaped',
'K>P': 'differently charged|from a non-aromatic to an aromatic|smaller|more flexible',
'K>Q': 'differently charged|differently shaped',
'K>R': 'bigger',
'K>S': 'differently charged|smaller|more flexible',
'K>T': 'differently charged|differently shaped',
'K>V': 'differently charged|differently shaped',
'K>W': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'K>Y': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'L>A': 'smaller',
'L>C': 'more polar|smaller',
'L>D': 'differently charged|equally sized',
'L>E': 'differently charged|differently shaped',
'L>F': 'bigger',
'L>G': 'more polar|smaller|more flexible',
'L>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'L>I': 'differently shaped',
'L>K': 'differently charged|differently shaped',
'L>L': 'identical',
'L>M': 'differently shaped',
'L>N': 'more polar|equally sized',
'L>P': 'from a non-aromatic to an aromatic|smaller|more flexible|more polar',
'L>Q': 'more polar|differently shaped',
'L>R': 'differently charged|differently shaped',
'L>S': 'more polar|smaller|more flexible',
'L>T': 'more polar|differently shaped',
'L>V': 'smaller',
'L>W': 'bigger|from a non-aromatic to an aromatic',
'L>Y': 'bigger|from a non-aromatic to an aromatic',
'M>A': 'smaller',
'M>C': 'more polar|smaller',
'M>D': 'differently charged|differently shaped',
'M>E': 'differently charged|bigger',
'M>F': 'bigger',
'M>G': 'more polar|smaller|more flexible',
'M>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'M>I': 'differently shaped',
'M>K': 'differently charged|bigger',
'M>L': 'differently shaped',
'M>M': 'identical',
'M>N': 'more polar|differently shaped',
'M>P': 'from a non-aromatic to an aromatic|smaller|more flexible|more polar',
'M>Q': 'bigger|more polar',
'M>R': 'differently charged|bigger',
'M>S': 'more polar|smaller|more flexible',
'M>T': 'more polar|differently shaped',
'M>V': 'differently shaped',
'M>W': 'from a non-aromatic to an aromatic|differently shaped',
'M>Y': 'bigger|from a non-aromatic to an aromatic',
'N>A': 'smaller|more hydrophobic',
'N>C': 'smaller',
'N>D': 'differently charged|equally sized',
'N>E': 'differently charged|bigger',
'N>F': 'differently shaped|more hydrophobic',
'N>G': 'smaller|more flexible',
'N>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'N>I': 'differently shaped|more hydrophobic',
'N>K': 'differently charged|differently shaped',
'N>L': 'equally sized|more hydrophobic',
'N>M': 'differently shaped|more hydrophobic',
'N>N': 'identical',
'N>P': 'from a non-aromatic to an aromatic|smaller|more flexible',
'N>Q': 'bigger',
'N>R': 'differently charged|differently shaped',
'N>S': 'smaller|more flexible',
'N>T': 'differently shaped',
'N>V': 'differently shaped|more hydrophobic',
'N>W': 'from a non-aromatic to an aromatic|differently shaped|more hydrophobic',
'N>Y': 'from a non-aromatic to an aromatic|differently shaped|more hydrophobic',
'P>A': 'from an aromatic to a non-aromatic|smaller|more rigid|more hydrophobic',
'P>C': 'bigger|from an aromatic to a non-aromatic|more rigid',
'P>D': 'differently charged|bigger|from an aromatic to a non-aromatic|more rigid',
'P>E': 'differently charged|bigger|from an aromatic to a non-aromatic|more rigid',
'P>F': 'bigger|from an aromatic to a non-aromatic|more rigid|more hydrophobic',
'P>G': 'from an aromatic to a non-aromatic|smaller|more rigid',
'P>H': 'differently charged|bigger|more rigid',
'P>I': 'bigger|from an aromatic to a non-aromatic|more rigid|more hydrophobic',
'P>K': 'differently charged|bigger|from an aromatic to a non-aromatic|more rigid',
'P>L': 'bigger|from an aromatic to a non-aromatic|more rigid|more hydrophobic',
'P>M': 'bigger|from an aromatic to a non-aromatic|more rigid|more hydrophobic',
'P>N': 'bigger|from an aromatic to a non-aromatic|more rigid',
'P>P': 'identical',
'P>Q': 'bigger|from an aromatic to a non-aromatic|more rigid',
'P>R': 'differently charged|bigger|from an aromatic to a non-aromatic|more rigid',
'P>S': 'bigger|from an aromatic to a non-aromatic|more rigid',
'P>T': 'bigger|from an aromatic to a non-aromatic|more rigid',
'P>V': 'bigger|from an aromatic to a non-aromatic|more rigid|more hydrophobic',
'P>W': 'bigger|more rigid|more hydrophobic',
'P>Y': 'bigger|more rigid|more hydrophobic',
'Q>A': 'smaller|more hydrophobic',
'Q>C': 'smaller',
'Q>D': 'differently charged|smaller',
'Q>E': 'differently charged|equally sized',
'Q>F': 'differently shaped|more hydrophobic',
'Q>G': 'smaller|more flexible',
'Q>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'Q>I': 'differently shaped|more hydrophobic',
'Q>K': 'differently charged|differently shaped',
'Q>L': 'differently shaped|more hydrophobic',
'Q>M': 'smaller|more hydrophobic',
'Q>N': 'smaller',
'Q>P': 'from a non-aromatic to an aromatic|smaller|more flexible',
'Q>Q': 'identical',
'Q>R': 'differently charged|differently shaped',
'Q>S': 'smaller|more flexible',
'Q>T': 'differently shaped',
'Q>V': 'differently shaped|more hydrophobic',
'Q>W': 'from a non-aromatic to an aromatic|differently shaped|more hydrophobic',
'Q>Y': 'from a non-aromatic to an aromatic|differently shaped|more hydrophobic',
'R>A': 'differently charged|smaller',
'R>C': 'differently charged|smaller',
'R>D': 'differently charged|smaller',
'R>E': 'differently charged|smaller',
'R>F': 'differently charged|differently shaped',
'R>G': 'differently charged|smaller|more flexible',
'R>H': 'from a non-aromatic to an aromatic|differently shaped',
'R>I': 'differently charged|differently shaped',
'R>K': 'smaller',
'R>L': 'differently charged|differently shaped',
'R>M': 'differently charged|smaller',
'R>N': 'differently charged|differently shaped',
'R>P': 'differently charged|from a non-aromatic to an aromatic|smaller|more flexible',
'R>Q': 'differently charged|differently shaped',
'R>R': 'identical',
'R>S': 'differently charged|smaller|more flexible',
'R>T': 'differently charged|differently shaped',
'R>V': 'differently charged|differently shaped',
'R>W': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'R>Y': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'S>A': 'smaller|more rigid|more hydrophobic',
'S>C': 'equally sized|more rigid',
'S>D': 'differently charged|bigger|more rigid',
'S>E': 'differently charged|bigger|more rigid',
'S>F': 'bigger|more rigid|more hydrophobic',
'S>G': 'smaller|more flexible',
'S>H': 'differently charged|bigger|from a non-aromatic to an aromatic|more rigid',
'S>I': 'bigger|more rigid|more hydrophobic',
'S>K': 'differently charged|bigger|more rigid',
'S>L': 'bigger|more rigid|more hydrophobic',
'S>M': 'bigger|more rigid|more hydrophobic',
'S>N': 'bigger|more rigid',
'S>P': 'from a non-aromatic to an aromatic|smaller|more flexible',
'S>Q': 'bigger|more rigid',
'S>R': 'differently charged|bigger|more rigid',
'S>S': 'identical',
'S>T': 'bigger|more rigid',
'S>V': 'bigger|more rigid|more hydrophobic',
'S>W': 'bigger|from a non-aromatic to an aromatic|more rigid|more hydrophobic',
'S>Y': 'bigger|from a non-aromatic to an aromatic|more rigid|more hydrophobic',
'T>A': 'smaller|more hydrophobic',
'T>C': 'smaller',
'T>D': 'differently charged|differently shaped',
'T>E': 'differently charged|differently shaped',
'T>F': 'bigger|more hydrophobic',
'T>G': 'smaller|more flexible',
'T>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'T>I': 'differently shaped|more hydrophobic',
'T>K': 'differently charged|differently shaped',
'T>L': 'differently shaped|more hydrophobic',
'T>M': 'differently shaped|more hydrophobic',
'T>N': 'differently shaped',
'T>P': 'from a non-aromatic to an aromatic|smaller|more flexible',
'T>Q': 'differently shaped',
'T>R': 'differently charged|differently shaped',
'T>S': 'smaller|more flexible',
'T>T': 'identical',
'T>V': 'equally sized|more hydrophobic',
'T>W': 'bigger|from a non-aromatic to an aromatic|more hydrophobic',
'T>Y': 'bigger|from a non-aromatic to an aromatic|more hydrophobic',
'V>A': 'smaller',
'V>C': 'more polar|smaller',
'V>D': 'differently charged|differently shaped',
'V>E': 'differently charged|differently shaped',
'V>F': 'bigger',
'V>G': 'more polar|smaller|more flexible',
'V>H': 'differently charged|from a non-aromatic to an aromatic|differently shaped',
'V>I': 'bigger',
'V>K': 'differently charged|differently shaped',
'V>L': 'bigger',
'V>M': 'differently shaped',
'V>N': 'more polar|differently shaped',
'V>P': 'from a non-aromatic to an aromatic|smaller|more flexible|more polar',
'V>Q': 'more polar|differently shaped',
'V>R': 'differently charged|differently shaped',
'V>S': 'more polar|smaller|more flexible',
'V>T': 'more polar|equally sized',
'V>V': 'identical',
'V>W': 'bigger|from a non-aromatic to an aromatic',
'V>Y': 'bigger|from a non-aromatic to an aromatic',
'W>A': 'from an aromatic to a non-aromatic|smaller',
'W>C': 'more polar|from an aromatic to a non-aromatic|smaller',
'W>D': 'differently charged|from an aromatic to a non-aromatic|smaller',
'W>E': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'W>F': 'from an aromatic to a non-aromatic|differently shaped',
'W>G': 'more polar|from an aromatic to a non-aromatic|smaller|more flexible',
'W>H': 'differently charged|smaller',
'W>I': 'from an aromatic to a non-aromatic|differently shaped',
'W>K': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'W>L': 'from an aromatic to a non-aromatic|smaller',
'W>M': 'from an aromatic to a non-aromatic|differently shaped',
'W>N': 'more polar|from an aromatic to a non-aromatic|differently shaped',
'W>P': 'more polar|smaller|more flexible',
'W>Q': 'more polar|from an aromatic to a non-aromatic|differently shaped',
'W>R': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'W>S': 'more polar|from an aromatic to a non-aromatic|smaller|more flexible',
'W>T': 'more polar|from an aromatic to a non-aromatic|smaller',
'W>V': 'from an aromatic to a non-aromatic|smaller',
'W>W': 'identical',
'W>Y': 'differently shaped',
'Y>A': 'from an aromatic to a non-aromatic|smaller',
'Y>C': 'more polar|from an aromatic to a non-aromatic|smaller',
'Y>D': 'differently charged|from an aromatic to a non-aromatic|smaller',
'Y>E': 'differently charged|from an aromatic to a non-aromatic|smaller',
'Y>F': 'from an aromatic to a non-aromatic|smaller',
'Y>G': 'more polar|from an aromatic to a non-aromatic|smaller|more flexible',
'Y>H': 'differently charged|smaller',
'Y>I': 'from an aromatic to a non-aromatic|differently shaped',
'Y>K': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'Y>L': 'from an aromatic to a non-aromatic|smaller',
'Y>M': 'from an aromatic to a non-aromatic|smaller',
'Y>N': 'more polar|from an aromatic to a non-aromatic|differently shaped',
'Y>P': 'more polar|smaller|more flexible',
'Y>Q': 'more polar|from an aromatic to a non-aromatic|differently shaped',
'Y>R': 'differently charged|from an aromatic to a non-aromatic|differently shaped',
'Y>S': 'more polar|from an aromatic to a non-aromatic|smaller|more flexible',
'Y>T': 'more polar|from an aromatic to a non-aromatic|smaller',
'Y>V': 'from an aromatic to a non-aromatic|smaller',
'Y>W': 'differently shaped',
'Y>Y': 'identical'}
# (one-letter, three-letter, full name) triples covering the 20 standard
# amino acids, the ambiguity codes (B/Z/X), selenocysteine and the stop codon.
# NOTE(review): 'Sel' is non-standard -- the IUPAC three-letter code for
# selenocysteine is 'Sec'; kept as-is because parse_mutation matches on these
# triples, so changing it would change parsing behaviour. TODO confirm.
names = (('A', 'Ala', 'Alanine'),
         ('B', 'Asx', 'Aspartate/asparagine'),
         ('C', 'Cys', 'Cysteine'),
         ('D', 'Asp', 'Aspartate'),
         ('E', 'Glu', 'Glutamate'),
         ('F', 'Phe', 'Phenylalanine'),  # BUGFIX: was misspelt 'Phenylanine'
         ('G', 'Gly', 'Glycine'),
         ('H', 'His', 'Histidine'),
         ('I', 'Ile', 'Isoleucine'),
         ('K', 'Lys', 'Lysine'),
         ('L', 'Leu', 'Leucine'),
         ('M', 'Met', 'Methionine'),
         ('N', 'Asn', 'Asparagine'),
         ('P', 'Pro', 'Proline'),
         ('Q', 'Gln', 'Glutamine'),
         ('R', 'Arg', 'Arginine'),
         ('S', 'Ser', 'Serine'),
         ('T', 'Thr', 'Threonine'),
         ('U', 'Sel', 'Selenocysteine'),
         ('V', 'Val', 'Valine'),
         ('W', 'Trp', 'Tryptophan'),
         ('X', 'Xaa', 'Any'),
         ('Y', 'Tyr', 'Tyrosine'),
         ('Z', 'Glx', 'Glutamate/glutamine'),
         ('*', 'Stop', 'Stop'))
# One-letter codes accepted in point mutations: the 20 standard AAs plus stop.
aa_list = tuple('QWERTYIPASDFGHKLCVNM*')
def __init__(self, mutation=None):
    """Optionally parse ``mutation`` (a string or a gnomAD ``Variant`` namedtuple)."""
    self.from_residue = ''
    self.to_residue = ''
    self.residue_index = 0
    self.apriori_effect = 'TBD'
    self.surface_expose = ''
    self.clean_mutation = None
    # self.exposure_effect is a computed @property, not stored here.
    self.elm = []  # filled externally by michelanglo_protein.check_elm(mutation)
    if not mutation:
        return
    # gnomAD SNPs arrive as a `Variant` namedtuple; its description is the string form.
    if not isinstance(mutation, str):
        mutation = mutation.description
    self.parse_mutation(mutation)
def __str__(self):
    # Human-readable form, e.g. 'V600E'. Not file-safe; use
    # file_friendly_mutation where a filename is needed.
    return f'{self.from_residue}{self.residue_index}{self.to_residue}'
def parse_mutation(self, mutation):
    """Parse a protein-level mutation string and fill in the instance fields.

    Accepts e.g. ``p.V600E``, ``Ala123*``, frameshift (``fs``) and deletion
    (``del`` / Δ) notations. Sets ``from_residue``, ``residue_index``,
    ``to_residue``, ``clean_mutation``, ``file_friendly_mutation`` and
    ``apriori_effect``. Returns ``self``; raises ``ValueError`` on input it
    cannot parse.
    """
    ### clean
    # NOTE(review): this looks like it was meant to reject HGVS coding-DNA
    # ('c.') notation, but as written it only catches a literal '.c'
    # substring -- TODO confirm intent.
    assert '.c' not in mutation, 'Chromosome mutation not accepted. Use Protein.'
    # remove the protein-level prefix, e.g. 'p.V600E' -> 'V600E'
    mutation = mutation.replace('p.', '').replace('P.', '')
    # convert three-letter codes (any capitalisation) to one-letter codes
    for (single, triple, _full) in self.names:
        # BUGFIX: the original's last clause was `mutation.find(triple.upper())`
        # without `!= -1`; str.find returns -1 (truthy) on a miss, so the guard
        # was effectively always true. Harmless in practice (str.replace is a
        # no-op on a miss) but meaningless as a condition.
        if triple in mutation or triple.lower() in mutation or triple.upper() in mutation:
            mutation = mutation.replace(triple, single).replace(triple.upper(), single).replace(triple.lower(), single)
    self.mutation = mutation
    ### split
    if 'fs' in self.mutation or '*' in self.mutation:
        # frameshift or explicit stop: normalise the target residue to '*'
        rex = re.match(r'(\w)(\d+)', self.mutation)
        if rex:
            self.from_residue = rex.group(1)
            self.residue_index = int(rex.group(2))
            self.to_residue = '*'
            self.file_friendly_mutation = self.from_residue + str(self.residue_index) + 'stop'
            self.clean_mutation = self.from_residue + str(self.residue_index) + '*'
        else:
            raise ValueError('odd mutation of type fs' + self.mutation)
    elif 'del' in self.mutation or '\N{GREEK CAPITAL LETTER DELTA}' in self.mutation:
        # normalise the delta sign to ASCII 'del' for downstream safety
        self.mutation = self.mutation.replace('\N{GREEK CAPITAL LETTER DELTA}', 'del')
        rex = re.match(r'del(\w)(\d+)', self.mutation)
        if not rex:
            rex = re.match(r'(\w)(\d+)del', self.mutation)
        if rex:
            self.from_residue = rex.group(1)
            self.residue_index = int(rex.group(2))
            self.to_residue = rex.group(1)  # technically wrong for a deletion; kept for compatibility
            self.file_friendly_mutation = 'del' + self.from_residue + str(self.residue_index)
            self.clean_mutation = 'del' + self.from_residue + str(self.residue_index)
            warn('Mutation parsing: Deletions are not handled correctly atm...')
        else:
            raise ValueError('odd mutation of type deletion' + self.mutation)
    else:
        # regular missense substitution, e.g. 'V600E'
        rex = re.match(r'(\w)(\d+)(\w)', self.mutation)
        if rex:
            assert rex.group(1) in self.aa_list, 'The from mutant is not a valid amino acid'
            assert rex.group(3) in self.aa_list, 'The to mutant is not a valid amino acid'
            self.from_residue = rex.group(1)
            self.residue_index = int(rex.group(2))
            self.to_residue = rex.group(3)
            self.file_friendly_mutation = self.from_residue + str(self.residue_index) + self.to_residue
            self.clean_mutation = self.from_residue + str(self.residue_index) + self.to_residue
        else:
            raise ValueError(self.mutation + ' is an odd_mutation')
    ## classify apriori effect
    # _apriori_data values are '|'-separated phrases such as
    # 'more polar|smaller'; join them into a readable sentence.
    if '*' in self.to_residue:
        self.apriori_effect = 'The mutation results in a truncation.'
    elif '*' in self.from_residue:
        self.apriori_effect = 'The mutation results in a longer sequence.'
    elif len(self.to_residue) > 1:
        self.apriori_effect = 'The mutation results in a frameshift.'
    else:
        self.apriori_effect = 'The mutation changes one amino acid to another that is ' + \
                              ', '.join(self._apriori_data[self.from_residue + '>' + self.to_residue].split('|')) + '.'
    return self
@property
def exposure_effect(self):
    """Human-readable guess at the structural consequence of the mutation.

    Combines ``surface_expose`` (set externally -- the final branch notes it
    is filled by the ProteinAnalyser: 'buried', 'partially buried' or
    'surface') with the ``apriori_effect`` text computed in
    ``parse_mutation``.
    """
    # Parenthetical suffixes reused across several of the messages below.
    lowconc = '(lowering protein concentrations)'
    lessbind = '(less binding means less activation or inhibition)'
    neglegible = 'The effect should be negligible.'
    if self.surface_expose == 'buried':
        if self.to_residue == 'P':
            return f'Proline is highly destabilising in protein cores (it cannot be part of α-helices for example) {lowconc}.'
        elif 'differently charged' in self.apriori_effect:
            return f'Charge changes in protein cores are highly destabilising {lowconc}.'
        elif 'bigger' in self.apriori_effect or 'shaped' in self.apriori_effect:
            return f'Larger residues in protein cores are highly destabilising {lowconc}.'
        elif 'polar' in self.apriori_effect:
            return f'Protein cores are generally hydrophobic, so a change in polarity is generally destabilising {lowconc}.'
        elif 'smaller' in self.apriori_effect:
            # NOTE(review): 'michelanglo_protein concentrations' in this message looks
            # like a find-and-replace artefact ('protein' -> 'michelanglo_protein') -- confirm.
            return f'Changes to smaller residues remove some interactions, thus weakly destabilising the protein (potentially lowering michelanglo_protein concentrations) but most likely have no effect.'
        else:
            return f'Mutations in protein cores are generally destabilising {lowconc}, but the mutation is very mild.'
    elif self.surface_expose == 'partially buried':
        if self.to_residue == 'P':
            return f'Proline is highly destabilising in protein cores (it cannot be part of α-helices for example) {lowconc}.'
        elif 'differently charged' in self.apriori_effect:
            return f'Charge changes are most often destabilising {lowconc}.'
        elif 'bigger' in self.apriori_effect or 'shaped' in self.apriori_effect:
            return f'Larger residues are most often destabilising {lowconc}.'
        elif 'polar' in self.apriori_effect:
            return f'A change in polarity is generally destabilising {lowconc} depending how buried it is.'
        elif 'smaller' in self.apriori_effect:
            return f'Changes to smaller residues remove some interactions, but most likely have no effect.'
        else:
            return neglegible
    elif self.surface_expose == 'surface':
        if 'differently charged' in self.apriori_effect:
            return f'A difference in charge on the surface may strongly affect membrane and partner binding {lessbind}.'
        elif 'more hydrophobic' in self.apriori_effect:
            return f'A more hydrophobic residue on the surface may result in aggregation {lowconc}.'
        elif 'bigger' in self.apriori_effect:
            return f'A larger residue on the surface may inhibit binding if it is part of a interface surface {lessbind}.'
        else:
            return neglegible
    else: #no surface_expose value. fill with the ProteinAnalyser
        return '*Could not be calculated*'
@classmethod
def long_name(cls, letter):
    """
    Single amino acid letter to a short string with the name spelt out three ways.
    :param letter: 1 AA letter
    :type letter: str
    :return: str
    """
    matches = [f'{full} ({short}, {triple})' for short, triple, full in cls.names if short == letter]
    # Raises IndexError for an unknown letter, exactly like the original lookup.
    return matches[0]
@classmethod
def aa3to1(cls, value: str):
    """Convert a three-letter amino-acid code to its one-letter code.

    One-letter codes pass through unchanged; anything unrecognised is
    returned as-is (it is assumed not to be an amino acid).
    """
    if len(value) == 3:
        wanted = value.strip().title()
        for one_letter, three_letter, _full_name in cls.names:
            if three_letter == wanted:
                return one_letter
        return value  # three characters, but not a known code
    # Single letters are already in the target alphabet; anything else is
    # not an amino acid -- both are returned unchanged.
    return value
| StarcoderdataPython |
5021746 | <gh_stars>0
'''
Hamming distance is the simplest of the string-comparison algorithms:
it simply counts the positions at which two equal-length strings differ,
i.e. the number of substitutions needed to make the strings match.
'''
def hammingDistance(s1, s2):
    """Count the positions at which equal-length sequences s1 and s2 differ."""
    if len(s1) != len(s2):
        raise ValueError("Undefined for sequences of unequal length")
    return sum(left != right for left, right in zip(s1, s2))
# Step-by-step re-implementation of hammingDistance, kept deliberately
# explicit to show what the one-liner's built-ins are doing.
def myHammingDistance(s1, s2):
    if len(s1) != len(s2):
        raise ValueError("Undefined for sequences of unequal length")
    mismatches = 0
    for left, right in zip(s1, s2):
        # bool(non-zero ordinal difference) is True exactly when the
        # characters differ; adding a bool to an int adds 0 or 1.
        differs = bool(ord(left) - ord(right))
        mismatches += differs
    return mismatches
def main():
    """Demo: compare two words with both Hamming-distance implementations."""
    # BUGFIX: the original used Python 2 print statements, which are a
    # SyntaxError on Python 3; converted to the print() function.
    print('hamming distance')
    wordA = "wamming"
    wordB = "hammony"
    print(wordA + " " + wordB)
    # Both implementations must agree on the same inputs.
    print(hammingDistance(wordA, wordB))
    print(myHammingDistance(wordA, wordB))


# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6585294 | import json
import requests
import os
import time
import sys
## Pair: BTCUSD, USDBTC, DASHUSD, etc
class PriceRetriever:
    """Collects BTC ask prices from several exchanges and merges them into a
    JSON price cache with per-currency averages.

    Pair naming convention used by the exchanges: BTCUSD, USDBTC, DASHUSD, etc.
    """

    # Cache file shared with the web frontend.
    _PRICE_FILE = os.path.join('/home/cashhom2/Cash4Home/static', "BTCPrice.json")
    _CURRENCIES = ('USD', 'VES', 'ARS', 'PEN')

    def __init__(self):
        # Raw JSON payload from each exchange; None until ask() succeeds.
        self.localbitcoins = None
        self.gemini = None
        self.bitinka = None
        self.ripio = None
        self.cexio = None
        self.binance = None  # BUGFIX: was missing from __init__ in the original
        self.coinbase = None

    @staticmethod
    def _fetch(url):
        """GET ``url`` and return the decoded JSON payload, or None on any
        network/decoding failure (callers treat None as "source unavailable")."""
        try:
            return json.loads(requests.get(url, timeout=10).content.decode('utf-8'))
        except Exception:
            return None

    def ask(self):
        """Query every exchange; a failure leaves that attribute set to None."""
        self.localbitcoins = self._fetch('https://localbitcoins.com/bitcoinaverage/ticker-all-currencies/')
        self.gemini = self._fetch('https://api.gemini.com/v1/pubticker/btcusd')
        self.bitinka = self._fetch('https://www.bitinka.com/api/apinka/ticker?format=json')
        self.ripio = self._fetch('https://www.ripio.com/api/v1/rates/')
        self.cexio = self._fetch('https://cex.io/api/ticker/BTC/USD')
        self.binance = self._fetch('https://api.binance.com/api/v1/ticker/price?symbol=BTCUSDT')
        self.coinbase = self._fetch('https://api.coinbase.com/v2/exchange-rates?currency=BTC')

    def avg(self, symbol, prices):
        """Arithmetic mean of all quotes recorded for ``symbol``; 0 when none."""
        quotes = prices.get(symbol, {}).get('prices') or {}
        if not quotes:
            return 0
        return sum(quotes.values()) / len(quotes)

    def getPriceInformation(self):
        """Merge the latest quotes into the cached price structure and return it."""
        try:
            # BUGFIX: use a context manager so the handle is always closed
            # (the original leaked it on any exception before file.close()).
            with open(self._PRICE_FILE, "r") as cache:
                raw = cache.read()
        except Exception:
            # First run: create the cache file and start from an empty document.
            open(self._PRICE_FILE, "w").close()
            raw = ""
        try:
            prices = json.loads(raw)
        except Exception:
            prices = {}
        # Robustness: guarantee every currency bucket exists, whatever was on
        # disk (older/partial cache files would otherwise raise KeyError below).
        for symbol in self._CURRENCIES:
            prices.setdefault(symbol, {'prices': {}, 'name': '', 'avg': 0})
        # One try per source so a schema change in one API cannot hide the others.
        try:
            if self.localbitcoins:
                prices['USD']['prices']['Localbitcoins'] = float(self.localbitcoins["USD"]["rates"]['last'])
                prices['VES']['prices']['Localbitcoins'] = float(self.localbitcoins['VES']['rates']['last'])
                prices['ARS']['prices']['Localbitcoins'] = float(self.localbitcoins['ARS']['rates']['last'])
                prices['PEN']['prices']['Localbitcoins'] = float(self.localbitcoins['PEN']['rates']['last'])
        except Exception:
            pass
        try:
            if self.gemini:
                prices['USD']['prices']['Gemini'] = float(self.gemini['ask'])
        except Exception:
            pass
        try:
            if self.bitinka:
                prices['USD']['prices']['Bitinka'] = float(self.bitinka['USD'][0]['ask'])
                prices['ARS']['prices']['Bitinka'] = float(self.bitinka['ARS'][0]['ask'])
                prices['PEN']['prices']['Bitinka'] = float(self.bitinka['PEN'][0]['ask'])
        except Exception:
            pass
        try:
            if self.ripio:
                # USD uses the SELL rate, ARS/PEN the BUY rate (as in the original).
                prices['USD']['prices']['Ripio'] = float(self.ripio['rates']['USD_SELL'])
                prices['ARS']['prices']['Ripio'] = float(self.ripio['rates']['ARS_BUY'])
                prices['PEN']['prices']['Ripio'] = float(self.ripio['rates']['PEN_BUY'])
        except Exception:
            pass
        try:
            if self.binance:
                prices['USD']['prices']['Binance'] = float(self.binance['price'])
        except Exception:
            pass
        try:
            if self.cexio:
                prices['USD']['prices']['Cex.io'] = float(self.cexio['ask'])
        except Exception:
            pass
        try:
            if self.coinbase:
                prices['USD']['prices']['Coinbase'] = float(self.coinbase['data']['rates']['USD'])
                prices['ARS']['prices']['Coinbase'] = float(self.coinbase['data']['rates']['ARS'])
                prices['PEN']['prices']['Coinbase'] = float(self.coinbase['data']['rates']['PEN'])
        except Exception:
            pass
        for symbol in self._CURRENCIES:
            prices[symbol]['avg'] = self.avg(symbol, prices)
        # Display symbol per currency (VES uses the bolívar sign).
        prices['USD']['symbol'] = '$'
        prices['VES']['symbol'] = 'Bs.'
        prices['ARS']['symbol'] = '$'
        prices['PEN']['symbol'] = '$'
        return prices
class UpdateBTCPrice():
    """Daemon helper: periodically rewrites the BTC price cache file."""

    RUN_EVERY_SECS = 10  # polling interval in seconds

    def __init__(self):
        self.BTCPrice = PriceRetriever()

    def do(self):
        """Fetch fresh quotes and rewrite the cache file; never raises.

        The surrounding loop must keep running, so failures are reported to
        stderr instead of being silently swallowed (the original used a bare
        ``except: pass`` which hid every error).
        """
        try:
            self.BTCPrice.ask()
            info = json.dumps(self.BTCPrice.getPriceInformation())
            # BUGFIX: context manager guarantees the handle is closed even
            # if write() fails (the original left it open in that case).
            with open(os.path.join('/home/cashhom2/Cash4Home/static', "BTCPrice.json"), "w") as out:
                out.write(info)
        except Exception as error:
            sys.stderr.write('UpdateBTCPrice.do failed: %r\n' % (error,))
# Entry point: refresh the price cache forever, sleeping between rounds.
x = UpdateBTCPrice()
# x.do()
while(True):
    x.do()
    # Sleep between refreshes so we do not hammer the exchange APIs.
    time.sleep(x.RUN_EVERY_SECS)
| StarcoderdataPython |
11281991 | <reponame>ybdesire/machinelearning
import numpy as np

# Demo: locate every occurrence of a value in a 2-D array with np.where,
# then overwrite all matched cells in one shot via fancy indexing.
a = np.array(
    [
        [1, 0, 1, 0, 1, 0, 1],
        [0, 0, 0, 1, 1, 1, 1],
        [1, 2, 1, 2, 1, 2, 1],
        [2, 3, 3, 3, 3, 3, 3],
    ]
)

# np.where on a boolean mask returns paired (row, column) index arrays.
mask = a == 2
y, x = np.where(mask)
print(y, x)
# -> [2 2 2 3] [1 3 5 0], i.e. a[2][1], a[2][3], a[2][5] and a[3][0] hold 2.

# Assigning through the paired index arrays replaces every matched element.
a[y, x] = 555
print(a) | StarcoderdataPython |
1965705 | <filename>abm1559/utils.py
import numpy as np
# Protocol parameters for the EIP-1559 fee-market simulation.
constants = {
    "BASEFEE_MAX_CHANGE_DENOMINATOR": 8,  # basefee moves by at most 1/8 per block
    "TARGET_GAS_USED": 10000000,
    "MAX_GAS_EIP1559": 20000000,
    "EIP1559_DECAY_RANGE": 800000,
    "EIP1559_GAS_INCREMENT_AMOUNT": 10,
    "INITIAL_BASEFEE": 1 * (10 ** 9),  # 10**9 wei = 1 Gwei
    "PER_TX_GASLIMIT": 8000000,
    "SIMPLE_TRANSACTION_GAS": 21000,
}

# Shared random generator for the whole simulation.
rng = np.random.default_rng()

def get_basefee_bounds(basefee, blocks):
    """Return the lowest/highest basefee reachable after ``blocks`` update steps.

    Each block can move the basefee by at most a factor of
    1/BASEFEE_MAX_CHANGE_DENOMINATOR, so the bound compounds per block.
    For ``blocks == 1`` this matches the original single-step bounds.

    :param basefee: starting basefee (wei)
    :param blocks: number of blocks over which the basefee may drift
    :return: dict with keys ``lb`` (lower bound) and ``ub`` (upper bound)
    """
    max_step = 1.0 / constants["BASEFEE_MAX_CHANGE_DENOMINATOR"]
    # BUGFIX: the original ignored `blocks` and always returned the one-step
    # bounds, despite the stated intent ("after `blocks` steps").
    lb = basefee * (1 - max_step) ** blocks
    ub = basefee * (1 + max_step) ** blocks
    return {"lb": lb, "ub": ub}
| StarcoderdataPython |
8127922 | <reponame>seberg/pandas
import pandas.core.config as cf
from pandas.core.config import is_int,is_bool,is_text,is_float
from pandas.core.format import detect_console_encoding
"""
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. if register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
module is imported, register them here rather then in the module.
"""
###########################################
# options from the "print" namespace
# The *_doc constants below are the user-facing help texts handed to
# cf.register_option further down in this module.
pc_precision_doc="""
: int
    Floating point output precision (number of significant digits). This is
    only a suggestion
"""
pc_colspace_doc="""
: int
    Default space for DataFrame columns, defaults to 12
"""
pc_max_rows_doc="""
: int
    This sets the maximum number of rows pandas should output when printing
    out various output. For example, this value determines whether the repr()
    for a dataframe prints out fully or just a summary repr.
"""
pc_max_cols_doc="""
: int
    max_rows and max_columns are used in __repr__() methods to decide if
    to_string() or info() is used to render an object to a string.
    Either one, or both can be set to 0 (experimental). Pandas will figure
    out how big the terminal is and will not display more rows and/or
    columns than can fit on it.
"""
pc_nb_repr_h_doc="""
: boolean
    When True (default), IPython notebook will use html representation for
    pandas objects (if it is available).
"""
pc_date_dayfirst_doc="""
: boolean
    When True, prints and parses dates with the day first, eg 20/01/2005
"""
pc_date_yearfirst_doc="""
: boolean
    When True, prints and parses dates with the year first, eg 2005/01/20
"""
# NOTE(review): this constant is missing the usual "_doc" suffix; kept as-is
# because the registration block below refers to it by this exact name.
pc_pprint_nest_depth="""
: int
    Defaults to 3.
    Controls the number of nested levels to process when pretty-printing
"""
pc_multi_sparse_doc="""
: boolean
    Default True, "sparsify" MultiIndex display (don't display repeated
    elements in outer levels within groups)
"""
pc_encoding_doc="""
: str/unicode
    Defaults to the detected encoding of the console.
    Specifies the encoding to be used for strings returned by to_string,
    these are generally strings meant to be displayed on the console.
"""
float_format_doc="""
: callable
    The callable should accept a floating point number and return
    a string with the desired format of the number. This is used
    in some places like SeriesFormatter.
    See core.format.EngFormatter for an example.
"""
max_colwidth_doc="""
: int
    The maximum width in characters of a column in the repr of
    a pandas data structure. When the column overflows, a "..."
    placeholder is embedded in the output.
"""
colheader_justify_doc="""
: 'left'/'right'
    Controls the justification of column headers. used by DataFrameFormatter.
"""
pc_expand_repr_doc="""
: boolean
    Default True
    Whether to print out the full DataFrame repr for wide DataFrames
    across multiple lines.
    If False, the summary representation is shown.
"""
pc_line_width_doc="""
: int
    Default 80
    When printing wide DataFrames, this is the width of each line.
"""
# Register the display options under the "print." prefix at import time, so
# they exist as soon as pandas is loaded. Each option is given its default,
# its help text (the *_doc constants above) and, where given, a type validator.
with cf.config_prefix('print'):
    cf.register_option('precision', 7, pc_precision_doc, validator=is_int)
    cf.register_option('float_format', None, float_format_doc)
    cf.register_option('column_space', 12, validator=is_int)
    cf.register_option('max_rows', 100, pc_max_rows_doc, validator=is_int)
    cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)
    cf.register_option('max_columns', 20, pc_max_cols_doc, validator=is_int)
    cf.register_option('colheader_justify', 'right', colheader_justify_doc,
                       validator=is_text)
    cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,
                       validator=is_bool)
    cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,
                       validator=is_bool)
    cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,
                       validator=is_bool)
    cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,
                       validator=is_int)
    cf.register_option('multi_sparse', True, pc_multi_sparse_doc,
                       validator=is_bool)
    # detect_console_encoding() runs once, here, at import time.
    cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,
                       validator=is_text)
    cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
    cf.register_option('line_width', 80, pc_line_width_doc)
tc_interactive_doc="""
: boolean
    Default False
    Whether to simulate interactive mode for purposes of testing
"""
# Register testing-only options under the "test." prefix.
with cf.config_prefix('test'):
    cf.register_option('interactive', False, tc_interactive_doc)
| StarcoderdataPython |
197485 | from .stationarybootstrap import Bootstrap
from .crossquantilogram import CrossQuantilogram
from .qtests import BoxPierceQ,LjungBoxQ
from .utils import DescriptiveStatistics
from .api import CQBS,CQBS_alphas,CQBS_years
from .plot import bar_example,heatmap_example,rolling_example
__doc__ = """The `Cross-Quantilogram`(CQ) is a correlation statistics that measures the quantile dependence between two time series. It can test the hypothesis that one time series has no directional predictability to another. Stationary bootstrap method helps establish the asymptotic distribution for CQ statistics and other corresponding test statistics.""" | StarcoderdataPython |
5158325 | from unittest import TestCase, mock
from src.patch_listitem import app
import pymysql
def good_api_event():
    """API Gateway-style event carrying a valid listItem JSON payload."""
    payload = '{ "listItemID": "id", "listItem": "name"}'
    return {"body": payload, "queryStringParameters": None}
def bad_api_event():
    """API Gateway-style event with no body, used to exercise input validation."""
    return dict.fromkeys(("body", "queryStringParameters"))
class TestPatchListitem(TestCase):
    """Unit tests for the patch_listitem Lambda handler (src.patch_listitem.app).

    Database access is stubbed by patching the pymysql module used inside
    src.helpers.rds_config, so no real MySQL connection is made.
    """
    def test_bad_api_call(self):
        # A request without a body must be rejected with 400.
        assert app.handler(bad_api_event(), "")['statusCode'] == 400
    @mock.patch('src.helpers.rds_config.pymysql', autospec=True)
    def test_non_existing_listitem(self, mock_pymysql):
        # fetchone() -> None simulates "no such list item"; expect 404.
        mock_cursor = mock.MagicMock()
        mock_cursor.fetchone.return_value = None
        mock_pymysql.connect.return_value.cursor.return_value.__enter__.return_value = mock_cursor
        assert app.handler(good_api_event(), "")['statusCode'] == 404
    @mock.patch('src.helpers.rds_config.pymysql', autospec=True)
    def test_error(self, mock_pymysql):
        # The item exists, but commit() raising MySQLError must map to 400.
        mock_cursor = mock.MagicMock()
        mock_pymysql.connect.return_value.commit.side_effect = pymysql.MySQLError(
            'Test')
        mock_cursor.fetchone.return_value = ['id']
        mock_pymysql.connect.return_value.cursor.return_value.__enter__.return_value = mock_cursor
        assert app.handler(good_api_event(), "")['statusCode'] == 400
    @mock.patch('src.helpers.rds_config.pymysql', autospec=True)
    def test_success(self, mock_pymysql):
        # Existing item and a clean commit -> 200.
        mock_cursor = mock.MagicMock()
        mock_cursor.fetchone.return_value = ['id']
        mock_pymysql.connect.return_value.cursor.return_value.__enter__.return_value = mock_cursor
        assert app.handler(good_api_event(), "")['statusCode'] == 200
| StarcoderdataPython |
1667176 | <filename>test_sched_slack_bot/test_controller.py
import dataclasses
import datetime
import os
import uuid
from typing import List
from unittest import mock
import pytest
from slack_bolt import App
from slack_sdk import WebClient
from sched_slack_bot.controller import AppController
from sched_slack_bot.data.schedule_access import ScheduleAccess
from sched_slack_bot.model.reminder import Reminder
from sched_slack_bot.model.schedule import Schedule
from sched_slack_bot.reminder.scheduler import ReminderScheduler
from sched_slack_bot.reminder.sender import ReminderSender
from sched_slack_bot.utils.slack_typing_stubs import SlackEvent, SlackBody, SlackBodyUser, SlackView, SlackState, \
SlackAction
from sched_slack_bot.views.app_home import get_app_home_view
from sched_slack_bot.views.schedule_dialog import SCHEDULE_NEW_DIALOG
@pytest.fixture
def controller() -> AppController:
    """A fresh, un-started AppController for each test."""
    return AppController()
@pytest.fixture()
def mocked_reminder_sender() -> mock.MagicMock:
    """Spec'd stand-in for the ReminderSender dependency."""
    return mock.MagicMock(spec=ReminderSender)
@pytest.fixture()
def mocked_app() -> mock.MagicMock:
    """Spec'd stand-in for the slack_bolt App."""
    return mock.MagicMock(spec=App)
@pytest.fixture()
def mocked_schedule_access() -> mock.MagicMock:
    """Spec'd stand-in for the ScheduleAccess persistence layer."""
    return mock.MagicMock(spec=ScheduleAccess)
@pytest.fixture()
def mocked_reminder_scheduler() -> mock.MagicMock:
    """Spec'd stand-in for the ReminderScheduler."""
    return mock.MagicMock(spec=ReminderScheduler)
@pytest.fixture()
def mocked_slack_client() -> mock.MagicMock:
    """Spec'd stand-in for the slack_sdk WebClient."""
    return mock.MagicMock(spec=WebClient)
@pytest.fixture()
def controller_with_mocks(controller: AppController, mocked_reminder_sender: mock.MagicMock,
                          mocked_app: mock.MagicMock,
                          mocked_schedule_access: mock.MagicMock,
                          mocked_reminder_scheduler: mock.MagicMock,
                          mocked_slack_client: mock.MagicMock) -> AppController:
    """AppController with every collaborator replaced by a spec'd mock.

    NOTE(review): this assigns to underscore-prefixed attributes, so it is
    deliberately coupled to AppController's internals.
    """
    controller._reminder_sender = mocked_reminder_sender
    controller._app = mocked_app
    controller._reminder_scheduler = mocked_reminder_scheduler
    controller._slack_client = mocked_slack_client
    controller._schedule_access = mocked_schedule_access
    return controller
@pytest.fixture
def schedule() -> Schedule:
    """A small two-member rotation schedule whose next rotation is imminent
    (now + 100ms), so scheduling-related code paths are exercised quickly."""
    return Schedule(id=str(uuid.uuid4()),
                    display_name="Rotation Schedule",
                    members=["U1", "U2"],
                    next_rotation=datetime.datetime.now() + datetime.timedelta(milliseconds=100),
                    time_between_rotations=datetime.timedelta(hours=2),
                    channel_id_to_notify_in="C1",
                    created_by="creator")
@pytest.fixture()
def slack_body() -> SlackBody:
    """A minimal Slack interaction payload with an empty view state and no
    actions; individual tests append the actions they need."""
    return SlackBody(trigger_id="trigger",
                     user=SlackBodyUser(id="user",
                                        username="username",
                                        name="name",
                                        team_id="T123"),
                     view=SlackView(state=SlackState(values={}), id="view"),
                     actions=[])
@pytest.mark.parametrize("required_missing_variable_name", ["MONGO_URL", "SLACK_BOT_TOKEN",
                                                            "SLACK_SIGNING_SECRET"])
def test_controller_fails_without_required_environment_variables(controller: AppController,
                                                                 required_missing_variable_name: str) -> None:
    """start() must raise when any one required environment variable is absent."""
    # Remove exactly one required variable per parametrized run (no-op if unset).
    os.environ.pop(required_missing_variable_name, None)
    with pytest.raises(RuntimeError):
        controller.start()
def test_handle_reminder_executed_saves_updated_schedule(controller_with_mocks: AppController,
                                                         mocked_schedule_access: mock.MagicMock,
                                                         schedule: Schedule) -> None:
    """After a reminder fires, the rotated schedule must be persisted."""
    controller_with_mocks.handle_reminder_executed(next_schedule=schedule)
    mocked_schedule_access.update_schedule.assert_called_once_with(schedule_id_to_update=schedule.id,
                                                                   new_schedule=schedule)
def test_app_home_opened_opens_app_home(controller_with_mocks: AppController,
                                        mocked_schedule_access: mock.MagicMock,
                                        mocked_slack_client: mock.MagicMock,
                                        schedule: Schedule) -> None:
    """Opening the app home publishes a home view with all known schedules."""
    mocked_schedule_access.get_available_schedules.return_value = [schedule]
    user = "someUser"
    event = SlackEvent(user=user)
    controller_with_mocks.handle_app_home_opened(event=event)
    assert_published_home_view(mocked_slack_client=mocked_slack_client,
                               schedules=[schedule],
                               user=user)
def assert_published_home_view(mocked_slack_client: mock.MagicMock, schedules: List[Schedule], user: str) -> None:
    """Shared assertion: the app-home view was published exactly once for
    *user*, rendered from *schedules*."""
    mocked_slack_client.views_publish.assert_called_once_with(user_id=user,
                                                              view=get_app_home_view(schedules=schedules))
def test_handle_clicked_create_opens_schedule_dialog(controller_with_mocks: AppController,
                                                     mocked_schedule_access: mock.MagicMock,
                                                     mocked_slack_client: mock.MagicMock,
                                                     slack_body: SlackBody,
                                                     schedule: Schedule) -> None:
    """Clicking "create" acknowledges the action and opens the new-schedule modal."""
    mocked_schedule_access.get_available_schedules.return_value = [schedule]
    ack = mock.MagicMock()
    controller_with_mocks.handle_clicked_create_schedule(ack=ack, body=slack_body)
    ack.assert_called_once()
    mocked_slack_client.views_open.assert_called_once_with(trigger_id=slack_body["trigger_id"],
                                                           view=SCHEDULE_NEW_DIALOG)
def test_handle_clicked_handle_submit_creates_new_schedule(controller_with_mocks: AppController,
                                                           mocked_schedule_access: mock.MagicMock,
                                                           mocked_slack_client: mock.MagicMock,
                                                           slack_body: SlackBody,
                                                           mocked_reminder_scheduler: mock.MagicMock,
                                                           mocked_reminder_sender: mock.MagicMock,
                                                           schedule: Schedule) -> None:
    """Submitting the create-schedule modal schedules a reminder, persists the
    schedule and refreshes the submitting user's app home."""
    mocked_schedule_access.get_available_schedules.return_value = [schedule]
    ack = mock.MagicMock()
    # Patch the modal parser so the test does not depend on modal state layout.
    with mock.patch("sched_slack_bot.controller.Schedule.from_modal_submission") as mocked_from_model_submission:
        mocked_from_model_submission.return_value = schedule
        controller_with_mocks.handle_submitted_create_schedule(ack=ack, body=slack_body)
    mocked_reminder_scheduler.schedule_reminder.assert_called_once_with(schedule=schedule,
                                                                        reminder_sender=mocked_reminder_sender)
    ack.assert_called_once()
    mocked_schedule_access.save_schedule.assert_called_once_with(schedule=schedule)
    assert_published_home_view(mocked_slack_client=mocked_slack_client,
                               schedules=[schedule],
                               user=slack_body["user"]["id"])
def test_handle_delete_does_nothing_without_schedule(controller_with_mocks: AppController,
                                                     mocked_schedule_access: mock.MagicMock,
                                                     mocked_slack_client: mock.MagicMock,
                                                     slack_body: SlackBody,
                                                     mocked_reminder_scheduler: mock.MagicMock,
                                                     schedule: Schedule) -> None:
    """A delete click without any action payload is acknowledged but deletes nothing."""
    ack = mock.MagicMock()
    controller_with_mocks.handle_clicked_delete_button(ack=ack,
                                                       body=slack_body)
    ack.assert_called_once()
    mocked_schedule_access.delete_schedule.assert_not_called()
def test_handle_delete_deletes_matching_schedule(controller_with_mocks: AppController,
                                                 mocked_schedule_access: mock.MagicMock,
                                                 mocked_slack_client: mock.MagicMock,
                                                 slack_body: SlackBody,
                                                 mocked_reminder_scheduler: mock.MagicMock,
                                                 schedule: Schedule) -> None:
    """Deleting via the button removes both the stored schedule and its reminder,
    then refreshes the app home with an empty schedule list."""
    # The schedule id is transported in the action's block_id.
    slack_body["actions"] = [SlackAction(action_id="DELETE", block_id=schedule.id)]
    ack = mock.MagicMock()
    controller_with_mocks.handle_clicked_delete_button(ack=ack,
                                                       body=slack_body)
    ack.assert_called_once()
    mocked_schedule_access.delete_schedule.assert_called_once_with(schedule_id=schedule.id)
    mocked_reminder_scheduler.remove_reminder_for_schedule.assert_called_once_with(schedule_id=schedule.id)
    assert_published_home_view(mocked_slack_client=mocked_slack_client,
                               schedules=[],
                               user=slack_body["user"]["id"])
def test_handle_skip_does_nothing_without_schedule(controller_with_mocks: AppController,
                                                   mocked_schedule_access: mock.MagicMock,
                                                   mocked_slack_client: mock.MagicMock,
                                                   slack_body: SlackBody,
                                                   mocked_reminder_scheduler: mock.MagicMock,
                                                   schedule: Schedule) -> None:
    """A skip confirmation without any action payload is acknowledged but updates nothing."""
    ack = mock.MagicMock()
    controller_with_mocks.handle_clicked_confirm_skip(ack=ack,
                                                      body=slack_body)
    ack.assert_called_once()
    mocked_schedule_access.update_schedule.assert_not_called()
def test_handle_skip_does_nothing_without_matching_schedule(controller_with_mocks: AppController,
                                                            mocked_schedule_access: mock.MagicMock,
                                                            mocked_slack_client: mock.MagicMock,
                                                            slack_body: SlackBody,
                                                            mocked_reminder_scheduler: mock.MagicMock,
                                                            schedule: Schedule) -> None:
    """If the skipped schedule id cannot be found in storage, nothing is updated."""
    mocked_schedule_access.get_schedule.return_value = None
    slack_body["actions"] = [SlackAction(action_id="SKIP", block_id=schedule.id)]
    ack = mock.MagicMock()
    controller_with_mocks.handle_clicked_confirm_skip(ack=ack,
                                                      body=slack_body)
    ack.assert_called_once()
    mocked_schedule_access.update_schedule.assert_not_called()
def test_handle_skip_skips_matching_schedule(controller_with_mocks: AppController,
                                             mocked_schedule_access: mock.MagicMock,
                                             mocked_slack_client: mock.MagicMock,
                                             slack_body: SlackBody,
                                             mocked_reminder_sender: mock.MagicMock,
                                             mocked_reminder_scheduler: mock.MagicMock,
                                             schedule: Schedule) -> None:
    """Confirming a skip advances the rotation index, notifies the channel,
    persists the updated schedule and reschedules the reminder."""
    # Expected post-skip state: the rotation advanced by one member.
    schedule_with_skipped_index = dataclasses.replace(schedule, current_index=schedule.next_index)
    mocked_schedule_access.get_schedule.return_value = schedule
    slack_body["actions"] = [SlackAction(action_id="SKIP", block_id=schedule.id)]
    ack = mock.MagicMock()
    controller_with_mocks.handle_clicked_confirm_skip(ack=ack,
                                                      body=slack_body)
    ack.assert_called_once()
    mocked_reminder_sender.send_skip_message.assert_called_once_with(reminder=Reminder(schedule))
    mocked_schedule_access.update_schedule.assert_called_once_with(schedule_id_to_update=schedule.id,
                                                                   new_schedule=schedule_with_skipped_index)
    mocked_reminder_scheduler.remove_reminder_for_schedule.assert_called_once_with(schedule_id=schedule.id)
    mocked_reminder_scheduler.schedule_reminder.assert_called_once_with(schedule=schedule_with_skipped_index,
                                                                        reminder_sender=mocked_reminder_sender)
| StarcoderdataPython |
1636524 | import os
from typing import Tuple
from typing import Union
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch import Tensor
from torchvision.transforms import transforms
from auxiliary.utils import correct, rescale, scale
from classes.core.ModelTCCNet import ModelTCCNet
from classes.modules.multiframe.conf_tccnet.ConfTCCNet import ConfTCCNet
class ModelConfTCCNet(ModelTCCNet):
    """Model wrapper around ConfTCCNet, a temporal colour constancy network
    that predicts an illuminant estimate together with per-patch RGB
    estimates and a spatial confidence map."""

    def __init__(self, hidden_size: int, kernel_size: int, deactivate: str):
        super().__init__()
        self._network = ConfTCCNet(hidden_size, kernel_size, deactivate).float().to(self._device)

    def predict(self, x: Tensor, m: Tensor = None, return_steps: bool = False) -> Union[Tuple, Tensor]:
        """Run inference on input sequence *x*.

        :param x: input sequence tensor
        :param m: unused here; kept for interface compatibility with the base class
        :param return_steps: when True, also return the intermediate per-patch
            RGB estimates and the confidence map
        :return: the illuminant prediction, or (pred, rgb, confidence)
        """
        pred, rgb, confidence = self._network(x)
        if return_steps:
            return pred, rgb, confidence
        return pred

    def vis_confidence(self, model_output: dict, path_to_plot: str):
        """Render a 2x3 diagnostic figure (original, confidence-masked original,
        corrected image, per-patch estimate, confidence map, weighted estimate)
        and save it to *path_to_plot*.

        :param model_output: dict with tensors "x", "y", "pred", "rgb", "c"
        :param path_to_plot: output file path; its stem must end in "_<epoch>"
            (the epoch number is parsed back out of the filename for the title)
        """
        # Detach everything so plotting never touches the autograd graph.
        model_output = {k: v.clone().detach().to(self._device) for k, v in model_output.items()}
        x, y, pred = model_output["x"], model_output["y"], model_output["pred"]
        rgb, c = model_output["rgb"], model_output["c"]

        original = transforms.ToPILImage()(x.squeeze()).convert("RGB")
        est_corrected = correct(original, pred)

        # PIL size is (width, height); rescale expects (height, width).
        size = original.size[::-1]
        weighted_est = rescale(scale(rgb * c), size).squeeze().permute(1, 2, 0)
        rgb = rescale(rgb, size).squeeze(0).permute(1, 2, 0)
        c = rescale(c, size).squeeze(0).permute(1, 2, 0)
        # BUG FIX: ``to_tensor`` lives in torchvision.transforms.functional, not
        # torch.nn.functional (imported as F in this file), so the original call
        # F.to_tensor(original) raised AttributeError.  Use the already-imported
        # torchvision transforms module instead.
        masked_original = scale(transforms.ToTensor()(original).to(self._device).permute(1, 2, 0) * c)

        plots = [(original, "original"), (masked_original, "masked_original"), (est_corrected, "correction"),
                 (rgb, "per_patch_estimate"), (c, "confidence"), (weighted_est, "weighted_estimate")]
        stages, axs = plt.subplots(2, 3)
        for i in range(2):
            for j in range(3):
                plot, text = plots[i * 3 + j]
                if isinstance(plot, Tensor):
                    # matplotlib cannot render CUDA tensors directly.
                    plot = plot.cpu()
                axs[i, j].imshow(plot, cmap="gray" if "confidence" in text else None)
                axs[i, j].set_title(text)
                axs[i, j].axis("off")

        os.makedirs(os.sep.join(path_to_plot.split(os.sep)[:-1]), exist_ok=True)
        # Epoch number is encoded as the last "_"-separated token of the filename stem.
        epoch, loss = path_to_plot.split(os.sep)[-1].split("_")[-1].split(".")[0], self.get_loss(pred, y)
        stages.suptitle("EPOCH {} - ERROR: {:.4f}".format(epoch, loss))
        stages.savefig(os.path.join(path_to_plot), bbox_inches='tight', dpi=200)
        plt.clf()
        plt.close('all')
| StarcoderdataPython |
4802238 | <reponame>tharrrk/pydigitemp
"""
Conceptual Overview
-------------------
Properly configured with respect to baud rate, data bits per character, parity and number of stop bits,
a 115,200 bit per second capable UART provides the input and output timing necessary to implement a 1-Wire master.
The UART produces the 1-Wire reset pulse, as well as read- and write-time slots. The microprocessor simply puts
one-byte character codes into the UART transmit register to send a 1-Wire 1 or 0 bit and the UART does the work.
Conversely, the microprocessor reads single-byte character codes corresponding to a 1 or 0 bit read from a 1-Wire device.
All 1-Wire bit transfers require the bus master, the UART, to begin the cycle by driving the 1-Wire bus low.
Therefore, each 1-Wire bit cycle includes a byte transmit and byte receive by the UART. When reading, the received data
is of interest, when writing, however, the receive byte is discarded. Depending on the UART's read and write first-in,
first-out (FIFO) buffer depth, the UART can also frame 1-Wire bits into byte values further reducing the processor
overhead.
For details see:
Using an UART to Implement a 1-Wire Bus Master (http://www.maximintegrated.com/en/app-notes/index.mvp/id/214)
"""
import time
import serial
import platform
from .utils import *
from .exceptions import DeviceError, AdapterError, CRCError
if PY3:
from typing import Optional, List
if platform.system() == "Windows":
    # fcntl is POSIX-only: provide no-op stand-ins on Windows so callers can
    # lock/unlock unconditionally.
    # BUG FIX: the original Windows stubs took no arguments, but both
    # functions are called as fcntl_flock(self.uart.fileno()) — on Windows
    # that raised TypeError.  The stubs must accept the file handle argument,
    # matching the POSIX implementations below.
    def fcntl_flock(fh):
        pass

    def fcntl_funlock(fh):
        pass
else:
    import fcntl

    def fcntl_flock(fh):
        # Exclusive, non-blocking advisory lock; raises IOError if held elsewhere.
        fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def fcntl_funlock(fh):
        fcntl.flock(fh, fcntl.LOCK_UN)
class UART_Adapter(object):
    """1-Wire bus master implemented on top of a serial (UART) port.

    The reset pulse is generated at 9600 baud; individual 1-Wire bit time
    slots are generated at 115200 baud with one UART byte per 1-Wire bit
    (see the module docstring for the timing rationale).
    """

    def __init__(self, port, timeout=3, dtr=True):
        # type: (str, Optional[float], Optional[bool]) -> None
        """Open the serial port, optionally drive DTR, and lock the port.

        :param port: serial device name, e.g. '/dev/ttyS0' or 'COM3'
        :param timeout: pyserial read timeout in seconds
        :param dtr: initial DTR line state; ``None`` leaves DTR untouched
        :raises DeviceError: if the port cannot be opened or locked
        """
        self.locked = False
        try:
            self.uart = serial.Serial(port, timeout=timeout)
            self.dtr = dtr
            if dtr is not None:
                self.uart.dtr = dtr
        except Exception as e:
            raise DeviceError(e)
        self._lock()

    @property
    def name(self):
        # type: () -> str
        """Name of the underlying serial device."""
        return self.uart.name

    def close(self):
        # type: () -> None
        """Unlock and close the serial port, inverting DTR back on the way out."""
        if self.uart.is_open:
            self._unlock()
            try:
                if self.dtr is not None:
                    self.uart.dtr = not(self.dtr)
                self.uart.close()
            except Exception as e:
                raise DeviceError(e)

    def _lock(self):
        # type: () -> None
        """Acquire an exclusive advisory lock on the serial port."""
        if self.uart.is_open:
            try:
                fcntl_flock(self.uart.fileno())
                self.locked = True
            except IOError:  # Already locked
                raise DeviceError('Cannot lock serial port: %s' % self.name)

    def _unlock(self):
        # type: () -> None
        """
        Un-lock serial port
        """
        if self.locked:
            try:
                fcntl_funlock(self.uart.fileno())
            except IOError:
                raise DeviceError('Cannot unlock serial port: %s' % self.name)
            self.locked = False

    def clear(self):
        # type: () -> None
        """
        Clear input and output buffers. Just in case.
        """
        try:
            self.uart.reset_input_buffer()
            self.uart.reset_output_buffer()
        except Exception as e:
            raise DeviceError(e)

    # ---[ Data Read/Write Methods ]----

    def read_bytes(self, size=1):
        # type: (int) -> bytes
        """
        Read N bytes from serial line.
        """
        data = []
        for i in range(size):
            data.append(self.read_byte())
        return bytesarray2bytes(data)

    def write_bytes(self, data):
        # type: (bytes) -> None
        """
        Write bytes to serial line.
        """
        for d in iterbytes(data):
            self.write_byte(d)

    def read_byte(self):
        # type: () -> int
        """
        Read one byte from serial line. Same as read_bit but for 8-bits.

        Eight 0xff bytes are sent to generate eight read time slots; a device
        sending a 1 leaves the echoed byte at 0xff, while a 0 pulls the line
        low so a different value is echoed.  Bits arrive LSB-first.

        :return: integer 0x00..0xff
        """
        self.clear()
        try:
            self.uart.write(b'\xff\xff\xff\xff\xff\xff\xff\xff')
            data = self.uart.read(8)
        except Exception as e:
            raise DeviceError(e)
        if len(data) != 8:
            raise AdapterError('Read error')
        value = 0
        # Reassemble the byte MSB-first from the LSB-first wire order.
        for b in reversed(list(iterbytes(data))):
            value <<= 1
            if b == 0xff:
                value += 1
        return value

    def write_byte(self, data):
        # type: (int) -> None
        """
        Write one byte to serial line. Same as write_bit but for 8-bits.

        :param data: integer 0x00..0xff
        """
        bits = []
        # Bits are transmitted LSB-first, one UART byte per 1-Wire bit.
        for i in range(8):
            bits.append(0xff if data % 2 else 0x00)  # 0 --> 0x00, 1 --> 0xff
            data >>= 1
        bits = bytesarray2bytes(bits)
        self.clear()
        try:
            self.uart.write(bits)
            back = self.uart.read(8)
        except Exception as e:
            raise DeviceError(e)
        if len(back) != 8:
            raise AdapterError('Write error')
        if bits != back:
            # Echo must match what we drove; otherwise another master interfered.
            raise AdapterError('Noise on the line detected')

    def read_bit(self):
        # type: () -> int
        """
        Read one bit from serial line.

        Writing 0xff starts read time slot. If device wants to send 0x0 it will pull the bus low
        ad we will read back value < 0xff. Otherwise it is 0x1 was sent.

        :return: integer 0x0..0x1
        """
        self.clear()
        try:
            self.uart.write(b'\xff')
            b = self.uart.read(1)
        except Exception as e:
            raise DeviceError(e)
        if len(b) != 1:
            raise AdapterError('Read error')
        value = bord(b)
        return 0b1 if value == 0xff else 0b0

    def write_bit(self, bit):
        # type: (int) -> None
        """
        Write one bit to serial line.

        0xff - writes 0x1, 0x00 writes 0x0. Read-back value shall match the value we write.
        Otherwise someone else was writing to the bus at the same time.

        :param bit: integer 0x0..0x1
        """
        bit = b'\xff' if bit else b'\x00'
        self.clear()
        try:
            self.uart.write(bit)
            back = self.uart.read(1)
        except Exception as e:
            raise DeviceError(e)
        if len(back) != 1:
            raise AdapterError('Write error')
        if bit != back:
            raise AdapterError('Noise on the line detected')

    def reset(self):
        # type: () -> None
        """
        Reset and presence detect.

        Sending 0xF0 at 9600 baud stretches the start bit into a 1-Wire reset
        pulse; any present device answers with a presence pulse that pulls the
        line low, altering the echoed byte.  The port is switched back to
        115200 baud for normal bit time slots afterwards.
        """
        self.clear()
        try:
            self.uart.baudrate = 9600
            self.uart.write(b'\xf0')
            b = self.uart.read(1)
        except Exception as e:
            raise DeviceError(e)
        if len(b) != 1:
            raise AdapterError('Read/Write error')
        d = bord(b)
        try:
            self.uart.baudrate = 115200
        except Exception as e:
            raise DeviceError(e)
        if d == 0xf0:
            # Echo unchanged: nothing pulled the bus low.
            raise AdapterError('No 1-wire device present')
        elif 0x10 <= d <= 0xe0:
            # Some low bits appeared in the echo: presence pulse detected.
            return
        else:
            raise AdapterError('Presence error: 0x%02x' % d)

    # ---[ ROM Commands ]----

    def read_ROM(self):
        # type: () -> bytes
        """
        READ ROM [33h]

        This command can only be used when there is one device on the bus. It allows the bus driver to read the
        device's 64-bit ROM code without using the Search ROM procedure. If this command is used when there
        is more than one device present on the bus, a data collision will occur when all the devices attempt to
        respond at the same time.

        :return: the 8-byte ROM code (last byte is the CRC)
        :raises CRCError: if the CRC byte does not match the first 7 bytes
        """
        self.reset()
        self.write_byte(0x33)
        rom_code = self.read_bytes(8)
        crc = crc8(rom_code[0:7])
        if crc != iord(rom_code, 7):
            raise CRCError('read_ROM CRC error')
        return rom_code

    def match_ROM(self, rom_code):
        # type: (bytes) -> None
        """
        MATCH ROM [55h]

        The match ROM command allows to address a specific device on a multidrop or single-drop bus.
        Only the device that exactly matches the 64-bit ROM code sequence will respond to the function command
        issued by the master; all other devices on the bus will wait for a reset pulse.

        :param rom_code: the 8-byte ROM code of the device to address
        """
        self.reset()
        self.write_byte(0x55)
        self.write_bytes(rom_code)

    def skip_ROM(self):
        # type: () -> None
        """
        SKIP ROM [CCh]

        The master can use this command to address all devices on the bus simultaneously without sending out
        any ROM code information.
        """
        self.reset()
        self.write_byte(0xcc)

    def search_ROM(self, alarm=False):
        # type: (bool) -> List[bytes]
        """
        SEARCH ROM [F0h]

        The master learns the ROM codes through a process of elimination that requires the master to perform
        a Search ROM cycle as many times as necessary to identify all of the devices.

        ALARM SEARCH [ECh]

        The operation of this command is identical to the operation of the Search ROM command except that
        only devices with a set alarm flag will respond.

        :param alarm: when True, run ALARM SEARCH instead of SEARCH ROM
        :return: list of 8-byte ROM codes of all responding devices
        """
        complete_roms = []
        partial_roms = []

        def search(current_rom=None):
            # ``None`` default (not ``[]``) avoids the shared-mutable-default
            # pitfall; a copy is taken so sibling branches stay independent.
            if current_rom is None:
                current_rom = []
            else:
                current_rom = current_rom[:]
            # send search command
            self.reset()
            self.write_byte(0xec if alarm else 0xf0)
            # send known bits
            for bit in current_rom:
                self.read_bit()  # skip bitN
                self.read_bit()  # skip complement of bitN
                self.write_bit(bit)
            # read rest of the bits
            for i in range(64 - len(current_rom)):
                b1 = self.read_bit()
                b2 = self.read_bit()
                if b1 != b2:  # all devices have this bit set to 0 or 1
                    current_rom.append(b1)
                    self.write_bit(b1)
                elif b1 == b2 == 0b0:
                    # there are two or more devices on the bus with bit 0 and 1 in this position
                    # save version with 1 as possible rom ...
                    rom = current_rom[:]
                    rom.append(0b1)
                    partial_roms.append(rom)
                    # ... and proceed with 0
                    current_rom.append(0b0)
                    self.write_bit(0b0)
                else:  # b1 == b2 == 1:
                    if alarm:
                        # In alarm search that means there is no more alarming devices
                        return
                    else:
                        raise AdapterError('Search command got wrong bits (two sequential 0b1)')
            complete_roms.append(bits2rom(current_rom))

        # Depth-first walk over the branch points recorded in partial_roms.
        search()
        while len(partial_roms):
            search(partial_roms.pop())

        return complete_roms

    def measure_temperature_all(self):
        # type: () -> None
        """
        This forces all temperature sensors to calculate temperature and set/unset alarm flag.
        """
        self.skip_ROM()
        self.write_byte(0x44)
        # We do not know if there are any DS18B20 or DS1822 on the line and what are their resolution settings.
        # So, we just wait max(T_conv) that is 750ms for currently supported devices.
        time.sleep(0.75)

    # ---[ Helper Functions ]----

    def get_connected_ROMs(self):
        # type: () -> List[str]
        """Return the ROM codes of all connected devices as hex strings."""
        roms = self.search_ROM(alarm=False)
        return [rom2str(rom) for rom in roms]

    def alarm_search(self):
        # type: () -> List[str]
        """Return the ROM codes (as hex strings) of devices with the alarm flag set."""
        roms = self.search_ROM(alarm=True)
        return [rom2str(rom) for rom in roms]

    def is_connected(self, rom_code):
        # type: (bytes) -> bool
        """
        Check device presence by replaying its ROM code through a search cycle.

        :param rom_code: the 8-byte ROM code to look for
        :return: True if a device with the ROM connected to the bus.
        """
        self.reset()
        self.write_byte(0xf0)
        for bit in rom2bits(rom_code):
            b1 = self.read_bit()
            b2 = self.read_bit()
            if b1 == b2 == 0b1:
                # No device is able to continue down this bit path.
                return False
            self.write_bit(bit)
        return True
| StarcoderdataPython |
9619881 | # Copyright 2014 OpenCore LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
from pymongo import MongoClient
from subprocess import Popen, PIPE
class NAT(object):
    """Manage iptables DNAT port-forwarding rules persisted in MongoDB.

    On construction the FERRY_CHAIN iptables chain is torn down and recreated,
    and every rule previously saved in MongoDB is re-applied, so forwarding
    survives restarts of this service.
    """

    def __init__(self):
        # Port allocation counter; random_port() hands out 999, 1000, ...
        self._current_port = 999
        # Ports that must never be allocated or forwarded.
        self.reserved_ports = [4000, 5000]
        self._init_state_db()
        self._clear_nat()
        self._init_nat()
        self._repop_nat()

    def _init_state_db(self):
        """Connect to MongoDB (host from $MONGODB) and select the rules collection."""
        self.mongo = MongoClient(os.environ['MONGODB'], 27017, connectTimeoutMS=6000)
        self.nat_collection = self.mongo['network']['nat']

    def _clear_nat(self):
        """Delete the FERRY_CHAIN chain and every jump to it.

        iptables errors are ignored (the chain may not exist on first run).
        """
        logging.warning("clearing nat")
        cmds = ['iptables -t nat -D PREROUTING -m addrtype --dst-type LOCAL -j FERRY_CHAIN',
                'iptables -t nat -D OUTPUT -m addrtype --dst-type LOCAL ! --dst 127.0.0.0/8 -j FERRY_CHAIN',
                'iptables -t nat -D OUTPUT -m addrtype --dst-type LOCAL -j FERRY_CHAIN',
                'iptables -t nat -D OUTPUT -j FERRY_CHAIN',
                'iptables -t nat -F FERRY_CHAIN',
                'iptables -t nat -D PREROUTING -j FERRY_CHAIN',
                'iptables -t nat -X FERRY_CHAIN']
        for c in cmds:
            logging.warning(c)
            Popen(c, shell=True)

    def _init_nat(self):
        """Create a fresh FERRY_CHAIN chain and hook it into OUTPUT/PREROUTING."""
        logging.warning("init nat")
        cmds = ['iptables -t nat -N FERRY_CHAIN',
                'iptables -t nat -A OUTPUT -m addrtype --dst-type LOCAL ! --dst 127.0.0.0/8 -j FERRY_CHAIN',
                'iptables -t nat -A PREROUTING -m addrtype --dst-type LOCAL -j FERRY_CHAIN']
        for c in cmds:
            logging.warning(c)
            Popen(c, shell=True)

    def _repop_nat(self):
        """Re-apply every forwarding rule persisted in MongoDB."""
        rules = self.nat_collection.find()
        for r in rules:
            self._save_nat(r['src_ip'], r['src_port'], r['ip'], r['port'])

    def _save_nat(self, source_ip, source_port, dest_ip, dest_port):
        """Install the FORWARD accept and DNAT iptables rules for one mapping."""
        cmds = ['iptables -I FORWARD 1 ! -i ferry0 -o ferry0 -p tcp --dport %s -d %s -j ACCEPT' % (str(dest_port), dest_ip),
                'iptables -t nat -A FERRY_CHAIN -d %s -p tcp --dport %s -j DNAT --to-destination %s:%s' % (source_ip, str(source_port), dest_ip, str(dest_port))]
        for c in cmds:
            logging.warning(c)
            Popen(c, shell=True)

    def _delete_nat(self, source_ip, source_port, dest_ip, dest_port):
        """Remove the iptables rules previously installed by _save_nat."""
        cmds = ['iptables -D FORWARD ! -i ferry0 -o ferry0 -p tcp --dport %s -d %s -j ACCEPT' % (str(dest_port), dest_ip),
                'iptables -t nat -D FERRY_CHAIN -d %s -p tcp --dport %s -j DNAT --to-destination %s:%s' % (source_ip, str(source_port), dest_ip, str(dest_port))]
        for c in cmds:
            logging.warning(c)
            Popen(c, shell=True)

    def _save_forwarding_rule(self, source_ip, source_port, dest_ip, dest_port):
        """Persist one forwarding rule in MongoDB."""
        self.nat_collection.insert({ 'ip' : dest_ip,
                                     'port' : dest_port,
                                     'src_ip' : source_ip,
                                     'src_port' : source_port })

    def _delete_forwarding_rule(self, dest_ip, dest_port):
        """Remove the persisted rule for (dest_ip, dest_port) from MongoDB."""
        self.nat_collection.remove( { 'ip' : dest_ip,
                                      'port' : dest_port } )

    def random_port(self):
        """Return the next sequential free port as a string, skipping reserved
        ports.  (Despite the name, allocation is sequential, not random.)"""
        while True:
            port = self._current_port
            self._current_port += 1
            if not port in self.reserved_ports:
                return str(port)

    def has_rule(self, dest_ip, dest_port):
        """Look up the persisted rule for a destination.

        :return: (src_ip, src_port) of the rule, or (None, None) if absent.
        """
        rule = self.nat_collection.find_one( { 'ip' : dest_ip,
                                               'port' : dest_port } )
        if rule:
            return rule['src_ip'], rule['src_port']
        else:
            return None, None

    def delete_rule(self, dest_ip, dest_port):
        """
        Delete the forwarding rule.
        """
        src_ip, src_port = self.has_rule(dest_ip, dest_port)
        if src_ip:
            self._delete_forwarding_rule(dest_ip, dest_port)
            self._delete_nat(src_ip, src_port, dest_ip, dest_port)
        else:
            logging.warning("no such dest %s:%s" % (dest_ip, dest_port))

    def forward_rule(self, source_ip, source_port, dest_ip, dest_port):
        """
        Add a new forwarding rule.

        :return: True if the rule was installed; False when the source port is
            reserved or the destination already has a rule.
        """
        if source_port in self.reserved_ports:
            # BUG FIX: the original concatenated source_port onto a str; since
            # reserved_ports holds ints, a matching (int) port raised TypeError
            # here.  Lazy %-formatting handles both int and str.
            logging.warning("cannot use reserved port %s", source_port)
            return False

        src_ip, src_port = self.has_rule(dest_ip, dest_port)
        if not src_ip:
            self._save_forwarding_rule(source_ip, source_port, dest_ip, dest_port)
            self._save_nat(source_ip, source_port, dest_ip, dest_port)
            return True
        else:
            # BUG FIX: same str+int concatenation hazard as above.
            logging.warning("port %s already reserved", source_port)
            return False
| StarcoderdataPython |
1879754 | """
Routing registration support.
Intercepts Flask's normal route registration to inject conventions.
"""
from distutils.util import strtobool
from flask_cors import cross_origin
from microcosm.api import defaults
from microcosm_logging.decorators import context_logger
@defaults(
    converters=[
        "uuid",
    ],
    enable_audit="true",
    enable_basic_auth="false",
    enable_context_logger="true",
    enable_cors="true",
    enable_metrics="false",
)
def configure_route_decorator(graph):
    """
    Configure a flask route decorator that operates on `Operation` and `Namespace` objects.

    By default, enables CORS support, assuming that service APIs are not exposed
    directly to browsers except when using API browsing tools.

    Usage:

        @graph.route(ns.collection_path, Operation.Search, ns)
        def search_foo():
            pass

    """
    # NOTE(review): distutils.util.strtobool is deprecated (distutils is
    # removed in Python 3.12); config values here are "true"/"false" strings.
    enable_audit = strtobool(graph.config.route.enable_audit)
    enable_basic_auth = strtobool(graph.config.route.enable_basic_auth)
    enable_context_logger = strtobool(graph.config.route.enable_context_logger)
    enable_cors = strtobool(graph.config.route.enable_cors)
    enable_metrics = strtobool(graph.config.route.enable_metrics)

    # routes depends on converters
    graph.use(*graph.config.route.converters)

    def route(path, operation, ns):
        """
        :param path: a URI path, possibly derived from a property of the `ns`
        :param operation: an `Operation` enum value
        :param ns: a `Namespace` instance
        """
        def decorator(func):
            endpoint = ns.endpoint_for(operation)
            endpoint_path = graph.build_route_path(path, ns.prefix)
            # Decorators are applied innermost-first; per-namespace flags can
            # enable features even when globally disabled.
            if enable_cors:
                func = cross_origin(supports_credentials=True)(func)
            if enable_basic_auth or ns.enable_basic_auth:
                func = graph.basic_auth.required(func)
            if enable_context_logger and ns.controller is not None:
                func = context_logger(
                    graph.request_context,
                    func,
                    parent=ns.controller,
                )
            # set the opaque component data_func to look at the flask request context
            func = graph.opaque.initialize(graph.request_context)(func)
            if enable_metrics or ns.enable_metrics:
                # Imported lazily so the metrics extras stay optional.
                from microcosm_flask.metrics import StatusCodeClassifier

                tags = [f"endpoint:{endpoint}", "backend_type:microcosm_flask"]
                func = graph.metrics_counting(
                    "route",
                    tags=tags,
                    classifier_cls=StatusCodeClassifier,
                )(func)
                func = graph.metrics_timing("route", tags=tags)(func)
            # keep audit decoration last (before registering the route) so that
            # errors raised by other decorators are captured in the audit trail
            if enable_audit:
                func = graph.audit(func)
            graph.app.route(
                endpoint_path,
                endpoint=endpoint,
                methods=[operation.value.method],
            )(func)
            return func
        return decorator
    return route
| StarcoderdataPython |
5114052 | from urllib import parse
from urllib.parse import quote
import requests
import xlrd
import os
import piexif
from PIL import Image
import uuid
import json
def help():
    """Print the command overview for the interactive console."""
    usage_lines = (
        "help ------------------- 帮助选项(查看文档详细信息)",
        "sc [cho] --------------- 搜索数据库信息",
        "de [cho] [name] -------- 删除数据库信息",
        "img [file_name][name] -- 上传图像文件",
        "imgp [path][name] ------ 批量上传图像文件",
        "rd [file_name] --------- 读取excil上传格式",
    )
    for line in usage_lines:
        print(line)
def main():
    """Interactive console loop: read a line, tokenise it, dispatch it."""
    print("黑利博瑞_Web_API_控制程序")
    print("输入“help” 查看帮助")
    while True:
        raw = input(">>")
        if not raw:
            # An empty line would otherwise dispatch an empty token list.
            continue
        core_cmd(raw.split(" "))
def core_cmd(cmd):
    """Dispatch a tokenised command line to its handler.

    :param cmd: list of whitespace-split tokens; cmd[0] is the command name.
    """
    if cmd[0] == "help":
        help()
    if cmd[0] == "de":
        # NOTE(review): "de" (delete) currently only shows the help text --
        # presumably a placeholder until deletion is implemented; confirm.
        help()
    if cmd[0] == "sc":
        # NOTE(review): "sc" (search) likewise only shows the help text.
        help()
    if cmd[0] == "img":
        # Upload a single image: img <file_path> <name_prefix>
        update_img(cmd,"one")
    if cmd[0] == "imgp":
        # Batch-upload every image under a directory: imgp <path> <name>
        update_img(cmd,"one+")
    if cmd[0] == "rd":
        # Read an Excel sheet and push its content to the web API.
        core_update(cmd)
def update_img(cmd,mod):
    """Upload image file(s) to the local web API.

    :param cmd: command tokens; cmd[1] is a directory (batch mode) or a file
        path (single mode), cmd[2] is the name prefix in single mode.
    :param mod: "one+" for batch mode (rename + upload every image under a
        directory), anything else for single-file mode.
    """
    if mod == "one+":
        # Batch mode: rename all images in the directory (and strip EXIF
        # orientation), then upload each renamed file.
        path,name = change_img_name(cmd[1],"det")
        for x in path:
            url = "http://127.0.0.1:5000/api/upload/"
            newname = x.split('/')
            # Basename only; the server stores the file under this name.
            s = newname[len(newname)-1]
            files = {'file':(s,open(x,'rb'),'image/jpg')}
            print("照片%r信息处理完成!" %(x))
            js = 0
            # Retry the upload up to three times on timeout/connection errors.
            while js < 3:
                try:
                    #r = requests.post(url,files = files, verify=False, timeout=5)
                    r = requests.post(url,files = files, timeout=5)
                    result = r.text
                    print("照片%r传输完成!" %(x))
                    # Sentinel > 3 exits the retry loop after success.
                    js = 4
                except:
                    js += 1
                    print("照片传输超时!正在重连(%r/3)" %(str(js)))
        print("\n\n\n")
        # Echo the generated UUID names so the operator can record them.
        for x in name:
            print(x)
    else:
        # Single-file mode: rename the file in place to
        # <prefix><marker-uuid><uuid4>.<original extension>, then upload it.
        now_name = cmd[1]
        uuidx = uuid.uuid4()
        js_n_lj = now_name.split('/')
        hz = now_name.split('.')
        # Keep the original file extension.
        hz = hz[-1]
        del js_n_lj[-1]
        # Directory part of the original path.
        jdlj = "/".join(js_n_lj)
        f_name = jdlj + "/" + cmd[2] + "9046380f-3b5c-4cce-acd6-31a4f0088228" + str(uuidx) + "." + hz
        print(f_name)
        os.rename(now_name, f_name)
        x = f_name
        url = "http://127.0.0.1:5000/api/upload/"
        newname = x.split("/")
        s = newname[len(newname)-1]
        files = {'file':(s,open(x,'rb'),'image/jpg')}
        print(files)
        print("照片%r信息处理完成!" %(x))
        js = 0
        # Same three-attempt retry loop as batch mode.
        while js < 3:
            try:
                #r = requests.post(url,files = files, verify=False, timeout=5)
                r = requests.post(url,files = files, timeout=5)
                result = r.text
                print("照片%r传输完成!" %(x))
                js = 4
            except:
                js += 1
                print("照片传输超时!正在重连(%r/3)" %(str(js)))
        print("\n\n\n" + f_name)
def read_excil(path):
    """Read column 1 of the first sheet of an Excel workbook.

    Every cell is coerced to ``str`` so that numeric cells cannot break the
    later string handling.

    :param path: path to the Excel (.xls) file.
    :return: list of the column's cell values as strings.
    """
    sheet = xlrd.open_workbook(path).sheet_by_index(0)
    return [str(cell) for cell in sheet.col_values(1)]
def read_word_for_det_body(path):
    """Build the "det body" payload string from an Excel word list.

    Column 2 holds the Chinese text of each word and column 3 the English
    text.  Each (chinese, english) pair is joined with one fixed UUID marker
    and the pairs are joined with a second fixed UUID marker, producing the
    wire format the web API parses back apart.

    :param path: path to the Excel (.xls) file.
    :return: the encoded payload string.
    """
    pair_marker = 'f355c387-57f6-4734-af7e-26af5293d970'
    list_marker = '295ff6c3-032b-4a83-a397-1cc0e754f785'
    sheet = xlrd.open_workbook(path).sheet_by_index(0)
    # Coerce every cell to str to survive numeric cells.
    chinese = [str(cell) for cell in sheet.col_values(2)]
    english = [str(cell) for cell in sheet.col_values(3)]
    encoded_pairs = [pair_marker.join(pair) for pair in zip(chinese, english)]
    return list_marker.join(encoded_pairs)
def change_img_name(jdlj, name_head):
    """Rename every image under *jdlj* and normalise its EXIF orientation.

    :param jdlj: absolute directory path to scan.
    :param name_head: prefix for the generated file names.
    :return: tuple of (absolute paths of the renamed images,
        the UUID name strings assigned to them).
    """
    renamed_paths, uuid_names = get_img_name(jdlj, name_head)
    changejpgexif(renamed_paths)
    return renamed_paths, uuid_names
def core_update(cmd):
    """Read an Excel upload sheet and send its content to the web API.

    Column 1 of the sheet (returned by read_excil) drives the record type:
    row 0 is "year" or "det", the following rows are the record fields.
    Shows the assembled payload and waits for operator confirmation before
    sending.

    :param cmd: command tokens; cmd[1] is the Excel file path.
    """
    up_date_list = read_excil(cmd[1])
    if up_date_list[0] == "year":
        api = "update"
        cho = up_date_list[0]
        year_name = up_date_list[1]
        describe = up_date_list[2]
        photo_user_path = up_date_list[3]
        # Server-side location of the (already uploaded) thumbnail.
        photo_path = "/static/img/year_min/" + photo_user_path
        # Fields are joined with a fixed UUID marker the server splits on.
        data = api+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+cho+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+year_name+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+describe+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+photo_path
        print("\n")
        print("api:"+ api)
        print("cho:" + cho)
        print("name:" + year_name)
        print("describe:" + describe)
        print("photo_path:" + photo_path)
        print("send_data:" + data)
        # Operator confirms the payload before it is transmitted.
        input("按下回车进行发送...")
        send_get_http(data)
    if up_date_list[0] == "det":
        # "det" records additionally carry the encoded word-list body.
        det_body = read_word_for_det_body(cmd[1])
        api = "update"
        cho = up_date_list[0]
        detname = up_date_list[1]
        describe = up_date_list[2]
        photo_user_path = up_date_list[3]
        photo_path = "/static/img/det_min/" + photo_user_path
        body = str(det_body)
        act_id = up_date_list[4]
        data = api+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+cho+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+detname+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+describe+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+photo_path+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+body+"899340aa-5a52-42c4-b0ae-da135b0acb1f"+act_id
        print("\n")
        print("api:"+ api)
        print("cho:" + cho)
        print("name:" + detname)
        print("describe:" + describe)
        print("photo_path:" + photo_path)
        print("body:" + body)
        print("act_id:" + act_id)
        print("send_data:" + data)
        input("按下回车进行发送...")
        send_get_http(data)
def send_get_http(data):
    """POST *data* to the local web API, retrying up to three times.

    :param data: the pre-assembled payload string (may contain Chinese text).
    """
    url = "http://127.0.0.1:5000/api/"
    # Percent-encode the payload so non-ASCII (Chinese) text survives the POST.
    # BUG FIX: the second positional argument of ``quote`` is ``safe``, not an
    # encoding, so ``quote(data, 'utf-8')`` wrongly marked the characters
    # "u t f - 8" as safe and percent-encoded "/" instead.  Pass by keyword;
    # UTF-8 is the default encoding anyway.
    textmod = quote(data, safe='/', encoding='utf-8')
    js = 0
    # NOTE(review): this is not a valid HTTP header ("Content-Type:
    # application/json" was probably intended) and the payload is not JSON;
    # left unchanged because the server side is not visible here.
    headers = {'application':'json'}
    # Retry the request up to three times on connection errors/timeouts.
    while js < 3 :
        try:
            response = requests.post(url, headers=headers, data=textmod)
            print("申请get方式发送完成!")
            # Sentinel > 3 exits the retry loop after success.
            js = 4
        except:
            js += 1
            print("传输超时!正在重连(%r/3)" %(str(js)))
def changejpgexif(listb):
    """Normalise the EXIF orientation of every image in *listb*.

    Images whose EXIF orientation tag (IFD "0th", tag 274) is 3, 6 or 8 are
    physically rotated so they display upright, the tag is reset to 1
    (normal), and the file is re-saved in place at JPEG quality 80.  Images
    without usable EXIF data are skipped with a message.

    :param listb: list of absolute image paths.
    """
    print("开始移除照片其他信息")
    for x in listb:
        try:
            # 1 == "normal" orientation; only changed if a rotation tag is found.
            zc = 1
            exif_dict = piexif.load(x)
            if exif_dict is not None:
                print("已检测照片属性")
                # A missing 274 key raises KeyError, deliberately handled
                # below as "no EXIF attributes detected".
                if exif_dict["0th"][274] == 3:
                    zc = 3
                    print("照片旋转参数为3")
                if exif_dict["0th"][274] == 6:
                    zc = 6
                    print("照片旋转参数为6")
                if exif_dict["0th"][274] == 8:
                    zc = 8
                    print("照片旋转参数为8")
                im = Image.open(x)
                exif_dict["0th"][274] = 1
                bit = piexif.dump(exif_dict)
                if zc == 3:
                    im = im.transpose(Image.ROTATE_180)
                if zc == 6:
                    im = im.transpose(Image.ROTATE_270)
                if zc == 8:
                    im = im.transpose(Image.ROTATE_90)
                im.save(x, exif=bit,quality=80)
        except Exception:
            # BUG FIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            print("图片未检测到附加属性")
        # BUG FIX: the original also had an ``else:`` clause printing the same
        # "no attributes detected" message after every *successful*
        # normalisation, which was misleading; removed.
# Absolute directory containing this script.
# NOTE(review): appears unused -- the functions above take their paths as
# arguments and update_img shadows the name with a local; confirm before relying on it.
jdlj = os.path.dirname(os.path.abspath(__file__))
def get_img_name(Jdlj, head_of_img_name):
    """Rename every image below *Jdlj* to '<prefix><marker><uuid4>.jpg'.

    Recursively walks the directory, assigns each image (.jpeg/.jpg/.png,
    any letter case) a fresh UUID, and renames it in place within its own
    directory.  As in the original behaviour, every renamed file gets a
    ``.jpg`` suffix, even PNG sources.

    :param Jdlj: absolute directory path to scan.
    :param head_of_img_name: prefix prepended to each generated name.
    :return: tuple of (new absolute file paths, the generated UUID strings).
    """
    # Fixed marker separating the prefix from the UUID in generated names.
    marker = '9046380f-3b5c-4cce-acd6-31a4f0088228'
    image_exts = {'.jpeg', '.jpg', '.png'}
    old_paths = []
    new_paths = []
    uuid_list = []
    for root, dirs, files in os.walk(Jdlj):
        for file in files:
            # Case-insensitive extension check replaces the original six
            # hard-coded comparisons (and now also accepts e.g. '.Jpg').
            if os.path.splitext(file)[1].lower() in image_exts:
                new_id = str(uuid.uuid4())
                uuid_list.append(new_id)
                # BUG FIX: the original built both paths from Jdlj instead of
                # the walk's ``root``, so images inside subdirectories made
                # os.rename fail.  Keep the file in its own directory.
                old_paths.append(root + '/' + file)
                new_paths.append(root + '/' + head_of_img_name + marker + new_id + '.jpg')
    for old_path, new_path in zip(old_paths, new_paths):
        os.rename(old_path, new_path)
    # new_paths: absolute paths after renaming; uuid_list: just the UUIDs.
    return new_paths, uuid_list
if __name__ == '__main__':
    # Script entry point; ``main`` is defined elsewhere in this script.
    main()
| StarcoderdataPython |
5004283 | """
This package contains all Pydantic models used for the Amplitude V1 API
requests and responses.
"""
from .identify import Identification, IdentifyAPIRequest, UserProperties
__all__ = ["Identification", "IdentifyAPIRequest", "UserProperties"]
| StarcoderdataPython |
3346969 | <filename>venv/lib/python3.8/site-packages/hypothesis/internal/validation.py
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2020 <NAME>
# (<EMAIL>), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
import decimal
import math
from numbers import Rational, Real
from hypothesis.errors import InvalidArgument
from hypothesis.internal.coverage import check_function
@check_function
def check_type(typ, arg, name):
    """Raise InvalidArgument unless *arg* is an instance of *typ*.

    *typ* is a single type or a tuple of at least two types; *name* is the
    argument name reported in the error message.
    """
    if isinstance(arg, typ):
        return
    if isinstance(typ, tuple):
        assert len(typ) >= 2, "Use bare type instead of len-1 tuple"
        typ_string = "one of %s" % (", ".join(t.__name__ for t in typ))
    else:
        typ_string = typ.__name__
    if typ_string == "SearchStrategy":
        from hypothesis.strategies import SearchStrategy

        # Use hypothesis.strategies._internal.strategies.check_strategy
        # instead, as it has some helpful "did you mean..." logic.
        assert typ is not SearchStrategy, "use check_strategy instead"
    raise InvalidArgument(
        "Expected %s but got %s=%r (type=%s)"
        % (typ_string, name, arg, type(arg).__name__)
    )
@check_function
def check_valid_integer(value, name):
    """Check that *value* is either unspecified (None) or an integer.

    Delegates to check_type, which raises InvalidArgument on failure.
    """
    if value is not None:
        check_type(int, value, name)
@check_function
def check_valid_bound(value, name):
    """Check that *value* is either unspecified or a usable interval bound.

    Raises InvalidArgument for non-real values and for NaN.
    """
    # None and exact numbers (int / Rational) are always acceptable bounds.
    if value is None:
        return
    if isinstance(value, (int, Rational)):
        return
    if isinstance(value, (Real, decimal.Decimal)):
        # Inexact reals are fine unless they are NaN, which cannot order.
        if math.isnan(value):
            raise InvalidArgument("Invalid end point %s=%r" % (name, value))
        return
    raise InvalidArgument("%s=%r must be a real number." % (name, value))
@check_function
def check_valid_magnitude(value, name):
    """Checks that value is either unspecified, or a non-negative valid
    interval bound.

    Otherwise raises InvalidArgument.
    """
    check_valid_bound(value, name)
    if value is not None and value < 0:
        raise InvalidArgument("%s=%r must not be negative." % (name, value))
    if value is None and name == "min_magnitude":
        from hypothesis._settings import note_deprecation

        # Passing min_magnitude=None explicitly is deprecated in favour of
        # 0 (or simply omitting the argument).
        note_deprecation(
            "min_magnitude=None is deprecated; use min_magnitude=0 "
            "or omit the argument entirely.",
            since="2020-05-13",
        )
@check_function
def try_convert(typ, value, name):
    """Coerce *value* to *typ*; None passes through unchanged.

    Raises InvalidArgument if the conversion fails.
    """
    if value is None or isinstance(value, typ):
        # Nothing to do: None is "unspecified", instances are kept as-is.
        return value
    try:
        return typ(value)
    except (TypeError, ValueError, ArithmeticError):
        raise InvalidArgument(
            "Cannot convert %s=%r of type %s to type %s"
            % (name, value, type(value).__name__, typ.__name__)
        )
@check_function
def check_valid_size(value, name):
    """Check that *value* is unspecified or a non-negative integer size.

    Raises InvalidArgument otherwise.
    """
    if value is None:
        # min_size/size must always be supplied; other size arguments
        # (e.g. max_size) may legitimately be omitted.
        if name not in ("min_size", "size"):
            return
    check_type(int, value, name)
    if value < 0:
        raise InvalidArgument("Invalid size %s=%r < 0" % (name, value))
@check_function
def check_valid_interval(lower_bound, upper_bound, lower_name, upper_name):
    """Check that the two bounds, when both given, form a valid interval.

    Raises InvalidArgument when upper < lower; either bound may be None.
    """
    both_given = lower_bound is not None and upper_bound is not None
    if both_given and upper_bound < lower_bound:
        raise InvalidArgument(
            "Cannot have %s=%r < %s=%r"
            % (upper_name, upper_bound, lower_name, lower_bound)
        )
@check_function
def check_valid_sizes(min_size, max_size):
    """Validate a (min_size, max_size) pair: each must be a valid size and
    together they must form a valid interval.

    Raises InvalidArgument on the first violation found.
    """
    check_valid_size(min_size, "min_size")
    check_valid_size(max_size, "max_size")
    check_valid_interval(min_size, max_size, "min_size", "max_size")
| StarcoderdataPython |
3373533 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_earning
from vision.db.signletion_engine import get_fin_consolidated_statements_pit, get_fundamentals, query
from vision.table.industry_daily import IndustryDaily
from vision.table.fin_cash_flow import FinCashFlow
from vision.table.fin_balance import FinBalance
from vision.table.fin_income import FinIncome
from vision.table.fin_indicator import FinIndicator
from vision.table.fin_balance_ttm import FinBalanceTTM
from vision.table.fin_income_ttm import FinIncomeTTM
from vision.table.fin_cash_flow_ttm import FinCashFlowTTM
from utilities.sync_util import SyncUtil
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
    """Factor-calculation engine for the "earning" factor family.

    For one trade date it loads point-in-time (PIT), TTM and MRQ financial
    statement data, computes the earning factors via
    ``financial.factor_earning.FactorEarning`` and stores the result through
    ``StorageEngine``.
    """
    def __init__(self, name, url, methods=[{'packet': 'financial.factor_earning', 'class': 'FactorEarning'}, ]):
        # NOTE(review): mutable default argument; harmless here because it
        # is only read, but methods=None with an in-body default is safer.
        self._name = name
        self._methods = methods
        self._url = url

    def get_trade_date(self, trade_date, n, days=365):
        """Return the trading day closest to n*days before *trade_date*.

        If the computed calendar day is not a trading day, step backwards
        one day at a time until a trading day is found.

        :param days: length of one period in calendar days (default 365)
        :param trade_date: current trade date, formatted YYYYMMDD
        :param n: number of periods to go back
        :return: the resulting trade date as a YYYYMMDD string
        """
        syn_util = SyncUtil()
        trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
        trade_date_sets = trade_date_sets['TRADEDATE'].values
        time_array = datetime.strptime(str(trade_date), "%Y%m%d")
        time_array = time_array - timedelta(days=days) * n
        date_time = int(datetime.strftime(time_array, "%Y%m%d"))
        if str(date_time) < min(trade_date_sets):
            # Earlier than the first known trading day: return unchanged.
            # print('date_time %s is out of trade_date_sets' % date_time)
            return str(date_time)
        else:
            while str(date_time) not in trade_date_sets:
                # Walk backwards until a trading day is hit.  NOTE(review):
                # plain integer decrement can produce invalid dates such as
                # 20200100; those never match and are simply skipped over.
                date_time = date_time - 1
            # print('trade_date pre %s year %s' % (n, date_time))
            return str(date_time)

    def _func_sets(self, method):
        # Filter out private/protected members; keep public callables only.
        return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))

    def loading_data(self, trade_date):
        """Load the base data for one trade date.

        Fetches, for every stock, the report (PIT), TTM and MRQ financial
        statement fields needed by the earning factors, for the current
        trade date and up to five prior yearly reference dates.

        :param trade_date: trade date, formatted 'YYYY-MM-DD'
        :return: (tp_earning, ttm_earning, ttm_earning_5y) DataFrames
        """
        # Normalise the date format to YYYYMMDD.
        time_array = datetime.strptime(trade_date, "%Y-%m-%d")
        trade_date = datetime.strftime(time_array, '%Y%m%d')
        # Yearly reference dates for the factors currently involved.
        trade_date_pre_year = self.get_trade_date(trade_date, 1)
        trade_date_pre_year_2 = self.get_trade_date(trade_date, 2)
        trade_date_pre_year_3 = self.get_trade_date(trade_date, 3)
        trade_date_pre_year_4 = self.get_trade_date(trade_date, 4)
        trade_date_pre_year_5 = self.get_trade_date(trade_date, 5)

        # Bookkeeping columns dropped from every query result below.
        columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']

        # Report (point-in-time) data.
        cash_flow_sets = get_fin_consolidated_statements_pit(FinCashFlow,
                                                             [FinCashFlow.goods_sale_and_service_render_cash,
                                                              FinCashFlow.cash_and_equivalents_at_end,
                                                              ], dates=[trade_date])
        for column in columns:
            if column in list(cash_flow_sets.keys()):
                cash_flow_sets = cash_flow_sets.drop(column, axis=1)
        # NOTE(review): many renames below map a column to its own name —
        # they are no-ops kept (presumably) for documentation of the fields.
        cash_flow_sets = cash_flow_sets.rename(
            columns={'goods_sale_and_service_render_cash': 'goods_sale_and_service_render_cash',  # cash received from selling goods / rendering services
                     'cash_and_equivalents_at_end': 'cash_and_equivalents_at_end',  # cash and cash equivalents at period end
                     })

        income_sets = get_fin_consolidated_statements_pit(FinIncome,
                                                          [FinIncome.total_operating_revenue,
                                                           FinIncome.operating_revenue,
                                                           FinIncome.operating_profit,
                                                           FinIncome.np_parent_company_owners,
                                                           FinIncome.net_profit,
                                                           FinIncome.operating_cost,
                                                           ], dates=[trade_date])
        for column in columns:
            if column in list(income_sets.keys()):
                income_sets = income_sets.drop(column, axis=1)
        income_sets = income_sets.rename(columns={'net_profit': 'net_profit',  # net profit
                                                  'total_operating_revenue': 'total_operating_revenue',  # total operating revenue
                                                  'operating_revenue': 'operating_revenue',  # operating revenue
                                                  'operating_cost': 'operating_cost',  # operating cost
                                                  'operating_profit': 'operating_profit',  # operating profit
                                                  'np_parent_company_owners': 'np_parent_company_owners',
                                                  # net profit attributable to parent company owners
                                                  })

        tp_earning = pd.merge(cash_flow_sets, income_sets, how='outer', on='security_code')

        indicator_sets = get_fin_consolidated_statements_pit(FinIndicator,
                                                             [FinIndicator.np_cut,  # net profit excl. non-recurring items
                                                              FinIndicator.roe_weighted,
                                                              FinIndicator.roe_ex_weighted
                                                              ], dates=[trade_date])
        for column in columns:
            if column in list(indicator_sets.keys()):
                indicator_sets = indicator_sets.drop(column, axis=1)
        # NOTE(review): a weighted-ROE field is renamed to 'adjusted_profit'
        # (the original comment called it the non-recurring-adjusted net
        # profit) — confirm the mapping is intentional.
        indicator_sets = indicator_sets.rename(columns={'roe_ex_weighted': 'adjusted_profit',  # net profit excl. non-recurring items
                                                        })

        tp_earning = pd.merge(indicator_sets, tp_earning, how='outer', on='security_code')

        balance_sets = get_fin_consolidated_statements_pit(FinBalance,
                                                           [FinBalance.equities_parent_company_owners,
                                                            ], dates=[trade_date])
        for column in columns:
            if column in list(balance_sets.keys()):
                balance_sets = balance_sets.drop(column, axis=1)
        balance_sets = balance_sets.rename(
            columns={'equities_parent_company_owners': 'equities_parent_company_owners',  # total equity attributable to parent company shareholders
                     })

        tp_earning = pd.merge(balance_sets, tp_earning, how='outer', on='security_code')

        # Prior-year report data (1 to 4 years back), suffixed accordingly.
        income_sets_pre_year_1 = get_fin_consolidated_statements_pit(FinIncome,
                                                                     [FinIncome.operating_revenue,  # operating revenue
                                                                      FinIncome.net_profit,  # net profit
                                                                      FinIncome.operating_cost,  # operating cost
                                                                      ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(income_sets_pre_year_1.keys()):
                income_sets_pre_year_1 = income_sets_pre_year_1.drop(column, axis=1)
        income_sets_pre_year_1 = income_sets_pre_year_1.rename(columns={'net_profit': 'net_profit_pre_year_1',  # net profit
                                                                        'operating_revenue': 'operating_revenue_pre_year_1',
                                                                        # operating revenue
                                                                        'operating_cost': 'operating_cost_1y',  # operating cost
                                                                        })

        tp_earning = pd.merge(income_sets_pre_year_1, tp_earning, how='outer', on='security_code')

        income_sets_pre_year_2 = get_fin_consolidated_statements_pit(FinIncome,
                                                                     [FinIncome.operating_revenue,
                                                                      FinIncome.net_profit,
                                                                      ], dates=[trade_date_pre_year_2])
        for column in columns:
            if column in list(income_sets_pre_year_2.keys()):
                income_sets_pre_year_2 = income_sets_pre_year_2.drop(column, axis=1)
        income_sets_pre_year_2 = income_sets_pre_year_2.rename(columns={'net_profit': 'net_profit_pre_year_2',  # net profit
                                                                        'operating_revenue': 'operating_revenue_pre_year_2',
                                                                        # operating revenue
                                                                        })

        tp_earning = pd.merge(income_sets_pre_year_2, tp_earning, how='outer', on='security_code')

        income_sets_pre_year_3 = get_fin_consolidated_statements_pit(FinIncome,
                                                                     [FinIncome.operating_revenue,
                                                                      FinIncome.net_profit,
                                                                      ], dates=[trade_date_pre_year_3])
        for column in columns:
            if column in list(income_sets_pre_year_3.keys()):
                income_sets_pre_year_3 = income_sets_pre_year_3.drop(column, axis=1)
        income_sets_pre_year_3 = income_sets_pre_year_3.rename(columns={'net_profit': 'net_profit_pre_year_3',  # net profit
                                                                        'operating_revenue': 'operating_revenue_pre_year_3',
                                                                        # operating revenue
                                                                        })

        tp_earning = pd.merge(income_sets_pre_year_3, tp_earning, how='outer', on='security_code')

        income_sets_pre_year_4 = get_fin_consolidated_statements_pit(FinIncome,
                                                                     [FinIncome.operating_revenue,
                                                                      FinIncome.net_profit,
                                                                      ], dates=[trade_date_pre_year_4])
        for column in columns:
            if column in list(income_sets_pre_year_4.keys()):
                income_sets_pre_year_4 = income_sets_pre_year_4.drop(column, axis=1)
        income_sets_pre_year_4 = income_sets_pre_year_4.rename(columns={'net_profit': 'net_profit_pre_year_4',  # net profit
                                                                        'operating_revenue': 'operating_revenue_pre_year_4',
                                                                        # operating revenue
                                                                        })

        tp_earning = pd.merge(income_sets_pre_year_4, tp_earning, how='outer', on='security_code')

        # TTM (trailing-twelve-month) data.
        cash_flow_ttm_sets = get_fin_consolidated_statements_pit(FinCashFlowTTM,
                                                                 [FinCashFlowTTM.FINNETCFLOW,
                                                                  ], dates=[trade_date])
        for column in columns:
            if column in list(cash_flow_ttm_sets.keys()):
                cash_flow_ttm_sets = cash_flow_ttm_sets.drop(column, axis=1)
        cash_flow_ttm_sets = cash_flow_ttm_sets.rename(columns={'FINNETCFLOW': 'net_finance_cash_flow'})

        income_ttm_sets = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                              [FinIncomeTTM.operating_revenue,  # operating revenue
                                                               FinIncomeTTM.net_profit,  # net profit
                                                               FinIncomeTTM.administration_expense,  # administration expense
                                                               FinIncomeTTM.total_operating_revenue,  # total operating revenue
                                                               FinIncomeTTM.total_profit,  # total profit
                                                               FinIncomeTTM.financial_expense,  # financial expense
                                                               FinIncomeTTM.interest_income,  # interest income
                                                               FinIncomeTTM.sale_expense,  # sale expense
                                                               FinIncomeTTM.total_operating_cost,  # total operating cost
                                                               FinIncomeTTM.operating_profit,  # operating profit
                                                               FinIncomeTTM.np_parent_company_owners,  # net profit attributable to parent company owners
                                                               FinIncomeTTM.operating_cost,  # operating cost
                                                               # FinIncomeTTM.ASSOINVEPROF,  # investment income from associates and joint ventures
                                                               FinIncomeTTM.operating_tax_surcharges,  # operating tax and surcharges
                                                               FinIncomeTTM.asset_impairment_loss,  # asset impairment loss
                                                               FinIncomeTTM.income_tax,  # income tax
                                                               ], dates=[trade_date])
        for column in columns:
            if column in list(income_ttm_sets.keys()):
                income_ttm_sets = income_ttm_sets.drop(column, axis=1)

        ttm_earning = pd.merge(income_ttm_sets, cash_flow_ttm_sets, how='outer', on='security_code')

        balance_ttm_sets = get_fin_consolidated_statements_pit(FinBalanceTTM,
                                                               [FinBalanceTTM.total_assets,  # total assets
                                                                FinBalanceTTM.total_owner_equities,  # total owners' (shareholders') equity
                                                                FinBalanceTTM.equities_parent_company_owners,
                                                                # total equity attributable to parent company shareholders
                                                                ], dates=[trade_date])
        for column in columns:
            if column in list(balance_ttm_sets.keys()):
                balance_ttm_sets = balance_ttm_sets.drop(column, axis=1)

        ttm_earning = pd.merge(ttm_earning, balance_ttm_sets, how='outer', on='security_code')

        income_ttm_sets_pre_year_1 = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                                         [FinIncomeTTM.operating_revenue,
                                                                          FinIncomeTTM.net_profit,
                                                                          ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_1.keys()):
                income_ttm_sets_pre_year_1 = income_ttm_sets_pre_year_1.drop(column, axis=1)
        income_ttm_sets_pre_year_1 = income_ttm_sets_pre_year_1.rename(
            columns={'operating_revenue': 'operating_revenue_pre_year_1',  # operating revenue
                     'net_profit': 'net_profit_pre_year_1',  # net profit
                     })

        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_1, how='outer', on='security_code')

        income_ttm_sets_pre_year_2 = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                                         [FinIncomeTTM.operating_revenue,
                                                                          FinIncomeTTM.net_profit,
                                                                          ], dates=[trade_date_pre_year_2])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_2.keys()):
                income_ttm_sets_pre_year_2 = income_ttm_sets_pre_year_2.drop(column, axis=1)
        income_ttm_sets_pre_year_2 = income_ttm_sets_pre_year_2.rename(
            columns={'operating_revenue': 'operating_revenue_pre_year_2',  # operating revenue
                     'net_profit': 'net_profit_pre_year_2',  # net profit
                     })

        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_2, how='outer', on='security_code')

        income_ttm_sets_pre_year_3 = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                                         [FinIncomeTTM.operating_revenue,
                                                                          FinIncomeTTM.net_profit,
                                                                          ], dates=[trade_date_pre_year_3])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_3.keys()):
                income_ttm_sets_pre_year_3 = income_ttm_sets_pre_year_3.drop(column, axis=1)
        income_ttm_sets_pre_year_3 = income_ttm_sets_pre_year_3.rename(
            columns={'operating_revenue': 'operating_revenue_pre_year_3',  # operating revenue
                     'net_profit': 'net_profit_pre_year_3',  # net profit
                     })

        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_3, how='outer', on='security_code')

        income_ttm_sets_pre_year_4 = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                                         [FinIncomeTTM.operating_revenue,
                                                                          FinIncomeTTM.net_profit,
                                                                          ], dates=[trade_date_pre_year_4])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_4.keys()):
                income_ttm_sets_pre_year_4 = income_ttm_sets_pre_year_4.drop(column, axis=1)
        income_ttm_sets_pre_year_4 = income_ttm_sets_pre_year_4.rename(
            columns={'operating_revenue': 'operating_revenue_pre_year_4',  # operating revenue
                     'net_profit': 'net_profit_pre_year_4',  # net profit
                     })

        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_4, how='outer', on='security_code')

        # indicator_ttm_sets = get_fin_consolidated_statements_pit(IndicatorTTM,
        #                                                          [IndicatorTTM.ROIC,  # return on invested capital
        #                                                           ], dates=[trade_date]).drop(columns, axis=1)
        # indicator_ttm_sets = indicator_ttm_sets.rename(columns={'ROIC': '',
        #                                                         })

        # MRQ (most recent quarter) data.
        balance_mrq_sets = get_fin_consolidated_statements_pit(FinBalance,
                                                               [FinBalance.total_assets,  # total assets
                                                                FinBalance.equities_parent_company_owners,
                                                                # total equity attributable to parent company shareholders
                                                                FinBalance.total_owner_equities,  # total owners' equity
                                                                FinBalance.longterm_loan,  # long-term loans
                                                                ], dates=[trade_date])
        for column in columns:
            if column in list(balance_mrq_sets.keys()):
                balance_mrq_sets = balance_mrq_sets.drop(column, axis=1)
        balance_mrq_sets = balance_mrq_sets.rename(columns={'total_assets': 'total_assets_mrq',
                                                            'equities_parent_company_owners': 'equities_parent_company_owners_mrq',
                                                            # total equity attributable to parent company shareholders
                                                            'total_owner_equities': 'total_owner_equities_mrq',
                                                            # total owners' equity
                                                            'longterm_loan': 'longterm_loan_mrq',  # long-term loans
                                                            })

        ttm_earning = pd.merge(ttm_earning, balance_mrq_sets, how='outer', on='security_code')

        # NOTE(review): despite the "_pre" suffix this query also uses
        # dates=[trade_date]; a prior-period date (e.g. trade_date_pre_year)
        # may have been intended — confirm before changing.
        balance_mrq_sets_pre = get_fin_consolidated_statements_pit(FinBalance,
                                                                   [FinBalance.total_assets,  # total assets
                                                                    FinBalance.total_owner_equities,  # total owners' equity
                                                                    FinBalance.longterm_loan,  # long-term loans
                                                                    ], dates=[trade_date])
        for column in columns:
            if column in list(balance_mrq_sets_pre.keys()):
                balance_mrq_sets_pre = balance_mrq_sets_pre.drop(column, axis=1)
        balance_mrq_sets_pre = balance_mrq_sets_pre.rename(columns={'total_assets': 'total_assets_mrq_pre',
                                                                    'total_owner_equities': 'total_owner_equities_mrq_pre',
                                                                    # total owners' equity
                                                                    'longterm_loan': 'longterm_loan_mrq_pre',  # long-term loans
                                                                    })

        ttm_earning = pd.merge(ttm_earning, balance_mrq_sets_pre, how='outer', on='security_code')

        # 5-year aggregates: sum across the yearly reference dates.
        # NOTE(review): the balance aggregate spans 5 dates but the income
        # aggregate below spans 6 (it also includes trade_date_pre_year_5) —
        # confirm the asymmetry is intentional.
        balance_con_sets = get_fin_consolidated_statements_pit(FinBalanceTTM,
                                                               [FinBalanceTTM.total_assets,  # total assets
                                                                FinBalanceTTM.total_owner_equities,  # total owners' equity
                                                                ],
                                                               dates=[trade_date,
                                                                      trade_date_pre_year,
                                                                      trade_date_pre_year_2,
                                                                      trade_date_pre_year_3,
                                                                      trade_date_pre_year_4,
                                                                      ])
        for column in columns:
            if column in list(balance_con_sets.keys()):
                balance_con_sets = balance_con_sets.drop(column, axis=1)
        balance_con_sets = balance_con_sets.groupby(['security_code'])
        balance_con_sets = balance_con_sets.sum()
        balance_con_sets = balance_con_sets.rename(columns={'total_assets': 'total_assets',
                                                            'total_owner_equities': 'total_owner_equities'})

        # cash_flow_con_sets = get_fin_consolidated_statements_pit(FinCashFlow,
        #                                                          [FinCashFlow.cash_and_equivalents_at_end,
        #                                                           ],
        #                                                          dates=[trade_date,
        #                                                                 trade_date_pre_year,
        #                                                                 trade_date_pre_year_2,
        #                                                                 trade_date_pre_year_3,
        #                                                                 trade_date_pre_year_4,
        #                                                                 trade_date_pre_year_5,
        #                                                                 ]).drop(columns, axis=1)
        # cash_flow_con_sets = cash_flow_con_sets.groupby(['security_code'])
        # cash_flow_con_sets = cash_flow_con_sets.sum()
        # cash_flow_con_sets = cash_flow_con_sets.rename(columns={'cash_and_equivalents_at_end':'cash_and_equivalents_at_end'})

        income_con_sets = get_fin_consolidated_statements_pit(FinIncome,
                                                              [FinIncome.net_profit],
                                                              dates=[trade_date,
                                                                     trade_date_pre_year,
                                                                     trade_date_pre_year_2,
                                                                     trade_date_pre_year_3,
                                                                     trade_date_pre_year_4,
                                                                     trade_date_pre_year_5,
                                                                     ])
        for column in columns:
            if column in list(income_con_sets.keys()):
                income_con_sets = income_con_sets.drop(column, axis=1)
        income_con_sets = income_con_sets.groupby(['security_code'])
        income_con_sets = income_con_sets.sum()
        income_con_sets = income_con_sets.rename(columns={'net_profit': 'net_profit'}).reset_index()

        ttm_earning_5y = pd.merge(balance_con_sets, income_con_sets, how='outer', on='security_code')

        ttm_earning_1y = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                             [FinIncomeTTM.operating_cost,
                                                              FinIncomeTTM.operating_revenue,
                                                              FinIncomeTTM.np_parent_company_owners,
                                                              ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(ttm_earning_1y.keys()):
                ttm_earning_1y = ttm_earning_1y.drop(column, axis=1)
        ttm_earning_1y = ttm_earning_1y.rename(columns={'operating_revenue': 'operating_revenue_1y',  # operating revenue
                                                        'operating_cost': 'operating_cost_1y',  # operating cost
                                                        'np_parent_company_owners': 'np_parent_company_owners_1y'
                                                        })

        ttm_earning = pd.merge(ttm_earning, ttm_earning_1y, how='outer', on='security_code')

        balance_mrq_1y = get_fin_consolidated_statements_pit(FinBalance,
                                                             [FinBalance.equities_parent_company_owners,
                                                              ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(balance_mrq_1y.keys()):
                balance_mrq_1y = balance_mrq_1y.drop(column, axis=1)
        balance_mrq_1y = balance_mrq_1y.rename(
            columns={'equities_parent_company_owners': 'equities_parent_company_owners_mrq_1y',
                     })

        ttm_earning = pd.merge(ttm_earning, balance_mrq_1y, how='outer', on='security_code')

        return tp_earning, ttm_earning, ttm_earning_5y

    def process_calc_factor(self, trade_date, tp_earning, ttm_earning, ttm_earning_5y):
        """Compute all earning factors for one trade date.

        :param trade_date: trade date (YYYYMMDD)
        :param tp_earning: report (PIT) data from loading_data
        :param ttm_earning: TTM/MRQ data from loading_data
        :param ttm_earning_5y: 5-year aggregate data from loading_data
        :return: DataFrame of factor values, one row per security_code
        """
        tp_earning = tp_earning.set_index('security_code')
        ttm_earning = ttm_earning.set_index('security_code')
        ttm_earning_5y = ttm_earning_5y.set_index('security_code')
        earning = factor_earning.FactorEarning()

        # Factor computation: each call appends factor columns in place.
        earning_sets = pd.DataFrame()
        earning_sets['security_code'] = tp_earning.index
        earning_sets = earning_sets.set_index('security_code')
        # MRQ-based factors.
        earning_sets = earning.Rev5YChg(tp_earning, earning_sets)
        earning_sets = earning.ROA5YChg(ttm_earning_5y, earning_sets)
        earning_sets = earning.ROE5Y(ttm_earning_5y, earning_sets)
        earning_sets = earning.NPCutToNP(tp_earning, earning_sets)
        earning_sets = earning.ROE(tp_earning, earning_sets)
        earning_sets = earning.ROEAvg(tp_earning, earning_sets)
        earning_sets = earning.ROEcut(tp_earning, earning_sets)
        earning_sets = earning.DGPR(tp_earning, earning_sets)
        earning_sets = earning.ROEWeight(tp_earning, earning_sets)
        earning_sets = earning.ROEDilutedWeight(tp_earning, earning_sets)
        # TTM-based factors.
        # factor_earning = earning.invest_r_associates_to_tp_latest(tp_earning, earning_sets)
        earning_sets = earning.NetNonOiToTP(ttm_earning, earning_sets)
        earning_sets = earning.GPM1YChgTTM(ttm_earning, earning_sets)
        earning_sets = earning.DROE(ttm_earning, earning_sets)
        earning_sets = earning.NetPft5YAvgChgTTM(ttm_earning, earning_sets)
        earning_sets = earning.Sales5YChgTTM(ttm_earning, earning_sets)
        # factor_earning = earning.roa(ttm_earning, earning_sets)
        earning_sets = earning.AdminExpRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.BerryRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.CFARatioMinusROATTM(ttm_earning, earning_sets)
        earning_sets = earning.CostRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.EBITToTORevTTM(ttm_earning, earning_sets)
        earning_sets = earning.PeridCostTTM(ttm_earning, earning_sets)
        earning_sets = earning.FinExpRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.ImpLossToTOITTM(ttm_earning, earning_sets)
        earning_sets = earning.OIAToOITTM(ttm_earning, earning_sets)
        earning_sets = earning.ROAexTTM(ttm_earning, earning_sets)
        earning_sets = earning.NetProfitRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.NPToTORevTTM(ttm_earning, earning_sets)
        earning_sets = earning.ExpRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.OptProfitRtTTM(ttm_earning, earning_sets)
        # factor_earning = earning.operating_profit_to_tor(ttm_earning, earning_sets)
        earning_sets = earning.ROCTTM(ttm_earning, earning_sets)
        earning_sets = earning.ROTATTM(ttm_earning, earning_sets)
        earning_sets = earning.ROETTM(ttm_earning, earning_sets)
        earning_sets = earning.ROICTTM(ttm_earning, earning_sets)
        earning_sets = earning.OwnROETTM(ttm_earning, earning_sets)
        earning_sets = earning.TolTaxToPTTM(ttm_earning, earning_sets)
        earning_sets = earning.SalesGrossMarginTTM(ttm_earning, earning_sets)
        earning_sets = earning.TaxRTTM(ttm_earning, earning_sets)
        earning_sets = earning.TotaProfRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.TaxRtTTM(ttm_earning, earning_sets)
        # factor_earning = earning.invest_r_associates_to_tp_ttm(ttm_earning, earning_sets)

        earning_sets = earning_sets.reset_index()
        earning_sets['trade_date'] = str(trade_date)
        # Normalise infinities / None to NaN for storage.
        earning_sets.replace([-np.inf, np.inf, None], np.nan, inplace=True)
        return earning_sets

    def local_run(self, trade_date):
        """Load data, compute the earning factors and persist them.

        :param trade_date: trade date, formatted 'YYYY-MM-DD'
        """
        print('trade_date %s' % trade_date)
        tic = time.time()
        tp_earning, ttm_earning, ttm_earning_5y = self.loading_data(trade_date)
        print('data load time %s' % (time.time() - tic))

        storage_engine = StorageEngine(self._url)
        result = self.process_calc_factor(trade_date, tp_earning, ttm_earning, ttm_earning_5y)
        print('cal_time %s' % (time.time() - tic))
        # Destination table name is derived from the last configured packet,
        # e.g. 'financial.factor_earning' -> 'factor_earning'.
        storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result)
        # storage_engine.update_destdb('factor_earning', trade_date, result)
# def remote_run(self, trade_date):
# total_data = self.loading_data(trade_date)
# #存储数据
# session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond))
# cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records'))
# distributed_factor.delay(session, json.dumps(self._methods), self._name)
#
# def distributed_factor(self, total_data):
# mkt_df = self.calc_factor_by_date(total_data,trade_date)
# result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date)
# @app.task
# def distributed_factor(session, trade_date, packet_sets, name):
# calc_engines = CalcEngine(name, packet_sets)
# content = cache_data.get_cache(session, factor_name)
# total_data = json_normalize(json.loads(content))
# calc_engines.distributed_factor(total_data)
#
# # @app.task()
# def factor_calculate(**kwargs):
# print("constrain_kwargs: {}".format(kwargs))
# date_index = kwargs['date_index']
# session = kwargs['session']
# factor_name = kwargs['factor_name']
# content1 = cache_data.get_cache(session + str(date_index) + "1", date_index)
# content2 = cache_data.get_cache(session + str(date_index) + "2", date_index)
# content3 = cache_data.get_cache(session + str(date_index) + "3", date_index)
# print("len_con1: %s" % len(content1))
# print("len_con2: %s" % len(content2))
# print("len_con3: %s" % len(content3))
# tp_earning = json_normalize(json.loads(str(content1, encoding='utf8')))
# ttm_earning_5y = json_normalize(json.loads(str(content2, encoding='utf8')))
# ttm_earning = json_normalize(json.loads(str(content3, encoding='utf8')))
# # cache_date.get_cache使得index的名字丢失, 所以数据需要按照下面的方式设置index
# tp_earning.set_index('security_code', inplace=True)
# ttm_earning.set_index('security_code', inplace=True)
# ttm_earning_5y.set_index('security_code', inplace=True)
# # total_earning_data = {'tp_earning': tp_earning, 'ttm_earning_5y': ttm_earning_5y, 'ttm_earning': ttm_earning}
# calculate(date_index, tp_earning, ttm_earning, ttm_earning_5y, factor_name)
| StarcoderdataPython |
3542256 | <gh_stars>1-10
# constants.py
# Active Path of Exile challenge league used when querying the trade API.
league = 'expedition'
# Base URL of the official Path of Exile trade API.
TRADE_BASE_URL = 'https://www.pathofexile.com/api/trade/'
9696752 | import numpy as np
import scipy.misc
import urllib.request as urllib
import utils.dataloaders as dataloaders
from models.wideresnet import *
from models.lenet import *
from utils.helpers import *
import methods.entropy.curriculum_labeling as curriculum_labeling
import torch
class Wrapper:
"""
All steps for our Curriculum Learning approach can be called from here.
Args:
args (dictionary): all user defined parameters
"""
    def __init__(self, args):
        """
        Initialize the wrapper with all the parameters predefined by the user
        - check the command_line_example.py file for all variables -
        All possible configurations should be explicitly defined and passed
        through a dictionary-like namespace (args).
        Args:
            args: all user defined parameters
        """
        # The class lists arrive as comma-separated strings; convert to int lists.
        args.set_labeled_classes = [int(item) for item in args.set_labeled_classes.split(',')]
        args.set_unlabeled_classes = [int(item) for item in args.set_unlabeled_classes.split(',')]
        self.args = args
        # Student model, (optional) EMA teacher and optimizer are created
        # later via create_network() / set_model_optimizer().
        self.model = None
        self.ema_model = None
        self.model_optimizer = None
# #@property
# def get_model(self):
# return self.model
# #@property
# def get_ema_model(self):
# return self.ema_model
def create_network(self, ema=False):
"""
Creates a model based on the parameter selection:
- [WRN-28-2] was proposed by Oliver et. al. in "Realistic Evaluation of Deep Semi-Supervised Learning Algorithms" (https://arxiv.org/abs/1804.09170).
- [CNN13] some papers still report top-1 test error using this architecture - Springenberg et. al. in "Striving for simplicity: The all convolutional net" (https://arxiv.org/abs/1412.6806).
- [ResNet50] usually trained for ImageNet experiments - He et. al. in "Deep residual learning for image recognition" (https://arxiv.org/abs/1512.03385).
Args:
ema (bool, optional): if the model is a Teacher model or not. Defaults to False.
"""
print('Build network -> ' + self.args.arch)
print ('Dataset -> ' + self.args.dataset)
print('Num classes ->', self.args.num_classes)
if self.args.use_zca:
print('Use ZCA')
if self.args.arch in ['cnn13','WRN28_2']:
net = eval(self.args.arch)(self.args.num_classes, self.args.dropout)
elif self.args.arch in ['resnet50']:
import torchvision
net = torchvision.models.resnet50(pretrained=False)
else:
assert False, "Error : Network should be cnn13, WRN28_2 or resnet50"
if ema:
for param in net.parameters():
param.detach_()
self.ema_model = net
else:
self.model = net
def set_data(self, data):
"""
Sets/updates data values to corresponding dictionary entry - executed after any dataset operation
Args:
data (array): dataset references
"""
num_classes, train_data, train_data_noT, test_data = data
# set dataset references
self.args.num_classes = num_classes
self.args.train_data = train_data
self.args.train_data_noT = train_data_noT
self.args.test_data = test_data
def set_loaders(self, loaders):
"""
Sets/updates data values to corresponding dictionary entry - executed after any dataset operation
Args:
loaders (array): subsets of dataloaders, samplers and indices
"""
trainloader, \
validloader, \
unlabelledloader, \
train_sampler, \
unlabelled_sampler, \
indices_train, \
indices_unlabelled, \
train_index_order, \
unlabeled_index_order = loaders
# update loaders
self.args.trainloader = trainloader
self.args.validloader = validloader
self.args.unlabelledloader = unlabelledloader
self.args.train_sampler = train_sampler
self.args.unlabelled_sampler = unlabelled_sampler
self.args.indices_train = indices_train
self.args.indices_unlabelled = indices_unlabelled
self.args.train_index_order = train_index_order
self.args.unlabeled_index_order = unlabeled_index_order
    def prepare_datasets(self):
        """
        Prepare datasets for training based on the predefined parameters
        1) Download precomputed zca components and mean for CIFAR10
        2) Load training and test raw sets (download if necessary)
        3) Get subsets for labeled, unlabeled and validation samples (based on seed)
        4) [Optional] Get test set if in debug mode
        """
        # NOTE(review): plain-HTTP download on every call, with no caching
        # or checksum verification — consider hardening this.
        # download precomputed zca components and mean for CIFAR10
        urllib.urlretrieve("http://cs.virginia.edu/~pcascante/zca_components.npy", "zca_components.npy")
        urllib.urlretrieve("http://cs.virginia.edu/~pcascante/zca_mean.npy", "zca_mean.npy")
        # load data
        data = dataloaders.load_data_subsets(self.args.augPolicy, self.args.dataset, self.args.data_dir)
        self.set_data(data)
        # load zca for cifar10
        zca_components = np.load('zca_components.npy')
        zca_mean = np.load('zca_mean.npy')
        self.args.zca_components = zca_components
        self.args.zca_mean = zca_mean
        # get randomized set for training (seeded split into labeled /
        # unlabeled / validation subsets)
        loaders = dataloaders.get_train_dataloaders(self.args.dataset, self.args.train_data, self.args.train_data_noT, self.args.batch_size, self.args.n_cpus, self.args.num_labeled, self.args.num_valid_samples, self.args.seed, self.args.set_labeled_classes, self.args.set_unlabeled_classes, ordered=False)
        self.set_loaders(loaders)
        # get test set if in debug mode and for final evaluation
        testloader = dataloaders.get_test_dataloader(self.args.test_data, self.args.batch_size, self.args.n_cpus)
        self.args.testloader = testloader
def set_model_hyperparameters(self, ema=False):
    """
    Set model hyperparameters based on the user parameter selection.

    1) Optionally wrap the model in DataParallel (multi-GPU)
    2) Move the model to CUDA if available and record it in self.args.use_cuda

    Args:
        ema (bool, optional): if True operate on the Teacher (EMA) model
            stored in self.ema_model instead of self.model. Defaults to False.
    """
    if self.args.doParallel:
        if ema:
            self.ema_model = torch.nn.DataParallel(self.ema_model)
        else:
            self.model = torch.nn.DataParallel(self.model)
    if torch.cuda.is_available():
        if ema:
            self.ema_model = self.ema_model.cuda()
        else:
            self.model = self.model.cuda()
        self.args.use_cuda = True
        # torch.backends.cudnn.benchmark = True # I personally prefer this one, but lets set deterministic True for the sake of reproducibility
        # deterministic cuDNN kernels: reproducible at some speed cost
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        self.args.use_cuda = False
def set_model_optimizer(self):
    """
    Set model optimizer based on user parameter selection.

    1) Set SGD or Adam optimizer (args.optimizer == 'sgd' selects SGD)
    2) Wrap in SWA if args.swa is set (requires: pip install torchcontrib)
    3) Print whether ZCA preprocessing (sometimes useful for CIFAR10) and
       debug mode are active (debug only reports on the test set; all
       decisions are taken based on the validation set)

    Side effects: stores the optimizer in self.model_optimizer.
    """
    if self.args.optimizer == 'sgd':
        prRed ('... SGD ...')
        optimizer = torch.optim.SGD(self.model.parameters(), self.args.lr,
                                    momentum=self.args.momentum,
                                    weight_decay=self.args.weight_decay,
                                    nesterov=self.args.nesterov)
    else:
        prRed ('... Adam optimizer ...')
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
    if self.args.swa:
        prRed ('Using SWA!')
        # imported lazily so torchcontrib is only required when SWA is on
        from torchcontrib.optim import SWA
        optimizer = SWA(optimizer)
    self.model_optimizer = optimizer
    if self.args.use_zca:
        prPurple ('*Use ZCA preprocessing*')
    if self.args.debug:
        prPurple ('*Debug mode on*')
def update_datasets(self, indices_for_rotation, ordered=False):
    """
    In the pseudo-labeling case, update the dataset: add the unlabeled
    samples with their corresponding pseudo annotations to the labeled set.

    Args:
        indices_for_rotation (array): indices of all unlabeled samples that
            will be added to the labeled dataset for training.
        ordered (bool, optional): forwarded to get_train_dataloaders;
            True yields deterministic ordering for evaluation passes.
    """
    if self.args.augPolicy == 2:
        # presumably re-randomizes the augmentation policy — confirm
        data = dataloaders.load_data_subsets(self.args.augPolicy, self.args.dataset, self.args.data_dir)
        self.set_data(data)
    # rebuild the loaders so the pseudo-labeled indices join the labeled set
    loaders = dataloaders.get_train_dataloaders(self.args.dataset, self.args.train_data, self.args.train_data_noT, self.args.batch_size, self.args.n_cpus, self.args.num_labeled, self.args.num_valid_samples, self.args.seed, self.args.set_labeled_classes, self.args.set_unlabeled_classes, ordered=ordered, indices_for_rotation = indices_for_rotation)
    self.set_loaders(loaders)
# def order_for_query_datasets(self, indices_for_rotation):
# """
# In the pseudo-labeling case, order the dataset to evaluate all the unlabeled samples with the model trained in the previous rotation
# """
# trainloader, validloader, unlabelledloader, train_sampler, unlabelled_sampler, indices_train, indices_unlabelled, train_index_order, unlabeled_index_order = get_train_dataloaders(self.args.dataset, train_data, train_data_noT, self.args.batch_size, self.args.n_cpus, self.args.num_labeled, self.args.num_valid_samples, self.args.seed, self.args.set_labeled_classes, self.args.set_unlabeled_classes, ordered=True)
def train_cl(self):
    """
    Executes the Curriculum Learning standard algorithm.

    1) Train only on labeled data
    2) Use trained model to get max scores of unlabeled data
    3) Compute threshold (check percentiles_holder parameter) based on max scores -> long tail distribution
    4) Pseudo-label
    5) Train next iteration
    6) Do it until (almost) all dataset is covered
    """
    cl = curriculum_labeling.Curriculum_Labeling(self.args, self.model, self.model_optimizer)
    # train using only labeled subset
    cl.train_iteration()
    # based on trained model, pseudo-annotate and re-train
    iteration = 1
    while True:
        # evaluate unlabeled set: get max scores, compute percentile, pseudo-annotate
        # (ordered=True so scores line up with sample indices)
        self.update_datasets({}, ordered=True)
        # pass updated values to the method (model unchanged for scoring)
        cl.update_args(self.args, None, None)
        image_indices_hard_label = cl.do_iteration(iteration)
        # reset network: each rotation retrains from scratch
        self.create_network()
        self.set_model_hyperparameters()
        self.set_model_optimizer()
        # update indices -- add pseudo-labeled samples to labeled set
        self.update_datasets(list(image_indices_hard_label.keys()))
        cl.update_args(self.args, self.model, self.model_optimizer, update_model=True)
        # re-train
        cl.train_iteration(iteration=iteration, image_indices_hard_label=image_indices_hard_label)
        # check until almost all or all dataset is pseudo-labeled - stop
        if self.args.percentiles_holder * iteration >= 100:
            prGreen ('All dataset used. Process finished.')
            break
        iteration += 1
def eval_cl(self):
    """
    Execute the evaluation of Curriculum Learning.

    Goes over all iterations and selects the best one based on the
    validation accuracy (delegated to Curriculum_Labeling).
    """
    cl = curriculum_labeling.Curriculum_Labeling(self.args)
    cl.evaluate_all_iterations()
| StarcoderdataPython |
3388624 | from django.views.generic import CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse_lazy
from django_filters.views import FilterView
from .filters import SubjectFilter
from .models import Subject
from .forms import SubjectForm
class SubjectListView(PermissionRequiredMixin, FilterView):
    """Paginated, filterable list of Subjects (permission-gated)."""
    # NOTE(review): Django expects '<app_label>.<codename>' here; confirm a
    # custom auth backend grants the literal 'admin' permission.
    permission_required = 'admin'
    model = Subject
    filterset_class = SubjectFilter          # SubjectFilter drives the search form
    template_name_suffix = '_list'           # renders subject_list.html
    paginate_by = 20
class SubjectCreateView(PermissionRequiredMixin, SuccessMessageMixin, CreateView):
    """Create a Subject via SubjectForm, then redirect to the list view."""
    # NOTE(review): see SubjectListView — 'admin' is not the usual
    # '<app_label>.<codename>' permission format.
    permission_required = 'admin'
    model = Subject
    form_class = SubjectForm
    # %(code)s is interpolated from the submitted form's cleaned data
    success_message = '%(code)s is created'
    success_url = reverse_lazy('subjects:list')
class SubjectUpdateView(PermissionRequiredMixin, SuccessMessageMixin, UpdateView):
    """Edit an existing Subject via SubjectForm, then redirect to the list view."""
    permission_required = 'admin'
    model = Subject
    form_class = SubjectForm
    # %(code)s is interpolated from the submitted form's cleaned data
    success_message = '%(code)s is updated'
    success_url = reverse_lazy('subjects:list')
class SubjectDeleteView(PermissionRequiredMixin, DeleteView):
    """Delete a Subject after confirmation, then redirect to the list view."""
    permission_required = 'admin'
    model = Subject
    success_url = reverse_lazy('subjects:list')
| StarcoderdataPython |
5186950 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' ESP Solace Connector '''
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class SolaceSubscriber(Connector):
    '''
    Subscribe to Solace events

    Parameters
    ----------
    solhostport : string
        Specifies the appliance to connect to, in the form 'host:port'
    soluserid : string
        Specifies the user name required to authenticate the connector's
        session with the appliance.
    solpassword : string
        Specifies the password associated with soluserid
    solvpn : string
        Specifies the appliance message VPN to assign the client to
        which the session connects.
    soltopic : string
        Specifies the Solace destination topic to which to publish
    urlhostport : string
        Specifies the host:port field in the metadata topic subscribed
        to on start-up to field metadata requests.
    numbufferedmsgs : int
        Specifies the maximum number of messages buffered by a standby
        subscriber connector.
    snapshot : boolean, optional
        Specifies whether to send snapshot data
    collapse : string, optional
        Enables conversion of UPDATE_BLOCK events to make subscriber
        output publishable. The default value is disabled.
    hotfailover : boolean, optional
        Enables hot failover mode.
    buspersistence : string, optional
        Sets the Solace message delivery mode to Guaranteed Messaging.
        The default value is Direct Messaging.
    rmretdel : boolean, optional
        Specifies to remove all delete events from event blocks
        received by a subscriber that were introduced by a window
        retention policy.
    protofile : string, optional
        Specifies the .proto file that contains the Google Protocol
        Buffers message definition used to convert event blocks to protobuf
        messages. When you specify this parameter, you must also specify
        the protomsg parameter.
    protomsg : string, optional
        Specifies the name of a Google Protocol Buffers message in the
        .proto file that you specified with the protofile parameter.
        Event blocks are converted into this message.
    configfilesection : string, optional
        Specifies the name of the section in the connector config file
        to parse for configuration parameters. Specify the value
        as [configfilesection].
    json : boolean, optional
        Enables transport of event blocks encoded as JSON messages
    dateformat : string, optional
        Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
        CSV events. The default behavior is these fields are interpreted as
        an integer number of seconds (ESP_DATETIME) or microseconds
        (ESP_TIMESTAMP) since epoch.
    solpasswordencrypted : boolean, optional
        Specifies that solpassword is encrypted

    Returns
    -------
    :class:`SolaceSubscriber`

    '''
    connector_key = dict(cls='sol', type='subscribe')

    property_defs = dict(
        solhostport=prop('solhostport', dtype='string', required=True),
        soluserid=prop('soluserid', dtype='string', required=True),
        solpassword=prop('solpassword', dtype='string', required=True),
        solvpn=prop('solvpn', dtype='string', required=True),
        soltopic=prop('soltopic', dtype='string', required=True),
        urlhostport=prop('urlhostport', dtype='string', required=True),
        numbufferedmsgs=prop('numbufferedmsgs', dtype='int', required=True),
        snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
        collapse=prop('collapse', dtype='string'),
        hotfailover=prop('hotfailover', dtype='boolean'),
        buspersistence=prop('buspersistence', dtype='string'),
        rmretdel=prop('rmretdel', dtype='boolean'),
        protofile=prop('protofile', dtype='string'),
        protomsg=prop('protomsg', dtype='string'),
        configfilesection=prop('configfilesection', dtype='string'),
        json=prop('json', dtype='boolean'),
        dateformat=prop('dateformat', dtype='string'),
        solpasswordencrypted=prop('solpasswordencrypted', dtype='boolean')
    )

    def __init__(self, solhostport=None, soluserid=None, solpassword=None,
                 solvpn=None, soltopic=None, urlhostport=None,
                 name=None, is_active=None,
                 numbufferedmsgs=None, snapshot=None, collapse=None,
                 hotfailover=None, buspersistence=None, rmretdel=None,
                 protofile=None, protomsg=None, configfilesection=None,
                 json=None, dateformat=None, solpasswordencrypted=None):
        # capture every constructor argument as a connector property,
        # minus the bookkeeping arguments handled explicitly below
        params = dict(**locals())
        params.pop('is_active')
        params.pop('self')
        name = params.pop('name')
        Connector.__init__(self, 'sol', name=name, type='subscribe',
                           is_active=is_active, properties=params)

    @classmethod
    def from_parameters(cls, conncls, type=None, name=None, is_active=None,
                        properties=None):
        req, properties = map_properties(cls, properties,
                                         required=['solhostport',
                                                   'soluserid',
                                                   'solpassword',
                                                   'solvpn', 'soltopic',
                                                   'urlhostport',
                                                   'numbufferedmsgs'],
                                         delete='type')
        # Bug fix: numbufferedmsgs (req[6]) was extracted as required but
        # never forwarded, silently dropping it from the constructed
        # connector (map_properties removes required keys from `properties`).
        return cls(req[0], req[1], req[2], req[3], req[4], req[5],
                   name=name, is_active=is_active,
                   numbufferedmsgs=req[6], **properties)
class SolacePublisher(Connector):
    '''
    Publish events to Solace

    Parameters
    ----------
    solhostport : string
        Specifies the appliance to connect to, in the form 'host:port'
    soluserid : string
        Specifies the user name required to authenticate the connector's
        session with the appliance.
    solpassword : string
        Specifies the password associated with soluserid
    solvpn : string
        Specifies the appliance message VPN to assign the client to which
        the session connects.
    soltopic : string
        Specifies the Solace topic to which to subscribe
    urlhostport : string
        Specifies the host:port field in the metadata topic subscribed
        to on start-up to field metadata requests.
    buspersistence : boolean, optional
        Creates the Guaranteed message flow to bind to the topic endpoint
        provisioned on the appliance that the published Guaranteed messages
        are delivered and spooled to
    buspersistencequeue : string, optional
        Specifies the name of the queue to which the Guaranteed message
        flow binds.
    protofile : string, optional
        Specifies the .proto file that contains the Google Protocol Buffers
        message definition used to convert event blocks to protobuf
        messages. When you specify this parameter, you must also specify
        the protomsg parameter.
    protomsg : string, optional
        Specifies the name of a Google Protocol Buffers message in the
        .proto file that you specified with the protofile parameter.
        Event blocks are converted into this message.
    configfilesection : string, optional
        Specifies the name of the section in the connector config file
        to parse for configuration parameters. Specify the value
        as [configfilesection].
    json : boolean, optional
        Enables transport of event blocks encoded as JSON messages
    publishwithupsert : boolean, optional
        Specifies to build with opcode = Upsert instead of opcode = Insert
    dateformat : string, optional
        Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields
        in CSV events. The default behavior is these fields are
        interpreted as an integer number of seconds (ESP_DATETIME) or
        microseconds (ESP_TIMESTAMP) since epoch.
    solpasswordencrypted : boolean, optional
        Specifies that solpassword is encrypted
    getmsgfromdestattr : boolean, optional
        Specifies to extract the payload from the destination attribute
        instead of the message body.
    transactional : string, optional
        When getmsgfromdestattr is enabled, sets the event block type
        to transactional. The default value is normal.
    blocksize : int, optional
        When getmsgfromdestattr is enabled, specifies the number of
        events to include in a published event block. The default
        value is 1.
    maxevents : int, optional
        Specifies the maximum number of events to publish.

    Returns
    -------
    :class:`SolacePublisher`

    '''
    connector_key = dict(cls='sol', type='publish')

    property_defs = dict(
        solhostport=prop('solhostport', dtype='string', required=True),
        soluserid=prop('soluserid', dtype='string', required=True),
        solpassword=prop('solpassword', dtype='string', required=True),
        solvpn=prop('solvpn', dtype='string', required=True),
        soltopic=prop('soltopic', dtype='string', required=True),
        urlhostport=prop('urlhostport', dtype='string', required=True),
        buspersistence=prop('buspersistence', dtype='boolean'),
        buspersistencequeue=prop('buspersistencequeue', dtype='string'),
        protofile=prop('protofile', dtype='string'),
        protomsg=prop('protomsg', dtype='string'),
        configfilesection=prop('configfilesection', dtype='string'),
        json=prop('json', dtype='boolean'),
        publishwithupsert=prop('publishwithupsert', dtype='boolean'),
        dateformat=prop('dateformat', dtype='string'),
        solpasswordencrypted=prop('solpasswordencrypted', dtype='boolean'),
        getmsgfromdestattr=prop('getmsgfromdestattr', dtype='boolean'),
        transactional=prop('transactional', dtype='string'),
        blocksize=prop('blocksize', dtype='int'),
        maxevents=prop('maxevents', dtype='int')
    )

    # Bug fix: the solpassword default had been mangled to the literal
    # `<PASSWORD>` (a redaction artifact), which is a syntax error.
    # Restored to None, matching SolaceSubscriber and every other keyword.
    def __init__(self, solhostport=None, soluserid=None,
                 solpassword=None, solvpn=None, soltopic=None,
                 urlhostport=None, name=None, is_active=None,
                 buspersistence=None, buspersistencequeue=None,
                 protofile=None, protomsg=None, configfilesection=None,
                 json=None, publishwithupsert=None, dateformat=None,
                 solpasswordencrypted=None, getmsgfromdestattr=None,
                 transactional=None, blocksize=None, maxevents=None):
        # capture every constructor argument as a connector property,
        # minus the bookkeeping arguments handled explicitly below
        params = dict(**locals())
        params.pop('is_active')
        params.pop('self')
        name = params.pop('name')
        Connector.__init__(self, 'sol', name=name, type='publish',
                           is_active=is_active, properties=params)

    @classmethod
    def from_parameters(cls, conncls, type=None, name=None, is_active=None,
                        properties=None):
        req, properties = map_properties(cls, properties,
                                         required=['solhostport',
                                                   'soluserid',
                                                   'solpassword',
                                                   'solvpn',
                                                   'soltopic',
                                                   'urlhostport'],
                                         delete='type')
        return cls(req[0], req[1], req[2], req[3], req[4], req[5],
                   name=name, is_active=is_active, **properties)
| StarcoderdataPython |
12845066 | <reponame>winsonluk/Alizon<filename>aliexp.py<gh_stars>10-100
from aliexpress_api_client import AliExpress
import PIL
from PIL import Image, ImageChops
import urllib2 as urllib
import io
from itertools import izip
from libImgComp import comp_imgs
def comp_images(i1, i2):
    """Return the comp_imgs similarity score for two PIL images.

    Both images are resized to exactly 500x500 and converted to RGB so
    that images of differing sizes/modes are comparable.
    (If preserving aspect ratio was intended, Image.thumbnail — which
    operates in place — was probably the right call; confirm.)
    """
    maxsize = (500, 500)
    # Bug fix: Image.resize returns a *new* image; the original code
    # discarded the result, so the comparison ran on the full-size images.
    i1 = i1.resize(maxsize)
    i2 = i2.resize(maxsize)
    i1 = i1.convert('RGB')
    i2 = i2.convert('RGB')
    return comp_imgs(i1, i2)
import math, operator
def process_str(s):
    """Clean a product title: strip markup tags, parentheses, punctuation
    and the word 'Generic'; turn separators into spaces; collapse one
    level of double spaces."""
    # tokens removed outright (order preserved from the original chain)
    for token in ('(', ')', '<b>', '</b>', '<font>', '</font>',
                  'Generic', ',', '.'):
        s = s.replace(token, '')
    # separators become word boundaries
    for token in ('-', '/', '\\'):
        s = s.replace(token, ' ')
    # single pass: runs of 3+ spaces are only partially collapsed
    return s.replace('  ', ' ')
def rmsdiff(im1, im2):
    """Calculate the root-mean-square difference between two PIL images."""
    hist = ImageChops.difference(im1, im2).histogram()
    # histogram is concatenated per band; idx % 256 recovers the pixel value
    sum_of_squares = sum(count * ((idx % 256) ** 2)
                         for idx, count in enumerate(hist))
    num_pixels = float(im1.size[0] * im1.size[1])
    return math.sqrt(sum_of_squares / num_pixels)
def strdiff(s1, s2):
    """Count how many space-separated words of s1 occur as substrings
    of s2, case-insensitively."""
    haystack = s2.lower()
    return sum(1 for word in s1.lower().split(' ') if word in haystack)
def get_perc(mi, ma, va):
    """Yield each value of va linearly rescaled from [mi, ma] to [0, 100].

    When mi == ma the scale is degenerate and every value maps to 100.0.
    """
    if mi == ma:
        for _ in va:
            yield 100.0
    else:
        span = ma - mi
        for value in va:
            yield ((value - mi) / span) * 100
def get_perc_w(mi, ma, va):
    """Yield each value of va as a percentage of the maximum ma.

    mi is only consulted for the degenerate mi == ma case, where every
    value maps to 100.0.
    """
    if mi == ma:
        for _ in va:
            yield 100.0
    else:
        for value in va:
            yield (value / ma) * 100
def get_max_ind(a):
    """Yield the indices of all entries within 15 of the maximum of a."""
    cutoff = max(a) - 15
    for idx, value in enumerate(a):
        if value >= cutoff:
            yield idx
def get_min_ind(a):
    """Return the first index whose value is within 15 of the minimum of a.

    Always returns on the first match (the entry holding the minimum
    itself qualifies, so a result is guaranteed for non-empty input).
    """
    cutoff = min(a) + 15
    for idx, value in enumerate(a):
        if value <= cutoff:
            return idx
def get_m_i(a):
    """Return the index of the first occurrence of the maximum of a."""
    best = max(a)
    for idx, value in enumerate(a):
        if value == best:
            return idx
def get_avg(st, img):
    """Combine a title-similarity score and an image-similarity score,
    weighting the title match 1.7 vs 0.3 before halving."""
    weighted_sum = 1.7 * st + 0.3 * img
    return weighted_sum / 2.0
def price_float(s):
    """Parse the numeric amount from a price string such as 'US $12.34'.

    The first four characters are assumed to be the currency prefix.
    """
    amount = s[4:]
    return float(amount)
def eval_p(prices, or_p):
    # Generator: yield the candidate prices that look plausible relative to
    # the original price or_p.  A price passes when the saving (or_p - price)
    # is less than 5x the price AND the price is at least 45% of or_p —
    # i.e. implausibly cheap listings are filtered out.
    # NOTE(review): Python 2 print statements; will not run under Python 3.
    for price in prices:
        print str(price) + ' <- PRICE'
        print str(or_p) + ' <- OR_PRICE'
        if (5*price > (or_p - price) and price >= 0.45*or_p):
            print 'GOT ' + str(price)
            yield price
def get_pairs(li):
    """Yield every adjacent (overlapping) pair of entries joined by a space."""
    for left, right in zip(li, li[1:]):
        yield left + ' ' + right
def get_pairs_strict(li):
    """Yield disjoint pairs (li[0] li[1]), (li[2] li[3]), ... joined by a
    space; a trailing odd element is dropped.

    Bug fix: the original used ``range(len(li)/2)``, which is integer
    division only under Python 2 — under Python 3 ``/`` yields a float and
    range() raises TypeError.  Floor division behaves identically on both.
    """
    for i in range(len(li) // 2):
        yield li[2 * i] + ' ' + li[2 * i + 1]
def get_all_maxs(li):
    """Yield the indices of every entry equal to the maximum of li."""
    top = max(li)
    for idx, value in enumerate(li):
        if value == top:
            yield idx
def get_all_mins(li):
    """Yield the indices of every entry equal to the smallest *positive*
    value in li (raises ValueError when li holds no positive value)."""
    low = min([value for value in li if value > 0])
    for idx, value in enumerate(li):
        if value == low:
            yield idx
def get_all_maxs_mild(li):
    """Yield the indices of all entries within 10 of the maximum of li."""
    cutoff = max(li) - 10
    for idx, value in enumerate(li):
        if value >= cutoff:
            yield idx
def get_all_mins_mild(li):
    """Yield the indices of all entries within 10 of the smallest *positive*
    value of li.  Note non-positive entries below that cutoff also qualify."""
    cutoff = min([value for value in li if value > 0]) + 10
    for idx, value in enumerate(li):
        if value <= cutoff:
            yield idx
def process_pr(li, t):
    """Yield each value of li unchanged, except values above the threshold
    t, which are replaced by the sentinel -1."""
    for value in li:
        yield value if value <= t else -1
def calc_result(s_item, or_price, or_img):
    """
    Find the best-matching, sensibly-priced AliExpress listing for a product.

    The query is repeatedly collapsed into overlapping word pairs; each
    candidate phrase is sent to the AliExpress product-list API and the
    phrase(s) returning the most products survive to the next lap.  The
    surviving phrases' products are then scored by search() on title and
    image similarity, and the cheapest high-scoring hit is returned.

    Args:
        s_item: raw product title to search for.
        or_price: the original product's price, used to filter candidates.
        or_img: URL of the original product image.

    Returns:
        (product_url, price, price_difference) for the chosen listing.

    NOTE(review): Python 2 code (print statements).  Also, the fallback
    branches read `final_search_query` before any lap has assigned it, so
    an immediate "shut down" on the first lap would raise NameError —
    confirm intended.
    """
    # print 'starting Daniils part'
    COEFF = 0.7  # candidates must list an original price >= 70% of or_price
    s_item = process_str(s_item)
    item_copy = s_item
    aliexpress = AliExpress('YOUR_CODE_HERE')  # NOTE(review): placeholder API key
    # Dead code kept from an earlier query-shortening strategy:
    '''while (not_working):
        try:
            print ' '.join(s_item.split(' ')[:-count])
            products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], ' '.join(s_item.split(' ')[0:-count]))['products']
            cur_len = len(products)
            print cur_len
            if ((cur_len < old_len or cur_len >= 15) and count >= 3):
                if (cur_len < old_len):
                    products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], ' '.join(s_item.split(' ')[0:-(count - 1)]))['products']
                print 'disabling'
                not_working = False
            else:
                raise ValueError(' fff ')
        except:
            count += 1;
            old_len = cur_len
        if (count + 1 == len(item_copy.split(' '))):
            break
    #print ' '.join(s_item.split(' ')[:count])'''
    done = False
    old_len = 0
    cur_len = 0
    products = {}
    le_s = len(item_copy.split(' '))
    search_query = s_item.split(' ')
    previous_max = 20
    #a = raw_input()
    while (not done):
        count = 0
        print "Going into the next lap"
        print search_query
        lens_titles = []
        lens_values = []
        # collapse the current word list into overlapping pairs
        if (len(search_query) != 1):
            search_query = list(get_pairs(search_query))
        max_count = len(search_query)
        # query the API once per candidate phrase and record hit counts
        while (count < max_count):
            products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], search_query[count],
                                                   originalPriceFrom=str(or_price*COEFF), sort="orignalPriceUp")['products']
            lens_titles.append(search_query[count])
            lens_values.append(len(products))
            count += 1
        maxs_i = list(get_all_maxs(lens_values))
        print '--------------------------------'
        #print maxs_i
        if (len(maxs_i) == 0 or lens_values[maxs_i[0]] == 0):
            # nothing matched: fall back to the previous lap's phrase halves
            #print maxs_i
            #print lens_values[maxs_i[0]]
            search_query = list(get_pairs_strict(final_search_query))
            print 'Shutting down'
            done = True
        elif (len(maxs_i) == 1 and lens_values[maxs_i[0]] >= 2):
            # a single clear winner with enough hits: keep just that phrase
            search_query = [lens_titles[maxs_i[0]]]
            #print maxs_i
            #print lens_values
            print 'Shutting down - one good result'
            done = True
        elif (len(maxs_i) == 1 and lens_values[maxs_i[0]] < 2):
            # a single winner but too few hits: fall back to previous phrases
            search_query = list(get_pairs_strict(final_search_query))
            #print maxs_i
            #print lens_values
            print 'Shutting down - one bad result'
            done = True
        else:
            search_query = []
            #print maxs_i
            print 'Keeping on'
        # several tied winners: merge their first halves and iterate again
        if (len(maxs_i) >= 2 and lens_values[maxs_i[0]] != 0):
            final_search_query = []
            for item in maxs_i:
                k = len(lens_titles[item].split(' '))
                final_search_query.append(' '.join(lens_titles[item].split(' ')[:k/2]))
            final_search_query.append(' '.join(lens_titles[-1].split(' ')[k/2+1:]))
            search_query = list(get_pairs_strict(final_search_query))
    #printing the result
    '''
    for item in search_query:
        products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], item)['products']
        print '----------------------------------------------------------------------'
        print item
        print len(products)
        for i in products:
            print i['productTitle']
    '''
    links = []
    prices = []
    perc = []
    diffs = []
    print search_query
    print 'STARTING CHECK FOR EACH POS ...'
    # score every product returned for every surviving phrase
    for s in search_query:
        print 'INPUT:'
        print s
        products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], s,
                                               originalPriceFrom=str(or_price*COEFF), sort="orignalPriceUp")['products']
        print len(products)
        #a = raw_input()
        l, p, perct, diff = search(products, item_copy, or_price, or_img)
        links.extend(l)
        prices.extend(p)
        perc.extend(perct)
        diffs.extend(diff)
    max_perc = list(get_all_maxs_mild(perc))      # indices with near-best similarity
    min_prices = list(get_all_mins_mild(prices))  # indices with near-lowest price
    print 'ORIG PR : ' + str(or_price)
    result = list(set(max_perc).intersection(min_prices))
    print 'MAX PERC:'
    print max_perc
    print 'MIN PRC:'
    print min_prices
    prices = list(process_pr(prices, or_price))   # mark over-priced hits with -1
    print prices
    print 'RES:'
    print result
    result_perc = []
    for item in result:
        print links[item]
        print prices[item]
        print perc[item]
        result_perc.append(perc[item])
    if (len(result) != 0):
        final_ind = get_m_i(result_perc)
        fin = result[final_ind]
    #a = raw_input()
    if (len(result) != 0):
        return links[fin], prices[fin], diffs[fin]
    else:
        # nothing is both cheap and similar: fall back to the cheapest hit
        return links[min_prices[0]], prices[min_prices[0]], diffs[min_prices[0]]
def search(products, s_item, or_price, or_img):
    """
    Score AliExpress products against the original item.

    Each product title is compared word-wise against *s_item* (strdiff)
    and each product image is compared against the image at *or_img*
    (comp_images); the two scores are normalized to 0-100 and combined
    with the title weighted heavier (get_avg).  Products within 15 points
    of the best combined score are returned.

    Args:
        products: list of product dicts from the AliExpress API
            (keys used: productTitle, salePrice, imageUrl, productUrl).
        s_item: cleaned title of the original item.
        or_price: original price, used to report the saving.
        or_img: URL of the original product image.

    Returns:
        Parallel lists (urls, prices, similarity_percentages, price_diffs)
        for the top-scoring products.

    NOTE(review): Python 2 code — relies on map() returning a list
    (string_diffs is consumed twice by max() and min() below) and on
    the py2 urllib2 API bound to `urllib` at module scope.
    """
    print 'Starting search...'
    #print len(products)
    #try:
    #print or_img
    # fetch the reference image once
    fd = urllib.urlopen(or_img)
    orig_img_link = io.BytesIO(fd.read())
    orig_img = Image.open(orig_img_link)
    #except:
    #orig_img_link = cStringIO.StringIO(urllib.urlopen('http://cs617219.vk.me/v617219415/c9c4/KUCX_V8m7CQ.jpg').read())
    #orig_img = Image.open(orig_img_link)
    titles = []
    image_diffs = []
    img_data = []
    #i = 0;
    for item in products:
        #i += 1;
        #img.show()
        #print process_str(item['productTitle'])
        titles.append(process_str(item['productTitle']))
        try:
            #print item['productTitle'] + item['salePrice'] + '\n' + item['imageUrl'] + '\n'
            fd = urllib.urlopen(item['imageUrl'])
            img_link = io.BytesIO(fd.read())
            img = Image.open(img_link)
            #image_diffs.append(rmsdiff(img, orig_img))
            #print comp_images(orig_img, img)
            img_data.append(comp_images(orig_img, img))
            #a = raw_input();
            #print i
            #print '___________________________________________________________________________'
        except:
            # download/decode failed: assign a neutral mid-range score
            img_data.append(50)
    string_diffs = map(strdiff, titles, [s_item]*len(titles))
    max_strdiff = float(max(string_diffs))
    # max_imgdiff = float(max(image_diffs))
    min_strdiff = float(min(string_diffs))
    # min_imgdiff = float(min(image_diffs))
    #print 'CHECK IMG DATA'
    #print img_data
    #print 'MIN'
    #print min(img_data)
    #print 'MAX'
    #print max(img_data)
    # normalize both score lists to 0-100 before combining
    str_data = list(get_perc_w(min_strdiff, max_strdiff, string_diffs))
    img_data = list(get_perc(min(img_data), max(img_data), img_data))
    comp_data = map(get_avg, str_data, img_data)
    #print "word matches: "
    #print str_data
    #print "images:"
    #print img_data
    #print "comp:"
    #print comp_data
    ids = list(get_max_ind(comp_data))
    #print 'IDs'
    #print ids
    urls = []
    prices = []
    percs = []
    diffs = []
    for item in ids:
        urls.append(products[item]['productUrl'])
        prices.append(price_float(products[item]['salePrice']))
        percs.append(comp_data[item])
        diffs.append(or_price - price_float(products[item]['salePrice']))
    print urls
    print prices
    print percs
    print diffs
    #'''or (or_price - new_price > 5*new_price) or comp_data[ids[get_min_ind(prices)]] < 50'''
    return urls, prices, percs, diffs
| StarcoderdataPython |
3295833 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-12 04:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
import django.utils.timezone
import model_utils.fields
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    # Initial migration for the app: creates the Campaign table.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Campaign',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # created/modified are auto-managed timestamps from django-model-utils
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('name', models.CharField(max_length=30)),
                ('inbound_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128)),
                ('redirect_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128)),
                ('welcome_message', models.FileField(upload_to='audio/')),
                # NOTE(review): is_active declared as FileField(default=False) —
                # almost certainly meant to be BooleanField; fix in the model and
                # a follow-up migration (do not edit this applied migration).
                ('is_active', models.FileField(default=False, upload_to='')),
            ],
            options={
                'abstract': False,
            },
            managers=[
                ('active', django.db.models.manager.Manager()),
            ],
        ),
    ]
| StarcoderdataPython |
3479875 | """
Placeholder file for satellite navigation algorithms
"""
import numpy as np
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.