id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
4838239 | <filename>aes/aes.py
# Reference: the AES specification (FIPS-197).
DOCS_PDF = 'http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf'
# Forward S-box used by SubBytes (FIPS-197 Fig. 7), indexed by byte value.
sbox = ( # Substitution Box
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,
)
# Inverse S-box used by InvSubBytes: rsbox[sbox[x]] == x for every byte x.
rsbox = ( # Reverse S-Box
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,
)
# Key-schedule round constants rcon[i] = x^i in GF(2^8); AES-128 uses the
# first 10 entries only.
rcon = ( # Round Constant
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36,
0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6,
0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef
)
def _MUL02(n):
return (((n << 1) & (0xFF)) ^ (0x1B if (n & 0x80) else 0x00))
def _MUL03(n):
return _MUL02(n) ^ n
class aes: # Advanced Encryption Standard - 128
    """AES-128 block cipher (FIPS-197), ECB mode, one 16-byte block at a time.

    State and round keys are flat lists of 16 integer byte values in the
    FIPS-197 column-major order.  Keys and blocks may be supplied either as
    128-bit integers or as lists of 16 byte values.
    """

    def __init__(self, master_key, keysize=128, operation_mode='ECB'):
        # __toarray always returns a fresh list, so the caller's key object
        # is never mutated by the key schedule (see BUGFIX in __toarray).
        self.mk = self.__toarray(master_key)
        self.__key_expansion(self.mk)
        # Only AES-128 in ECB is implemented; others will be supported.
        if operation_mode != 'ECB' or keysize != 128:
            raise ValueError("Oops! It's not supported yet...")
        self.mode = operation_mode
        self.keysize = keysize

    def __toarray(self, ints):
        """Return a fresh 16-byte list from a 16-byte list or a 128-bit int."""
        if type(ints) == list and len(ints) == 16:
            # BUGFIX: return a copy. The original returned the caller's list,
            # so key expansion appended 160 bytes onto the user's key list and
            # encrypt/decrypt mutated (and aliased) the caller's block.
            return list(ints)
        arr = [((ints >> (8 * (15 - i))) & 0xFF) for i in range(16)]
        return arr

    def __tobyte(self, arr):
        """Pack a 16-byte list back into one big-endian integer."""
        ints = 0
        for i in range(len(arr)):
            ints += arr[15 - i] * (256**i)
        return ints

    def __key_expansion(self, mk):
        """Expand the master key into the 176-byte round-key list self.rk."""
        # BUGFIX: copy so self.rk does not alias (and grow) the master key.
        self.rk = list(mk)
        for i in range(0, 10):
            # Word 0 of round i+1: RotWord+SubWord of the previous last word,
            # XOR the round constant (FIPS-197 sec. 5.2).
            self.rk.append(self.rk[(i << 4) + 0] ^ sbox[self.rk[(i << 4) + 13]] ^ rcon[i])
            self.rk.append(self.rk[(i << 4) + 1] ^ sbox[self.rk[(i << 4) + 14]])
            self.rk.append(self.rk[(i << 4) + 2] ^ sbox[self.rk[(i << 4) + 15]])
            self.rk.append(self.rk[(i << 4) + 3] ^ sbox[self.rk[(i << 4) + 12]])
            # Words 1-3: previous-round word XOR the word just produced.
            self.rk.append(self.rk[(i << 4) + 4] ^ self.rk[((i+1) << 4) + 0])
            self.rk.append(self.rk[(i << 4) + 5] ^ self.rk[((i+1) << 4) + 1])
            self.rk.append(self.rk[(i << 4) + 6] ^ self.rk[((i+1) << 4) + 2])
            self.rk.append(self.rk[(i << 4) + 7] ^ self.rk[((i+1) << 4) + 3])
            self.rk.append(self.rk[(i << 4) + 8] ^ self.rk[((i+1) << 4) + 4])
            self.rk.append(self.rk[(i << 4) + 9] ^ self.rk[((i+1) << 4) + 5])
            self.rk.append(self.rk[(i << 4) + 10] ^ self.rk[((i+1) << 4) + 6])
            self.rk.append(self.rk[(i << 4) + 11] ^ self.rk[((i+1) << 4) + 7])
            self.rk.append(self.rk[(i << 4) + 12] ^ self.rk[((i+1) << 4) + 8])
            self.rk.append(self.rk[(i << 4) + 13] ^ self.rk[((i+1) << 4) + 9])
            self.rk.append(self.rk[(i << 4) + 14] ^ self.rk[((i+1) << 4) + 10])
            self.rk.append(self.rk[(i << 4) + 15] ^ self.rk[((i+1) << 4) + 11])

    def encrypt(self, pt, byte=False):
        """Encrypt one block; returns a byte list, or an int when byte=True."""
        ct = self.__toarray(pt)
        self.__addroundkey(ct, self.rk[0:16])
        for i in range(1, 10):
            self.__subbytes(ct)
            self.__shiftrows(ct)
            self.__mixcolumns(ct)
            self.__addroundkey(ct, self.rk[i*16:(i + 1)*16])
        # Final round (no MixColumns); i == 9 after the loop above.
        self.__subbytes(ct)
        self.__shiftrows(ct)
        self.__addroundkey(ct, self.rk[(i + 1)*16:(i + 2)*16])
        if byte:
            return self.__tobyte(ct)
        return ct

    def decrypt(self, ct, byte=False):
        """Decrypt one block; returns a byte list, or an int when byte=True."""
        pt = self.__toarray(ct)
        # Inverse cipher: apply round keys in reverse order.
        self.__addroundkey(pt, self.rk[10*16:(10 + 1)*16])
        self.__inv_shiftrows(pt)
        self.__inv_subbytes(pt)
        for i in range(9, 0, -1):
            self.__addroundkey(pt, self.rk[i*16:(i + 1)*16])
            self.__inv_mixcolumns(pt)
            self.__inv_shiftrows(pt)
            self.__inv_subbytes(pt)
        self.__addroundkey(pt, self.rk[0:16])
        if byte:
            return self.__tobyte(pt)
        return pt

    def __subbytes(self, s):
        """SubBytes: substitute every state byte through the forward S-box."""
        for i in range(16):
            s[i] = sbox[s[i]]

    def __shiftrows(self, s):
        """ShiftRows: rotate row r of the column-major state left by r."""
        s[ 1], s[ 5], s[ 9], s[13] = s[ 5], s[ 9], s[13], s[ 1]
        s[ 2], s[ 6], s[10], s[14] = s[10], s[14], s[ 2], s[ 6]
        s[ 3], s[ 7], s[11], s[15] = s[15], s[ 3], s[ 7], s[11]

    def __mixcolumns(self, s):
        """MixColumns: multiply each column by the fixed MDS matrix in GF(2^8)."""
        for i in range(4):
            s0 = _MUL02(s[(i << 2) + 0]) ^ _MUL03(s[(i << 2) + 1]) ^ s[(i << 2) + 2] ^ s[(i << 2) + 3]
            s1 = _MUL02(s[(i << 2) + 1]) ^ _MUL03(s[(i << 2) + 2]) ^ s[(i << 2) + 3] ^ s[(i << 2) + 0]
            s2 = _MUL02(s[(i << 2) + 2]) ^ _MUL03(s[(i << 2) + 3]) ^ s[(i << 2) + 0] ^ s[(i << 2) + 1]
            s3 = _MUL02(s[(i << 2) + 3]) ^ _MUL03(s[(i << 2) + 0]) ^ s[(i << 2) + 1] ^ s[(i << 2) + 2]
            s[(i << 2) + 0], s[(i << 2) + 1], s[(i << 2) + 2], s[(i << 2) + 3] = s0, s1, s2, s3

    def __inv_subbytes(self, s):
        """InvSubBytes: substitute through the inverse S-box."""
        for i in range(16):
            s[i] = rsbox[s[i]]

    def __inv_shiftrows(self, s):
        """InvShiftRows: rotate row r of the state right by r."""
        s[ 1], s[ 5], s[ 9], s[13] = s[13], s[ 1], s[ 5], s[ 9]
        s[ 2], s[ 6], s[10], s[14] = s[10], s[14], s[ 2], s[ 6]
        s[ 3], s[ 7], s[11], s[15] = s[ 7], s[11], s[15], s[ 3]

    def __inv_mixcolumns(self, s):
        """InvMixColumns via the standard pre-processing trick:

        XOR-ing {04}*(s0^s2) / {04}*(s1^s3) into the column first reduces the
        inverse MDS matrix to the forward MixColumns matrix.
        """
        for i in range(4):
            tmp1 = _MUL02(_MUL02(s[(i << 2) + 0] ^ s[(i << 2) + 2]))
            tmp2 = _MUL02(_MUL02(s[(i << 2) + 1] ^ s[(i << 2) + 3]))
            s[(i << 2) + 0] ^= tmp1
            s[(i << 2) + 1] ^= tmp2
            s[(i << 2) + 2] ^= tmp1
            s[(i << 2) + 3] ^= tmp2
        self.__mixcolumns(s)

    def __addroundkey(self, s, k):
        """AddRoundKey: XOR the 16-byte round key into the state."""
        for i in range(16):
            s[i] = s[i] ^ k[i]
| StarcoderdataPython |
3311612 | import contextlib
import numpy as np
# https://stackoverflow.com/questions/2891790/how-to-pretty-printing-a-numpy-array-without-scientific-notation-and-with-given#2891805
@contextlib.contextmanager
def np_printoptions(*args, **kwargs):
    """Temporarily override numpy print options, restoring them on exit.

    Accepts the same arguments as ``np.set_printoptions``.
    """
    saved = dict(np.get_printoptions())
    np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        # Always restore the caller's original options, even on error.
        np.set_printoptions(**saved)
| StarcoderdataPython |
4807299 | #!/usr/bin/python
class A:
    """Base class demonstrating dynamic dispatch: f() delegates to g()."""

    def f(self):
        # g is resolved on the instance, so subclass overrides take effect.
        return self.g()

    def g(self):
        return 'A'
class B(A):
    """Subclass overriding g(); the inherited f() therefore returns 'B'."""

    def g(self):
        return 'B'
a = A()
b = B()
# BUGFIX: the method calls were inside string literals, so the program
# printed the literal text 'a.f(), b.f()' instead of demonstrating dynamic
# dispatch by printing the results ('A A' and then 'B B').
print(a.f(), b.f())
print(a.g(), b.g())
| StarcoderdataPython |
3372846 | <filename>edX/MIT6001x/wk1/wk_1_for1.py
# Prints 2, 4, 6, 8, 10 (five loop iterations), then a farewell message.
# BUGFIX: the dataset delimiter had been fused onto the final print line,
# making the snippet a syntax error; restored to plain Python.
x = 2
for i in range(2, 12, 2):
    print(x)
    x += 2
print('Goodbye!')
120294 | import datetime
import json
import sys
from caresjpsutil import PythonLogger
from pyproj import Proj, transform
import admsTest
from admsAplWriterShip import admsAplWriter
from admsInputDataRetrieverChimney import admsInputDataRetriever
from config import Constants
from adms_apl_builder import *
pythonLogger = PythonLogger('admsTest.py')
def get_input(*args):
    """Assemble the ADMS input data dictionary for a ship-emission run.

    ``args`` is expected to mirror ``sys.argv``: args[1] building data (JSON),
    args[2] region coordinates, args[3] ships (JSON), args[4] working dir,
    args[5] coordinate system string (e.g. 'epsg:2326...'), args[6]
    precipitation in mm/hr, args[7] chimney IRI.
    Returns ``(result, working_dir)`` where ``result`` is the dict consumed by
    the APL writer.
    """
    # WGS84 source projection; target projection taken from args[5].
    sourceCRS = Proj(init='epsg:4326')
    targetCRS = Proj(init=args[5][:4].lower() + args[5][4:])
    # Incoming JSON uses single quotes; normalise before parsing.
    bdn_data = json.loads(args[1].replace("'", '"'))
    coor_data = str(args[2]).replace("'", '"')
    ships = json.loads(args[3])
    working_dir = str(args[4])
    coord_sys = args[5][5:]
    precipitation = float(str(args[6]))
    chimney_iri = str(args[7])
    BDN = admsTest.get_bdn(bdn_data)
    coords = admsTest.get_coordinates(coor_data)
    pollutants = [Constants.POL_CO2, Constants.POL_CO, Constants.POL_NO2, Constants.POL_HC, Constants.POL_NOX,
                  Constants.POL_PART_001, Constants.POL_PART_SO2, Constants.POL_PART_O3]
    # Project each ship's lon/lat into the target CRS; one chimney IRI per ship.
    ship_coordinates_list = []
    chimney_iri_list = []
    for ship in ships:
        x_coordinate_value = float(ship['lon'])
        y_coordinate_value = float(ship['lat'])
        ship_coordinates_list.append(list(transform(sourceCRS, targetCRS, x_coordinate_value, y_coordinate_value)))
        chimney_iri_list.append(chimney_iri)
    test = admsInputDataRetriever(chimney_iri_list, Constants.BLD_TOPNODE, coords, pollutants, 2,
                                  Constants.BLD_LIMIT,
                                  False, BDN, targetCRS)
    result = test.get()
    pythonLogger.postInfoToLogServer('calling admsAplWriter ...')
    result['Bdn'] = BDN
    result['CoordiSys'] = coord_sys
    # Midpoint of the region, converted back to lon/lat for the Met data.
    latitudemid = (float(coords[Constants.KEY_MIN_Y]) + float(coords[Constants.KEY_MAX_Y])) / 2
    longitudemid = (float(coords[Constants.KEY_MIN_X]) + float(coords[Constants.KEY_MAX_X])) / 2
    xmid, ymid = transform(targetCRS, sourceCRS, longitudemid, latitudemid)
    result['Met'] = working_dir + '/test.met'
    result['Lat'] = ymid
    result['Bkg'] = working_dir + '/testbackgrnd.bgd'
    # Terrain modelling is only switched on for the Hong Kong grid (EPSG:2326).
    if "2326" in args[5][5:]:
        result['terrindicator'] = "1"
    else:
        result['terrindicator'] = "0"
    result['chemindicator'] = "1"
    result['wetindicator'] = "1"
    # Night-time chemistry file between 19:00 and 05:59 local server time.
    now = datetime.datetime.now()
    hournow = now.hour + 1
    if not (6 <= hournow <= 18):
        result['night'] = "1"
        result['dirnight'] = "C:\JPS_DATA\working_dir\JPS\ADMS\chemistrynight.AAI"
    else:
        result['night'] = "0"
        result['dirnight'] = ""
    # Empirical SO2/PM10 washout coefficients derived from precipitation.
    # NOTE(review): constants are unexplained in the original — confirm source.
    annualprecipitation = precipitation * 365 * 24
    if annualprecipitation < 103:
        so2washout = 0.000001 / 500 * annualprecipitation
    else:
        so2washout = 0.0000019 + annualprecipitation * 0.0000000008
    if precipitation < 0.5:
        pm10washout = 0.0016
    elif precipitation > 4:
        pm10washout = 0.0072
    else:
        pm10washout = 0.00363
    result['so2washout'] = so2washout
    result['pm10washout'] = pm10washout
    # Attach the projected coordinates and a unique name to each source.
    for idx in range(len(ship_coordinates_list)):
        result['Src'][idx].setCoordinates(ship_coordinates_list[idx])
        result['Src'][idx].SrcName = "Chimney-{0}".format(idx + 1)
    return result, working_dir
def save_apl(*args):
    """Build the ADMS input data and write the .apl file into working_dir.

    ``args`` is expected to mirror ``sys.argv`` (see get_input).
    """
    # BUGFIX: get_input returns a (result, working_dir) tuple and takes the
    # arguments unpacked; the old code passed the args tuple as one positional
    # argument and then concatenated the returned tuple with a string, which
    # always raised TypeError.
    result, working_dir = get_input(*args)
    # NOTE(review): admsAplWriter may also need `result`; its signature is not
    # visible here — confirm against admsAplWriterShip.
    writer = admsAplWriter(working_dir + Constants.FILE_NAME_APL)
    writer.write()
def main(*args):
    """Entry point: build the APL specification and save it.

    ``args`` is expected to mirror ``sys.argv``.  Any failure is reported to
    the remote log server rather than propagated.
    """
    try:
        builder = AdmsAplShipBuilder()
        director = AplDirector()
        director.set_builder(builder)
        apl = director.get_apl()
        apl.specification()
        # BUGFIX: forward the argument list unpacked so save_apl's *args
        # receives the individual values, not a single tuple.
        save_apl(*args)
    except Exception as e:
        pythonLogger.postErrorToLogServer(e)
if __name__ == "__main__":
    # BUGFIX: unpack sys.argv so main's *args receives the individual
    # command-line arguments rather than a single list.
    main(*sys.argv)
| StarcoderdataPython |
1667699 | <filename>seamless/graphs/multi_module/mytestpackage/sub/mod1.py
from .. import testvalue
from mytestpackage.mod3 import testfunc
from ..mod4 import blah
def func():
    """Return ``testvalue`` imported from the parent package (mytestpackage)."""
    return testvalue
| StarcoderdataPython |
1761238 | """
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import glob
import csv
import re
# NOTE: this module is Python 2 (print statements, csv reader .next()).
def evaluateMappings(result_files, start_point, print_count, output_filename):
    """Summarise mapping result CSVs into one output CSV.

    For each file named ``rdf-<A>-rd-<B>-tb-<C>.csv``, skips the first
    ``start_point`` rows, sums column 5 (moves) and column 3 (std), and
    appends a line ``A,B,C,<moves>,<std>`` to ``output_filename``.
    """
    output = ""
    for result_file in result_files:
        # Extract the three experiment parameters from the file name.
        m = re.match(r'.*/rdf-(.*)-rd-(.*)-tb-(.*)\.csv', result_file)
        if not m:
            # NOTE(review): execution continues with m == None and will crash
            # at m.group() below — confirm whether this should `continue`.
            print "Cannot parse " + result_file
        with open(result_file, 'r') as result:
            result_reader = csv.reader(result, delimiter=',')
            # Skip the warm-up rows before the requested starting point.
            skip = 0
            while skip < start_point:
                result_reader.next()
                skip += 1
            moves = 0
            std = 0
            c = 0
            for row in result_reader:
                moves += int(row[5])
                std += float(row[3])
                c += 1
            if print_count:
                print c
        output += m.group(1) + "," + m.group(2) + "," + m.group(3) + "," + str(moves) + "," + str(std) + "\n"
    f = open(output_filename, 'w')
    f.write(output)
    f.close()
def main():
    """Parse command-line options and evaluate every CSV result file."""
    arg_parser = argparse.ArgumentParser(description='Evaluate mapping files for topologies from Blobstore')
    arg_parser.add_argument("-t", dest='result_path', type=str, required=True, help='path for the result files')
    arg_parser.add_argument("-o", dest='output_filename', type=str, required=True, help='output file name')
    arg_parser.add_argument("-s", dest='start_point', type=int, required=False, default=1, help='starting point for the calculation')
    arg_parser.add_argument("-c", dest='print_count', action="store_true", required=False, default=False, help='print count')
    opts = arg_parser.parse_args()
    # Collect the result CSVs and process them in deterministic (sorted) order.
    csv_files = sorted(glob.glob(opts.result_path + "/*.csv"))
    evaluateMappings(csv_files, opts.start_point, opts.print_count, opts.output_filename)
# Standard script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3289332 | """The messiest test file in the world.
Runs 'smoke test' to make sure everything runs.
"""
import numpy as np
import tasks
import representations
import matplotlib.pyplot as plt
import plotting
import util
import seaborn as sns
def test_everything_runs():
    """Check everything runs.

    Smoke test: builds every task graph, computes each representation on it,
    sanity-checks the representation shapes, and plots the graph.
    """
    # Representation hyper-parameters.
    discount = .9
    sigma = 1.
    n = 32 # line, ring params
    dims = [18, 22] # rectangle, four rooms params
    n_clique = 5 # clique params
    # clique ring params
    n_cluster_clqrng = 3
    n_in_cluster_clqrng = 5
    # tree params
    depth = 3
    n_branch = 2
    # SBM params
    n_clu_sbm = 3
    n_in_clu_sbm = [10, 20, 25]
    p_between_cluster_sbm = .7*np.eye(3) + .2 # .9 w/i, .2 b/t
    # Every task returns (adjacency, xy layout, node labels).
    outputs = [
        tasks.line(n),
        tasks.ring(n),
        tasks.rectangle_mesh(dims),
        tasks.four_rooms(dims),
        tasks.clique(n_clique),
        tasks.tower_of_hanoi(),
        tasks.clique_ring(n_cluster_clqrng, n_in_cluster_clqrng),
        tasks.tree(depth, n_branch, directed=False),
        tasks.stochastic_block(n_clu_sbm, n_in_clu_sbm, p_between_cluster_sbm),
    ]
    for adj, xy, labels in outputs:
        fig, ax = plt.subplots(1)
        # test reps: build every representation from the random-walk dynamics.
        randomwalk_transmat = util.l1_normalize_rows(adj)
        rep_onehot, _ = representations.state_onehot(randomwalk_transmat)
        rep_succ, _ = representations.successor_rep(randomwalk_transmat, discount)
        rep_Leig_unnorm, _ = representations.laplacian_eig(adj, "unnorm")
        rep_Leig_norm, _ = representations.laplacian_eig(adj, "norm")
        rep_Leig_rw, _ = representations.laplacian_eig(adj, "rw")
        rep_euc_gaussian, _ = representations.euclidean_gaussian(xy, sigma, None, 100)
        reps = [rep_onehot, rep_succ, rep_Leig_unnorm, rep_Leig_norm, rep_Leig_rw, rep_euc_gaussian]
        # Every representation must be a (num_nodes, features) matrix.
        for r in reps:
            assert len(r.shape) == 2
            assert r.shape[0] == len(adj)
        node_color = labels if list(labels) else None
        # NOTE(review): the assignment above is immediately overwritten —
        # confirm which colouring was intended.
        node_color = rep_euc_gaussian[:, 0]
        plotting.plot_graph(adj, xy=xy, ax=ax, node_color=node_color, node_size=100)
        plt.show()
def main():
    """Run the smoke test and report success."""
    test_everything_runs()
    print("Passed all tests!")
# Execute the smoke test when the module is run/imported.
main()
| StarcoderdataPython |
195241 | <filename>calamari_ocr/test/test_model_zoo.py
import os
import tempfile
import unittest
from glob import glob
from subprocess import check_call
import pytest
from tensorflow.python.keras.backend import clear_session
from tfaip.data.databaseparams import DataPipelineParams
from calamari_ocr.ocr.predict.params import PredictorParams
from calamari_ocr.scripts.predict_and_eval import (
main as predict_and_eval_main,
PredictAndEvalArgs,
)
from calamari_ocr.test.test_train_file import uw3_trainer_params
@pytest.mark.skipif(os.name != "posix", reason="Do not run on windows due to missing wget and untar.")
class TestModelZoo(unittest.TestCase):
    """Download a released calamari_models archive and verify its accuracy.

    Requires network access and the posix tools ``wget``, ``tar``, ``sh``.
    """

    def tearDown(self) -> None:
        # Free Keras/TF graph state between tests.
        clear_session()

    def test_model_zoo(self):
        version = "2.0"
        url = f"https://github.com/Calamari-OCR/calamari_models/archive/{version}.tar.gz"
        with tempfile.TemporaryDirectory() as d:
            # NOTE(review): the temp dir is immediately overridden with a
            # persistent debugging directory; the process also chdirs here.
            d = "model_archive_permanent"  # for debugging
            os.makedirs(d, exist_ok=True)
            os.chdir(d)
            # Fetch and unpack the model archive only once per machine.
            if not os.path.exists("calamari_models"):
                check_call(
                    [
                        "sh",
                        "-c",
                        " ".join(
                            [
                                "wget",
                                "-q",
                                "-O",
                                "-",
                                url,
                                "|",
                                "tar",
                                "xz",
                                "&&",
                                "mv",
                                f"calamari_models-{version}",
                                "calamari_models",
                            ]
                        ),
                    ]
                )
            # Predict on the UW3 validation split with the pretrained
            # voting ensemble and check the labelling error rate.
            trainer_params = uw3_trainer_params(with_validation=True)
            args = PredictAndEvalArgs(
                checkpoint=glob(os.path.join("calamari_models", "uw3-modern-english", "*.ckpt.json")),
                predictor=PredictorParams(pipeline=DataPipelineParams(batch_size=5)),
                data=trainer_params.gen.val_gen(),
            )
            full_evaluation = predict_and_eval_main(args)
            self.assertLess(
                full_evaluation["voted"]["eval"]["avg_ler"],
                0.001,
                "The accuracy on the test data must be below 0.1%",
            )
| StarcoderdataPython |
# Genetic-algorithm and simulation settings for the PID-tuning run.
config = {
    'population_size' : 100,
    # per-individual chance of mutation / crossover in each generation
    'mutation_probability' : .1,
    'crossover_rate' : .9,
    # maximum simulation runs before finishing
    'max_runs' : 100,
    # maximum timesteps per simulation
    'max_timesteps' : 150,
    # smoothness value of the line in [0, 1]
    'line_smoothness' : .4,
    # Bound for our gain parameters (p, i, d)
    'max_gain_value' : 3,
    # When True, generate a new map this run; when False, load an existing map
    'new_map' : True,
    'runs_per_screenshot' : 10,
    'data_directory' : '/home/monk/genetic_pid_data',
    'map_filename' : 'map.csv'
}
| StarcoderdataPython |
15186 | <filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/forms.py
"""
Defines a form to provide validations for course-specific configuration.
"""
from django import forms
from openedx.core.djangoapps.video_config.forms import CourseSpecificFlagAdminBaseForm
from openedx.core.djangoapps.video_pipeline.models import (
CourseVideoUploadsEnabledByDefault,
VEMPipelineIntegration,
)
class CourseVideoUploadsEnabledByDefaultAdminForm(CourseSpecificFlagAdminBaseForm):
    """
    Admin form for the course-specific "video uploads enabled by default"
    flag; course-key validation is inherited from the base form.
    """
    class Meta:
        model = CourseVideoUploadsEnabledByDefault
        fields = '__all__'
class VEMPipelineIntegrationAdminForm(forms.ModelForm):
    """
    Plain ModelForm exposing every field of the VEM pipeline integration
    configuration in the Django admin.
    """
    class Meta:
        model = VEMPipelineIntegration
        fields = '__all__'
| StarcoderdataPython |
3204463 | import os
import numpy as np
import tensorflow as tf
import gpflow
from GPcounts import branchingKernel
from GPcounts import NegativeBinomialLikelihood
from sklearn.cluster import KMeans
import scipy.stats as ss
from pathlib import Path
import pandas as pd
from gpflow.utilities import set_trainable
from tqdm import tqdm
from scipy.signal import savgol_filter
import random
import scipy as sp
from scipy import interpolate
from robustgp import ConditionalVariance
from pandas import DataFrame
from scipy.special import logsumexp
import warnings
# BUGFIX: the original wrapped simplefilter in `with warnings.catch_warnings():`
# with an empty body, which restores the previous filter state the moment the
# block exits — so warnings were never actually silenced.  Apply the filter at
# module level instead.
warnings.simplefilter('ignore')

# Get number of cores reserved by the batch system (NSLOTS is automatically set, or use 1 if not)
NUMCORES = int(os.getenv("NSLOTS", 1))
# print("Using", NUMCORES, "core(s)" )

# Create session properties: restrict TensorFlow thread pools to the
# reserved core count.
config = tf.compat.v1.ConfigProto(inter_op_parallelism_threads=NUMCORES,
                                  intra_op_parallelism_threads=NUMCORES)
tf.compat.v1.Session.intra_op_parallelism_threads = NUMCORES
tf.compat.v1.Session.inter_op_parallelism_threads = NUMCORES
class Fit_GPcounts(object):
def __init__(self, X=None, Y=None, scale=None, sparse=False, M=0, nb_scaled=False, safe_mode=False):
    """Set up a GPcounts fitter.

    X: DataFrame of time points / cells / spatial locations (index = cells).
    Y: DataFrame of gene expression, genes x cells.
    scale: optional scaling factors for the likelihood.
    sparse: use sparse (inducing-point) inference instead of full GP.
    M: number of inducing points (0 -> 5% of the time points).
    safe_mode: enable random-restart checks against local optima.
    """
    self.safe_mode = safe_mode
    self.folder_name = 'GPcounts_models/'
    self.transform = True  # to use log(count+1) transformation
    self.sparse = sparse  # use sparse or full inference
    self.nb_scaled = nb_scaled
    self.X = None  # time points (cell, samples, spatial location)
    self.M = M  # number of inducing points
    self.Z = None  # inducing points
    # set inducing points using conditional variance from robustGP method
    self.ConditionalVariance = False
    self.Y = None  # gene expression matrix
    self.Y_copy = None  # copy of gene expression matrix
    self.D = None  # number of genes
    self.N = None  # number of cells
    self.scale = scale
    self.genes_name = None
    self.cells_name = None
    self.Scale = None
    self.kernel = None
    self.bic = None
    # statistical test information
    self.lik_name = None  # the selected likelihood name
    self.models_number = None  # total number of models to fit for a single gene for the selected test
    self.model_index = None  # index of the current model
    self.hyper_parameters = {}  # model parameters initialization
    self.user_hyper_parameters = [None, None, None, None]  # user model parameters initialization
    self.model = None  # GP model information
    self.var = None  # GP variance of posterior predictive
    self.mean = None  # GP mean of posterior predictive
    self.fix = False  # fix hyper-parameters
    # save likelihood hyper-parameters of the dynamic model to initialize the constant model
    self.lik_alpha = None
    self.lik_km = None
    self.optimize = True  # optimize or load model
    self.branching = None  # DE kernel or RBF kernel
    self.xp = -1000.  # put branching time much earlier than zero time
    # single gene information
    self.y = None
    self.index = None
    self.global_seed = 0
    self.seed_value = 0  # initialize seed
    # counter of trials to resolve local optima or numerical failures
    self.count_fix = 0
    # check that X and Y are not missing
    if (X is None) or (Y is None):
        print('TypeError: GPcounts() missing 2 required positional arguments: X and Y')
    else:
        self.set_X_Y(X, Y)
def set_X_Y(self, X, Y):
    """Store time points X and expression matrix Y as numpy arrays.

    Requires X.shape[0] == Y.shape[1] (one X row per cell/column of Y).
    Also records gene/cell names and, in sparse mode, the default number
    of inducing points (5% of the time points).
    """
    self.seed_value = 0
    np.random.seed(self.seed_value)
    if X.shape[0] == Y.shape[1]:
        self.X = X
        self.cells_name = list(map(str, list(X.index.values)))
        self.X = X.values.astype(float)
        # Keep X two-dimensional: (N, d) or (N, 1).
        if len(self.X.shape) > 1:
            self.X = self.X.reshape([-1, self.X.shape[1]])
        else:
            self.X = self.X.reshape([-1, 1])
        if self.sparse:
            if self.M == 0:
                self.M = int((5 * (len(self.X))) / 100)  # number of inducing points is 5% of the length of time points
            self.ConditionalVariance = True
        self.Y = Y
        self.genes_name = self.Y.index.values.tolist()  # gene expression names
        self.Y = self.Y.values  # gene expression matrix
        '''
        if self.lik_name == 'Gaussian':
            self.Y = self.Y.values # gene expression matrix
        else:
            self.Y = self.Y.values.astype(int)
        self.Y = self.Y.astype(float)
        self.Y_copy = self.Y
        '''
        self.Y_copy = self.Y
        self.D = Y.shape[0]  # number of genes
        self.N = Y.shape[1]  # number of cells
    else:
        print('InvalidArgumentError: Dimension 0 in X shape must be equal to Dimension 1 in Y, but shapes are %d and %d.' % (X.shape[0], Y.shape[1]))
def Infer_trajectory(self, lik_name='Negative_binomial', transform=True):
    """Fit one dynamic GP per gene; returns a DataFrame of log-likelihoods."""
    if transform == True:
        # Truncate counts to integers, then promote to float for the GP.
        self.Y = self.Y.astype(int).astype(float)
        self.Y_copy = self.Y
    return self.run_test(lik_name, 1, range(self.D))
def One_sample_test(self, lik_name='Negative_binomial', transform=True):
    """Dynamic-vs-constant model test per gene; returns the likelihood table."""
    if transform == True:
        # Truncate counts to integers, then promote to float for the GP.
        self.Y = self.Y.astype(int).astype(float)
        self.Y_copy = self.Y
    return self.run_test(lik_name, 2, range(self.D))
def Model_selection_test(self, lik_name='Negative_binomial', kernel=None, transform=True):
    """Run the one-sample test under Linear/Periodic/RBF kernels and select
    the best kernel per gene by BIC, attaching model probabilities.

    Probability estimation follows SpatialDE (Svensson et al., Nat. Methods
    2018): softmax over negative BIC values per gene.
    """
    if transform == True:
        self.Y = self.Y.astype(int)
        self.Y = self.Y.astype(float)
        self.Y_copy = self.Y
    # Run GP model for linear, periodic and rbf kernels and calculate BIC
    ker_list = ['Linear', 'Periodic', 'RBF']
    genes_index = range(self.D)
    # Pre-declare the result columns so merge(how='outer') lines up.
    selection_results = pd.DataFrame()
    selection_results['Gene'] = 0
    selection_results['Dynamic_model_log_likelihood'] = 0
    selection_results['Constant_model_log_likelihood'] = 0
    selection_results['log_likelihood_ratio'] = 0
    selection_results['p_value'] = 0
    selection_results['q_value'] = 0
    selection_results['log_likelihood_ratio'] = 0
    selection_results['Model'] = 0
    selection_results['BIC'] = 0
    for word in ker_list:
        self.kernel = word
        results = self.run_test(lik_name, 2, genes_index)
        # NOTE(review): self.K (number of free parameters) is set outside
        # this excerpt — confirm it is defined before this call.
        results['BIC'] = -2 * results['Dynamic_model_log_likelihood'] + self.K * np.log(self.X.shape[0])
        results['Gene'] = self.genes_name
        results['Model'] = word
        # Likelihood-ratio test against chi-square with 1 df.
        results['p_value'] = 1 - ss.chi2.cdf(2 * results['log_likelihood_ratio'], df=1)
        results['q_value'] = self.qvalue(results['p_value'])
        selection_results = selection_results.merge(results, how='outer')
    # Model probability estimation based on bic based on SpatialDE: identification of spatially variable genes: https://www.nature.com/articles/nmeth.4636
    tr = selection_results.groupby(['Gene', 'Model'])['BIC'].transform(min) == selection_results['BIC']
    # select bic values for each kernel and gene
    bic_values = -selection_results[tr].pivot_table(values='BIC', index='Gene', columns='Model')
    # Temporarily silence over/underflow while exponentiating BIC scores.
    restore_these_settings = np.geterr()
    temp_settings = restore_these_settings.copy()
    temp_settings["over"] = "ignore"
    temp_settings["under"] = "ignore"
    np.seterr(**temp_settings)
    log_v = logsumexp(bic_values, 1)
    log_model_prob = (bic_values.T - log_v).T
    model_prob = np.exp(log_model_prob).add_suffix('_probability')
    # Keep only the best-BIC row per gene and attach the probabilities.
    tr = selection_results.groupby('Gene')['BIC'].transform(min) == selection_results['BIC']
    selection_results_prob = selection_results[tr]
    selection_results_prob = selection_results_prob.join(model_prob, on='Gene')
    transfer_columns = ['p_value', 'q_value']
    np.seterr(**restore_these_settings)
    selection_results_prob = selection_results_prob.drop(transfer_columns, 1)\
        .merge(selection_results, how='inner')
    return selection_results_prob
def Two_samples_test(self, lik_name='Negative_binomial', transform=True):
    """Shared-vs-separate time-series test per gene; returns the likelihood table."""
    if transform == True:
        # Truncate counts to integers, then promote to float for the GP.
        self.Y = self.Y.astype(int).astype(float)
        self.Y_copy = self.Y
    return self.run_test(lik_name, 3, range(self.D))
def Infer_branching_location(self, cell_labels, bins_num=50, lik_name='Negative_binomial',
                             branching_point=-1000, transform=True):
    """Fit a branching-kernel GP, then scan candidate branching times.

    cell_labels: per-cell branch assignment appended as a second X column.
    bins_num: number of candidate branching points to evaluate.
    Returns the dictionary produced by infer_branching().
    """
    if transform == True:
        self.Y = self.Y.astype(int)
        self.Y = self.Y.astype(float)
        self.Y_copy = self.Y
    cell_labels = np.array(cell_labels)
    # Augment X with the branch label column expected by the branching kernel.
    self.X = np.c_[self.X, cell_labels[:, None]]
    self.branching = True
    self.xp = branching_point
    # return self.X
    genes_index = range(self.D)
    # Initial fit with the branching point far before the first time point.
    log_likelihood = self.run_test(lik_name, 1, genes_index, branching=True)
    # Freeze the fitted kernel hyper-parameters for the scan below.
    self.branching_kernel_var = self.model.kernel.kern.variance.numpy()
    self.branching_kernel_ls = self.model.kernel.kern.lengthscales.numpy()
    # return log_likelihood
    return self.infer_branching(lik_name, bins_num)
def infer_branching(self, lik_name, bins_num):
    """Scan bins_num candidate branching times, pick the MAP model, and
    return its posterior branching probabilities and predictions.
    """
    testTimes = np.linspace(min(self.X[:, 0]), max(self.X[:, 0]), bins_num, endpoint=True)
    ll = np.zeros(bins_num)
    models = list()
    genes_index = range(self.D)
    # Keep the kernel hyper-parameters fixed during the scan.
    self.fix = True
    X = self.X
    for i in range(0, bins_num):
        del self.X
        # gpflow.utilities.print_summary(self.model, fmt='notebook')
        del self.model
        self.xp = testTimes[i]
        # Relabel every cell before the candidate branching time as branch 1.
        self.X = X.copy()
        self.X[np.where(self.X[:, 0] <= testTimes[i]), 1] = 1
        _ = self.run_test(lik_name, 1, genes_index, branching=True)
        ll[i] = self.model.log_posterior_density().numpy()
        models.append(self.model)
    del self.model
    # Find MAP model
    log_ll = np.zeros(bins_num)
    i = 0
    for mm in models:
        log_ll[i] = mm.log_posterior_density().numpy()
        i = i + 1
    p = self.CalculateBranchingEvidence({'loglik': log_ll}, testTimes)
    ll = p['posteriorBranching']
    iMAP = np.argmax(ll)
    self.model = models[iMAP]
    # Prediction: evaluate both branches on a dense grid of 100 time points.
    Xnew = np.linspace(min(self.X[:, 0]), max(self.X[:, 0]), 100).reshape(-1)[:, None]
    x1 = np.c_[Xnew, np.ones(len(Xnew))[:, None]]
    x2 = np.c_[Xnew, (np.ones(len(Xnew)) * 2)[:, None]]
    Xtest = np.concatenate((x1, x2))
    Xtest[np.where(Xtest[:, 0] <= self.model.kernel.xp), 1] = 1
    if self.lik_name == 'Gaussian':
        mu, var = self.model.predict_y(Xtest)
    else:
        mu, var = self.samples_posterior_predictive_distribution(Xtest)
    del models
    self.branching = False
    return {'geneName': self.genes_name,
            'branching_probability': ll,
            'branching_location': self.model.kernel.xp,
            'mean': mu,
            'variance': var,
            'Xnew': Xnew,
            'test_times': testTimes,
            'MAP_model': self.model,
            'loglik': log_ll,
            'logBayesFactor': p['logBayesFactor'],
            'likelihood': self.lik_name}
def CalculateBranchingEvidence(self, d, Bsearch):
"""
:param d: output dictionary from FitModel
:param Bsearch: candidate list of branching points
:return: posterior probability of branching at each point and log Bayes factor
of branching vs not branching
"""
# Calculate probability of branching at each point
# o = d['loglik'][:-1]
o = d['loglik']
pn = np.exp(o - np.max(o))
p = pn / pn.sum() # normalize
# Calculate log likelihood ratio by averaging out
o = d['loglik']
Nb = o.size - 1
if Nb != len(Bsearch) - 1:
raise NameError('Passed in wrong length of Bsearch is %g- should be %g' % (len(Bsearch), Nb))
obj = o[:-1]
illmax = np.argmax(obj)
llmax = obj[illmax]
lratiostable = llmax + np.log(1 + np.exp(obj[np.arange(obj.size) != illmax] - llmax).sum()) - o[-1] - np.log(Nb)
return {'posteriorBranching': p, 'logBayesFactor': lratiostable}
def calculate_FDR(self, genes_results):
    """Add p-values (chi-square LR test, df=1) and q-values to the results."""
    ratio = genes_results['log_likelihood_ratio']
    genes_results['p_value'] = 1 - ss.chi2.cdf(2 * ratio, df=1)
    genes_results['q_value'] = self.qvalue(genes_results['p_value'])
    return genes_results
'''
def set_inducing_points_locations(self,Z):
self.Z = Z
self.M = self.Z.shape[0]
'''
def kmean_algorithm_inducing_points(self, M=0):
    """Choose sparse-GP inducing points Z as sorted K-means centroids of X.

    M: optional override for the number of inducing points (0 keeps self.M).
    """
    if M != 0:
        self.M = M
    # Selecting Z here replaces the robustgp conditional-variance strategy.
    # NOTE(review): indentation was ambiguous in the paste — confirm whether
    # this reset belongs inside the `if M != 0` branch.
    self.ConditionalVariance = False
    # set inducing points by K-mean cluster algorithm
    kmeans = KMeans(n_clusters=self.M).fit(self.X)
    self.Z = kmeans.cluster_centers_
    self.Z = np.sort(self.Z, axis=None).reshape([self.M, 1])
    self.Z = self.Z.reshape([self.Z.shape[0], 1])
# Run the selected test and get likelihoods for all genes
def run_test(self, lik_name, models_number, genes_index, branching=False):
    """Fit the selected test for every gene in genes_index.

    models_number: 1 = trajectory only, 2 = one-sample test,
    3 = two-samples test.  Returns a DataFrame indexed by gene name.
    """
    genes_results = {}
    genes_state = {}
    self.Y = self.Y_copy
    self.models_number = models_number
    self.lik_name = lik_name
    self.optimize = True
    # column names for likelihood dataframe
    if self.models_number == 1:
        column_name = ['Dynamic_model_log_likelihood']
    elif self.models_number == 2:
        column_name = ['Dynamic_model_log_likelihood', 'Constant_model_log_likelihood', 'log_likelihood_ratio']
    else:
        column_name = ['Shared_log_likelihood', 'model_1_log_likelihood', 'model_2_log_likelihood', 'log_likelihood_ratio']
    for self.index in tqdm(genes_index):
        # One gene at a time, as an (N, 1) float column vector.
        self.y = self.Y[self.index].astype(float)
        self.y = self.y.reshape([-1, 1])
        results = self.fit_single_gene(column_name)
        genes_results[self.genes_name[self.index]] = results
    return pd.DataFrame.from_dict(genes_results, orient='index', columns=column_name)
# fit numbers of GPs = models_number to run the selected test
def fit_single_gene(self, column_name, reset=False):
    """Fit the models required by the current test for a single gene.

    Returns a list of log-likelihoods matching column_name, with the
    likelihood ratio appended for the two- and three-model tests.
    """
    if self.models_number == 1:
        col_name = 0
    else:
        col_name = 2
    # Model 1: the dynamic (or shared) model.
    self.model_index = 1
    model_1_log_likelihood = self.fit_model()
    results = [model_1_log_likelihood]
    if self.models_number == 2:
        if not (np.isnan(model_1_log_likelihood)):
            # Reuse the dynamic model's likelihood hyper-parameters to
            # initialize the constant model.
            if self.lik_name == 'Negative_binomial':
                self.lik_alpha = self.model.likelihood.alpha.numpy()
            if self.lik_name == 'Zero_inflated_negative_binomial':
                self.lik_km = self.model.likelihood.km.numpy()
                self.lik_alpha = self.model.likelihood.alpha.numpy()
        # Model 2: the constant model.
        self.model_index = 2
        model_2_log_likelihood = self.fit_model()
        if not (np.isnan(model_2_log_likelihood)):
            ll_ratio = model_1_log_likelihood - model_2_log_likelihood
        if np.isnan(model_1_log_likelihood) or np.isnan(model_2_log_likelihood):
            model_2_log_likelihood = np.nan
            ll_ratio = np.nan
        results = [model_1_log_likelihood, model_2_log_likelihood, ll_ratio]
    if self.models_number == 3:
        # Two-samples test: fit each half of the time series separately,
        # then restore the full X/Y.
        X_df = pd.DataFrame(data=self.X, index=self.cells_name, columns=['times'])
        Y_df = pd.DataFrame(data=self.Y_copy, index=self.genes_name, columns=self.cells_name)
        # initialize X and Y with first time series
        self.set_X_Y(X_df[0: int(self.N / 2)], Y_df.iloc[:, 0:int(self.N / 2)])
        self.y = self.Y[self.index].astype(float)
        self.y = self.y.reshape([self.N, 1])
        self.model_index = 2
        model_2_log_likelihood = self.fit_model()
        # initialize X and Y with second time series
        self.set_X_Y(X_df[self.N::], Y_df.iloc[:, int(self.N)::])
        self.y = self.Y[self.index].astype(float)
        self.y = self.y.reshape([self.N, 1])
        self.model_index = 3
        model_3_log_likelihood = self.fit_model()
        self.set_X_Y(X_df, Y_df)
        if np.isnan(model_1_log_likelihood) or np.isnan(model_2_log_likelihood) or np.isnan(model_3_log_likelihood):
            ll_ratio = np.nan
        else:
            ll_ratio = ((model_2_log_likelihood + model_3_log_likelihood) - model_1_log_likelihood)
        results = [model_1_log_likelihood, model_2_log_likelihood, model_3_log_likelihood, ll_ratio]
    return results
# Save and get log likelihood of successful fit and set likelihood to NaN in case of failure
def fit_model(self, reset=False):
    """Fit the current model and return its log posterior density.

    On success the model is checkpointed to disk; on failure (Cholesky or
    optimization error) NaN is returned and self.model is invalidated.

    :param reset: forwarded to fit_GP; True re-samples hyper-parameters.
    """
    fit = self.fit_GP(reset)
    if fit:  # save the model in case of a successful fit
        # FIX: original used `is not 'Gaussian'` — an identity comparison with
        # a string literal (SyntaxWarning since Python 3.8, and not guaranteed
        # to behave like equality). Replaced with `!=`.
        if self.sparse and self.lik_name != 'Gaussian':
            # sparse non-Gaussian models (SVGP) need the data passed explicitly
            log_likelihood = self.model.log_posterior_density((self.X, self.y)).numpy()
        else:
            log_likelihood = self.model.log_posterior_density().numpy()
        # a positive log-likelihood indicates a degenerate fit; retry with a
        # random restart (bounded by count_fix)
        if log_likelihood > 0 and self.count_fix < 10 and self.safe_mode and self.lik_name != 'Gaussian':
            self.count_fix = self.count_fix + 1
            log_likelihood = self.fit_model(True)
        if not np.isnan(log_likelihood):
            filename = self.get_file_name()
            ckpt = tf.train.Checkpoint(model=self.model, step=tf.Variable(1))
            ckpt.write(filename)
    else:  # set log likelihood to NaN in case of Cholesky decomposition or optimization failure
        log_likelihood = np.nan
        self.model = np.nan
    return log_likelihood
def fit_GP(self, reset=False):
    """Initialize hyper-parameters and fit a GP; return True on success.

    A Cholesky failure (tf InvalidArgumentError) is retried up to 10 times
    with freshly sampled hyper-parameters before giving up. After a
    successful optimized fit, a heuristic local-optimum check may itself
    trigger another restart.
    """
    self.init_hyper_parameters(reset=reset)
    fit = True
    try:
        fit = self.fit_GP_with_likelihood()
    except tf.errors.InvalidArgumentError as e:
        if self.count_fix < 10:  # fix failure by random restart
            fit = self.fit_GP(True)
        else:
            print('Can not fit a Gaussian process, Cholesky decomposition was not successful.')
            fit = False
    # sanity-check the solution only for optimized, non-branching, safe-mode fits
    if fit and self.optimize and self.count_fix < 5 and not self.branching and self.safe_mode:
        self.test_local_optima_case1()
    return fit
# Fit a GP with selected kernel, likelihood; run it as sparse or full GP
def fit_GP_with_likelihood(self):
    """Build and (optionally) optimize a GPflow model for the current gene.

    Selects the kernel (constant / linear / periodic / RBF / branching), the
    likelihood (Gaussian / Poisson / NB / ZINB) and the model class
    (GPR / SGPR for Gaussian, VGP / SVGP otherwise), then minimizes the
    training loss with Scipy. Returns True on success, False when
    optimization keeps failing after the random-restart budget is spent.
    """
    fit = True
    # select kernel: RBF, constant or branching kernel
    if self.hyper_parameters['ls'] == -1.:  # flag to fit constant kernel (one-sample-test model 2)
        kern = gpflow.kernels.Constant(variance=self.hyper_parameters['var'])
    elif self.kernel:
        if 'linear' in self.kernel:
            kern = gpflow.kernels.Linear(variance=self.hyper_parameters['var'])
            print('Fitting GP with Linear Kernel')
            self.K = 3  # number of hyper-parameters; presumably used for model selection elsewhere — TODO confirm
        elif 'periodic' in self.kernel:
            kern = gpflow.kernels.Periodic((gpflow.kernels.SquaredExponential(variance=self.hyper_parameters['var'], lengthscales=self.hyper_parameters['ls'])))
            print('Fitting GP with Periodic Kernel')
            self.K = 4
        else:
            kern = gpflow.kernels.RBF(variance=self.hyper_parameters['var'],
                                      lengthscales=self.hyper_parameters['ls'])
            print('Fitting GP with RBF Kernel')
            self.K = 4
    else:
        kern = gpflow.kernels.RBF(variance=self.hyper_parameters['var'],
                                  lengthscales=self.hyper_parameters['ls'])
    if self.branching:
        del kern
        if self.fix:
            # reuse previously fitted kernel hyper-parameters and freeze them
            kern = gpflow.kernels.RBF(variance=self.branching_kernel_var,
                                      lengthscales=self.branching_kernel_ls)
            set_trainable(kern.lengthscales, False)
            set_trainable(kern.variance, False)
        else:
            kern = gpflow.kernels.RBF()
        kernel = branchingKernel.BranchKernel(kern, self.xp)  # self.xp: branching point
    else:
        kernel = kern
    # select likelihood (Gaussian needs no explicit likelihood object below)
    if self.lik_name == 'Poisson':
        likelihood = gpflow.likelihoods.Poisson()
    if self.lik_name == 'Negative_binomial':
        # library size scaling
        if self.nb_scaled:
            # NOTE(review): local `scale` is unused afterwards; the likelihood
            # uses self.Scale built from self.scale directly
            scale = pd.DataFrame(self.scale)
            self.Scale = self.scale.iloc[:, self.index]
            self.Scale = np.array(self.Scale)
            self.Scale = np.transpose([self.Scale] * 20)  # magic 20 — presumably matches sample count used elsewhere; TODO confirm
            likelihood = NegativeBinomialLikelihood.NegativeBinomial(self.hyper_parameters['alpha'], scale=self.Scale, nb_scaled=self.nb_scaled)
        else:
            likelihood = NegativeBinomialLikelihood.NegativeBinomial(self.hyper_parameters['alpha'], nb_scaled=self.nb_scaled)
    if self.lik_name == 'Zero_inflated_negative_binomial':
        likelihood = NegativeBinomialLikelihood.ZeroInflatedNegativeBinomial(self.hyper_parameters['alpha'], self.hyper_parameters['km'])
    # Run model with selected kernel and likelihood
    if self.lik_name == 'Gaussian':
        if self.transform:  # use log(count+1) in case of Gaussian likelihood and transform
            self.y = np.log(self.y+1)
        if self.sparse:
            if self.ConditionalVariance:
                init_method = ConditionalVariance()
                self.Z = init_method.compute_initialisation(self.X, self.M, kernel)[0]
            self.model = gpflow.models.SGPR((self.X, self.y), kernel=kernel, inducing_variable=self.Z)
            if self.model_index == 2 and self.models_number == 2:
                # constant model of the one-sample test: keep inducing points fixed
                set_trainable(self.model.inducing_variable.Z, False)
        else:
            self.model = gpflow.models.GPR((self.X, self.y), kernel)
        training_loss = self.model.training_loss
    else:
        if self.sparse:
            if self.ConditionalVariance:
                init_method = ConditionalVariance()
                self.Z = init_method.compute_initialisation(self.X, self.M, kernel)[0]
            self.model = gpflow.models.SVGP(kernel, likelihood, self.Z)
            training_loss = self.model.training_loss_closure((self.X, self.y))
            if self.model_index == 2 and self.models_number == 2:
                set_trainable(self.model.inducing_variable.Z, False)
        else:
            self.model = gpflow.models.VGP((self.X, self.y), kernel, likelihood)
            training_loss = self.model.training_loss
    if self.optimize:
        if self.ConditionalVariance:
            # inducing locations chosen by the initializer stay fixed during optimization
            # (assumes ConditionalVariance is only set together with sparse — TODO confirm)
            set_trainable(self.model.inducing_variable.Z, False)
        o = gpflow.optimizers.Scipy()
        res = o.minimize(training_loss, variables=self.model.trainable_variables, options=dict(maxiter=5000))
        if not(res.success):  # test if optimization failed
            if self.count_fix < 10:  # fix failure by random restart
                fit = self.fit_GP(True)
            else:
                print('Can not Optimaize a Gaussian process, Optimization fail.')
                fit = False
    return fit
def get_file_name(self):
    """Build the checkpoint file path for the current gene and model index.

    Creates self.folder_name on first use. The name encodes likelihood,
    sparsity, test type ('tst_' for the two-samples test), gene and model.
    """
    if not os.path.exists(self.folder_name):
        os.mkdir(self.folder_name)
    parts = [self.folder_name, self.lik_name, '_']
    if self.sparse:
        parts.append('sparse_')
    if self.models_number == 3:
        parts.append('tst_')
    parts.extend([self.genes_name[self.index], '_model_', str(self.model_index)])
    return ''.join(parts)
# user assigns the default values for hyper_parameters
def initialize_hyper_parameters(self, length_scale=None, variance=None, alpha=None, km=None):
    """Set default kernel/likelihood hyper-parameters, remembering user overrides.

    Any argument left as None gets a data-driven default; the raw arguments
    are stored in self.user_hyper_parameters so later resets can re-apply them.
    """
    if length_scale is None:
        # default lengthscale: 5% of the span of X
        self.hyper_parameters['ls'] = (5*(np.max(self.X)-np.min(self.X)))/100
    else:
        self.hyper_parameters['ls'] = length_scale
    if variance is None:
        if self.lik_name == 'Gaussian' and not self.transform:
            # NOTE(review): `self.y+1**2` parses as `self.y + (1**2)` == y + 1;
            # the log branch below suggests `(self.y+1)**2` may have been
            # intended — confirm before changing.
            self.hyper_parameters['var'] = np.mean(self.y+1**2)
        else:
            self.hyper_parameters['var'] = np.mean(np.log(self.y+1)**2)
    else:
        self.hyper_parameters['var'] = variance
    if alpha is None:
        self.hyper_parameters['alpha'] = 1.
    else:
        self.hyper_parameters['alpha'] = alpha
    if km is None:
        self.hyper_parameters['km'] = 35.
    else:
        self.hyper_parameters['km'] = km
    # remember exactly what the user passed (None = use defaults on reset)
    self.user_hyper_parameters = [length_scale, variance, alpha, km]
# Hyper-parameters initialization, or resetting in case of failure
def init_hyper_parameters(self, reset=False):
    """Prepare hyper-parameters, RNG seeds and per-fit state for a new fit.

    reset=False: restore the user-supplied/default hyper-parameters.
    reset=True: bump the seed and re-sample all hyper-parameters from
    uniform distributions (used to escape failed fits).
    """
    if not reset:
        self.seed_value = 0
        self.count_fix = 0
        np.random.seed(self.seed_value)
        self.initialize_hyper_parameters(self.user_hyper_parameters[0], self.user_hyper_parameters[1],
                                         self.user_hyper_parameters[2], self.user_hyper_parameters[3])
    # in case of failure change the seed and sample hyper-parameters from uniform distributions
    if reset:
        self.count_fix = self.count_fix + 1
        self.seed_value = self.seed_value + 1
        np.random.seed(self.seed_value)
        self.hyper_parameters['ls'] = np.random.uniform((.25*(np.max(self.X)-np.min(self.X)))/100, (30.*(np.max(self.X)-np.min(self.X)))/100)
        self.hyper_parameters['var'] = np.random.uniform(0., 10.)
        self.hyper_parameters['alpha'] = np.random.uniform(0., 10.)
        self.hyper_parameters['km'] = np.random.uniform(0., 100.)
    # one-sample test, model 2: ls == -1. is the flag that makes
    # fit_GP_with_likelihood build a Constant kernel
    if self.model_index == 2 and self.models_number == 2:
        self.hyper_parameters['ls'] = -1.
        if self.optimize and self.count_fix == 0:
            # start the constant model from model 1's fitted likelihood params
            if self.lik_name == 'Negative_binomial':
                self.hyper_parameters['alpha'] = self.lik_alpha
    else:
        # clear saved likelihood parameters before fitting model 1
        # (they are re-captured after a successful model-1 fit)
        self.lik_alpha = None
        self.lik_km = None
    if not self.branching:
        self.fix = False  # do not freeze kernel hyper-parameters
    # reset gpflow graph / seeds for reproducibility
    tf.compat.v1.get_default_graph()
    tf.compat.v1.set_random_seed(self.seed_value)
    tf.random.set_seed(self.seed_value)
    gpflow.config.set_default_float(np.float64)
    self.y = self.Y[self.index].astype(float)
    self.y = self.y.reshape([-1, 1])
    self.model = None
    self.var = None
    self.mean = None
def generate_Samples_from_distribution(self, mean):
    """Draw 500 samples per test point from the fitted count likelihood.

    :param mean: 1-D array with the (inverse-linked) latent mean per point.
    :return: array of shape (len(mean), 500).
    """
    y = []
    if self.lik_name == 'Poisson':
        for i in range(mean.shape[0]):
            y.append(ss.poisson.rvs(mean[i], size=500))
    if self.lik_name == 'Negative_binomial':
        if self.model.likelihood.alpha.numpy() == 0:
            # alpha == 0: NB degenerates to Poisson
            for i in range(mean.shape[0]):
                y.append(ss.poisson.rvs(mean[i], size=500))
        else:
            r = 1./self.model.likelihood.alpha.numpy()  # r: number of failures
            prob = r / (mean + r)  # p: probability of success
            for i in range(mean.shape[0]):
                y.append(ss.nbinom.rvs(r, prob[i], size=500))
    if self.lik_name == 'Zero_inflated_negative_binomial':
        r = 1./self.model.likelihood.alpha.numpy()  # r: number of failures
        prob = r / (mean + r)  # p: probability of success
        km = self.model.likelihood.km.numpy()  # Michaelis-Menten (MM) constant
        psi = 1. - (mean/(km+mean))  # psi: probability of excess zeros
        for i in range(mean.shape[0]):
            # one Bernoulli draw decides dropout for the whole 500-sample row
            B = ss.bernoulli.rvs(size=1, p=1-psi[i])
            if B == 0:
                y.append(np.zeros(500))
            else:
                y.append(ss.nbinom.rvs(r, prob[i], size=500))
    y = np.vstack(y)
    return y
def samples_posterior_predictive_distribution(self, xtest):
    """Sample the posterior predictive distribution on xtest.

    Draws 20 x 5 latent samples, pushes their mean through the likelihood
    (500 draws per point), then returns (smoothed non-negative mean, samples).

    FIX: the original assigned `mean = np.mean(var, axis=0)` in the
    non-branching branch and immediately overwrote it with the smoothed
    value; the dead assignment was removed (no behaviour change).
    """
    var = []
    f_samples = []
    for i in range(20):
        f_samples.append(self.model.predict_f_samples(xtest, 5))
        f = np.vstack(f_samples)
        link_f = np.exp(f[:, :, 0])  # inverse link: latent f is on log scale
        var.append(self.generate_Samples_from_distribution(np.mean(link_f, 0)).T)
    var = np.vstack(var)
    if self.branching:
        mean = np.mean(link_f, axis=0)
    else:
        # Savitzky-Golay smoothing of the sample mean.
        # NOTE(review): window_length int(len/2)+1 must be odd, i.e. len(xtest)
        # must be even (callers use 100 points) — TODO confirm.
        mean = savgol_filter(np.mean(var, axis=0), int(xtest.shape[0]/2)+1, 3)
    mean = [(i > 0) * i for i in mean]  # clamp negative values to 0
    return mean, var
def load_predict_models(self, genes_name, test_name, likelihood='Negative_binomial', predict=True):
    """Restore previously fitted models from checkpoints and (optionally) predict.

    :param genes_name: genes to load.
    :param test_name: 'One_sample_test' (2 models), 'Two_samples_test'
        (3 models) or anything else (1 model).
    :param likelihood: likelihood name used when the models were fitted.
    :param predict: if True, compute predictive mean/variance on a 100-point grid.
    :return: dict with keys test_name, likelihood, means, vars, models.

    NOTE(review): `genes_states`/`states` are collected nowhere — presumably
    dead; confirm before removing.
    """
    params = {}
    genes_models = []
    genes_states = []
    genes_means = []
    genes_vars = []
    self.Y = self.Y_copy
    params['test_name'] = test_name
    self.lik_name = likelihood
    params['likelihood'] = self.lik_name
    if test_name == 'One_sample_test':
        self.models_number = 2
    elif test_name == 'Two_samples_test':
        self.models_number = 3
    else:
        self.models_number = 1
    # prediction grid slightly wider than the observed time range
    xtest = np.linspace(np.min(self.X)-.1, np.max(self.X)+.1, 100)[:, None]
    for gene in tqdm(genes_name):
        models = []
        means = []
        variances = []
        states = []
        self.index = self.genes_name.index(gene)
        self.y = self.Y[self.index]
        self.y = self.y.reshape([self.N, 1])
        for model_index in range(self.models_number):
            # build (but never optimize) the model so the checkpoint can be restored into it
            self.optimize = False
            self.model_index = model_index + 1
            self.init_hyper_parameters(reset=False)
            file_name = self.get_file_name()
            if self.models_number == 3:
                X_df = pd.DataFrame(data=self.X, index=self.cells_name, columns=['times'])
                Y_df = pd.DataFrame(data=self.Y_copy, index=self.genes_name, columns=self.cells_name)
                if model_index == 0:
                    self.set_X_Y(X_df, Y_df)
                if model_index == 1:  # initialize X and Y with first time series
                    self.set_X_Y(X_df[0:int(self.N/2)], Y_df.iloc[:, 0:int(self.N/2)])
                if model_index == 2:  # initialize X and Y with second time series
                    # self.N is the full length here (restored at the end of the
                    # previous iteration), so int(N/2):: is the second half
                    self.set_X_Y(X_df[int(self.N/2)::], Y_df.iloc[:, int(self.N/2)::])
                self.y = self.Y[self.index]
                self.y = self.y.reshape([self.N, 1])
            successed_fit = self.fit_GP()
            # restore check point
            if successed_fit:
                ckpt = tf.train.Checkpoint(model=self.model, step=tf.Variable(1))
                ckpt.restore(file_name)
                if predict:
                    if self.lik_name == 'Gaussian':
                        mean, var = self.model.predict_y(xtest)
                        mean = mean.numpy()
                        var = var.numpy()
                    else:
                        mean, var = self.samples_posterior_predictive_distribution(xtest)
                else:
                    mean = var = 0
            else:
                mean = var = 0
            means.append(mean)
            variances.append(var)
            models.append(self.model)
            # two-samples test: restore the full dataset after each half-fit
            if self.models_number == 3 and model_index > 0:
                self.set_X_Y(X_df, Y_df)
        genes_means.append(means)
        genes_vars.append(variances)
        genes_models.append(models)
    params['means'] = genes_means
    params['vars'] = genes_vars
    params['models'] = genes_models
    return params
def test_local_optima_case1(self):
    """Heuristic check for an implausible posterior mean; restart if found.

    Predicts on a grid, then triggers a random-restart refit when the
    predictive mean escapes the observed data range, collapses to zero, or
    deviates too far from the data mean. Caller (fit_GP) limits the number
    of restart attempts via count_fix.
    """
    # limit number of trials to fix a bad solution
    if self.sparse:
        x = self.Z
    else:
        x = self.X
    if self.X.shape[1] == 1:
        xtest = np.linspace(np.min(x), np.max(x), 100)[:, None]
    else:
        xtest = self.X
    if self.lik_name == 'Gaussian':
        mean, var = self.model.predict_y(xtest)
        self.mean = mean.numpy()
        self.var = var.numpy()
    else:
        # mean of posterior predictive samples
        self.mean, self.var = self.samples_posterior_predictive_distribution(xtest)
    mean_mean = np.mean(self.mean)
    y_max = np.max(self.y)
    mean_max = np.max(self.mean)
    y_min = np.min(self.y)
    mean_min = np.min(self.mean)
    y_mean = np.mean(self.y)
    mean_mean = np.mean(self.mean)  # NOTE(review): duplicate of the assignment above
    # tolerance on the relative deviation of the means (looser for big datasets)
    if self.N < 100:
        diff = 0
    else:
        diff = 1
    if self.model_index == 2 and self.models_number == 2:
        # constant model: any escape from the data range is suspicious
        if mean_min < y_min or mean_max > y_max or mean_mean == 0.0:
            fit = self.fit_GP(True)
    if y_mean > 0.0:
        diff_mean = abs(round((mean_mean-y_mean)/y_mean))
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # (diff_mean>diff and mean_min<y_min) or (diff_mean>diff and mean_max>y_max)
        # or mean_mean==0.0 — confirm this grouping is intended.
        if diff_mean > diff and mean_min < y_min or diff_mean > diff and mean_max > y_max or mean_mean == 0.0:
            fit = self.fit_GP(True)
def qvalue(self, pv, pi0=None):
    '''
    Estimates q-values from p-values
    This function is modified based on https://github.com/nfusi/qvalue

    FIX: the original used `sp.arange` / `sp.array` / `sp.argsort` /
    `sp.zeros_like` — the NumPy aliases that were deprecated and then removed
    from SciPy; replaced with the equivalent `np.*` calls. The duplicated
    `arange(0, 0.9, 0.01)` for `counts` now reuses `lam` (identical values).

    Args
    ====
    pv: array of p-values (any shape); the result has the same shape.
    pi0: if None, it's estimated as suggested in Storey and Tibshirani, 2003.
    '''
    assert(pv.min() >= 0 and pv.max() <= 1), "p-values should be between 0 and 1"
    original_shape = pv.shape
    pv = pv.ravel()  # flattens the array in place, more efficient than flatten()
    m = float(len(pv))
    if pi0 is None:
        if len(pv) < 100:
            # if the number of hypotheses is small, just set pi0 to 1
            pi0 = 1.0
        else:
            # evaluate pi0 for different lambdas
            lam = np.arange(0, 0.90, 0.01)
            counts = np.array([(pv > i).sum() for i in lam])
            pi0 = []
            for l in range(len(lam)):
                pi0.append(counts[l]/(m*(1-lam[l])))
            pi0 = np.array(pi0)
            # fit natural cubic spline and extrapolate to the last lambda
            tck = interpolate.splrep(lam, pi0, k=3)
            pi0 = interpolate.splev(lam[-1], tck)
            if pi0 > 1:
                pi0 = 1.0
    # else: the caller-supplied pi0 is used as-is
    assert(pi0 >= 0 and pi0 <= 1), "pi0 is not between 0 and 1: %f" % pi0
    p_ordered = np.argsort(pv)
    pv = pv[p_ordered]
    qv = pi0 * m/len(pv) * pv
    qv[-1] = min(qv[-1], 1.0)
    # enforce monotonicity from the largest p-value downwards
    for i in range(len(pv)-2, -1, -1):
        qv[i] = min(pi0*m*pv[i]/(i+1.0), qv[i+1])
    # reorder qvalues back to the input order
    qv_temp = qv.copy()
    qv = np.zeros_like(qv)
    qv[p_ordered] = qv_temp
    # reshape qvalues
    qv = qv.reshape(original_shape)
    return qv
1764336 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 30 12:29:06 2018
@author: <NAME>
"""
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
#%matplotlib qt
## Calculates calibration coefficients
def calib_cam():
    """Compute camera matrix and distortion coefficients from chessboard images.

    Reads every camera_cal/calibration*.jpg, detects the 9x6 inner chessboard
    corners, and calibrates the camera. Returns (mtx, dist).

    NOTE(review): `img` is referenced after the loop, so an empty glob raises
    NameError, and the image size passed to calibrateCamera comes from
    whichever file happened to be read last — confirm all images share one size.
    """
    # prepare object points
    nx = 9  # TODO: enter the number of inside corners in x
    ny = 6  # TODO: enter the number of inside corners in y
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((ny*nx,3), np.float32)
    objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.
    # Make a list of calibration images
    images = glob.glob('camera_cal/calibration*.jpg')
    # Step through the list and search for chessboard corners
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (nx,ny),None)
        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)
            # Draw and display the corners (ONLY FOR TESTING)
            #img = cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
            #cv2.imshow('img',img)
            #cv2.waitKey(500)
    #print(objpoints)
    #print(imgpoints)
    cv2.destroyAllWindows()
    # Calibrate Camera (image size taken from the last image read, see NOTE)
    ret,mtx,dist,rvecs,tvecs = cv2.calibrateCamera(objpoints,imgpoints,img.shape[1::-1],None,None)
    return mtx,dist
## Undistorts an image given calibration coefficients
def un_dis(img, mtx, dist):
    """Return *img* undistorted with camera matrix *mtx* and distortion *dist*."""
    return cv2.undistort(img, mtx, dist, None, mtx)
3339853 | from abc import ABCMeta, abstractmethod
from base64 import b64decode, b64encode
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Any, Callable, Dict, FrozenSet, Iterable, Iterator, List, Optional, Set, Type
from uuid import UUID, uuid4
from .policies import CoalescePolicy, ConflictPolicy
class Trigger(Iterator[datetime], metaclass=ABCMeta):
    """Interface implemented by every trigger type.

    A trigger is an iterator over the datetimes on which something should
    fire; iteration stops as soon as no further fire time can be computed.
    """

    __slots__ = ()

    @abstractmethod
    def next(self) -> Optional[datetime]:
        """
        Return the next datetime to fire on, or ``None`` when exhausted.

        :raises apscheduler.exceptions.MaxIterationsReached:
        """

    @abstractmethod
    def __getstate__(self):
        """Return the (JSON compatible) serializable state of the trigger."""

    @abstractmethod
    def __setstate__(self, state):
        """Initialize an empty instance from an existing state."""

    def __iter__(self):
        return self

    def __next__(self) -> datetime:
        fire_time = self.next()
        if fire_time is None:
            raise StopIteration
        return fire_time
@dataclass
class Task:
    """A callable registered with the scheduler.

    Equality and ordering consider ``id`` only (all other fields use
    ``compare=False``).
    """

    id: str  # unique task identifier
    func: Callable = field(compare=False)  # the callable to execute
    max_instances: Optional[int] = field(compare=False, default=None)  # presumably the max number of concurrent runs; None = unlimited — TODO confirm against the worker
    metadata_arg: Optional[str] = field(compare=False, default=None)  # NOTE(review): looks like the kwarg name used to pass run metadata — confirm
    stateful: bool = field(compare=False, default=False)
    misfire_grace_time: Optional[timedelta] = field(compare=False, default=None)  # how late a run may still start
@dataclass(unsafe_hash=True)
class Schedule:
    """A task/trigger pairing tracked by the scheduler.

    Hash and equality are based on ``id`` only (other fields are
    ``compare=False``), hence ``unsafe_hash=True``.
    """

    id: str  # unique schedule identifier
    task_id: str = field(compare=False)  # id of the Task this schedule runs
    trigger: Trigger = field(compare=False)  # produces the fire times
    args: tuple = field(compare=False)  # positional args for the task callable
    kwargs: Dict[str, Any] = field(compare=False)  # keyword args for the task callable
    coalesce: CoalescePolicy = field(compare=False)  # how to handle several missed fire times
    misfire_grace_time: Optional[timedelta] = field(compare=False)  # allowed lateness past next_fire_time
    tags: FrozenSet[str] = field(compare=False)
    next_fire_time: Optional[datetime] = field(compare=False, default=None)
    last_fire_time: Optional[datetime] = field(init=False, compare=False, default=None)  # set by the scheduler, not the constructor

    @property
    def next_deadline(self) -> Optional[datetime]:
        # Latest moment the next run may still start (fire time + grace),
        # or None when either component is missing.
        if self.next_fire_time and self.misfire_grace_time:
            return self.next_fire_time + self.misfire_grace_time
        return None
@dataclass(unsafe_hash=True)
class Job:
    """A single queued execution of a task, handed to a worker.

    Identity (hash/equality) comes from the auto-generated ``id`` only.
    """

    id: UUID = field(init=False, default_factory=uuid4)  # generated per job, not constructor-settable
    task_id: str = field(compare=False)  # id of the originating Task
    func: Callable = field(compare=False)  # the callable the worker runs
    args: tuple = field(compare=False)
    kwargs: Dict[str, Any] = field(compare=False)
    schedule_id: Optional[str] = field(compare=False, default=None)  # None for ad-hoc jobs with no schedule
    scheduled_fire_time: Optional[datetime] = field(compare=False, default=None)
    start_deadline: Optional[datetime] = field(compare=False, default=None)  # latest allowed start time
    tags: Optional[FrozenSet[str]] = field(compare=False, default_factory=frozenset)
    started_at: Optional[datetime] = field(init=False, compare=False, default=None)  # set by the worker when execution begins
class Serializer(metaclass=ABCMeta):
    """Converts objects to bytes and back; subclasses define the wire format.

    The ``*_unicode`` helpers wrap the byte form in base64 ASCII text so the
    payload can travel through text-only channels.
    """

    __slots__ = ()

    @abstractmethod
    def serialize(self, obj) -> bytes:
        """Serialize *obj* into raw bytes."""

    def serialize_to_unicode(self, obj) -> str:
        """Serialize *obj* and base64-encode the result as ASCII text."""
        raw = self.serialize(obj)
        return b64encode(raw).decode('ascii')

    @abstractmethod
    def deserialize(self, serialized: bytes):
        """Reconstruct an object from raw bytes."""

    def deserialize_from_unicode(self, serialized: str):
        """Base64-decode *serialized* and deserialize the resulting bytes."""
        raw = b64decode(serialized)
        return self.deserialize(raw)
@dataclass(frozen=True)
class Event:
    """Base class for events published by an event source (frozen = immutable)."""

    timestamp: datetime  # when the event occurred
@dataclass
class EventSource(metaclass=ABCMeta):
    """Interface for objects that publish events to subscribed callbacks.

    NOTE(review): decorated with @dataclass despite defining no fields —
    looks redundant; confirm before removing.
    """

    @abstractmethod
    def subscribe(self, callback: Callable[[Event], Any],
                  event_types: Optional[Iterable[Type[Event]]] = None) -> None:
        """
        Subscribe to events from this event source.

        :param callback: callable to be called with the event object when an event is published
        :param event_types: an iterable of concrete Event classes to subscribe to
            (None presumably means "all events" — confirm in implementations)
        """

    @abstractmethod
    def unsubscribe(self, callback: Callable[[Event], Any],
                    event_types: Optional[Iterable[Type[Event]]] = None) -> None:
        """
        Cancel an event subscription.

        :param callback: the previously subscribed callable
        :param event_types: an iterable of concrete Event classes to unsubscribe from
        """

    @abstractmethod
    async def publish(self, event: Event) -> None:
        """
        Publish an event to all matching subscribers.

        :param event: the event to publish
        """
class DataStore(EventSource):
    """Persistence interface for schedules and jobs.

    Implementations back the scheduler/worker with a concrete store. The
    class is also an async context manager; the default ``__aenter__`` /
    ``__aexit__`` acquire and release nothing.
    """

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        pass

    @abstractmethod
    async def get_schedules(self, ids: Optional[Set[str]] = None) -> List[Schedule]:
        """
        Get schedules from the data store.

        :param ids: a specific set of schedule IDs to return, or ``None`` to return all schedules
        :return: the list of matching schedules, in unspecified order
        """

    @abstractmethod
    async def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:
        """
        Add or update the given schedule in the data store.

        :param schedule: schedule to be added
        :param conflict_policy: policy that determines what to do if there is an existing schedule
            with the same ID
        """

    @abstractmethod
    async def remove_schedules(self, ids: Iterable[str]) -> None:
        """
        Remove schedules from the data store.

        :param ids: a specific set of schedule IDs to remove
        """

    @abstractmethod
    async def acquire_schedules(self, scheduler_id: str, limit: int) -> List[Schedule]:
        """
        Acquire unclaimed due schedules for processing.

        This method claims up to the requested number of schedules for the given scheduler and
        returns them.

        :param scheduler_id: unique identifier of the scheduler
        :param limit: maximum number of schedules to claim
        :return: the list of claimed schedules
        """

    @abstractmethod
    async def release_schedules(self, scheduler_id: str, schedules: List[Schedule]) -> None:
        """
        Release the claims on the given schedules and update them on the store.

        :param scheduler_id: unique identifier of the scheduler
        :param schedules: the previously claimed schedules
        """

    @abstractmethod
    async def add_job(self, job: Job) -> None:
        """
        Add a job to be executed by an eligible worker.

        :param job: the job object
        """

    @abstractmethod
    async def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> List[Job]:
        """
        Get the list of pending jobs.

        :param ids: a specific set of job IDs to return, or ``None`` to return all jobs
        :return: the list of matching pending jobs, in the order they will be given to workers
        """

    @abstractmethod
    async def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> List[Job]:
        """
        Acquire unclaimed jobs for execution.

        This method claims up to the requested number of jobs for the given worker and returns
        them.

        :param worker_id: unique identifier of the worker
        :param limit: maximum number of jobs to claim and return
        :return: the list of claimed jobs
        """

    @abstractmethod
    async def release_jobs(self, worker_id: str, jobs: List[Job]) -> None:
        """
        Release the claim on the given jobs.

        :param worker_id: unique identifier of the worker
        :param jobs: the previously claimed jobs
        """
| StarcoderdataPython |
1720247 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Add random data to dummy speakers.csv data"""
from random import random
import lorem
import pandas as pd
# institutions
def get_random_institution(row):
insts = [['KCL', 0.3], ['UCL', 0.5], ['Imperial', 0.6], ['Oxford', 0.7],
['Cambridge', 0.8], ['Manchester', 0.9]]
rand = random()
i = 0
floor = 0
while True:
try:
ceil = insts[i][1]
except IndexError:
return ''
if (rand >= floor) & (rand < ceil):
return insts[i][0]
else:
floor = ceil
i += 1
def get_random_plenary(row):
    """Assign speakers to a plenary session at random; non-speakers get ''.

    Session buckets (cumulative): Plenary 1 up to 0.4, Plenary 2 up to 0.6,
    Plenary 3 up to 1.0.
    """
    if row.role != 'speaker':
        return ''
    sessions = [('Plenary 1', 0.4), ('Plenary 2', 0.6), ('Plenary 3', 1.0)]
    draw = random()
    floor = 0
    for name, ceiling in sessions:
        if floor <= draw < ceiling:
            return name
        floor = ceiling
    return ''
# homepages
def make_homepage(row):
    """Give roughly a third of rows a fake xkcd homepage URL, '' otherwise.

    *row* is unused; the signature matches ``DataFrame.apply(axis=1)``.
    The URL is always generated first so the RNG stream stays the same
    whichever branch is taken.
    """
    max_page = 1500
    url = 'xkcd.com/' + str(int(max_page * random()))
    return url if random() < 0.33 else ''
# twitter handles
def make_twitter(row):
    """Give ~1 in 6 rows a fake handle built from the surname, '' otherwise.

    A successful draw appends a random number in [0, 999] to '@<surname>'.
    """
    if random() >= 0.16:
        return ''
    return '@' + row['surname'] + str(int(random() * 1000))
# Talk titles
def get_random_title(row):
    """Generate a lorem-ipsum talk title for speakers; '' for everyone else."""
    return lorem.sentence() if row.role == 'speaker' else ''
# Load the dummy speaker list and synthesize the remaining columns.
df = pd.read_csv('../data-src/speakers.csv')
try:
    # drop columns left over from earlier runs, if present
    df.drop('num', inplace=True, axis=1)
    df.drop('institution', inplace=True, axis=1)
except ValueError:
    # num column not in dataframe
    # NOTE(review): modern pandas raises KeyError here rather than ValueError —
    # confirm the pandas version in use before relying on this except clause.
    pass
#print(df.apply(get_random_institution, axis=1))
# Fill each synthetic column row by row.
df['university'] = df.apply(get_random_institution, axis=1)
df['homepage'] = df.apply(make_homepage, axis=1)
df['twitter'] = df.apply(make_twitter, axis=1)
df['talk_title'] = df.apply(get_random_title, axis=1)
df['plenary'] = df.apply(get_random_plenary, axis=1)
print('Writing dummy data to csv...')
df.to_csv('../data-src/participant_data.csv')
print('Success!\n')
print(df.head())
#print(get_random_plenary())
54246 | <gh_stars>0
from sqlalchemy import Column, Integer, String, DateTime
from mps_database.models import Base
import datetime
class InputHistory(Base):
    """
    InputHistory class (input_history table)

    Input data collected from the central node.
    All derived data is from the mps_configuration database.

    Properties:
      timestamp: the timestamp of the fault event. Format is as follows
        in order to work with sqlite date/time functions: "YYYY-MM-DD HH:MM:SS.SSS"
      new_state: the named state that was transitioned to in this fault event
      old_state: the named state that was transitioned from in this fault event
      channel: name of the DigitalChannel the input belongs to
      device: name of the DigitalDevice the input belongs to
    """
    __tablename__ = 'input_history'
    id = Column(Integer, primary_key=True)
    # defaults to insertion time (UTC) when no timestamp is supplied
    timestamp = Column(DateTime, default=datetime.datetime.utcnow, nullable=False)
    # Old and new states are stored as named values (strings), not 0/1 flags.
    new_state = Column(String, nullable=False)
    old_state = Column(String, nullable=False)
    channel = Column(String, nullable=False)  # DigitalChannel name
    device = Column(String, nullable=False)  # DigitalDevice name
4813574 | <gh_stars>0
# Spinlock puzzle (appears to be Advent of Code 2017 day 17):
# example step size and the real puzzle input.
example = 3
data = 314
def func1(data):
    """Simulate 2017 spinlock insertions with step *data*.

    Each round steps *data* positions forward in a circular buffer and
    inserts the next value after the current position; returns the value
    sitting immediately after the last value inserted (2017).
    """
    buffer = [0]
    position = 0
    for value in range(1, 2018):
        position = (position + data) % len(buffer) + 1
        buffer.insert(position, value)
    return buffer[(position + 1) % len(buffer)]
# Part 1 answers: example step (3), then the puzzle step (314).
print(func1(example))
print(func1(data))
def func2(data, n=50000000):
    """Return the value sitting right after 0 once *n* spinlock insertions
    with step *data* have been made (no buffer is materialized).

    Generalized with an *n* parameter (default preserves the original
    50,000,000 behaviour) and rewritten to batch-skip runs of insertions that
    cannot wrap around the buffer, cutting ~50M Python iterations down to a
    few thousand.

    Key invariant: insertions always happen at index >= 1, so value 0 never
    leaves index 0 (the original's zero_pos bookkeeping could never fire);
    the answer is simply the last value inserted at index 1, i.e. the last
    iteration whose post-step position is 0.
    """
    current_pos = 0
    buf_length = 1
    after_zero = None
    i = 0
    while i < n:
        if data > 0 and current_pos + data < buf_length:
            # The next k iterations all land strictly inside the buffer
            # (no modulo wrap), so none of them can hit position 0.
            # Each skipped iteration advances the position by data+1
            # (step + the insert shifting us right) and grows the buffer by 1.
            k = min((buf_length - current_pos - 1) // data, n - i)
            current_pos += k * (data + 1)
            buf_length += k
            i += k
        else:
            current_pos = (current_pos + data) % buf_length
            if current_pos == 0:
                after_zero = i + 1
            current_pos += 1
            buf_length += 1
            i += 1
    return after_zero
# Part 2 answer for the puzzle input (50M insertions).
print(func2(data))
3230814 | <reponame>IMULMUL/PythonForWindows
import sys
import os.path
sys.path.append(os.path.abspath(__file__ + "\..\.."))
import windows
# Demo script: walk through the read-only system information exposed by
# PythonForWindows' `windows.system` object.
system = windows.system

# Basic host information.
print("Basic system infos:")
print(" version = {0}".format(system.version))
print(" bitness = {0}".format(system.bitness))
print(" computer_name = {0}".format(system.computer_name))
print(" product_type = {0}".format(system.product_type))
print(" version_name = {0}".format(system.version_name))
print("")

print("There is {0} processes".format(len(system.processes)))
print("There is {0} threads".format(len(system.threads)))
print("")

# Logical drives.
print("Dumping first logical drive:")
drive = system.logicaldrives[0]
print(" " + str(drive))
print((" " * 8) + "name = {0}".format(drive.name))
print((" " * 8) + "type = {0}".format(drive.type))
print((" " * 8) + "path = {0}".format(drive.path))
print("")

# Services.
print("Dumping first service:")
serv = windows.system.services[0]
print(" " + str(serv))
print((" " * 8) + "name = {0}".format(serv.name))
print((" " * 8) + "description = {0}".format(serv.description))
print((" " * 8) + "status = {0}".format(serv.status))
print((" " * 8) + "process = {0}".format(repr(serv.process)))
print("")

print("Finding a service in a user process:")
# first service backed by a live (non-None) process
serv = [s for s in windows.system.services if s.process][0]
print(" " + str(serv))
print((" " * 8) + "name = {0}".format(serv.name))
print((" " * 8) + "description = {0}".format(serv.description))
print((" " * 8) + "status = {0}".format(serv.status))
print((" " * 8) + "process = {0}".format(repr(serv.process)))
print("")

# Handles (system-wide, then filtered to the current process).
print("Enumerating handles:")
handles = system.handles
print(" There are {0} handles:".format(len(handles)))
print(" First handle is: " + str(handles[0]))
print(" Enumerating handles of the current process:")
cp_handles = [h for h in system.handles if h.dwProcessId == windows.current_process.pid]
print(" There are {0} handles for this process".format(len(cp_handles)))
print(" Looking for a File handle:")
file_h = [h for h in cp_handles if h.type == "File"][0]
print(" Handle is {0}".format(file_h))
print(" Name is <{0}>".format(file_h.name))
print("")

# Kernel modules.
print("Dumping the first system module")
kmod = windows.system.modules[0]
print(" " + str(kmod))
print((" " * 8) + "ImageName = {0}".format(kmod.ImageName))
print((" " * 8) + "Base = {0:#x}".format(kmod.Base))
print((" " * 8) + "Size = {0:#x}".format(kmod.Size))
print((" " * 8) + "Flags = {0:#x}".format(kmod.Flags))
print((" " * 8) + "LoadCount = {0}".format(kmod.LoadCount))
1603124 | <reponame>Signbank/signbank
"""Create small videos for GlossVideos that have no small version."""
import os
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from signbank.settings.base import WRITABLE_FOLDER
from signbank.dictionary.models import Dataset
from signbank.video.models import GlossVideo
class Command(BaseCommand):
    """Management command: generate missing small videos for the given datasets."""

    help = 'Create small videos for GlossVideos that have no small version.'

    def add_arguments(self, parser):
        # one or more dataset acronyms; argparse stores positional names
        # verbatim, so the options key is the literal string 'dataset-acronym'
        parser.add_argument('dataset-acronym', nargs='+', type=str)

    def handle(self, *args, **kwargs):
        if 'dataset-acronym' in kwargs:
            for dataset_acronym in kwargs['dataset-acronym']:
                try:
                    dataset = Dataset.objects.get(acronym=dataset_acronym)
                    # only original (version 0) videos of this dataset are considered
                    for gv in GlossVideo.objects.filter(gloss__lemma__dataset=dataset, version=0):
                        # If there is no small version and original does exist,
                        # create a small version.
                        filepath = os.path.join(WRITABLE_FOLDER, gv.videofile.name)
                        if not gv.small_video() and os.path.exists(filepath.encode('utf-8')):
                            gv.make_small_video()
                except ObjectDoesNotExist as e:
                    # unknown acronym: report and continue with the next one
                    print("Dataset '{}' not found.".format(dataset_acronym), e)
| StarcoderdataPython |
3374764 | <gh_stars>0
from django.urls import path
from . import views
# URL namespace: reverse these routes as 'produto:<name>'.
app_name = 'produto'

urlpatterns = [
    path('', views.ListaProdutos.as_view(), name='lista'),  # product listing (index)
    path('<slug>', views.DetalheProduto.as_view(), name='detalhe'),  # product detail by slug
    path('addtocart/', views.AddToCart.as_view(), name='addtocart'),
    path('removetocart/', views.RemoveToCart.as_view(), name='removetocart'),
    path('cart/', views.Cart.as_view(), name='cart'),
    path('checkout/', views.Checkout.as_view(), name='checkout'),
    path('busca/', views.Busca.as_view(), name='busca'),  # search
]
| StarcoderdataPython |
3393114 | from datetime import datetime
from django.utils.timezone import make_aware
from django.db import IntegrityError
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from treeckle.common.exceptions import BadRequest
from treeckle.common.parsers import parse_ms_timestamp_to_datetime
from email_service.logic import send_created_booking_emails, send_updated_booking_emails
from users.permission_middlewares import check_access
from users.models import Role, User
from venues.logic import get_venues
from venues.models import Venue
from .serializers import (
GetBookingSerializer,
PostBookingSerializer,
PatchBookingSerializer,
DeleteBookingSerializer,
)
from .models import BookingStatus
from .logic import (
get_bookings,
get_requested_bookings,
booking_to_json,
create_bookings,
delete_bookings,
DateTimeInterval,
update_booking_statuses,
)
# Create your views here.
class TotalBookingCountView(APIView):
    """Public endpoint: total number of bookings across all organizations."""

    permission_classes = [AllowAny]

    def get(self, request):
        booking_count = get_bookings().count()
        return Response(booking_count, status=status.HTTP_200_OK)
class PendingBookingCountView(APIView):
    """Admin endpoint: number of pending bookings in the requester's organization."""

    @check_access(Role.ADMIN)
    def get(self, request, requester: User):
        pending = get_bookings(
            status=BookingStatus.PENDING, venue__organization=requester.organization
        )
        return Response(pending.count(), status=status.HTTP_200_OK)
class BookingsView(APIView):
@check_access(Role.RESIDENT, Role.ORGANIZER, Role.ADMIN)
def get(self, request, requester: User):
    """List bookings in the requester's organization, filtered by query params.

    Optional filters (validated by GetBookingSerializer): user_id, venue_name,
    start/end timestamps in milliseconds, and booking status. Missing time
    bounds default to the minimal/maximal aware datetimes.
    """
    serializer = GetBookingSerializer(data=request.query_params.dict())
    serializer.is_valid(raise_exception=True)
    validated_data = serializer.validated_data
    start_date_time = validated_data.get("start_date_time", None)
    end_date_time = validated_data.get("end_date_time", None)
    bookings = get_requested_bookings(
        organization=requester.organization,
        user_id=validated_data.get("user_id", None),
        venue_name=validated_data.get("venue_name", None),
        start_date_time=parse_ms_timestamp_to_datetime(start_date_time)
        if start_date_time is not None
        else make_aware(datetime.min),
        end_date_time=parse_ms_timestamp_to_datetime(end_date_time)
        if end_date_time is not None
        else make_aware(datetime.max),
        status=validated_data.get("status", None),
    )
    data = [booking_to_json(booking) for booking in bookings]
    return Response(data, status=status.HTTP_200_OK)
@check_access(Role.RESIDENT, Role.ORGANIZER, Role.ADMIN)
def post(self, request, requester: User):
serializer = PostBookingSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
validated_data = serializer.validated_data
try:
venue = get_venues(
organization=requester.organization,
id=validated_data.get("venue_id", None),
).get()
except (Venue.DoesNotExist, Venue.MultipleObjectsReturned) as e:
raise BadRequest("Invalid venue")
## shape: [{start_date_time:, end_date_time:}]
date_time_ranges = validated_data.get("date_time_ranges", [])
## shape: [(start, end)]
new_date_time_intervals = [
DateTimeInterval(
parse_ms_timestamp_to_datetime(date_time_range["start_date_time"]),
parse_ms_timestamp_to_datetime(date_time_range["end_date_time"]),
)
for date_time_range in date_time_ranges
]
try:
new_bookings = create_bookings(
title=validated_data.get("title", ""),
booker=requester,
venue=venue,
new_date_time_intervals=new_date_time_intervals,
form_response_data=validated_data.get("form_response_data", []),
)
except Exception as e:
raise BadRequest(e)
send_created_booking_emails(
bookings=new_bookings, organization=requester.organization
)
data = [booking_to_json(booking) for booking in new_bookings]
return Response(data, status=status.HTTP_201_CREATED)
@check_access(Role.RESIDENT, Role.ORGANIZER, Role.ADMIN)
def patch(self, request, requester: User):
serializer = PatchBookingSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
actions = serializer.validated_data.get("actions", [])
try:
(
updated_bookings,
id_to_previous_booking_status_mapping,
) = update_booking_statuses(actions=actions, user=requester)
except Exception as e:
raise BadRequest(e)
send_updated_booking_emails(
bookings=updated_bookings,
id_to_previous_booking_status_mapping=id_to_previous_booking_status_mapping,
organization=requester.organization,
)
data = [booking_to_json(booking) for booking in updated_bookings]
return Response(data, status=status.HTTP_200_OK)
@check_access(Role.ADMIN)
def delete(self, request, requester: User):
serializer = DeleteBookingSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
booking_ids_to_be_deleted = serializer.validated_data.get("ids", [])
deleted_bookings = delete_bookings(
booking_ids_to_be_deleted, organization=requester.organization
)
data = [booking_to_json(booking) for booking in deleted_bookings]
return Response(data, status=status.HTTP_200_OK)
| StarcoderdataPython |
3359324 | <gh_stars>0
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2019 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from flask import session
from flask_pluginengine import depends
from indico.core.plugins import IndicoPlugin, PluginCategory
from indico_search.forms import SearchForm
from indico_search.plugin import SearchPlugin
@depends('search')
class SearchPluginBase(IndicoPlugin):
    """Base class for search engine plugins"""

    #: the SearchEngine subclass to use
    engine_class = None
    #: the SearchForm subclass to use
    search_form = SearchForm
    category = PluginCategory.search

    def init(self):
        super(SearchPluginBase, self).init()
        # Register this plugin as the active engine on the core search plugin.
        SearchPlugin.instance.engine_plugin = self

    @property
    def only_public(self):
        """If the search engine only returns public events"""
        # Anonymous sessions may only ever see public results.
        return session.user is None

    def perform_search(self, values, obj=None, obj_type=None):
        """Performs the search.

        For documentation on the parameters and return value, see
        the documentation of the :class:`SearchEngine` class.
        """
        return self.engine_class(values, obj, obj_type).process()
class SearchEngine(object):
    """Base class for a search engine"""

    def __init__(self, values, obj, obj_type):
        """
        :param values: the values sent by the user
        :param obj: object to search in (a `Category` or `Conference`)
        :param obj_type: the type of *obj*, passed through unchanged to
                         subclasses
        """
        self.values = values
        self.obj = obj
        self.obj_type = obj_type
        # Captured at construction time; None for anonymous sessions.
        self.user = session.user

    def process(self):
        """Executes the search

        :return: an object that's passed directly to the result template.
                 if a flask response is returned, it is sent to the client
                 instead (useful to redirect to an external page)
        """
        raise NotImplementedError
| StarcoderdataPython |
171167 | <gh_stars>0
# Intro screen: "clear" the terminal and explain the rules.
# (All user-facing text is intentionally in Portuguese.)
print("\n"*100)
print("Neste jogo você deve convencer Deus a não destruir Sodoma e Gomorra.")
print("No prompt 'Eu' Digite:")
print("--> Senhor, e se houver xyz justos na cidade?")
print("(Onde 'xyz' corresponde a um número entre 0 e 999)")
print("BOA SORTE!!!")
input("Tecle <ENTER> ")
# Game start. The player must walk the count down exactly as in Genesis 18:
# 50, 45, 40, 30, 20, 10.
print("\n"*100)
numjust = 50
while numjust >= 10:
    justos = input("Eu: ")
    try:
        # Characters 20-22 of the expected sentence hold the number
        # ("Senhor, e se houver " is exactly 20 characters long).
        if int(justos[20:23]) == numjust:
            print("Deus: Não destruirei a cidade por amor dos {} justos".format(numjust))
            # Below 45 the step doubles to 10, producing the biblical
            # sequence 50, 45, 40, 30, 20, 10.
            if numjust < 45:
                numjust -= 5
            numjust -= 5
        # Hot-or-cold style hints when the guess misses.
        elif int(justos[20:23]) > numjust:
            print("Deus: Você não deveria pedir por menos justos?")
        elif int(justos[20:23]) < numjust:
            print("Deus: Você não gostaria de pedir por mais justos?")
    # A malformed sentence (no parseable number) restarts bargaining at 50.
    except ValueError:
        print("Deus: Acaso vou destruir as cidades sem consultar Abraao?")
        numjust = 50
    input("Tecle <ENTER> ")
# Ending: the player succeeded; print the epilogue and exit.
print("\n"*100)
print("\nDeus: Anjos, tirem Ló e sua família de lá...")
print("\nAnjos: Sim, Senhor!")
print("\n*** GAME OVER ***\n")
input("\nTecle <ENTER> ")
| StarcoderdataPython |
3234982 | from fastapi import FastAPI
from datetime import datetime
from typing import Optional
from fastapi.encoders import jsonable_encoder
from model.model import Task, TaskList
import model.taskman as taskman
# FastAPI application instance; the handlers below are unimplemented stubs.
app = FastAPI()


@app.get("/api/tasks")
async def get_tasks():
    """Fetch the list of all tasks.

    TODO: delegate to model.taskman and return the stored tasks;
    currently returns the placeholder string "TODO".
    """
    return "TODO"
@app.get("/api/tasks/{id}")
async def get_task(id: int):
"""TODO
Fetch the task by id
"""
return "TODO"
@app.post("/api/tasks/create")
async def create_task(task: Task):
"""TODO
1. Create a new task and
2. Return the details of task
"""
return "TODO"
@app.put("/api/tasks/{id}/update")
async def update_task(id: int, task: Task):
"""TODO
1. Update the task by id
2. Return the updated task
"""
return "TODO"
@app.delete("/api/tasks/{id}/delete")
async def delete_task(id: int):
"""TODO
1. Delete the task by id
2. Return a confirmation of deletion
"""
return "TODO"
| StarcoderdataPython |
168056 | <reponame>dt/SublimeScalaAddImport<filename>foursquare/source_code_analysis/scala/scala_import_parser.py
# coding=utf-8
# Copyright 2013 Foursquare Labs Inc. All Rights Reserved.
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import re
from foursquare.source_code_analysis.exception import SourceCodeAnalysisException
from foursquare.source_code_analysis.scala.scala_imports import ScalaImportClause
# Regex building blocks for parsing Scala import clauses.
# NOTE(review): these are plain (non-raw) string literals containing regex
# escapes (\w, \s); raw strings would avoid invalid-escape warnings.
# A single identifier, e.g., foo, Bar, baz_2, _root_ .
_IDENTIFIER_PATTERN = '(?:\w*)'
# A dotted path of identifiers, e.g., foo.bar.Baz .
_PATH_PATTERN = '(?:{identifier}(\.{identifier})*)'.format(identifier=_IDENTIFIER_PATTERN)
_PATH_RE = re.compile('^{path}$'.format(path=_PATH_PATTERN))
# An identifier rewrite, e.g., Foo => Bar .
_SELECTOR_PATTERN = '{identifier}(?:\s*=>\s*{identifier})?'.format(identifier=_IDENTIFIER_PATTERN)
# A (possibly multiline) import clause: leading indent, dotted path, then
# either a single selector or a brace-enclosed, comma-separated selector list.
_IMPORT_PATTERN = ('^(?P<indent> *)import\s*(?P<path>{path})\.'
    '(?P<selectors>{identifier}|(?:\{{\s*{selector}(?:\s*,\s*{selector})*\s*\}}))[ \t]*\n').format(
        path=_PATH_PATTERN,
        identifier=_IDENTIFIER_PATTERN,
        selector=_SELECTOR_PATTERN)
IMPORT_RE = re.compile(_IMPORT_PATTERN, re.MULTILINE)
class PathValidator(object):
  """Checks dotted import paths against the module's path grammar."""

  @staticmethod
  def validate(path):
    """Return True iff *path* is a well-formed dotted identifier path."""
    return bool(_PATH_RE.match(path))
class ScalaImportParser(object):
  """Locates Scala ``import`` clauses via IMPORT_RE and parses them into
  ScalaImportClause objects, optionally driving a rewrite cursor."""

  @staticmethod
  def find_all(src_text):
    """Returns a list of ScalaImportClauses representing all the imports in the text.

    Doesn't interact with a rewrite cursor, so is not useful for rewriting.
    """
    return [ ScalaImportParser._create_clause_from_matchobj(m) for m in IMPORT_RE.finditer(src_text) ]

  @staticmethod
  def search(rewrite_cursor):
    """Returns the next ScalaImportClause found, advancing the cursor as needed.

    Skips over, and emits verbatim, anything that isn't an import clause.
    Returns None if it finds no import clause.
    """
    ret = ScalaImportParser._apply_regex(rewrite_cursor, True)
    if ret is None:
      # No more imports: flush the rest of the source through the cursor.
      rewrite_cursor.finish()
    return ret

  @staticmethod
  def match(rewrite_cursor):
    """If the cursor is currently on an import clause, returns a ScalaImportClause and advances the cursor.

    Returns None otherwise.
    """
    return ScalaImportParser._apply_regex(rewrite_cursor, False)

  @staticmethod
  def _apply_regex(rewrite_cursor, search):
    # Apply IMPORT_RE from (search=True) or at (search=False) the cursor's
    # current position; on a hit, copy skipped text and advance past the match.
    if search:
      m = IMPORT_RE.search(rewrite_cursor.src_text, rewrite_cursor.src_pos)
    else:
      m = IMPORT_RE.match(rewrite_cursor.src_text, rewrite_cursor.src_pos)
    if m is None:
      return None
    # Copy whatever we skipped over.
    rewrite_cursor.copy_from_src_until(m.start())
    # Move past the string we matched.
    rewrite_cursor.set_src_pos(m.end())
    return ScalaImportParser._create_clause_from_matchobj(m)

  @staticmethod
  def _create_clause_from_matchobj(m):
    # Build a ScalaImportClause from an IMPORT_RE match object.
    indent_string = m.group('indent')
    path_string = m.group('path')
    selectors_string = m.group('selectors').strip()
    if len(selectors_string) == 0:
      raise SourceCodeAnalysisException('Something wrong with import: {0}; trailing dot, possibly?'.format(m.group(0)))
    if selectors_string[0] == '{':
      if selectors_string[-1] != '}':
        raise SourceCodeAnalysisException('Bad regex match: opening brace has no closing brace.')
      # Strip the surrounding braces from a multi-selector list.
      selectors_string = selectors_string[1:-1]
    ret = ScalaImportClause(indent_string, path_string, m.group(0), m.start(), m.end())
    selectors = [x.strip() for x in selectors_string.split(',')]
    for selector in selectors:
      # Each selector is either "Name" or a rename of the form "Name => Alias".
      parts = [x.strip() for x in selector.split('=>')]
      name = parts[0]
      if len(parts) == 2:
        as_name = parts[1]
      else:
        as_name = None
      ret.add_import(name, as_name)
    return ret
| StarcoderdataPython |
3267816 | <gh_stars>0
from django.contrib import admin
# Register your models here.
from .models import Product, Country, Town, StockCard
class StockCardAdmin(admin.ModelAdmin):
    # Default ModelAdmin behaviour; placeholder for future list_display/filters.
    pass


# Expose the catalogue models in the Django admin site.
admin.site.register(StockCard, StockCardAdmin)
admin.site.register(Country)
admin.site.register(Town)
admin.site.register(Product)
| StarcoderdataPython |
3336596 | <reponame>ChucklesZeClown/learn-python
# NOTE: this exercise file uses Python 2 syntax (print statements,
# %-formatting) and will not run under Python 3.
# create a string variable consisting of some text plus a formatted value
x = "There are %d types of people." % 10
# create a string
binary = "binary"
# create another string
do_not = "don't"
# create a third string, which includes the previous 2 strings using formatted values
y = "Those who know %s and those who %s." % (binary, do_not) # puts a string into a string
# display the 2 complex strings by printing the variables
print x
print y
# now use those complex strings with formatting in new strings;
# %r shows the repr() form (with quotes), %s the plain str() form
print "I said: %r." % x
print "I also said: '%s'." % y
# create a binary variable, and also a string that will include a value if one is passed in
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
# display the string including a value that is passed in to be formatted into the string
print joke_evaluation % hilarious # puts a string into a string
# addition by Selby - display the string without passing in a value;
# the literal %r placeholder is printed unfilled
print joke_evaluation
# create 2 string variables
w = "This is the left side of..."
e = "a string with a right side."
# and show what happens when you 'add' 2 strings together. This is really a concatenation
print w + e # puts a string into a string
1789230 | PERIOD_TYPE_MONTH = "MONTH"
PERIOD_TYPE_QUARTER = "QUARTER"
PERIOD_TYPE_SIX_MONTH = "SIX_MONTH"
PERIOD_TYPE_YEAR = "YEAR"
def detect(dhis2_period):
if len(dhis2_period) == 4:
return PERIOD_TYPE_YEAR
if "Q" in dhis2_period:
return PERIOD_TYPE_QUARTER
if "S" in dhis2_period:
return PERIOD_TYPE_SIX_MONTH
if len(dhis2_period) == 6:
return PERIOD_TYPE_MONTH
raise Exception("unsupported dhis2 period format for '" + dhis2_period + "'")
| StarcoderdataPython |
158101 | import base64
from collections import namedtuple
from datetime import datetime
import hashlib
import os
import secrets
import struct
import sys
import time
from fido2.client import Fido2Client
from fido2.ctap2 import CTAP2
from fido2.ctap2 import CredentialManagement
from fido2.hid import CtapHidDevice
from fido2.utils import sha256, hmac_sha256
from fido2.attestation import Attestation
from fido2.webauthn import PublicKeyCredentialCreationOptions
# A PGP signature sub-packet: a type octet plus its raw body bytes.
SubPacket = namedtuple('SubPacket', ['type', 'body'])

# Relying-party ID under which the PGP credentials live on the authenticator.
RP_ID = "pgp"


def verify_rp_id(rp_id, origin):
    """Origin check handed to Fido2Client: accept only an exact RP-ID match."""
    return rp_id == origin
class SoloPGP(object):
    """OpenPGP (RFC 4880) key management backed by a FIDO2 authenticator.

    Keys are resident credentials stored under RP_ID; digests are signed
    on-device through a vendor-specific CTAP2 command (0x50).
    """

    def __init__(self, dev):
        # The client "origin" is the RP id itself; verify_rp_id accepts it.
        origin = RP_ID
        self.client = Fido2Client(dev, origin, verify=verify_rp_id)
        self.ctap2 = CTAP2(dev)
        # Optional PIN from the environment; falsy/absent disables pin_auth.
        self.pin = os.getenv('SOLOPIN')

    def _sign_hash(self, cred_id, dgst):
        """Sign *dgst* on-device with credential *cred_id*; return (r, s) bytes.

        NOTE(review): 0x50 is a vendor-specific CTAP2 command (Solo firmware
        extension) -- confirm against the firmware in use.
        """
        if self.pin:
            pin_token = self.client.pin_protocol.get_pin_token(self.pin)
            pin_auth = hmac_sha256(pin_token, dgst)[:16]
            ret = self.ctap2.send_cbor(0x50, {1: dgst, 2: {"id": cred_id, "type": "public-key"}, 3: pin_auth})
        else:
            ret = self.ctap2.send_cbor(0x50, {1: dgst, 2: {"id": cred_id, "type": "public-key"}})
        der_sig = ret[1]
        # Extract 'r' and 's' from the DER signature as described here:
        # https://crypto.stackexchange.com/questions/1795/how-can-i-convert-a-der-ecdsa-signature-to-asn-1
        r_len = der_sig[3]
        r = der_sig[4:4+r_len]
        s = der_sig[6+r_len:]
        # DER integers are sign-prefixed; drop the leading padding byte so r/s
        # fit the fixed 32-byte P-256 scalar size.
        if len(r) > 32:
            r = r[-32:]
        if len(s) > 32:
            s = s[-32:]
        return r,s

    def _pubkey_packet(self, pubkey, created):
        """Build a v4 public-key packet for a NIST P-256 ECDSA key.

        *pubkey* is an (x, y) tuple of coordinate byte strings.
        """
        pkt = bytearray()
        pkt.append(0x98) # public key packet
        pkt.append(0x52) # packet length (fixed for a P-256 key body)
        pkt.append(0x04) # version
        pkt.extend(struct.pack('>I', created))
        pkt.append(0x13) # ECDSA algo
        pkt.extend(b'\x08\x2A\x86\x48\xCE\x3D\x03\x01\x07') # nistp256 id
        pkt.extend(b'\x02\x03') # 0x203 bits MPI
        pkt.append(0x04) # uncompressed key
        pkt.extend(pubkey[0]) # pubkey x
        pkt.extend(pubkey[1]) # pubkey y
        return pkt

    def _fingerprint(self, pubkey_pkt):
        """Compute the v4 fingerprint: SHA-1 over 0x99 || 2-byte length || body."""
        fp = b'\x99\x00\x52' + pubkey_pkt[2:]
        m = hashlib.sha1()
        m.update(fp)
        return m.digest()

    def _userid_packet(self, user):
        """Build an OpenPGP user-id packet for the ASCII string *user*."""
        pkt = bytearray()
        pkt.append(0xb4)
        pkt.append(len(user))
        pkt.extend(user.encode('ascii'))
        return pkt

    def _signature_packet(self, sig_type, cred_id, hashed_prefix, hashed_subpkts, unhashed_subpkts):
        """Build a v4 ECDSA/SHA-256 signature packet of *sig_type*.

        *hashed_prefix* is the material hashed before the signature's own
        hashed fields; the resulting digest is signed on-device.
        """
        pkt = bytearray()
        pkt.append(0x04) # version
        pkt.append(sig_type)
        pkt.append(0x13) # ECDSA algo
        pkt.append(0x08) # SHA256
        # Byte length of the hashed subpacket area: per subpacket, one length
        # octet plus one type octet plus the body.
        hashed_count = sum([1+len(subpkt.body) for subpkt in hashed_subpkts])
        hashed_count += len(hashed_subpkts)
        pkt.extend(struct.pack('>H', hashed_count))
        for subpkt in hashed_subpkts:
            subpkt_len = len(subpkt.body) + 1
            pkt.extend(struct.pack('B', subpkt_len))
            pkt.append(subpkt.type)
            pkt.extend(subpkt.body)
        hashed_data = bytearray()
        hashed_data.extend(hashed_prefix)
        hashed_data.extend(pkt)
        hashed_data.extend(b'\x04\xff') # RFC 4880 5.2.4 v4 hash trailer
        hashed_data.extend(struct.pack('>I', len(pkt)))
        unhashed_count = sum([1+len(subpkt.body) for subpkt in unhashed_subpkts])
        unhashed_count += len(unhashed_subpkts)
        pkt.extend(struct.pack('>H', unhashed_count))
        for subpkt in unhashed_subpkts:
            subpkt_len = len(subpkt.body) + 1
            pkt.extend(struct.pack('B', subpkt_len))
            pkt.append(subpkt.type)
            pkt.extend(subpkt.body)
        m = hashlib.sha256()
        m.update(hashed_data)
        dgst = m.digest()
        pkt.extend(dgst[:2]) # left 16 bits of the hash
        r, s = self._sign_hash(cred_id, dgst)
        # Encode r and s as MPIs: 2-byte bit length, then magnitude bytes.
        ri = int.from_bytes(r, 'big', signed=False)
        si = int.from_bytes(s, 'big', signed=False)
        pkt.extend(struct.pack('>H', ri.bit_length()))
        pkt.extend(r)
        pkt.extend(struct.pack('>H', si.bit_length()))
        pkt.extend(s)
        pkt_len = len(pkt)
        # NOTE(review): single-octet length encoding assumes the packet stays
        # short enough for a one-byte length field -- confirm for large inputs.
        pkt.insert(0, 0x88) # signature packet
        pkt.insert(1, pkt_len)
        return pkt

    def _signature_packet_key(self, cred_id, hashed_prefix, hashed_subpkts, unhashed_subpkts):
        # Type 0x13: positive certification of a user-id/public-key binding.
        return self._signature_packet(0x13, cred_id, hashed_prefix, hashed_subpkts, unhashed_subpkts)

    def _signature_packet_data(self, cred_id, hashed_prefix, hashed_subpkts, unhashed_subpkts):
        # Type 0x00: signature of a binary document.
        return self._signature_packet(0x00, cred_id, hashed_prefix, hashed_subpkts, unhashed_subpkts)

    def _ascii_armor(self, data):
        # Base64 with line wrapping; the armor CRC24 checksum line is omitted.
        return base64.encodebytes(data).decode('ascii')

    def gen_key(self):
        """Interactively create a resident credential and print the armored
        public key block (key packet + user-id packet + self-signature)."""
        name = input('Real name: ')
        email = input('Email address: ')
        username = "{} <{}>".format(name, email)
        created = int(time.time())
        rp = {"id": RP_ID, "name": "OpenPGP"}
        # The creation timestamp doubles as the credential's user id.
        user = {"id": struct.pack('>I', created), "name": username}
        challenge = secrets.token_bytes(32)
        # NOTE(review): alg -8 (EdDSA) is offered first, but the packet
        # builders assume a P-256 ECDSA key -- confirm authenticator behavior.
        options = PublicKeyCredentialCreationOptions(
            rp,
            user,
            challenge,
            [{"type": "public-key", "alg": -8},
            {"type": "public-key", "alg": -7}],
            authenticator_selection={"require_resident_key": True}
        )
        attestation_object, client_data = self.client.make_credential(options, pin=self.pin)
        statement = attestation_object.att_statement
        auth_data = attestation_object.auth_data
        # Verify the "packed" attestation before trusting the new credential.
        attestation = Attestation.for_type("packed")()
        attestation.verify(statement, auth_data, client_data.hash)
        cred_id = auth_data.credential_data.credential_id
        pubkey_x = auth_data.credential_data.public_key[-2]
        pubkey_y = auth_data.credential_data.public_key[-3]
        pubkey = (pubkey_x, pubkey_y)
        pubkey_pkt = self._pubkey_packet(pubkey, created)
        userid_pkt = self._userid_packet(username)
        fp = self._fingerprint(pubkey_pkt)
        # The key id is the low 64 bits of the fingerprint.
        key_id = fp[-8:]
        print("Key ID: {}".format(key_id.hex().upper()))
        print("Key fingerprint: {}\n".format(fp.hex().upper()))
        # Hash prefix for the self-signature: key material then user id.
        hashed_prefix = b'\x99\x00\x52' + pubkey_pkt[2:]
        hashed_prefix += b'\xb4' + struct.pack('>I', len(userid_pkt)-2) + userid_pkt[2:]
        hashed_subpkts = [SubPacket(0x21, b'\x04'+fp),
            SubPacket(0x1B, b'\x03'), # key flags
            SubPacket(0x02, struct.pack('>I', created))]
        unhashed_subpkts = [SubPacket(0x10, key_id)] # issuer
        sig_pkt = self._signature_packet_key(cred_id, hashed_prefix, hashed_subpkts, unhashed_subpkts)
        armored = self._ascii_armor(pubkey_pkt + userid_pkt + sig_pkt)
        print('-----BEGIN PGP PUBLIC KEY BLOCK-----\n\n{}-----END PGP PUBLIC KEY BLOCK-----'.format(armored))

    def sign(self, key_id, data):
        """Sign *data* with the resident key whose id (hex) is *key_id*;
        print an armored detached signature plus a gpg-style status line."""
        key_id = bytes.fromhex(key_id)
        token = self.client.pin_protocol.get_pin_token(self.pin)
        pin_protocol = 1
        cm = CredentialManagement(self.ctap2, pin_protocol, token)
        creds = cm.enumerate_creds(sha256(RP_ID.encode('ascii')))
        # Recompute each credential's key id from its public key until the
        # requested one is found.
        for cred in creds:
            user_id = cred[CredentialManagement.RESULT.USER]['id']
            created = int.from_bytes(user_id, 'big', signed=False)
            username = cred[CredentialManagement.RESULT.USER]['name']
            cred_id = cred[CredentialManagement.RESULT.CREDENTIAL_ID]['id']
            pubkey_x = cred[CredentialManagement.RESULT.PUBLIC_KEY][-2]
            pubkey_y = cred[CredentialManagement.RESULT.PUBLIC_KEY][-3]
            pubkey = (pubkey_x, pubkey_y)
            pubkey_pkt = self._pubkey_packet(pubkey, created)
            fp = self._fingerprint(pubkey_pkt)
            curr_key_id = fp[-8:]
            if curr_key_id == key_id:
                break
        else:
            print("Key {} not found".format(key_id))
            return None
        # Use the current time as the signature creation timestamp.
        created = int(time.time())
        hashed_subpkts = [SubPacket(0x21, b'\x04'+fp),
            SubPacket(0x02, struct.pack('>I', created))]
        unhashed_subpkts = [SubPacket(0x10, key_id)] # issuer
        sig_pkt = self._signature_packet_data(cred_id, data, hashed_subpkts, unhashed_subpkts)
        armored = self._ascii_armor(sig_pkt)
        # Status line on stderr so gpg front-ends can detect success.
        print('\n[GNUPG:] SIG_CREATED ', file=sys.stderr)
        print('-----BEGIN PGP SIGNATURE-----\n\n{}-----END PGP SIGNATURE-----'.format(armored))

    def list(self):
        """Print creation time, user, key id and fingerprint of every
        resident PGP credential on the device."""
        token = self.client.pin_protocol.get_pin_token(self.pin)
        pin_protocol = 1
        cm = CredentialManagement(self.ctap2, pin_protocol, token)
        meta = cm.get_metadata()
        existing = meta[CredentialManagement.RESULT.EXISTING_CRED_COUNT]
        if existing == 0:
            print("No PGP keys found")
            return
        creds = cm.enumerate_creds(sha256(RP_ID.encode('ascii')))
        for cred in creds:
            user_id = cred[CredentialManagement.RESULT.USER]['id']
            created = int.from_bytes(user_id, 'big', signed=False)
            username = cred[CredentialManagement.RESULT.USER]['name']
            pubkey_x = cred[CredentialManagement.RESULT.PUBLIC_KEY][-2]
            pubkey_y = cred[CredentialManagement.RESULT.PUBLIC_KEY][-3]
            pubkey = (pubkey_x, pubkey_y)
            pubkey_pkt = self._pubkey_packet(pubkey, created)
            fp = self._fingerprint(pubkey_pkt)
            key_id = fp[-8:]
            created_date = datetime.utcfromtimestamp(created).strftime('%Y-%m-%d %H:%M:%S')
            print("Created: {}".format(created_date))
            print("User: {}".format(username))
            print("ID: {}".format(key_id.hex().upper()))
            print("Fingerprint: {}".format(fp.hex().upper()))
            print()

    def export(self, key_id):
        """Re-derive and print the armored public key block for the resident
        key whose id (hex) is *key_id* (same layout as gen_key's output)."""
        key_id = bytes.fromhex(key_id)
        token = self.client.pin_protocol.get_pin_token(self.pin)
        pin_protocol = 1
        cm = CredentialManagement(self.ctap2, pin_protocol, token)
        meta = cm.get_metadata()
        existing = meta[CredentialManagement.RESULT.EXISTING_CRED_COUNT]
        if existing == 0:
            print("No PGP keys found")
            return
        creds = cm.enumerate_creds(sha256(RP_ID.encode('ascii')))
        for cred in creds:
            user_id = cred[CredentialManagement.RESULT.USER]['id']
            created = int.from_bytes(user_id, 'big', signed=False)
            username = cred[CredentialManagement.RESULT.USER]['name']
            cred_id = cred[CredentialManagement.RESULT.CREDENTIAL_ID]['id']
            pubkey_x = cred[CredentialManagement.RESULT.PUBLIC_KEY][-2]
            pubkey_y = cred[CredentialManagement.RESULT.PUBLIC_KEY][-3]
            pubkey = (pubkey_x, pubkey_y)
            pubkey_pkt = self._pubkey_packet(pubkey, created)
            userid_pkt = self._userid_packet(username)
            fp = self._fingerprint(pubkey_pkt)
            curr_key_id = fp[-8:]
            if curr_key_id == key_id:
                break
        else:
            print("Key not found")
            return
        # Rebuild the self-signature over key material + user id, dated with
        # the key's original creation timestamp.
        hashed_prefix = b'\x99\x00\x52' + pubkey_pkt[2:]
        hashed_prefix += b'\xb4' + struct.pack('>I', len(userid_pkt)-2) + userid_pkt[2:]
        hashed_subpkts = [SubPacket(0x21, b'\x04'+fp),
            SubPacket(0x1B, b'\x03'), # key flags
            SubPacket(0x02, struct.pack('>I', created))]
        unhashed_subpkts = [SubPacket(0x10, key_id)] # issuer
        sig_pkt = self._signature_packet_key(cred_id, hashed_prefix, hashed_subpkts, unhashed_subpkts)
        armored = self._ascii_armor(pubkey_pkt + userid_pkt + sig_pkt)
        print('-----BEGIN PGP PUBLIC KEY BLOCK-----\n\n{}-----END PGP PUBLIC KEY BLOCK-----'.format(armored))
| StarcoderdataPython |
1777537 | from django import forms
from .models import Blog
from django.contrib.auth.models import User
class SignUpForm(forms.ModelForm):
    """Registration form backed by Django's built-in User model.

    Fix: the original declared ``password = forms.CharField(...)`` inside
    ``Meta``, where the ModelForm metaclass ignores it (dead code). The
    masked input is already provided by ``Meta.widgets``.

    NOTE(review): saving this form stores the raw password; the view must
    call ``user.set_password(...)`` before saving so it gets hashed -- confirm.
    """

    class Meta:
        model = User
        # Render the model's password field with a masked input widget.
        widgets = {
            'password': forms.PasswordInput(),
        }
        fields = ['first_name', 'last_name', 'username', 'email', 'password']
class LoginForm(forms.Form):
    """Simple credentials form; the password input is masked via its widget.

    Fix: removed the dead class-level ``widgets`` dict -- plain ``forms.Form``
    has no ModelForm machinery that reads it, and the password field already
    sets ``widget=forms.PasswordInput``.
    """

    username = forms.CharField(max_length=200)
    password = forms.CharField(widget=forms.PasswordInput)
class Post(forms.ModelForm):
    """ModelForm for creating/editing Blog entries."""

    class Meta:
        model = Blog
        # timestamp is excluded because it is populated automatically.
        exclude = ['timestamp']
| StarcoderdataPython |
1626631 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServiceWafConfigurationArgs', 'ServiceWafConfiguration']
@pulumi.input_type
class ServiceWafConfigurationArgs:
def __init__(__self__, *,
waf_id: pulumi.Input[str],
allowed_http_versions: Optional[pulumi.Input[str]] = None,
allowed_methods: Optional[pulumi.Input[str]] = None,
allowed_request_content_type: Optional[pulumi.Input[str]] = None,
allowed_request_content_type_charset: Optional[pulumi.Input[str]] = None,
arg_length: Optional[pulumi.Input[int]] = None,
arg_name_length: Optional[pulumi.Input[int]] = None,
combined_file_sizes: Optional[pulumi.Input[int]] = None,
critical_anomaly_score: Optional[pulumi.Input[int]] = None,
crs_validate_utf8_encoding: Optional[pulumi.Input[bool]] = None,
error_anomaly_score: Optional[pulumi.Input[int]] = None,
high_risk_country_codes: Optional[pulumi.Input[str]] = None,
http_violation_score_threshold: Optional[pulumi.Input[int]] = None,
inbound_anomaly_score_threshold: Optional[pulumi.Input[int]] = None,
lfi_score_threshold: Optional[pulumi.Input[int]] = None,
max_file_size: Optional[pulumi.Input[int]] = None,
max_num_args: Optional[pulumi.Input[int]] = None,
notice_anomaly_score: Optional[pulumi.Input[int]] = None,
paranoia_level: Optional[pulumi.Input[int]] = None,
php_injection_score_threshold: Optional[pulumi.Input[int]] = None,
rce_score_threshold: Optional[pulumi.Input[int]] = None,
restricted_extensions: Optional[pulumi.Input[str]] = None,
restricted_headers: Optional[pulumi.Input[str]] = None,
rfi_score_threshold: Optional[pulumi.Input[int]] = None,
rule_exclusions: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleExclusionArgs']]]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleArgs']]]] = None,
session_fixation_score_threshold: Optional[pulumi.Input[int]] = None,
sql_injection_score_threshold: Optional[pulumi.Input[int]] = None,
total_arg_length: Optional[pulumi.Input[int]] = None,
warning_anomaly_score: Optional[pulumi.Input[int]] = None,
xss_score_threshold: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a ServiceWafConfiguration resource.
:param pulumi.Input[str] waf_id: The ID of the Web Application Firewall that the configuration belongs to
:param pulumi.Input[str] allowed_http_versions: Allowed HTTP versions
:param pulumi.Input[str] allowed_methods: A space-separated list of HTTP method names
:param pulumi.Input[str] allowed_request_content_type: Allowed request content types
:param pulumi.Input[str] allowed_request_content_type_charset: Allowed request content type charset
:param pulumi.Input[int] arg_length: The maximum number of arguments allowed
:param pulumi.Input[int] arg_name_length: The maximum allowed argument name length
:param pulumi.Input[int] combined_file_sizes: The maximum allowed size of all files
:param pulumi.Input[int] critical_anomaly_score: Score value to add for critical anomalies
:param pulumi.Input[bool] crs_validate_utf8_encoding: CRS validate UTF8 encoding
:param pulumi.Input[int] error_anomaly_score: Score value to add for error anomalies
:param pulumi.Input[str] high_risk_country_codes: A space-separated list of country codes in ISO 3166-1 (two-letter) format
:param pulumi.Input[int] http_violation_score_threshold: HTTP violation threshold
:param pulumi.Input[int] inbound_anomaly_score_threshold: Inbound anomaly threshold
:param pulumi.Input[int] lfi_score_threshold: Local file inclusion attack threshold
:param pulumi.Input[int] max_file_size: The maximum allowed file size, in bytes
:param pulumi.Input[int] max_num_args: The maximum number of arguments allowed
:param pulumi.Input[int] notice_anomaly_score: Score value to add for notice anomalies
:param pulumi.Input[int] paranoia_level: The configured paranoia level
:param pulumi.Input[int] php_injection_score_threshold: PHP injection threshold
:param pulumi.Input[int] rce_score_threshold: Remote code execution threshold
:param pulumi.Input[str] restricted_extensions: A space-separated list of allowed file extensions
:param pulumi.Input[str] restricted_headers: A space-separated list of allowed header names
:param pulumi.Input[int] rfi_score_threshold: Remote file inclusion attack threshold
:param pulumi.Input[int] session_fixation_score_threshold: Session fixation attack threshold
:param pulumi.Input[int] sql_injection_score_threshold: SQL injection attack threshold
:param pulumi.Input[int] total_arg_length: The maximum size of argument names and values
:param pulumi.Input[int] warning_anomaly_score: Score value to add for warning anomalies
:param pulumi.Input[int] xss_score_threshold: XSS attack threshold
"""
pulumi.set(__self__, "waf_id", waf_id)
if allowed_http_versions is not None:
pulumi.set(__self__, "allowed_http_versions", allowed_http_versions)
if allowed_methods is not None:
pulumi.set(__self__, "allowed_methods", allowed_methods)
if allowed_request_content_type is not None:
pulumi.set(__self__, "allowed_request_content_type", allowed_request_content_type)
if allowed_request_content_type_charset is not None:
pulumi.set(__self__, "allowed_request_content_type_charset", allowed_request_content_type_charset)
if arg_length is not None:
pulumi.set(__self__, "arg_length", arg_length)
if arg_name_length is not None:
pulumi.set(__self__, "arg_name_length", arg_name_length)
if combined_file_sizes is not None:
pulumi.set(__self__, "combined_file_sizes", combined_file_sizes)
if critical_anomaly_score is not None:
pulumi.set(__self__, "critical_anomaly_score", critical_anomaly_score)
if crs_validate_utf8_encoding is not None:
pulumi.set(__self__, "crs_validate_utf8_encoding", crs_validate_utf8_encoding)
if error_anomaly_score is not None:
pulumi.set(__self__, "error_anomaly_score", error_anomaly_score)
if high_risk_country_codes is not None:
pulumi.set(__self__, "high_risk_country_codes", high_risk_country_codes)
if http_violation_score_threshold is not None:
pulumi.set(__self__, "http_violation_score_threshold", http_violation_score_threshold)
if inbound_anomaly_score_threshold is not None:
pulumi.set(__self__, "inbound_anomaly_score_threshold", inbound_anomaly_score_threshold)
if lfi_score_threshold is not None:
pulumi.set(__self__, "lfi_score_threshold", lfi_score_threshold)
if max_file_size is not None:
pulumi.set(__self__, "max_file_size", max_file_size)
if max_num_args is not None:
pulumi.set(__self__, "max_num_args", max_num_args)
if notice_anomaly_score is not None:
pulumi.set(__self__, "notice_anomaly_score", notice_anomaly_score)
if paranoia_level is not None:
pulumi.set(__self__, "paranoia_level", paranoia_level)
if php_injection_score_threshold is not None:
pulumi.set(__self__, "php_injection_score_threshold", php_injection_score_threshold)
if rce_score_threshold is not None:
pulumi.set(__self__, "rce_score_threshold", rce_score_threshold)
if restricted_extensions is not None:
pulumi.set(__self__, "restricted_extensions", restricted_extensions)
if restricted_headers is not None:
pulumi.set(__self__, "restricted_headers", restricted_headers)
if rfi_score_threshold is not None:
pulumi.set(__self__, "rfi_score_threshold", rfi_score_threshold)
if rule_exclusions is not None:
pulumi.set(__self__, "rule_exclusions", rule_exclusions)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if session_fixation_score_threshold is not None:
pulumi.set(__self__, "session_fixation_score_threshold", session_fixation_score_threshold)
if sql_injection_score_threshold is not None:
pulumi.set(__self__, "sql_injection_score_threshold", sql_injection_score_threshold)
if total_arg_length is not None:
pulumi.set(__self__, "total_arg_length", total_arg_length)
if warning_anomaly_score is not None:
pulumi.set(__self__, "warning_anomaly_score", warning_anomaly_score)
if xss_score_threshold is not None:
pulumi.set(__self__, "xss_score_threshold", xss_score_threshold)
@property
@pulumi.getter(name="wafId")
def waf_id(self) -> pulumi.Input[str]:
"""
The ID of the Web Application Firewall that the configuration belongs to
"""
return pulumi.get(self, "waf_id")
@waf_id.setter
def waf_id(self, value: pulumi.Input[str]):
pulumi.set(self, "waf_id", value)
@property
@pulumi.getter(name="allowedHttpVersions")
def allowed_http_versions(self) -> Optional[pulumi.Input[str]]:
"""
Allowed HTTP versions
"""
return pulumi.get(self, "allowed_http_versions")
@allowed_http_versions.setter
def allowed_http_versions(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "allowed_http_versions", value)
@property
@pulumi.getter(name="allowedMethods")
def allowed_methods(self) -> Optional[pulumi.Input[str]]:
"""
A space-separated list of HTTP method names
"""
return pulumi.get(self, "allowed_methods")
@allowed_methods.setter
def allowed_methods(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "allowed_methods", value)
@property
@pulumi.getter(name="allowedRequestContentType")
def allowed_request_content_type(self) -> Optional[pulumi.Input[str]]:
"""
Allowed request content types
"""
return pulumi.get(self, "allowed_request_content_type")
@allowed_request_content_type.setter
def allowed_request_content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "allowed_request_content_type", value)
@property
@pulumi.getter(name="allowedRequestContentTypeCharset")
def allowed_request_content_type_charset(self) -> Optional[pulumi.Input[str]]:
"""
Allowed request content type charset
"""
return pulumi.get(self, "allowed_request_content_type_charset")
@allowed_request_content_type_charset.setter
def allowed_request_content_type_charset(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "allowed_request_content_type_charset", value)
    @property
    @pulumi.getter(name="argLength")
    def arg_length(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum allowed length of an argument.

        NOTE(review): the generated docstring previously read "The maximum
        number of arguments allowed", which duplicates ``max_num_args``; the
        Fastly WAF API documents ``arg_length`` as an argument-length limit —
        confirm against the upstream API reference.
        """
        return pulumi.get(self, "arg_length")
    @arg_length.setter
    def arg_length(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "arg_length", value)
@property
@pulumi.getter(name="argNameLength")
def arg_name_length(self) -> Optional[pulumi.Input[int]]:
"""
The maximum allowed argument name length
"""
return pulumi.get(self, "arg_name_length")
@arg_name_length.setter
def arg_name_length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "arg_name_length", value)
@property
@pulumi.getter(name="combinedFileSizes")
def combined_file_sizes(self) -> Optional[pulumi.Input[int]]:
"""
The maximum allowed size of all files
"""
return pulumi.get(self, "combined_file_sizes")
@combined_file_sizes.setter
def combined_file_sizes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "combined_file_sizes", value)
@property
@pulumi.getter(name="criticalAnomalyScore")
def critical_anomaly_score(self) -> Optional[pulumi.Input[int]]:
"""
Score value to add for critical anomalies
"""
return pulumi.get(self, "critical_anomaly_score")
@critical_anomaly_score.setter
def critical_anomaly_score(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "critical_anomaly_score", value)
@property
@pulumi.getter(name="crsValidateUtf8Encoding")
def crs_validate_utf8_encoding(self) -> Optional[pulumi.Input[bool]]:
"""
CRS validate UTF8 encoding
"""
return pulumi.get(self, "crs_validate_utf8_encoding")
@crs_validate_utf8_encoding.setter
def crs_validate_utf8_encoding(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "crs_validate_utf8_encoding", value)
@property
@pulumi.getter(name="errorAnomalyScore")
def error_anomaly_score(self) -> Optional[pulumi.Input[int]]:
"""
Score value to add for error anomalies
"""
return pulumi.get(self, "error_anomaly_score")
@error_anomaly_score.setter
def error_anomaly_score(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "error_anomaly_score", value)
@property
@pulumi.getter(name="highRiskCountryCodes")
def high_risk_country_codes(self) -> Optional[pulumi.Input[str]]:
"""
A space-separated list of country codes in ISO 3166-1 (two-letter) format
"""
return pulumi.get(self, "high_risk_country_codes")
@high_risk_country_codes.setter
def high_risk_country_codes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "high_risk_country_codes", value)
@property
@pulumi.getter(name="httpViolationScoreThreshold")
def http_violation_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
HTTP violation threshold
"""
return pulumi.get(self, "http_violation_score_threshold")
@http_violation_score_threshold.setter
def http_violation_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "http_violation_score_threshold", value)
@property
@pulumi.getter(name="inboundAnomalyScoreThreshold")
def inbound_anomaly_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Inbound anomaly threshold
"""
return pulumi.get(self, "inbound_anomaly_score_threshold")
@inbound_anomaly_score_threshold.setter
def inbound_anomaly_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "inbound_anomaly_score_threshold", value)
@property
@pulumi.getter(name="lfiScoreThreshold")
def lfi_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Local file inclusion attack threshold
"""
return pulumi.get(self, "lfi_score_threshold")
@lfi_score_threshold.setter
def lfi_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "lfi_score_threshold", value)
@property
@pulumi.getter(name="maxFileSize")
def max_file_size(self) -> Optional[pulumi.Input[int]]:
"""
The maximum allowed file size, in bytes
"""
return pulumi.get(self, "max_file_size")
@max_file_size.setter
def max_file_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_file_size", value)
@property
@pulumi.getter(name="maxNumArgs")
def max_num_args(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of arguments allowed
"""
return pulumi.get(self, "max_num_args")
@max_num_args.setter
def max_num_args(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_num_args", value)
@property
@pulumi.getter(name="noticeAnomalyScore")
def notice_anomaly_score(self) -> Optional[pulumi.Input[int]]:
"""
Score value to add for notice anomalies
"""
return pulumi.get(self, "notice_anomaly_score")
@notice_anomaly_score.setter
def notice_anomaly_score(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "notice_anomaly_score", value)
@property
@pulumi.getter(name="paranoiaLevel")
def paranoia_level(self) -> Optional[pulumi.Input[int]]:
"""
The configured paranoia level
"""
return pulumi.get(self, "paranoia_level")
@paranoia_level.setter
def paranoia_level(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "paranoia_level", value)
@property
@pulumi.getter(name="phpInjectionScoreThreshold")
def php_injection_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
PHP injection threshold
"""
return pulumi.get(self, "php_injection_score_threshold")
@php_injection_score_threshold.setter
def php_injection_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "php_injection_score_threshold", value)
@property
@pulumi.getter(name="rceScoreThreshold")
def rce_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Remote code execution threshold
"""
return pulumi.get(self, "rce_score_threshold")
@rce_score_threshold.setter
def rce_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "rce_score_threshold", value)
@property
@pulumi.getter(name="restrictedExtensions")
def restricted_extensions(self) -> Optional[pulumi.Input[str]]:
"""
A space-separated list of allowed file extensions
"""
return pulumi.get(self, "restricted_extensions")
@restricted_extensions.setter
def restricted_extensions(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restricted_extensions", value)
@property
@pulumi.getter(name="restrictedHeaders")
def restricted_headers(self) -> Optional[pulumi.Input[str]]:
"""
A space-separated list of allowed header names
"""
return pulumi.get(self, "restricted_headers")
@restricted_headers.setter
def restricted_headers(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restricted_headers", value)
@property
@pulumi.getter(name="rfiScoreThreshold")
def rfi_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Remote file inclusion attack threshold
"""
return pulumi.get(self, "rfi_score_threshold")
@rfi_score_threshold.setter
def rfi_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "rfi_score_threshold", value)
@property
@pulumi.getter(name="ruleExclusions")
def rule_exclusions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleExclusionArgs']]]]:
return pulumi.get(self, "rule_exclusions")
@rule_exclusions.setter
def rule_exclusions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleExclusionArgs']]]]):
pulumi.set(self, "rule_exclusions", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleArgs']]]]:
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter(name="sessionFixationScoreThreshold")
def session_fixation_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Session fixation attack threshold
"""
return pulumi.get(self, "session_fixation_score_threshold")
@session_fixation_score_threshold.setter
def session_fixation_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "session_fixation_score_threshold", value)
@property
@pulumi.getter(name="sqlInjectionScoreThreshold")
def sql_injection_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
SQL injection attack threshold
"""
return pulumi.get(self, "sql_injection_score_threshold")
@sql_injection_score_threshold.setter
def sql_injection_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sql_injection_score_threshold", value)
@property
@pulumi.getter(name="totalArgLength")
def total_arg_length(self) -> Optional[pulumi.Input[int]]:
"""
The maximum size of argument names and values
"""
return pulumi.get(self, "total_arg_length")
@total_arg_length.setter
def total_arg_length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "total_arg_length", value)
@property
@pulumi.getter(name="warningAnomalyScore")
def warning_anomaly_score(self) -> Optional[pulumi.Input[int]]:
"""
Score value to add for warning anomalies
"""
return pulumi.get(self, "warning_anomaly_score")
@warning_anomaly_score.setter
def warning_anomaly_score(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "warning_anomaly_score", value)
@property
@pulumi.getter(name="xssScoreThreshold")
def xss_score_threshold(self) -> Optional[pulumi.Input[int]]:
"""
XSS attack threshold
"""
return pulumi.get(self, "xss_score_threshold")
@xss_score_threshold.setter
def xss_score_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "xss_score_threshold", value)
@pulumi.input_type
class _ServiceWafConfigurationState:
    """
    State properties used for looking up and filtering ServiceWafConfiguration
    resources; every field is optional.

    NOTE(review): this class appears to be auto-generated Pulumi SDK code
    (`@pulumi.input_type` with pulumi.get/pulumi.set accessor pairs); manual
    edits are likely to be overwritten on regeneration.
    """
    def __init__(__self__, *,
                 allowed_http_versions: Optional[pulumi.Input[str]] = None,
                 allowed_methods: Optional[pulumi.Input[str]] = None,
                 allowed_request_content_type: Optional[pulumi.Input[str]] = None,
                 allowed_request_content_type_charset: Optional[pulumi.Input[str]] = None,
                 arg_length: Optional[pulumi.Input[int]] = None,
                 arg_name_length: Optional[pulumi.Input[int]] = None,
                 combined_file_sizes: Optional[pulumi.Input[int]] = None,
                 critical_anomaly_score: Optional[pulumi.Input[int]] = None,
                 crs_validate_utf8_encoding: Optional[pulumi.Input[bool]] = None,
                 error_anomaly_score: Optional[pulumi.Input[int]] = None,
                 high_risk_country_codes: Optional[pulumi.Input[str]] = None,
                 http_violation_score_threshold: Optional[pulumi.Input[int]] = None,
                 inbound_anomaly_score_threshold: Optional[pulumi.Input[int]] = None,
                 lfi_score_threshold: Optional[pulumi.Input[int]] = None,
                 max_file_size: Optional[pulumi.Input[int]] = None,
                 max_num_args: Optional[pulumi.Input[int]] = None,
                 notice_anomaly_score: Optional[pulumi.Input[int]] = None,
                 paranoia_level: Optional[pulumi.Input[int]] = None,
                 php_injection_score_threshold: Optional[pulumi.Input[int]] = None,
                 rce_score_threshold: Optional[pulumi.Input[int]] = None,
                 restricted_extensions: Optional[pulumi.Input[str]] = None,
                 restricted_headers: Optional[pulumi.Input[str]] = None,
                 rfi_score_threshold: Optional[pulumi.Input[int]] = None,
                 rule_exclusions: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleExclusionArgs']]]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleArgs']]]] = None,
                 session_fixation_score_threshold: Optional[pulumi.Input[int]] = None,
                 sql_injection_score_threshold: Optional[pulumi.Input[int]] = None,
                 total_arg_length: Optional[pulumi.Input[int]] = None,
                 waf_id: Optional[pulumi.Input[str]] = None,
                 warning_anomaly_score: Optional[pulumi.Input[int]] = None,
                 xss_score_threshold: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering ServiceWafConfiguration resources.
        :param pulumi.Input[str] allowed_http_versions: Allowed HTTP versions
        :param pulumi.Input[str] allowed_methods: A space-separated list of HTTP method names
        :param pulumi.Input[str] allowed_request_content_type: Allowed request content types
        :param pulumi.Input[str] allowed_request_content_type_charset: Allowed request content type charset
        :param pulumi.Input[int] arg_length: The maximum allowed length of an argument (NOTE(review): generated text previously said "number of arguments", duplicating max_num_args — confirm against the Fastly WAF API)
        :param pulumi.Input[int] arg_name_length: The maximum allowed argument name length
        :param pulumi.Input[int] combined_file_sizes: The maximum allowed size of all files
        :param pulumi.Input[int] critical_anomaly_score: Score value to add for critical anomalies
        :param pulumi.Input[bool] crs_validate_utf8_encoding: CRS validate UTF8 encoding
        :param pulumi.Input[int] error_anomaly_score: Score value to add for error anomalies
        :param pulumi.Input[str] high_risk_country_codes: A space-separated list of country codes in ISO 3166-1 (two-letter) format
        :param pulumi.Input[int] http_violation_score_threshold: HTTP violation threshold
        :param pulumi.Input[int] inbound_anomaly_score_threshold: Inbound anomaly threshold
        :param pulumi.Input[int] lfi_score_threshold: Local file inclusion attack threshold
        :param pulumi.Input[int] max_file_size: The maximum allowed file size, in bytes
        :param pulumi.Input[int] max_num_args: The maximum number of arguments allowed
        :param pulumi.Input[int] notice_anomaly_score: Score value to add for notice anomalies
        :param pulumi.Input[int] paranoia_level: The configured paranoia level
        :param pulumi.Input[int] php_injection_score_threshold: PHP injection threshold
        :param pulumi.Input[int] rce_score_threshold: Remote code execution threshold
        :param pulumi.Input[str] restricted_extensions: A space-separated list of allowed file extensions
        :param pulumi.Input[str] restricted_headers: A space-separated list of allowed header names
        :param pulumi.Input[int] rfi_score_threshold: Remote file inclusion attack threshold
        :param pulumi.Input[int] session_fixation_score_threshold: Session fixation attack threshold
        :param pulumi.Input[int] sql_injection_score_threshold: SQL injection attack threshold
        :param pulumi.Input[int] total_arg_length: The maximum size of argument names and values
        :param pulumi.Input[str] waf_id: The ID of the Web Application Firewall that the configuration belongs to
        :param pulumi.Input[int] warning_anomaly_score: Score value to add for warning anomalies
        :param pulumi.Input[int] xss_score_threshold: XSS attack threshold
        """
        # Only explicitly-provided (non-None) values are stored, so absent
        # state fields stay unset rather than being recorded as None.
        if allowed_http_versions is not None:
            pulumi.set(__self__, "allowed_http_versions", allowed_http_versions)
        if allowed_methods is not None:
            pulumi.set(__self__, "allowed_methods", allowed_methods)
        if allowed_request_content_type is not None:
            pulumi.set(__self__, "allowed_request_content_type", allowed_request_content_type)
        if allowed_request_content_type_charset is not None:
            pulumi.set(__self__, "allowed_request_content_type_charset", allowed_request_content_type_charset)
        if arg_length is not None:
            pulumi.set(__self__, "arg_length", arg_length)
        if arg_name_length is not None:
            pulumi.set(__self__, "arg_name_length", arg_name_length)
        if combined_file_sizes is not None:
            pulumi.set(__self__, "combined_file_sizes", combined_file_sizes)
        if critical_anomaly_score is not None:
            pulumi.set(__self__, "critical_anomaly_score", critical_anomaly_score)
        if crs_validate_utf8_encoding is not None:
            pulumi.set(__self__, "crs_validate_utf8_encoding", crs_validate_utf8_encoding)
        if error_anomaly_score is not None:
            pulumi.set(__self__, "error_anomaly_score", error_anomaly_score)
        if high_risk_country_codes is not None:
            pulumi.set(__self__, "high_risk_country_codes", high_risk_country_codes)
        if http_violation_score_threshold is not None:
            pulumi.set(__self__, "http_violation_score_threshold", http_violation_score_threshold)
        if inbound_anomaly_score_threshold is not None:
            pulumi.set(__self__, "inbound_anomaly_score_threshold", inbound_anomaly_score_threshold)
        if lfi_score_threshold is not None:
            pulumi.set(__self__, "lfi_score_threshold", lfi_score_threshold)
        if max_file_size is not None:
            pulumi.set(__self__, "max_file_size", max_file_size)
        if max_num_args is not None:
            pulumi.set(__self__, "max_num_args", max_num_args)
        if notice_anomaly_score is not None:
            pulumi.set(__self__, "notice_anomaly_score", notice_anomaly_score)
        if paranoia_level is not None:
            pulumi.set(__self__, "paranoia_level", paranoia_level)
        if php_injection_score_threshold is not None:
            pulumi.set(__self__, "php_injection_score_threshold", php_injection_score_threshold)
        if rce_score_threshold is not None:
            pulumi.set(__self__, "rce_score_threshold", rce_score_threshold)
        if restricted_extensions is not None:
            pulumi.set(__self__, "restricted_extensions", restricted_extensions)
        if restricted_headers is not None:
            pulumi.set(__self__, "restricted_headers", restricted_headers)
        if rfi_score_threshold is not None:
            pulumi.set(__self__, "rfi_score_threshold", rfi_score_threshold)
        if rule_exclusions is not None:
            pulumi.set(__self__, "rule_exclusions", rule_exclusions)
        if rules is not None:
            pulumi.set(__self__, "rules", rules)
        if session_fixation_score_threshold is not None:
            pulumi.set(__self__, "session_fixation_score_threshold", session_fixation_score_threshold)
        if sql_injection_score_threshold is not None:
            pulumi.set(__self__, "sql_injection_score_threshold", sql_injection_score_threshold)
        if total_arg_length is not None:
            pulumi.set(__self__, "total_arg_length", total_arg_length)
        if waf_id is not None:
            pulumi.set(__self__, "waf_id", waf_id)
        if warning_anomaly_score is not None:
            pulumi.set(__self__, "warning_anomaly_score", warning_anomaly_score)
        if xss_score_threshold is not None:
            pulumi.set(__self__, "xss_score_threshold", xss_score_threshold)
    # Each property below simply reads/writes the value stored by
    # pulumi.set in __init__ (one getter/setter pair per WAF setting).
    @property
    @pulumi.getter(name="allowedHttpVersions")
    def allowed_http_versions(self) -> Optional[pulumi.Input[str]]:
        """
        Allowed HTTP versions
        """
        return pulumi.get(self, "allowed_http_versions")
    @allowed_http_versions.setter
    def allowed_http_versions(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allowed_http_versions", value)
    @property
    @pulumi.getter(name="allowedMethods")
    def allowed_methods(self) -> Optional[pulumi.Input[str]]:
        """
        A space-separated list of HTTP method names
        """
        return pulumi.get(self, "allowed_methods")
    @allowed_methods.setter
    def allowed_methods(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allowed_methods", value)
    @property
    @pulumi.getter(name="allowedRequestContentType")
    def allowed_request_content_type(self) -> Optional[pulumi.Input[str]]:
        """
        Allowed request content types
        """
        return pulumi.get(self, "allowed_request_content_type")
    @allowed_request_content_type.setter
    def allowed_request_content_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allowed_request_content_type", value)
    @property
    @pulumi.getter(name="allowedRequestContentTypeCharset")
    def allowed_request_content_type_charset(self) -> Optional[pulumi.Input[str]]:
        """
        Allowed request content type charset
        """
        return pulumi.get(self, "allowed_request_content_type_charset")
    @allowed_request_content_type_charset.setter
    def allowed_request_content_type_charset(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allowed_request_content_type_charset", value)
    @property
    @pulumi.getter(name="argLength")
    def arg_length(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum allowed length of an argument.

        NOTE(review): generated doc previously duplicated ``max_num_args``
        ("The maximum number of arguments allowed") — confirm against the
        Fastly WAF API.
        """
        return pulumi.get(self, "arg_length")
    @arg_length.setter
    def arg_length(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "arg_length", value)
    @property
    @pulumi.getter(name="argNameLength")
    def arg_name_length(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum allowed argument name length
        """
        return pulumi.get(self, "arg_name_length")
    @arg_name_length.setter
    def arg_name_length(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "arg_name_length", value)
    @property
    @pulumi.getter(name="combinedFileSizes")
    def combined_file_sizes(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum allowed size of all files
        """
        return pulumi.get(self, "combined_file_sizes")
    @combined_file_sizes.setter
    def combined_file_sizes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "combined_file_sizes", value)
    @property
    @pulumi.getter(name="criticalAnomalyScore")
    def critical_anomaly_score(self) -> Optional[pulumi.Input[int]]:
        """
        Score value to add for critical anomalies
        """
        return pulumi.get(self, "critical_anomaly_score")
    @critical_anomaly_score.setter
    def critical_anomaly_score(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "critical_anomaly_score", value)
    @property
    @pulumi.getter(name="crsValidateUtf8Encoding")
    def crs_validate_utf8_encoding(self) -> Optional[pulumi.Input[bool]]:
        """
        CRS validate UTF8 encoding
        """
        return pulumi.get(self, "crs_validate_utf8_encoding")
    @crs_validate_utf8_encoding.setter
    def crs_validate_utf8_encoding(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "crs_validate_utf8_encoding", value)
    @property
    @pulumi.getter(name="errorAnomalyScore")
    def error_anomaly_score(self) -> Optional[pulumi.Input[int]]:
        """
        Score value to add for error anomalies
        """
        return pulumi.get(self, "error_anomaly_score")
    @error_anomaly_score.setter
    def error_anomaly_score(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "error_anomaly_score", value)
    @property
    @pulumi.getter(name="highRiskCountryCodes")
    def high_risk_country_codes(self) -> Optional[pulumi.Input[str]]:
        """
        A space-separated list of country codes in ISO 3166-1 (two-letter) format
        """
        return pulumi.get(self, "high_risk_country_codes")
    @high_risk_country_codes.setter
    def high_risk_country_codes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "high_risk_country_codes", value)
    @property
    @pulumi.getter(name="httpViolationScoreThreshold")
    def http_violation_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        HTTP violation threshold
        """
        return pulumi.get(self, "http_violation_score_threshold")
    @http_violation_score_threshold.setter
    def http_violation_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http_violation_score_threshold", value)
    @property
    @pulumi.getter(name="inboundAnomalyScoreThreshold")
    def inbound_anomaly_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Inbound anomaly threshold
        """
        return pulumi.get(self, "inbound_anomaly_score_threshold")
    @inbound_anomaly_score_threshold.setter
    def inbound_anomaly_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "inbound_anomaly_score_threshold", value)
    @property
    @pulumi.getter(name="lfiScoreThreshold")
    def lfi_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Local file inclusion attack threshold
        """
        return pulumi.get(self, "lfi_score_threshold")
    @lfi_score_threshold.setter
    def lfi_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "lfi_score_threshold", value)
    @property
    @pulumi.getter(name="maxFileSize")
    def max_file_size(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum allowed file size, in bytes
        """
        return pulumi.get(self, "max_file_size")
    @max_file_size.setter
    def max_file_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_file_size", value)
    @property
    @pulumi.getter(name="maxNumArgs")
    def max_num_args(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of arguments allowed
        """
        return pulumi.get(self, "max_num_args")
    @max_num_args.setter
    def max_num_args(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_num_args", value)
    @property
    @pulumi.getter(name="noticeAnomalyScore")
    def notice_anomaly_score(self) -> Optional[pulumi.Input[int]]:
        """
        Score value to add for notice anomalies
        """
        return pulumi.get(self, "notice_anomaly_score")
    @notice_anomaly_score.setter
    def notice_anomaly_score(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "notice_anomaly_score", value)
    @property
    @pulumi.getter(name="paranoiaLevel")
    def paranoia_level(self) -> Optional[pulumi.Input[int]]:
        """
        The configured paranoia level
        """
        return pulumi.get(self, "paranoia_level")
    @paranoia_level.setter
    def paranoia_level(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "paranoia_level", value)
    @property
    @pulumi.getter(name="phpInjectionScoreThreshold")
    def php_injection_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        PHP injection threshold
        """
        return pulumi.get(self, "php_injection_score_threshold")
    @php_injection_score_threshold.setter
    def php_injection_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "php_injection_score_threshold", value)
    @property
    @pulumi.getter(name="rceScoreThreshold")
    def rce_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Remote code execution threshold
        """
        return pulumi.get(self, "rce_score_threshold")
    @rce_score_threshold.setter
    def rce_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "rce_score_threshold", value)
    @property
    @pulumi.getter(name="restrictedExtensions")
    def restricted_extensions(self) -> Optional[pulumi.Input[str]]:
        """
        A space-separated list of allowed file extensions
        """
        return pulumi.get(self, "restricted_extensions")
    @restricted_extensions.setter
    def restricted_extensions(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "restricted_extensions", value)
    @property
    @pulumi.getter(name="restrictedHeaders")
    def restricted_headers(self) -> Optional[pulumi.Input[str]]:
        """
        A space-separated list of allowed header names
        """
        return pulumi.get(self, "restricted_headers")
    @restricted_headers.setter
    def restricted_headers(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "restricted_headers", value)
    @property
    @pulumi.getter(name="rfiScoreThreshold")
    def rfi_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Remote file inclusion attack threshold
        """
        return pulumi.get(self, "rfi_score_threshold")
    @rfi_score_threshold.setter
    def rfi_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "rfi_score_threshold", value)
    @property
    @pulumi.getter(name="ruleExclusions")
    def rule_exclusions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleExclusionArgs']]]]:
        """
        Rule-exclusion entries attached to this configuration.
        """
        return pulumi.get(self, "rule_exclusions")
    @rule_exclusions.setter
    def rule_exclusions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleExclusionArgs']]]]):
        pulumi.set(self, "rule_exclusions", value)
    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleArgs']]]]:
        """
        WAF rule entries attached to this configuration.
        """
        return pulumi.get(self, "rules")
    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceWafConfigurationRuleArgs']]]]):
        pulumi.set(self, "rules", value)
    @property
    @pulumi.getter(name="sessionFixationScoreThreshold")
    def session_fixation_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        Session fixation attack threshold
        """
        return pulumi.get(self, "session_fixation_score_threshold")
    @session_fixation_score_threshold.setter
    def session_fixation_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "session_fixation_score_threshold", value)
    @property
    @pulumi.getter(name="sqlInjectionScoreThreshold")
    def sql_injection_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        SQL injection attack threshold
        """
        return pulumi.get(self, "sql_injection_score_threshold")
    @sql_injection_score_threshold.setter
    def sql_injection_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "sql_injection_score_threshold", value)
    @property
    @pulumi.getter(name="totalArgLength")
    def total_arg_length(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum size of argument names and values
        """
        return pulumi.get(self, "total_arg_length")
    @total_arg_length.setter
    def total_arg_length(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "total_arg_length", value)
    @property
    @pulumi.getter(name="wafId")
    def waf_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Web Application Firewall that the configuration belongs to
        """
        return pulumi.get(self, "waf_id")
    @waf_id.setter
    def waf_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "waf_id", value)
    @property
    @pulumi.getter(name="warningAnomalyScore")
    def warning_anomaly_score(self) -> Optional[pulumi.Input[int]]:
        """
        Score value to add for warning anomalies
        """
        return pulumi.get(self, "warning_anomaly_score")
    @warning_anomaly_score.setter
    def warning_anomaly_score(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "warning_anomaly_score", value)
    @property
    @pulumi.getter(name="xssScoreThreshold")
    def xss_score_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        XSS attack threshold
        """
        return pulumi.get(self, "xss_score_threshold")
    @xss_score_threshold.setter
    def xss_score_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "xss_score_threshold", value)
class ServiceWafConfiguration(pulumi.CustomResource):
    # Auto-generated Pulumi resource wrapper. The two @overload __init__
    # signatures exist only for IDE/type-checker ergonomics; the real
    # constructor below dispatches to _internal_init either from an
    # args-object or from keyword arguments.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 allowed_http_versions: Optional[pulumi.Input[str]] = None,
                 allowed_methods: Optional[pulumi.Input[str]] = None,
                 allowed_request_content_type: Optional[pulumi.Input[str]] = None,
                 allowed_request_content_type_charset: Optional[pulumi.Input[str]] = None,
                 arg_length: Optional[pulumi.Input[int]] = None,
                 arg_name_length: Optional[pulumi.Input[int]] = None,
                 combined_file_sizes: Optional[pulumi.Input[int]] = None,
                 critical_anomaly_score: Optional[pulumi.Input[int]] = None,
                 crs_validate_utf8_encoding: Optional[pulumi.Input[bool]] = None,
                 error_anomaly_score: Optional[pulumi.Input[int]] = None,
                 high_risk_country_codes: Optional[pulumi.Input[str]] = None,
                 http_violation_score_threshold: Optional[pulumi.Input[int]] = None,
                 inbound_anomaly_score_threshold: Optional[pulumi.Input[int]] = None,
                 lfi_score_threshold: Optional[pulumi.Input[int]] = None,
                 max_file_size: Optional[pulumi.Input[int]] = None,
                 max_num_args: Optional[pulumi.Input[int]] = None,
                 notice_anomaly_score: Optional[pulumi.Input[int]] = None,
                 paranoia_level: Optional[pulumi.Input[int]] = None,
                 php_injection_score_threshold: Optional[pulumi.Input[int]] = None,
                 rce_score_threshold: Optional[pulumi.Input[int]] = None,
                 restricted_extensions: Optional[pulumi.Input[str]] = None,
                 restricted_headers: Optional[pulumi.Input[str]] = None,
                 rfi_score_threshold: Optional[pulumi.Input[int]] = None,
                 rule_exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceWafConfigurationRuleExclusionArgs']]]]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceWafConfigurationRuleArgs']]]]] = None,
                 session_fixation_score_threshold: Optional[pulumi.Input[int]] = None,
                 sql_injection_score_threshold: Optional[pulumi.Input[int]] = None,
                 total_arg_length: Optional[pulumi.Input[int]] = None,
                 waf_id: Optional[pulumi.Input[str]] = None,
                 warning_anomaly_score: Optional[pulumi.Input[int]] = None,
                 xss_score_threshold: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        Defines a set of Web Application Firewall configuration options that can be used to populate a service WAF. This resource will configure rules, thresholds and other settings for a WAF.
        > **Warning:** This provider will take precedence over any changes you make in the UI or API. Such changes are likely to be reversed if you run the provider again.
        ## Adding a WAF to an existing service
        > **Warning:** A two-phase change is required when adding a WAF to an existing service
        When adding a `waf` to an existing `Servicev1` and at the same time adding a `ServiceWafConfiguration`
        resource with `waf_id = fastly_service_v1.demo.waf[0].waf_id` might result with the in the following error:
        > fastly_service_v1.demo.waf is empty list of object
        For this scenario, it's recommended to split the changes into two distinct steps:
        1. Add the `waf` block to the `Servicev1` and apply the changes
        2. Add the `ServiceWafConfiguration` to the HCL and apply the changes
        ## Import
        This is an example of the import command being applied to the resource named `fastly_service_waf_configuration.waf` The resource ID should be the WAF ID.
        ```sh
        $ pulumi import fastly:index/serviceWafConfiguration:ServiceWafConfiguration waf xxxxxxxxxxxxxxxxxxxx
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] allowed_http_versions: Allowed HTTP versions
        :param pulumi.Input[str] allowed_methods: A space-separated list of HTTP method names
        :param pulumi.Input[str] allowed_request_content_type: Allowed request content types
        :param pulumi.Input[str] allowed_request_content_type_charset: Allowed request content type charset
        :param pulumi.Input[int] arg_length: The maximum number of arguments allowed
        :param pulumi.Input[int] arg_name_length: The maximum allowed argument name length
        :param pulumi.Input[int] combined_file_sizes: The maximum allowed size of all files
        :param pulumi.Input[int] critical_anomaly_score: Score value to add for critical anomalies
        :param pulumi.Input[bool] crs_validate_utf8_encoding: CRS validate UTF8 encoding
        :param pulumi.Input[int] error_anomaly_score: Score value to add for error anomalies
        :param pulumi.Input[str] high_risk_country_codes: A space-separated list of country codes in ISO 3166-1 (two-letter) format
        :param pulumi.Input[int] http_violation_score_threshold: HTTP violation threshold
        :param pulumi.Input[int] inbound_anomaly_score_threshold: Inbound anomaly threshold
        :param pulumi.Input[int] lfi_score_threshold: Local file inclusion attack threshold
        :param pulumi.Input[int] max_file_size: The maximum allowed file size, in bytes
        :param pulumi.Input[int] max_num_args: The maximum number of arguments allowed
        :param pulumi.Input[int] notice_anomaly_score: Score value to add for notice anomalies
        :param pulumi.Input[int] paranoia_level: The configured paranoia level
        :param pulumi.Input[int] php_injection_score_threshold: PHP injection threshold
        :param pulumi.Input[int] rce_score_threshold: Remote code execution threshold
        :param pulumi.Input[str] restricted_extensions: A space-separated list of allowed file extensions
        :param pulumi.Input[str] restricted_headers: A space-separated list of allowed header names
        :param pulumi.Input[int] rfi_score_threshold: Remote file inclusion attack threshold
        :param pulumi.Input[int] session_fixation_score_threshold: Session fixation attack threshold
        :param pulumi.Input[int] sql_injection_score_threshold: SQL injection attack threshold
        :param pulumi.Input[int] total_arg_length: The maximum size of argument names and values
        :param pulumi.Input[str] waf_id: The ID of the Web Application Firewall that the configuration belongs to
        :param pulumi.Input[int] warning_anomaly_score: Score value to add for warning anomalies
        :param pulumi.Input[int] xss_score_threshold: XSS attack threshold
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ServiceWafConfigurationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Defines a set of Web Application Firewall configuration options that can be used to populate a service WAF. This resource will configure rules, thresholds and other settings for a WAF.
        > **Warning:** This provider will take precedence over any changes you make in the UI or API. Such changes are likely to be reversed if you run the provider again.
        ## Adding a WAF to an existing service
        > **Warning:** A two-phase change is required when adding a WAF to an existing service
        When adding a `waf` to an existing `Servicev1` and at the same time adding a `ServiceWafConfiguration`
        resource with `waf_id = fastly_service_v1.demo.waf[0].waf_id` might result with the in the following error:
        > fastly_service_v1.demo.waf is empty list of object
        For this scenario, it's recommended to split the changes into two distinct steps:
        1. Add the `waf` block to the `Servicev1` and apply the changes
        2. Add the `ServiceWafConfiguration` to the HCL and apply the changes
        ## Import
        This is an example of the import command being applied to the resource named `fastly_service_waf_configuration.waf` The resource ID should be the WAF ID.
        ```sh
        $ pulumi import fastly:index/serviceWafConfiguration:ServiceWafConfiguration waf xxxxxxxxxxxxxxxxxxxx
        ```
        :param str resource_name: The name of the resource.
        :param ServiceWafConfigurationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime constructor: decide which overload the caller used and
        # forward everything to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ServiceWafConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       allowed_http_versions: Optional[pulumi.Input[str]] = None,
                       allowed_methods: Optional[pulumi.Input[str]] = None,
                       allowed_request_content_type: Optional[pulumi.Input[str]] = None,
                       allowed_request_content_type_charset: Optional[pulumi.Input[str]] = None,
                       arg_length: Optional[pulumi.Input[int]] = None,
                       arg_name_length: Optional[pulumi.Input[int]] = None,
                       combined_file_sizes: Optional[pulumi.Input[int]] = None,
                       critical_anomaly_score: Optional[pulumi.Input[int]] = None,
                       crs_validate_utf8_encoding: Optional[pulumi.Input[bool]] = None,
                       error_anomaly_score: Optional[pulumi.Input[int]] = None,
                       high_risk_country_codes: Optional[pulumi.Input[str]] = None,
                       http_violation_score_threshold: Optional[pulumi.Input[int]] = None,
                       inbound_anomaly_score_threshold: Optional[pulumi.Input[int]] = None,
                       lfi_score_threshold: Optional[pulumi.Input[int]] = None,
                       max_file_size: Optional[pulumi.Input[int]] = None,
                       max_num_args: Optional[pulumi.Input[int]] = None,
                       notice_anomaly_score: Optional[pulumi.Input[int]] = None,
                       paranoia_level: Optional[pulumi.Input[int]] = None,
                       php_injection_score_threshold: Optional[pulumi.Input[int]] = None,
                       rce_score_threshold: Optional[pulumi.Input[int]] = None,
                       restricted_extensions: Optional[pulumi.Input[str]] = None,
                       restricted_headers: Optional[pulumi.Input[str]] = None,
                       rfi_score_threshold: Optional[pulumi.Input[int]] = None,
                       rule_exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceWafConfigurationRuleExclusionArgs']]]]] = None,
                       rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceWafConfigurationRuleArgs']]]]] = None,
                       session_fixation_score_threshold: Optional[pulumi.Input[int]] = None,
                       sql_injection_score_threshold: Optional[pulumi.Input[int]] = None,
                       total_arg_length: Optional[pulumi.Input[int]] = None,
                       waf_id: Optional[pulumi.Input[str]] = None,
                       warning_anomaly_score: Optional[pulumi.Input[int]] = None,
                       xss_score_threshold: Optional[pulumi.Input[int]] = None,
                       __props__=None):
        # Normalise/validate resource options before registering the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource": __props__ is supplied
        # by get() in that case and must not be combined with a fresh create.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ServiceWafConfigurationArgs.__new__(ServiceWafConfigurationArgs)
            __props__.__dict__["allowed_http_versions"] = allowed_http_versions
            __props__.__dict__["allowed_methods"] = allowed_methods
            __props__.__dict__["allowed_request_content_type"] = allowed_request_content_type
            __props__.__dict__["allowed_request_content_type_charset"] = allowed_request_content_type_charset
            __props__.__dict__["arg_length"] = arg_length
            __props__.__dict__["arg_name_length"] = arg_name_length
            __props__.__dict__["combined_file_sizes"] = combined_file_sizes
            __props__.__dict__["critical_anomaly_score"] = critical_anomaly_score
            __props__.__dict__["crs_validate_utf8_encoding"] = crs_validate_utf8_encoding
            __props__.__dict__["error_anomaly_score"] = error_anomaly_score
            __props__.__dict__["high_risk_country_codes"] = high_risk_country_codes
            __props__.__dict__["http_violation_score_threshold"] = http_violation_score_threshold
            __props__.__dict__["inbound_anomaly_score_threshold"] = inbound_anomaly_score_threshold
            __props__.__dict__["lfi_score_threshold"] = lfi_score_threshold
            __props__.__dict__["max_file_size"] = max_file_size
            __props__.__dict__["max_num_args"] = max_num_args
            __props__.__dict__["notice_anomaly_score"] = notice_anomaly_score
            __props__.__dict__["paranoia_level"] = paranoia_level
            __props__.__dict__["php_injection_score_threshold"] = php_injection_score_threshold
            __props__.__dict__["rce_score_threshold"] = rce_score_threshold
            __props__.__dict__["restricted_extensions"] = restricted_extensions
            __props__.__dict__["restricted_headers"] = restricted_headers
            __props__.__dict__["rfi_score_threshold"] = rfi_score_threshold
            __props__.__dict__["rule_exclusions"] = rule_exclusions
            __props__.__dict__["rules"] = rules
            __props__.__dict__["session_fixation_score_threshold"] = session_fixation_score_threshold
            __props__.__dict__["sql_injection_score_threshold"] = sql_injection_score_threshold
            __props__.__dict__["total_arg_length"] = total_arg_length
            # waf_id is the only required input; an URN-based lookup is exempt.
            if waf_id is None and not opts.urn:
                raise TypeError("Missing required property 'waf_id'")
            __props__.__dict__["waf_id"] = waf_id
            __props__.__dict__["warning_anomaly_score"] = warning_anomaly_score
            __props__.__dict__["xss_score_threshold"] = xss_score_threshold
        super(ServiceWafConfiguration, __self__).__init__(
            'fastly:index/serviceWafConfiguration:ServiceWafConfiguration',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            allowed_http_versions: Optional[pulumi.Input[str]] = None,
            allowed_methods: Optional[pulumi.Input[str]] = None,
            allowed_request_content_type: Optional[pulumi.Input[str]] = None,
            allowed_request_content_type_charset: Optional[pulumi.Input[str]] = None,
            arg_length: Optional[pulumi.Input[int]] = None,
            arg_name_length: Optional[pulumi.Input[int]] = None,
            combined_file_sizes: Optional[pulumi.Input[int]] = None,
            critical_anomaly_score: Optional[pulumi.Input[int]] = None,
            crs_validate_utf8_encoding: Optional[pulumi.Input[bool]] = None,
            error_anomaly_score: Optional[pulumi.Input[int]] = None,
            high_risk_country_codes: Optional[pulumi.Input[str]] = None,
            http_violation_score_threshold: Optional[pulumi.Input[int]] = None,
            inbound_anomaly_score_threshold: Optional[pulumi.Input[int]] = None,
            lfi_score_threshold: Optional[pulumi.Input[int]] = None,
            max_file_size: Optional[pulumi.Input[int]] = None,
            max_num_args: Optional[pulumi.Input[int]] = None,
            notice_anomaly_score: Optional[pulumi.Input[int]] = None,
            paranoia_level: Optional[pulumi.Input[int]] = None,
            php_injection_score_threshold: Optional[pulumi.Input[int]] = None,
            rce_score_threshold: Optional[pulumi.Input[int]] = None,
            restricted_extensions: Optional[pulumi.Input[str]] = None,
            restricted_headers: Optional[pulumi.Input[str]] = None,
            rfi_score_threshold: Optional[pulumi.Input[int]] = None,
            rule_exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceWafConfigurationRuleExclusionArgs']]]]] = None,
            rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceWafConfigurationRuleArgs']]]]] = None,
            session_fixation_score_threshold: Optional[pulumi.Input[int]] = None,
            sql_injection_score_threshold: Optional[pulumi.Input[int]] = None,
            total_arg_length: Optional[pulumi.Input[int]] = None,
            waf_id: Optional[pulumi.Input[str]] = None,
            warning_anomaly_score: Optional[pulumi.Input[int]] = None,
            xss_score_threshold: Optional[pulumi.Input[int]] = None) -> 'ServiceWafConfiguration':
        """
        Get an existing ServiceWafConfiguration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] allowed_http_versions: Allowed HTTP versions
        :param pulumi.Input[str] allowed_methods: A space-separated list of HTTP method names
        :param pulumi.Input[str] allowed_request_content_type: Allowed request content types
        :param pulumi.Input[str] allowed_request_content_type_charset: Allowed request content type charset
        :param pulumi.Input[int] arg_length: The maximum number of arguments allowed
        :param pulumi.Input[int] arg_name_length: The maximum allowed argument name length
        :param pulumi.Input[int] combined_file_sizes: The maximum allowed size of all files
        :param pulumi.Input[int] critical_anomaly_score: Score value to add for critical anomalies
        :param pulumi.Input[bool] crs_validate_utf8_encoding: CRS validate UTF8 encoding
        :param pulumi.Input[int] error_anomaly_score: Score value to add for error anomalies
        :param pulumi.Input[str] high_risk_country_codes: A space-separated list of country codes in ISO 3166-1 (two-letter) format
        :param pulumi.Input[int] http_violation_score_threshold: HTTP violation threshold
        :param pulumi.Input[int] inbound_anomaly_score_threshold: Inbound anomaly threshold
        :param pulumi.Input[int] lfi_score_threshold: Local file inclusion attack threshold
        :param pulumi.Input[int] max_file_size: The maximum allowed file size, in bytes
        :param pulumi.Input[int] max_num_args: The maximum number of arguments allowed
        :param pulumi.Input[int] notice_anomaly_score: Score value to add for notice anomalies
        :param pulumi.Input[int] paranoia_level: The configured paranoia level
        :param pulumi.Input[int] php_injection_score_threshold: PHP injection threshold
        :param pulumi.Input[int] rce_score_threshold: Remote code execution threshold
        :param pulumi.Input[str] restricted_extensions: A space-separated list of allowed file extensions
        :param pulumi.Input[str] restricted_headers: A space-separated list of allowed header names
        :param pulumi.Input[int] rfi_score_threshold: Remote file inclusion attack threshold
        :param pulumi.Input[int] session_fixation_score_threshold: Session fixation attack threshold
        :param pulumi.Input[int] sql_injection_score_threshold: SQL injection attack threshold
        :param pulumi.Input[int] total_arg_length: The maximum size of argument names and values
        :param pulumi.Input[str] waf_id: The ID of the Web Application Firewall that the configuration belongs to
        :param pulumi.Input[int] warning_anomaly_score: Score value to add for warning anomalies
        :param pulumi.Input[int] xss_score_threshold: XSS attack threshold
        """
        # Adopt an existing resource: merge the provider id into opts and build
        # a state bag that _internal_init will pass straight through.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ServiceWafConfigurationState.__new__(_ServiceWafConfigurationState)
        __props__.__dict__["allowed_http_versions"] = allowed_http_versions
        __props__.__dict__["allowed_methods"] = allowed_methods
        __props__.__dict__["allowed_request_content_type"] = allowed_request_content_type
        __props__.__dict__["allowed_request_content_type_charset"] = allowed_request_content_type_charset
        __props__.__dict__["arg_length"] = arg_length
        __props__.__dict__["arg_name_length"] = arg_name_length
        __props__.__dict__["combined_file_sizes"] = combined_file_sizes
        __props__.__dict__["critical_anomaly_score"] = critical_anomaly_score
        __props__.__dict__["crs_validate_utf8_encoding"] = crs_validate_utf8_encoding
        __props__.__dict__["error_anomaly_score"] = error_anomaly_score
        __props__.__dict__["high_risk_country_codes"] = high_risk_country_codes
        __props__.__dict__["http_violation_score_threshold"] = http_violation_score_threshold
        __props__.__dict__["inbound_anomaly_score_threshold"] = inbound_anomaly_score_threshold
        __props__.__dict__["lfi_score_threshold"] = lfi_score_threshold
        __props__.__dict__["max_file_size"] = max_file_size
        __props__.__dict__["max_num_args"] = max_num_args
        __props__.__dict__["notice_anomaly_score"] = notice_anomaly_score
        __props__.__dict__["paranoia_level"] = paranoia_level
        __props__.__dict__["php_injection_score_threshold"] = php_injection_score_threshold
        __props__.__dict__["rce_score_threshold"] = rce_score_threshold
        __props__.__dict__["restricted_extensions"] = restricted_extensions
        __props__.__dict__["restricted_headers"] = restricted_headers
        __props__.__dict__["rfi_score_threshold"] = rfi_score_threshold
        __props__.__dict__["rule_exclusions"] = rule_exclusions
        __props__.__dict__["rules"] = rules
        __props__.__dict__["session_fixation_score_threshold"] = session_fixation_score_threshold
        __props__.__dict__["sql_injection_score_threshold"] = sql_injection_score_threshold
        __props__.__dict__["total_arg_length"] = total_arg_length
        __props__.__dict__["waf_id"] = waf_id
        __props__.__dict__["warning_anomaly_score"] = warning_anomaly_score
        __props__.__dict__["xss_score_threshold"] = xss_score_threshold
        return ServiceWafConfiguration(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="allowedHttpVersions")
    def allowed_http_versions(self) -> pulumi.Output[str]:
        """
        Allowed HTTP versions
        """
        return pulumi.get(self, "allowed_http_versions")
    @property
    @pulumi.getter(name="allowedMethods")
    def allowed_methods(self) -> pulumi.Output[str]:
        """
        A space-separated list of HTTP method names
        """
        return pulumi.get(self, "allowed_methods")
    @property
    @pulumi.getter(name="allowedRequestContentType")
    def allowed_request_content_type(self) -> pulumi.Output[str]:
        """
        Allowed request content types
        """
        return pulumi.get(self, "allowed_request_content_type")
    @property
    @pulumi.getter(name="allowedRequestContentTypeCharset")
    def allowed_request_content_type_charset(self) -> pulumi.Output[str]:
        """
        Allowed request content type charset
        """
        return pulumi.get(self, "allowed_request_content_type_charset")
    @property
    @pulumi.getter(name="argLength")
    def arg_length(self) -> pulumi.Output[int]:
        """
        The maximum number of arguments allowed
        """
        return pulumi.get(self, "arg_length")
    @property
    @pulumi.getter(name="argNameLength")
    def arg_name_length(self) -> pulumi.Output[int]:
        """
        The maximum allowed argument name length
        """
        return pulumi.get(self, "arg_name_length")
    @property
    @pulumi.getter(name="combinedFileSizes")
    def combined_file_sizes(self) -> pulumi.Output[int]:
        """
        The maximum allowed size of all files
        """
        return pulumi.get(self, "combined_file_sizes")
    @property
    @pulumi.getter(name="criticalAnomalyScore")
    def critical_anomaly_score(self) -> pulumi.Output[int]:
        """
        Score value to add for critical anomalies
        """
        return pulumi.get(self, "critical_anomaly_score")
    @property
    @pulumi.getter(name="crsValidateUtf8Encoding")
    def crs_validate_utf8_encoding(self) -> pulumi.Output[bool]:
        """
        CRS validate UTF8 encoding
        """
        return pulumi.get(self, "crs_validate_utf8_encoding")
    @property
    @pulumi.getter(name="errorAnomalyScore")
    def error_anomaly_score(self) -> pulumi.Output[int]:
        """
        Score value to add for error anomalies
        """
        return pulumi.get(self, "error_anomaly_score")
    @property
    @pulumi.getter(name="highRiskCountryCodes")
    def high_risk_country_codes(self) -> pulumi.Output[str]:
        """
        A space-separated list of country codes in ISO 3166-1 (two-letter) format
        """
        return pulumi.get(self, "high_risk_country_codes")
    @property
    @pulumi.getter(name="httpViolationScoreThreshold")
    def http_violation_score_threshold(self) -> pulumi.Output[int]:
        """
        HTTP violation threshold
        """
        return pulumi.get(self, "http_violation_score_threshold")
    @property
    @pulumi.getter(name="inboundAnomalyScoreThreshold")
    def inbound_anomaly_score_threshold(self) -> pulumi.Output[int]:
        """
        Inbound anomaly threshold
        """
        return pulumi.get(self, "inbound_anomaly_score_threshold")
    @property
    @pulumi.getter(name="lfiScoreThreshold")
    def lfi_score_threshold(self) -> pulumi.Output[int]:
        """
        Local file inclusion attack threshold
        """
        return pulumi.get(self, "lfi_score_threshold")
    @property
    @pulumi.getter(name="maxFileSize")
    def max_file_size(self) -> pulumi.Output[int]:
        """
        The maximum allowed file size, in bytes
        """
        return pulumi.get(self, "max_file_size")
    @property
    @pulumi.getter(name="maxNumArgs")
    def max_num_args(self) -> pulumi.Output[int]:
        """
        The maximum number of arguments allowed
        """
        return pulumi.get(self, "max_num_args")
    @property
    @pulumi.getter(name="noticeAnomalyScore")
    def notice_anomaly_score(self) -> pulumi.Output[int]:
        """
        Score value to add for notice anomalies
        """
        return pulumi.get(self, "notice_anomaly_score")
    @property
    @pulumi.getter(name="paranoiaLevel")
    def paranoia_level(self) -> pulumi.Output[int]:
        """
        The configured paranoia level
        """
        return pulumi.get(self, "paranoia_level")
    @property
    @pulumi.getter(name="phpInjectionScoreThreshold")
    def php_injection_score_threshold(self) -> pulumi.Output[int]:
        """
        PHP injection threshold
        """
        return pulumi.get(self, "php_injection_score_threshold")
    @property
    @pulumi.getter(name="rceScoreThreshold")
    def rce_score_threshold(self) -> pulumi.Output[int]:
        """
        Remote code execution threshold
        """
        return pulumi.get(self, "rce_score_threshold")
    @property
    @pulumi.getter(name="restrictedExtensions")
    def restricted_extensions(self) -> pulumi.Output[str]:
        """
        A space-separated list of allowed file extensions
        """
        return pulumi.get(self, "restricted_extensions")
    @property
    @pulumi.getter(name="restrictedHeaders")
    def restricted_headers(self) -> pulumi.Output[str]:
        """
        A space-separated list of allowed header names
        """
        return pulumi.get(self, "restricted_headers")
    @property
    @pulumi.getter(name="rfiScoreThreshold")
    def rfi_score_threshold(self) -> pulumi.Output[int]:
        """
        Remote file inclusion attack threshold
        """
        return pulumi.get(self, "rfi_score_threshold")
    @property
    @pulumi.getter(name="ruleExclusions")
    def rule_exclusions(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceWafConfigurationRuleExclusion']]]:
        # The WAF rule exclusions applied by this configuration.
        return pulumi.get(self, "rule_exclusions")
    @property
    @pulumi.getter
    def rules(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceWafConfigurationRule']]]:
        # The WAF rules applied by this configuration.
        return pulumi.get(self, "rules")
    @property
    @pulumi.getter(name="sessionFixationScoreThreshold")
    def session_fixation_score_threshold(self) -> pulumi.Output[int]:
        """
        Session fixation attack threshold
        """
        return pulumi.get(self, "session_fixation_score_threshold")
    @property
    @pulumi.getter(name="sqlInjectionScoreThreshold")
    def sql_injection_score_threshold(self) -> pulumi.Output[int]:
        """
        SQL injection attack threshold
        """
        return pulumi.get(self, "sql_injection_score_threshold")
    @property
    @pulumi.getter(name="totalArgLength")
    def total_arg_length(self) -> pulumi.Output[int]:
        """
        The maximum size of argument names and values
        """
        return pulumi.get(self, "total_arg_length")
    @property
    @pulumi.getter(name="wafId")
    def waf_id(self) -> pulumi.Output[str]:
        """
        The ID of the Web Application Firewall that the configuration belongs to
        """
        return pulumi.get(self, "waf_id")
    @property
    @pulumi.getter(name="warningAnomalyScore")
    def warning_anomaly_score(self) -> pulumi.Output[int]:
        """
        Score value to add for warning anomalies
        """
        return pulumi.get(self, "warning_anomaly_score")
    @property
    @pulumi.getter(name="xssScoreThreshold")
    def xss_score_threshold(self) -> pulumi.Output[int]:
        """
        XSS attack threshold
        """
        return pulumi.get(self, "xss_score_threshold")
| StarcoderdataPython |
3371894 | import pathlib
import os
import logging
from timeit import default_timer as timer
from decimal import Decimal
from rich import print as rprint
from rich.text import Text
from rich.console import Console
import subprocess
# Module-wide logging: every helper below logs through this logger to ../app.log.
log_format = '%(asctime)s %(filename)s: %(message)s'
logging.basicConfig(filename='../app.log', level=logging.DEBUG, format=log_format, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
# Start/end timestamps (seconds) used by the Timed() toggle timer below.
start:float = 0.0
end:float = 0.0
def splice_file_token(filename:str, key:str, new_value:str):
    '''
    Replaces the value assigned to ``key`` with ``new_value`` (double-quoted)
    in the file ``filename``, editing the file in place.

    Only lines that are real assignments to ``key`` (``key = ...``) are
    rewritten. The previous substring test (``key in line``) also clobbered
    comment lines and longer identifiers that merely contained ``key``.

    :param filename: path of the file to edit in place
    :param key: assignment key to match at the start of a line
    :param new_value: replacement value, written as ``key = "new_value"``
    '''
    # getLogger(__name__) returns the same module logger as the global above.
    logging.getLogger(__name__).debug(f"Splicing file: {locals()}")
    with open(filename, 'r') as fil:
        lines = fil.readlines()
    for i, line in enumerate(lines):
        stripped = line.strip()
        # Match only genuine `key = ...` assignments, not arbitrary occurrences.
        if stripped.startswith(key) and stripped[len(key):].lstrip().startswith('='):
            lines[i] = f"{key} = \"{new_value}\"\n"
    with open(filename, 'w') as fil:
        fil.writelines(lines)
def splice_file_list(filename:str, key:str, new_value:list):
    '''
    Replaces the value assigned to ``key`` with the list ``new_value`` in the
    file ``filename``, editing the file in place.

    Only lines that are real assignments to ``key`` (``key = ...``) are
    rewritten. The previous substring test (``key in line``) also clobbered
    comment lines and longer identifiers that merely contained ``key``.

    :param filename: path of the file to edit in place
    :param key: assignment key to match at the start of a line
    :param new_value: replacement list; rendered with double quotes
    '''
    # getLogger(__name__) returns the same module logger as the global above.
    logging.getLogger(__name__).debug(f"Splicing file: {locals()}")
    with open(filename, 'r') as fil:
        lines = fil.readlines()
    for i, line in enumerate(lines):
        stripped = line.strip()
        if stripped.startswith(key) and stripped[len(key):].lstrip().startswith('='):
            # repr() of a Python list uses single quotes; emit double quotes so
            # the output stays valid HCL/JSON-style syntax.
            lines[i] = f"{key} = {new_value}\n".replace("'", '"')
    with open(filename, 'w') as fil:
        fil.writelines(lines)
def do_replace_tokens(filename:str="", key:str="cluster_name", new_value:str=""):
    '''
    Convenience wrapper around splice_file_token().

    The previous version called splice_file_token() with only two arguments,
    which raised TypeError on every invocation (the required ``new_value``
    was missing). Parameters default to the old hard-coded values so existing
    zero-argument call sites keep the same signature.

    :param filename: file to splice (empty string was the old hard-coded value)
    :param key: assignment key to replace
    :param new_value: replacement value
    '''
    splice_file_token(filename, key, new_value)
def create_file_from_template(template:str, filename:str, tokens:dict):
    '''
    Creates ``filename`` from ``template``, replacing every occurrence of each
    key in ``tokens`` with its mapped value.

    :param template: path of the template file to read
    :param filename: path of the output file to write
    :param tokens: mapping of placeholder -> replacement text
    '''
    # Log the real destination (the old message printed the literal "(unknown)").
    logging.getLogger(__name__).debug(f"Creating {filename} from {template}")
    with open(template, 'r') as file_in:
        lines = file_in.readlines()
    for i in range(len(lines)):
        for key, value in tokens.items():
            # str.replace is a no-op when `key` is absent, so no pre-check is
            # needed; the old code tested the *original* line while replacing
            # in the already-modified one, which could miss later tokens.
            lines[i] = lines[i].replace(key, value)
    with open(filename, 'w') as file_out:
        file_out.writelines(lines)
def run_process(args:list, read_output:bool=False, cwd:str="", shell:bool=False) -> str:
    '''
    Runs a subprocess and optionally returns its captured stdout.

    :param args: argv list for the child process
    :param read_output: capture and return stdout (previously this flag was
                        silently ignored when no cwd was given)
    :param cwd: working directory; when set, output is always captured
    :param shell: run through the shell (honoured only on the cwd branch, as
                  in the original call contract)
    :return: the child's stdout, or None when output is not captured
    '''
    logging.getLogger(__name__).debug(f"Running process: {locals()}")
    if cwd.strip() == "":
        # Honour read_output here; the old code always passed
        # capture_output=False and returned None regardless.
        result = subprocess.run(args, capture_output=read_output, text=True)
        return result.stdout if read_output else None
    else:
        result = subprocess.run(args, capture_output=True, text=True, cwd=cwd, shell=shell)
        return result.stdout
def run_processes_piped(in_args:list, out_args:list, cwd:str="", encoding:str=""):
    '''
    Simulates the process of running

    $ in_args | out_args

    :param in_args: argv list for the producer process
    :param out_args: command for the consumer process (run with shell=True)
    :param cwd: working directory for both processes; defaults to '.'
    :param encoding: output encoding; previously ignored, now honoured with a
                     UTF-8 fallback for the old default of ""
    :return: stdout of the consumer with all newlines stripped
    '''
    logging.getLogger(__name__).debug(f"Running piped processes: {locals()}")
    if cwd.strip() == "":
        cwd = '.'
    p1 = subprocess.Popen(in_args, stdout=subprocess.PIPE, cwd=cwd)
    try:
        p2 = subprocess.run(out_args, stdin=p1.stdout, cwd=cwd, capture_output=True,
                            encoding=encoding or 'UTF-8', shell=True)
    finally:
        # Close our end of the pipe and reap p1: the old code leaked the file
        # descriptor and left a zombie process behind.
        p1.stdout.close()
        p1.wait()
    return f"{p2.stdout}".replace('\n','')
if __name__ == '__main__':
    #splice_file_token("test.txt", "cluster_name", "cluster_name = \"reuben\"")
    # Smoke check when run directly: show the directory we are operating from.
    print(pathlib.Path().resolve())
###
# Task Timer
def Timed():
    '''
    Toggle-style task timer: the first call records a start timestamp; the
    second call prints the elapsed time (to 5 decimal places) and re-arms
    the timer for the next measurement.
    '''
    global start
    global end
    if start != float(0.0):
        # Second call: stop, report, and reset for reuse.
        end = timer()
        elapsed = round(Decimal(end - start), 5)
        print(f"\nCompleted in {elapsed} seconds.")
        start = 0.0
    else:
        # First call: arm the timer.
        start = timer()
def _cout(text: Text):
    '''
    Renders the given Rich Text object on a fresh Rich Console.
    '''
    Console().print(text)
def cout_error(text: str):
    '''
    Outputs an error message in bold red and returns the rendered Text
    object so callers can reuse or inspect it.

    :param text: message to display
    :return: the styled rich Text instance
    '''
    rtext = Text(text)
    rtext.stylize("bold red")
    _cout(rtext)
    return rtext
def cout_error_and_exit(text: str, exit_code:int=-1):
    '''
    Outputs an error message in red, then terminates the process.

    Raises SystemExit directly instead of calling the interactive-only
    ``exit()`` helper, which is injected by the ``site`` module and is
    absent when Python runs with ``-S``. Callers observe the same
    SystemExit either way.

    :param text: message to display before exiting
    :param exit_code: process exit status (default -1)
    '''
    cout_error(text)
    raise SystemExit(exit_code)
def cout_success(text: str):
    '''
    Outputs a success message in bold green and returns the rendered Text
    object so callers can reuse or inspect it.

    :param text: message to display
    :return: the styled rich Text instance
    '''
    rtext = Text(text)
    rtext.stylize("bold green")
    _cout(rtext)
    return rtext
1737256 | import os
import subprocess
import sys
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from unittest.mock import patch
from configurations.importer import ConfigurationImporter
# Repository root: two directory levels up from this test module.
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
# Bundled Django test project living alongside the test suite.
TEST_PROJECT_DIR = os.path.join(ROOT_DIR, 'test_project')
class MainTests(TestCase):
def test_simple(self):
from tests.settings import main
self.assertEqual(main.ATTRIBUTE_SETTING, True)
self.assertEqual(main.PROPERTY_SETTING, 1)
self.assertEqual(main.METHOD_SETTING, 2)
self.assertEqual(main.LAMBDA_SETTING, 3)
self.assertNotEqual(main.PRISTINE_LAMBDA_SETTING, 4)
self.assertTrue(lambda: callable(main.PRISTINE_LAMBDA_SETTING))
self.assertNotEqual(main.PRISTINE_FUNCTION_SETTING, 5)
self.assertTrue(lambda: callable(main.PRISTINE_FUNCTION_SETTING))
self.assertEqual(main.ALLOWED_HOSTS, ['base'])
self.assertEqual(main.PRE_SETUP_TEST_SETTING, 6)
self.assertRaises(AttributeError, lambda: main.POST_SETUP_TEST_SETTING)
self.assertEqual(main.Test.POST_SETUP_TEST_SETTING, 7)
def test_global_arrival(self):
from django.conf import settings
self.assertEqual(settings.PROPERTY_SETTING, 1)
self.assertRaises(AttributeError, lambda: settings._PRIVATE_SETTING)
self.assertNotEqual(settings.PRISTINE_LAMBDA_SETTING, 4)
self.assertTrue(lambda: callable(settings.PRISTINE_LAMBDA_SETTING))
self.assertNotEqual(settings.PRISTINE_FUNCTION_SETTING, 5)
self.assertTrue(lambda: callable(settings.PRISTINE_FUNCTION_SETTING))
self.assertEqual(settings.PRE_SETUP_TEST_SETTING, 6)
    @patch.dict(os.environ, clear=True, DJANGO_CONFIGURATION='Test')
    def test_empty_module_var(self):
        # DJANGO_SETTINGS_MODULE missing from the environment must be rejected.
        self.assertRaises(ImproperlyConfigured, ConfigurationImporter)
    @patch.dict(os.environ, clear=True,
                DJANGO_SETTINGS_MODULE='tests.settings.main')
    def test_empty_class_var(self):
        # DJANGO_CONFIGURATION missing from the environment must be rejected.
        self.assertRaises(ImproperlyConfigured, ConfigurationImporter)
    def test_global_settings(self):
        # The base Configuration class exposes Django's global defaults and a
        # stable repr.
        from configurations.base import Configuration
        self.assertIn('dictConfig', Configuration.LOGGING_CONFIG)
        self.assertEqual(repr(Configuration),
                         "<Configuration 'configurations.base.Configuration'>")
    def test_repr(self):
        # A user-defined configuration reprs with its full dotted path.
        from tests.settings.main import Test
        self.assertEqual(repr(Test),
                         "<Configuration 'tests.settings.main.Test'>")
    @patch.dict(os.environ, clear=True,
                DJANGO_SETTINGS_MODULE='tests.settings.main',
                DJANGO_CONFIGURATION='Test')
    def test_initialization(self):
        # The importer picks up module and class name from the environment.
        importer = ConfigurationImporter()
        self.assertEqual(importer.module, 'tests.settings.main')
        self.assertEqual(importer.name, 'Test')
        self.assertEqual(
            repr(importer),
            "<ConfigurationImporter for 'tests.settings.main.Test'>")
@patch.dict(os.environ, clear=True,
DJANGO_SETTINGS_MODULE='tests.settings.inheritance',
DJANGO_CONFIGURATION='Inheritance')
def test_initialization_inheritance(self):
importer = ConfigurationImporter()
self.assertEqual(importer.module,
'tests.settings.inheritance')
self.assertEqual(importer.name, 'Inheritance')
@patch.dict(os.environ, clear=True,
DJANGO_SETTINGS_MODULE='tests.settings.main',
DJANGO_CONFIGURATION='NonExisting')
@patch.object(sys, 'argv', ['python', 'manage.py', 'test',
'--settings=tests.settings.main',
'--configuration=Test'])
def test_configuration_option(self):
importer = ConfigurationImporter(check_options=False)
self.assertEqual(importer.module, 'tests.settings.main')
self.assertEqual(importer.name, 'NonExisting')
importer = ConfigurationImporter(check_options=True)
self.assertEqual(importer.module, 'tests.settings.main')
self.assertEqual(importer.name, 'Test')
def test_configuration_argument_in_cli(self):
"""
Verify that's configuration option has been added to managements
commands
"""
proc = subprocess.Popen(['django-cadmin', 'test', '--help'],
stdout=subprocess.PIPE)
self.assertIn('--configuration', proc.communicate()[0].decode('utf-8'))
proc = subprocess.Popen(['django-cadmin', 'runserver', '--help'],
stdout=subprocess.PIPE)
self.assertIn('--configuration', proc.communicate()[0].decode('utf-8'))
def test_django_setup_only_called_once(self):
proc = subprocess.Popen(
[sys.executable, os.path.join(os.path.dirname(__file__),
'setup_test.py')],
stdout=subprocess.PIPE)
res = proc.communicate()
stdout = res[0].decode('utf-8')
self.assertIn('setup_1', stdout)
self.assertIn('setup_2', stdout)
self.assertIn('setup_done', stdout)
self.assertEqual(proc.returncode, 0)
def test_utils_reraise(self):
from configurations.utils import reraise
class CustomException(Exception):
pass
with self.assertRaises(CustomException) as cm:
try:
raise CustomException
except Exception as exc:
reraise(exc, "Couldn't setup configuration", None)
self.assertEqual(cm.exception.args, ("Couldn't setup configuration: ",))
| StarcoderdataPython |
1741242 | <gh_stars>1-10
from typing import Dict, Any, Optional
from mmic_translator.models import ToolkitModel
from mmelemental.models.forcefield import ForceField
import parmed
from mmic_parmed.components.ff_component import FFToParmedComponent
from mmic_parmed.components.ff_component import ParmedToFFComponent
__all__ = ["ParmedFF"]
class ParmedFF(ToolkitModel):
    """A model for ParmEd Structure storing FF object."""
    @classmethod
    def engine(cls):
        """Return the toolkit name and the installed ParmEd version."""
        return "parmed", parmed.__version__
    @classmethod
    def dtype(cls):
        """Returns the fundamental FF object type."""
        return parmed.structure.Structure
    @classmethod
    def isvalid(cls, data):
        """Makes sure the Structure object stores atoms."""
        if hasattr(data, "atoms"):
            if len(data.atoms):
                return data
        # Missing attribute or an empty atom list both count as invalid.
        raise ValueError("ParmEd forcefield object does not contain any atoms!")
    @classmethod
    def from_file(cls, filename: str, **kwargs) -> "ParmedFF":
        """
        Constructs an ParmedFF object from file(s).
        Parameters
        ----------
        filename : str
            The forcefield filename to read
        **kwargs
            Any additional keywords to pass to the constructor
        Returns
        -------
        ParmedFF
            A constructed ParmedFF object.
        """
        kwargs.pop(
            "dtype", None
        ) # load_file doesn't seem to support specifying file formats
        ff = parmed.load_file(filename=filename, **kwargs)
        return cls(data=ff)
    @classmethod
    def from_schema(
        cls, data: ForceField, version: Optional[int] = None, **kwargs: Dict[str, Any]
    ) -> "ParmedFF":
        """
        Constructs an ParmedFF object from an MMSchema ForceField object.
        Parameters
        ----------
        data: ForceField
            Data to construct the forcefield object from.
        version: int, optional
            Schema version e.g. 1. Overwrites data.schema_version.
        **kwargs
            Additional kwargs to pass to the constructors.
        Returns
        -------
        ParmedFF
            A constructed ParmedFF object.
        """
        inputs = {
            "schema_object": data,
            "schema_version": version or data.schema_version,
            "schema_name": data.schema_name,
        }
        # Delegate the actual schema -> ParmEd conversion to the component.
        out = FFToParmedComponent.compute(inputs)
        return cls(data=out.data_object, data_units=out.data_units)
    def to_file(self, filename: str, dtype: str = None, **kwargs):
        """Writes the forcefield to a file.
        Parameters
        ----------
        filename : str
            The filename to write to
        dtype : Optional[str], optional
            File format
        **kwargs
            Additional kwargs to pass to the constructors.
        """
        if dtype:
            kwargs["format"] = dtype
        self.data.save(filename, **kwargs)
    def to_schema(self, version: Optional[int] = 0, **kwargs) -> ForceField:
        """Converts the forcefield to MMSchema ForceField object.
        Parameters
        ----------
        version: int, optional
            Schema specification version to comply with e.g. 1.
        **kwargs
            Additional kwargs to pass to the constructor.
        """
        inputs = {
            "data_object": self.data,
            "schema_version": version,
            "schema_name": kwargs.pop("schema_name", ForceField.default_schema_name),
            "keywords": kwargs,
        }
        out = ParmedToFFComponent.compute(inputs)
        # Sanity check: the component must honour the requested version.
        if version:
            assert version == out.schema_version
        return out.schema_object
| StarcoderdataPython |
1657287 | import pygame
# Basic RGB colour constants.
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# Target frame rate for the main loop.
FPS = 60
# Window dimensions; the extra height below the square canvas holds the toolbar.
WIDTH, HEIGHT = 600, 700
# The drawing canvas is a square grid of ROWS x COLS cells.
ROWS = COLS = 40
TOOLBAR_HEIGHT = HEIGHT - WIDTH
PIXEL_SIZE = WIDTH // COLS  # side length of one grid cell, in pixels
BG_COLOR = WHITE
INVERTED_BG_COLOR = BLACK
DRAW_GRID_LINES = True
def get_font(size):
    """Return a Sans system font at the given point size."""
    font = pygame.font.SysFont("Sans", size)
    return font
# -*- coding: utf-8 -*-
# Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Data.csv')
## [:, :-1] 1st : -> take all the rows | 2nd :-1 -> take all columns except the last one
X = dataset.iloc[:, :-1].values
## iloc[:, 3] -> all rows, 4th column (index 3) as the target vector
y = dataset.iloc[:, 3].values
# Taking care of missing data
# NOTE(review): sklearn's Imputer has been removed in modern scikit-learn in
# favour of sklearn.impute.SimpleImputer — confirm the pinned sklearn version.
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
## X[:, 1:3] -> columns at index 1 and 2 (the 2nd and 3rd columns)
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
## map the country labels (France, Spain, ...) to integer codes
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
## Dummy (one-hot) encoding: prevents the model from assuming an ordering
## such as France > Germany > Spain among the integer country codes.
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
## encode the Yes/No target labels as integers
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
## Features have different scales, so bring them onto the same magnitude;
## the scaler is fit on the training set only and reused on the test set.
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X. transform(X_test)
| StarcoderdataPython |
119366 | import os
import sys
from openslide_reader import OpenslideReader
import subprocess
__all__ = ("PreprocessReader", )
import logging
logger = logging.getLogger('slideatlas')
from lockfile import LockFile
class PreprocessReader(OpenslideReader):
    """Base reader that converts unsupported slide formats before opening.

    Subclasses implement :meth:`pre_process` to convert ``params["fname"]``
    into a file Openslide can read; the converted path then replaces the
    original before delegating to :class:`OpenslideReader`.
    """
    def __init__(self):
        logger.info('PreprocessReader init')
        super(PreprocessReader, self).__init__()
    def pre_process(self, params):
        """
        Converts the files
        """
        raise NotImplementedError()
    def set_input_params(self, params):
        """
        Accepts the input file
        """
        # Keep the original path under "oldfname" and swap in the converted
        # file's path before handing off to the Openslide-based loader.
        params["oldfname"] = params["fname"]
        params["fname"] = self.pre_process(params)
        super(PreprocessReader, self).set_input_params(params)
class PreprocessReaderJp2(PreprocessReader):
    """
    uses kakadu if available or otherwise gdal to convert jp2
    files to tiled tiff which are then absorbed by OpenslideReader
    """
    def __init__(self, kakadu_dir=None):
        # Directory holding the kdu_expand binary; when None, gdal_translate
        # performs the first (striped tiff) conversion pass instead.
        self.kakadu_dir = kakadu_dir
        super(PreprocessReaderJp2, self).__init__()
    def pre_process(self, params):
        """
        Converts the files.
        First pass creates a striped tiff (kakadu if available, else gdal);
        second pass converts it to a tiled tiff.
        A lock file serialises the conversion: if another process holds the
        lock, this call blocks until that process has produced the output.

        Parameters
        ----------
        params : dict
            Must contain "fname", the path to the .jp2/.j2k input.

        Returns
        -------
        str
            Path of the tiled tiff ready for Openslide.
        """
        # Split the requested filename
        dirname, filename = os.path.split(params["fname"])
        _, ext = os.path.splitext(filename)
        # assert that ext is as expected
        assert ext in [".jp2", ".j2k"]
        output1 = os.path.join(dirname, filename + "_striped.tif")
        output2 = os.path.join(dirname, filename + "_tiled.tif")
        lock_path = os.path.join(dirname, filename + ".lock")
        lock = LockFile(lock_path)
        lock.acquire()
        try:
            # If the final file is missing then create it
            if not os.path.exists(output2):
                logger.info('processing')
                logger.info('# Convert to striped tiff')
                if self.kakadu_dir is None:
                    # Fixed: the command list previously shadowed the `params`
                    # dict argument; use a dedicated `cmd` variable instead.
                    cmd = ["gdal_translate", params["fname"], output1]
                    subprocess.call(cmd)
                else:
                    # Additional LD_LIBRARY_PATH so kdu_expand finds its libs
                    environ = os.environ.copy()
                    if "LD_LIBRARY_PATH" not in environ:
                        environ["LD_LIBRARY_PATH"] = ""
                    environ["LD_LIBRARY_PATH"] = self.kakadu_dir + ":" + environ["LD_LIBRARY_PATH"]
                    cmd = [os.path.join(self.kakadu_dir, "kdu_expand"), "-i", params["fname"], "-o", output1]
                    subprocess.call(cmd, env=environ)
                logger.info('# Convert to tiled tiff')
                cmd = ["gdal_translate", "-co", "TILED=YES", "-co", "COMPRESS=JPEG", output1, output2]
                subprocess.call(cmd)
                # Remove the intermediate striped tiff; guard against the
                # first pass having failed to produce it.
                if os.path.exists(output1):
                    os.remove(output1)
        finally:
            # Bug fix: release the lock even when conversion raises —
            # previously an exception leaked the lock and blocked every
            # subsequent attempt on this file.
            lock.release()
        return output2
class PreprocessReaderTif(PreprocessReader):
    """
    uses vips to convert tiff into tiled format if not already
    files to tiled tiff which are then absorbed by OpenslideReader
    """
    def __init__(self, kakadu_dir=None):
        # kakadu_dir is unused for tiffs but kept for interface parity with
        # PreprocessReaderJp2.
        self.kakadu_dir = kakadu_dir
        super(PreprocessReaderTif, self).__init__()
    def pre_process(self, params):
        """
        Converts the file to a tiled tiff with vips, unless Openslide can
        already read it directly (in which case the original path is
        returned unchanged).
        A lock file serialises the conversion: if another process holds the
        lock, this call blocks until that process has produced the output.

        Parameters
        ----------
        params : dict
            Must contain "fname", the path to the .tif/.tiff input.

        Returns
        -------
        str
            Path of a tiff readable by Openslide.
        """
        # Split the requested filename
        dirname, filename = os.path.split(params["fname"])
        name, ext = os.path.splitext(filename)
        # assert that ext is as expected
        assert ext in [".tif", ".tiff"]
        # Probe with the plain Openslide reader first; if it succeeds no
        # conversion is needed.
        try:
            tempreader = OpenslideReader()
            tempreader.set_input_params(params)
            logger.info("No preprocess needed")
            return params["fname"]
        except Exception:
            # Best-effort probe: fall through to conversion, but record why
            # instead of silently swallowing the failure.
            logger.debug("Openslide cannot read %s directly; converting",
                         params["fname"], exc_info=True)
        output1 = os.path.join(dirname, name + "_tiled.tif")
        lock_path = os.path.join(dirname, filename + ".lock")
        lock = LockFile(lock_path)
        lock.acquire()
        try:
            # If the file is missing then create it
            if not os.path.exists(output1):
                logger.info('processing')
                logger.info('# Convert to tiled tiff')
                # Fixed: the command list previously shadowed the `params`
                # dict argument; use a dedicated `cmd` variable instead.
                cmd = ["vips", "tiffsave", params["fname"], output1,
                       "--compression=jpeg", "--tile", "--tile-width=256",
                       "--tile-height=256", "--bigtiff"]
                subprocess.call(cmd)
        finally:
            # Bug fix: release the lock even when conversion raises —
            # previously an exception leaked the lock and blocked every
            # subsequent attempt on this file.
            lock.release()
        return output1
# Manual smoke test: convert the tiff given on the command line and report it.
if __name__ == "__main__":
    reader = PreprocessReaderTif()
    reader.set_input_params({"fname": sys.argv[1]})
    logger.debug('%s', reader.name)
    # i = reader.get_tile(26000, 83000)
    # i.save("tile.jpg")
| StarcoderdataPython |
3326098 | <filename>diffdirs/cli.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Command Line Interface for diffdirs"""
import argparse
from pprint import pprint
from .diffdirs import diff_dirs
def parse_args() -> argparse.Namespace:
    """Build the diffdirs argument parser and parse ``sys.argv``.

    Returns the parsed namespace with the original data path, one or more
    new data paths, the glob filter(s), and the raw/filtered flags.
    """
    parser = argparse.ArgumentParser(prog="diffdirs")
    parser.add_argument(
        "original_data_path",
        metavar="ORIGINAL_DATA_PATH",
        help="The path to the 'original' data. This will be used to compare against",
    )
    parser.add_argument(
        "new_data_paths",
        metavar="NEW_DATA_PATH",
        nargs="+",
        help="The path(s) to the 'new' data. These will be compared against original_data_path",
    )
    parser.add_argument(
        "-g",
        "--globs",
        metavar="GLOB",
        nargs="+",
        default=["**/*.fits"],
        help=(
            "An optional glob pattern for specifying which files to check for regression"
            "NOTE: This currently will only work for FITS files"
        ),
    )
    parser.add_argument(
        "--raw",
        action="store_true",
        help="Create a raw vocabulary from the filter files",
    )
    parser.add_argument(
        "--filtered",
        action="store_true",
        help="Create a unicode-filtered vocabulary from the filter files",
    )
    return parser.parse_args()
def main():
"""Entry point"""
args = parse_args()
print(args.globs)
diffs = diff_dirs(args.original_data_path, args.new_data_paths, args.globs)
print("DIFFS:")
pprint(diffs)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1678785 | """
Unit tests for the `HasTraits.class_traits` class function.
"""
from __future__ import absolute_import
import six
from traits import _py2to3
from traits.testing.unittest_tools import unittest
from traits.api import HasTraits, Int, List, Str
class A(HasTraits):
    # Base fixture: a plain Int trait and a Str trait tagged with `marked` metadata.
    x = Int
    name = Str(marked=True)
class B(A):
    # Inherits A's traits unchanged; used to test trait reporting on subclasses.
    pass
class C(B):
    # Adds two more traits, both carrying `marked` metadata (False and True).
    lst = List(marked=False)
    y = Int(marked=True)
class TestClassTraits(unittest.TestCase):
    """Tests for ``HasTraits.class_traits`` with and without metadata filters."""
    def test_all_class_traits(self):
        """All trait names (plus the implicit trait_added/trait_modified) are reported."""
        expected = ["x", "name", "trait_added", "trait_modified"]
        _py2to3.assertCountEqual(self, A.class_traits(), expected)
        # Check that derived classes report the correct traits.
        _py2to3.assertCountEqual(self, B.class_traits(), expected)
        expected.extend(("lst", "y"))
        _py2to3.assertCountEqual(self, C.class_traits(), expected)
    def test_class_traits_with_metadata(self):
        """``class_traits`` can filter on metadata values or predicates."""
        # Retrieve all traits that have the `marked` metadata
        # attribute set to True.
        traits = C.class_traits(marked=True)
        _py2to3.assertCountEqual(self, list(traits.keys()), ("y", "name"))
        # Retrieve all traits that have a `marked` metadata attribute,
        # regardless of its value.
        marked_traits = C.class_traits(marked=lambda attr: attr is not None)
        _py2to3.assertCountEqual(self, marked_traits, ("y", "name", "lst"))
| StarcoderdataPython |
3355633 | <reponame>bmintz/python-snippets<filename>list_compare.py
#!/usr/bin/env python3
# encoding: utf-8
import operator
def list_compare(a, b, op):
    """Compare two lists lexicographically using the comparison *op*.

    *op* is one of the ``operator`` module's rich comparisons (``eq``, ``ne``,
    ``lt``, ``le``, ``gt``, ``ge``). The result matches Python's built-in
    sequence comparison: the first differing pair of items decides, and when
    one list is a prefix of the other, their lengths decide.

    Fixes over the previous version:
    - operator-precedence bug made ``ne`` return True for equal lists
      (``len(a) != len(b) and op is eq or op is ne`` grouped wrongly);
    - ``range(len(a))`` raised IndexError when ``a`` was longer than ``b``
      and NameError (unbound ``i``) when ``a`` was empty;
    - a shorter list that is a prefix of the other compared incorrectly
      (``[1] < [1, 2]`` returned False);
    - leftover debug ``print`` removed.
    """
    for x, y in zip(a, b):
        if x != y:
            # First differing pair decides; eq/ne have immediate answers.
            if op is operator.eq:
                return False
            if op is operator.ne:
                return True
            return op(x, y)
    # Common prefix is identical: fall back to comparing lengths, which
    # also handles eq/ne for equal or prefix-related lists.
    return op(len(a), len(b))
| StarcoderdataPython |
102027 |
async def setupAddSelfrole(plugin, ctx, name, role, roles):
    """Register *role* under *name* as a self-assignable role for this guild.

    Validates the request (no duplicates, role strictly below the bot's top
    role, not @everyone), persists the updated mapping through the plugin DB,
    and reports the outcome to the invoking channel via localized messages.
    """
    role_id = role.id
    name = name.lower()  # self-role names are matched case-insensitively
    # Reject duplicates by id or by name.
    # NOTE(review): values are stored as str(role_id) below, but role.id is
    # compared here unconverted — if role.id is an int this id check can
    # never match; confirm and normalize the types.
    if role_id in [roles[x] for x in roles] or name in roles:
        return await ctx.send(plugin.t(ctx.guild, "already_selfrole", _emote="WARN"))
    # The bot can only assign roles strictly below its own top role.
    if role.position >= ctx.guild.me.top_role.position:
        return await ctx.send(plugin.t(ctx.guild, "role_too_high", _emote="WARN"))
    if role == ctx.guild.default_role:
        return await ctx.send(plugin.t(ctx.guild, "default_role_forbidden", _emote="WARN"))
    roles[name] = str(role_id)
    plugin.db.configs.update(ctx.guild.id, "selfroles", roles)
    await ctx.send(plugin.t(ctx.guild, "selfrole_added", _emote="YES", role=role.name, name=name, prefix=plugin.bot.get_guild_prefix(ctx.guild)))
197515 | """
Copyright 2018 <NAME>
[This program is licensed under the "MIT License"]
Please see the file LICENSE in the source distribution
of this software for license terms.
"""
#======================Imports========================
from flask import Flask, render_template, request, redirect, url_for, jsonify
from model import AppModel
from presenter import Presenter
#=====================================================
#======================Setup==========================
app = Flask(__name__)  # WSGI application instance
model = AppModel(app)  # data / external-API access layer
presenter = Presenter(model)  # mediates between the model and view templates
app.sites = presenter.getSites()  # cache the Stack Exchange site list at startup
#=====================================================
#====================Home Route=======================
@app.route("/")
def home():
    """Render the landing page."""
    return render_template(presenter.home())
#=====================================================
#===============Stack Exchange Route==================
@app.route("/stackexchange", methods=["GET", "POST"])
def stackexchange():
    """
    This method handles the GET and POST requests to '/stackexchange'
    endpoint. If the request is a GET, it provides the form where the
    user selects a site on Stack Exchange to get list of questions to
    farm karma. If the request is a POST, it renders the results into
    the appropriate tables based on the filter in the model.
    """
    if request.method == "GET":
        return render_template(presenter.getStackExchange(), sites=app.sites)
    else:
        # questions appears to come back as three buckets rendered as
        # best/good/okay — presumably ranked by the presenter; confirm
        # against Presenter.getNoAnswerQuestions.
        url, questions = presenter.getNoAnswerQuestions(request.form['site'])
        return render_template(url, sites=app.sites, best=questions[0], good=questions[1], okay=questions[2])
#=====================================================
#===================Reddit Routes=====================
@app.route("/redditaccess", methods=["GET"])
def authReddit():
    """
    This methods provides the OAuth URL for the user to
    sign in to Reddit to allow access for the application.
    """
    # Redirect the browser to Reddit's OAuth approval page.
    return redirect(presenter.userApproveApp())
@app.route("/reddit", methods=["GET", "POST"])
def getReddit():
    """
    This method renders the user's subscribed subreddits in
    both GET and POST requests. If the request is a POST, it
    also renders the newest posts on the specified subreddit.
    """
    if request.method == "GET":
        # 'code' is the OAuth authorization code passed back by Reddit.
        return render_template('reddit.html', subreddits=presenter.getUserSubreddits(request.args.get('code')))
    else:
        newPosts = presenter.getSubredditNew(request.form['subreddit'])
        return render_template('reddit.html', subreddits=presenter.getUserSubreddits(code=None), submissions=newPosts)
#=====================================================
#=================Start Server========================
# NOTE(review): debug mode exposes the interactive Werkzeug debugger and must
# not be enabled for a production deployment — confirm this is dev-only.
app.debug = True
if __name__ == '__main__':
    app.run(host='localhost', port=8080)
#===================================================== | StarcoderdataPython |
19684 | import json
import os
import shutil
import urllib.request
import traceback
import logging
import psutil
from collections import defaultdict
from typing import List, Dict, Tuple
from multiprocessing import Semaphore, Pool
from subprocess import Popen, PIPE
from datetime import datetime, timedelta
from lxml import etree
from lxml.etree import Element
import pyarrow as pa
import pyarrow.parquet as pq
from google.cloud import storage
from diachronic import global_conf, Tags
# Approximate memory budget attributed to each worker process.
PROCESS_MEM = psutil.virtual_memory().total / psutil.cpu_count()
# Fraction of (total_mem/cpu_count) that a given process uses before flushing buffer
PROCESS_MEM_LIMIT = .1
# Caps how many dump files are downloaded concurrently.
DOWNLOAD_SEMAPHORE = Semaphore(global_conf.download_parallelism)
# Filenames whose download/parse raised; reported at the end of the run.
# NOTE(review): appended from multiprocessing.Pool workers — each worker has
# its own copy of this list, so the parent's list may stay empty; confirm.
FAILURES = []
def make_path(path: str) -> None:
    """Create *path* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the previous check-then-create pattern,
    which could raise ``FileExistsError`` when two workers raced between the
    ``os.path.exists`` check and ``os.makedirs``.
    """
    os.makedirs(path, exist_ok=True)
def get_wiki_from_filename(wiki_file: str) -> str:
    """Return the wiki name, i.e. everything before the first '-' in the filename."""
    wiki, _, _ = wiki_file.partition("-")
    return wiki
class WikiHandler(object):
    """Orchestrates downloading and parsing of Wikipedia dump files.

    Discovers dump filenames, skips those already uploaded to the output
    bucket, and fans the remaining files out to a multiprocessing pool.
    """
    def __init__(self):
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
        make_path(global_conf.input_path)
    def get_filenames(self) -> List[str]:
        """Fetch the 7z meta-history dump filenames for every configured wiki."""
        filenames = []
        for wiki in global_conf.wikis:
            url_prefix = global_conf.get_url_prefix(wiki)
            url = "{}dumpstatus.json".format(url_prefix)
            logging.info("Grabbing filenames from {}".format(url))
            conn = urllib.request.urlopen(url)
            data = json.loads(conn.read().decode())
            conn.close()
            filenames.extend(list(data["jobs"]["metahistory7zdump"]["files"].keys()))
        return filenames
    def get_files_to_skip(self) -> List[str]:
        """Return names of blobs already present in the output bucket."""
        client = storage.Client()
        return [blob.name for blob in client.get_bucket(global_conf.bucket).list_blobs()]
    def get_files_to_run(self, overwrite=False) -> List[str]:
        """Return dump files still to process; with overwrite=True, all of them."""
        all_filenames = self.get_filenames()
        if overwrite:
            logging.info("Overwrite enabled, running all {} files".format(len(all_filenames)))
            return all_filenames
        # A file is done when its expected output blob name already exists.
        skipfiles = self.get_files_to_skip()
        files_to_run = [f for f in all_filenames
                        if "{}.{}".format(f, global_conf.output_suffix) not in skipfiles]
        skip_count = len(all_filenames) - len(files_to_run)
        logging.info("Running {} files and skipping {}".format(len(files_to_run), skip_count))
        return files_to_run
    def download(self, wiki_file: str) -> None:
        """Stream one dump file from the mirror into the local input directory."""
        logging.info("Downloading {}".format(wiki_file))
        wiki = get_wiki_from_filename(wiki_file)
        url_prefix = global_conf.get_url_prefix(wiki)
        response = urllib.request.urlopen(url_prefix + wiki_file)
        download_file = open(global_conf.input_path + wiki_file, 'wb')
        shutil.copyfileobj(response, download_file)
        response.close()
        download_file.close()
        logging.info("Downloaded {}".format(wiki_file))
    def run_file(self, wiki_file: str) -> None:
        """Download and parse one file; record (not re-raise) any failure."""
        try:
            # Throttle concurrent downloads; parsing itself is unthrottled.
            with DOWNLOAD_SEMAPHORE:
                self.download(wiki_file)
            parser = WikiFileParser(wiki_file)
            parser.run()
        except Exception:
            logging.info("Caught exception on {}".format(wiki_file))
            logging.error(traceback.format_exc())
            # NOTE(review): inside a Pool worker this appends to the worker's
            # own copy of FAILURES, not the parent's — confirm intent.
            FAILURES.append(wiki_file)
            os.remove(global_conf.input_path + wiki_file)
    def run(self) -> None:
        """Process every pending dump file on a process pool and wait for completion."""
        logging.info("Running {}".format(global_conf.month_source))
        filenames_to_run = self.get_files_to_run()
        pool = Pool()
        pool.map_async(self.run_file, filenames_to_run, error_callback=self._on_error)
        pool.close()
        pool.join()
        logging.info("{} Run completed. Failures: {}".format(global_conf.month_source, FAILURES))
    def _on_error(self, ex: Exception):
        # Re-raise any error the pool could not handle inside a worker.
        raise ex
class WikiFileParser(object):
    """Streams one 7z wiki dump through an XML parser into a Parquet file.

    Revisions from article pages (namespace "0") are buffered column-wise
    and flushed as Parquet row groups when the process nears its memory
    budget; the result is uploaded to cloud storage and local files removed.
    """
    def __init__(self, wiki_file: str):
        # Column order shared by the row template and the column buffers.
        self.arrow_cols = ("namespace", "title", "timestamp", "text")
        self.wiki_file = wiki_file
        self.wiki = get_wiki_from_filename(self.wiki_file)
        output_prefix = global_conf.get_output_prefix(self.wiki)
        make_path(global_conf.output_path + output_prefix)
        self.output_file = "{}{}.{}".format(output_prefix,
                                            self.wiki_file,
                                            global_conf.output_suffix)
        # State trackers
        self.arrow_buff = {colname: [] for colname in self.arrow_cols}
        self.arrow_row, self.cur_date, self.current_revision = self.iter_reset()
        self.schema: pq.ParquetSchema = None
        self.writer: pq.ParquetWriter = None
    def iter_reset(self) -> Tuple[Dict[str, None], datetime, None]:
        """Reset per-page parsing state; returns the fresh state for unpacking."""
        self.arrow_row = {colname: None for colname in self.arrow_cols}
        self.cur_date = global_conf.datetime_init
        self.current_revision = None
        return self.arrow_row, self.cur_date, self.current_revision
    @property
    def func_dict(self) -> Dict[str, callable]:
        """Map XML tag names to their handlers; unknown tags get a no-op."""
        d = {
            Tags.Revision.nstag: self.parse_revision,
            Tags.Namespace.nstag: self.parse_namespace,
            Tags.Page.nstag: self.parse_finish,
            Tags.Title.nstag: self.parse_title
        }
        return defaultdict(lambda: (lambda x: None), **d)
    def parse_title(self, elem: Element) -> None:
        # Record the page title for the rows buffered from this page.
        self.arrow_row["title"] = elem.text
    def parse_namespace(self, elem: Element) -> None:
        # Namespace text is compared as a string ("0" == article namespace).
        self.arrow_row["namespace"] = elem.text
    def parse_revision(self, elem: Element) -> None:
        """Buffer a revision if it starts a new calendar day (articles only)."""
        if self.arrow_row["namespace"] == "0":
            # Strip the trailing 'Z' from the ISO timestamp before parsing.
            timestamp = datetime.strptime(elem.find(Tags.Timestamp.nstag).text[:-1], "%Y-%m-%dT%H:%M:%S")
            if timestamp >= self.cur_date:
                # Advance the threshold to the next midnight so at most one
                # revision per page per day is kept.
                self.cur_date = datetime.combine(timestamp.date(), datetime.min.time()) + timedelta(days=1)
                text = elem.find(Tags.Text.nstag).text or ""
                self.arrow_row["text"] = text
                self.arrow_row["timestamp"] = timestamp
                for col, val in self.arrow_row.items():
                    self.arrow_buff[col].append(val)
        elem.clear()
    def parse_finish(self, elem: Element) -> None:
        """End of a <page>: reset state and flush the buffer if memory is tight."""
        self.iter_reset()
        # Determine whether buffer needs to be flushed based on available memory
        process = psutil.Process(os.getpid())
        if process.memory_info().rss / PROCESS_MEM >= PROCESS_MEM_LIMIT:
            self.write()
        elem.clear()
    def stream(self) -> None:
        """Decompress the dump with 7z and feed the XML stream to the handlers."""
        stdout = Popen(["7z", "e", "-so", global_conf.input_path + self.wiki_file], stdout=PIPE).stdout
        for event, elem in etree.iterparse(stdout, huge_tree=True):
            self.func_dict[elem.tag](elem)
    def write(self) -> None:
        """Flush the column buffers as one Parquet row group, then clear them."""
        arrow_arrays = {colname: pa.array(arr) for colname, arr in self.arrow_buff.items()}
        arrow_table = pa.Table.from_arrays(arrays=list(arrow_arrays.values()), names=list(arrow_arrays.keys()))
        # The writer is created lazily so the schema is inferred from data.
        if not self.writer:
            self.writer = pq.ParquetWriter(global_conf.output_path + self.output_file,
                                           arrow_table.schema, compression='brotli')
        self.writer.write_table(arrow_table)
        self.arrow_buff = {colname: [] for colname in self.arrow_cols}
    def upload(self) -> None:
        """Upload the finished Parquet file to the configured bucket."""
        client = storage.Client()
        bucket = client.get_bucket(global_conf.bucket)
        blob = bucket.blob(self.output_file)
        with open(global_conf.output_path + self.output_file, 'rb') as pq_file:
            blob.upload_from_file(pq_file)
    def cleanup(self) -> None:
        """Delete the local input dump and the local Parquet output."""
        os.remove(global_conf.input_path + self.wiki_file)
        os.remove(global_conf.output_path + self.output_file)
    def run(self) -> None:
        """Parse, flush, upload and clean up for this dump file."""
        logging.info("Started parsing {}".format(self.wiki_file))
        self.stream()
        # Clear leftover buffer
        self.write()
        self.writer.close()
        self.upload()
        self.cleanup()
        logging.info("Finished parsing {}".format(self.wiki_file))
# Script entry point: process all pending dump files for the configured month.
if __name__ == "__main__":
    WikiHandler().run()
| StarcoderdataPython |
3227099 | <reponame>ysilvy/ocean_toe_2020
'''
Compute, in zonal means, how much of the ocean (per basin) has emerged from 1861 to 2100
'''
import os
import glob
from netCDF4 import Dataset as open_ncfile
import matplotlib.pyplot as plt
import numpy as np
import datetime
import pickle
# -- Read result
# NOTE(review): the file handle passed to pickle.load is never closed;
# consider wrapping it in a `with open(...)` block.
emerge = pickle.load( open( "/home/ysilvy/Density_bining/Yona_analysis/data/percentage_emergence_medians_meanhistNat.pkl", "rb" ) )
# -- Median and range
# Axis 1 appears to index the model ensemble (median/percentiles taken over
# it); axis 0 is time and axis 2 the basin, with columns 1/2/3 labelled
# Atlantic/Pacific/Indian below — TODO confirm against the pickle's writer.
median_emerge = np.ma.median(emerge,axis=1)
pc25_emerge = np.percentile(emerge,25,axis=1)
pc75_emerge = np.percentile(emerge,75,axis=1)
time = np.arange(1860,2100)
# -- Plot
# One panel per basin: black line = ensemble median, grey band = 25th-75th
# percentile spread across the ensemble.
fig, axes = plt.subplots(1,3,sharex=True,sharey=True,figsize=(16, 5))
axes[0].plot(time,median_emerge[:,1],color='k')
axes[0].fill_between(time,pc25_emerge[:,1],pc75_emerge[:,1],color='0.8') #alpha=0.3
axes[0].set_xlim([1920,2080])
axes[0].set_ylim([0,83])
# Dashed vertical line at 2005 — presumably the historical/scenario
# transition year; confirm.
axes[0].axvline(x=2005,ls='--',color='k',lw=0.5)
axes[0].grid(axis='y')
axes[0].text(2050,5,'Atlantic',fontweight='bold',fontsize=15,va='center',ha='center')
axes[1].plot(time,median_emerge[:,2],color='k')
axes[1].fill_between(time,pc25_emerge[:,2],pc75_emerge[:,2],color='0.8')
axes[1].axvline(x=2005,ls='--',color='k',lw=0.5)
axes[1].text(2050,5,'Pacific',fontweight='bold',fontsize=15,va='center',ha='center')
axes[1].grid(axis='y')
axes[2].plot(time,median_emerge[:,3],color='k')
axes[2].fill_between(time,pc25_emerge[:,3],pc75_emerge[:,3],color='0.8')
axes[2].axvline(x=2005,ls='--',color='k',lw=0.5)
axes[2].text(2050,5,'Indian',fontweight='bold',fontsize=15,va='center',ha='center')
axes[2].grid(axis='y')
axes[0].set_ylabel('% of basin zonal mean',fontweight='bold',fontsize=14)
axes[0].set_xticks(np.arange(1920,2081,20))
axes[1].tick_params(axis='y', labelleft='on')
axes[2].tick_params(axis='y', labelleft='on')
plt.subplots_adjust(wspace=0.1,top=0.85,left=0.04, right=0.92)
# Panel label for the multi-panel figure this becomes part of.
plt.figtext(.006,.95,'b',fontweight='bold',fontsize=18)
for i in range(3):
    plt.setp(axes[i].get_xticklabels(), fontweight='bold',fontsize=12, rotation=20)
    plt.setp(axes[i].get_yticklabels(), fontweight='bold',fontsize=12)
    axes[i].xaxis.set_tick_params(which='major',width=2,labelsize=12)
    axes[i].yaxis.set_tick_params(which='major',width=2,labelsize=12)
# Date
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d")
plotName = 'fig3b'
figureDir = 'models/ToE/'
plt.savefig(plotName+'.svg', dpi=300) #,bbox_inches='tight')
#plt.show()
| StarcoderdataPython |
1657598 | <reponame>Gizmondd/longmbart
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from collections import Counter
from pathlib import Path

from filter_foreign import filter_foreign_characters
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the vocabulary-building tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--filter-files",
        type=argparse.FileType("r"),
        nargs="+",
        help="Files from which to create a frequency list",
        metavar="PATH",
    )
    parser.add_argument(
        "--complete-vocab",
        type=argparse.FileType("r"),
        help="File containing the complete vocabulary to filter",
        metavar="PATH",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        help="Output directory for filtered vocabularies",
        metavar="PATH",
    )
    parser.add_argument(
        "--output-prefix",
        type=str,
        help="File prefix for output files",
        metavar="STRING",
    )
    parser.add_argument(
        "--vocab-sizes",
        type=int,
        nargs="+",
        help="Vocabulary sizes of the output",
        metavar="INT",
    )
    parser.add_argument(
        "--raw",
        action="store_true",
        help="Create a raw vocabulary from the filter files",
    )
    parser.add_argument(
        "--filtered",
        action="store_true",
        help="Create a unicode-filtered vocabulary from the filter files",
    )
    args = parser.parse_args()
    return args
def get_freq_list(frequency_files):
    """Return all tokens from *frequency_files*, most frequent first.

    Whitespace-tokenizes every line of every file and counts occurrences.
    Uses ``collections.Counter.most_common`` instead of the previous
    hand-rolled dict + sort; ordering is identical (descending count,
    insertion order for ties, since both sorts are stable).

    Parameters
    ----------
    frequency_files : iterable of open text files
        The corpora to count tokens over.

    Returns
    -------
    list of str
        Tokens sorted by descending frequency.
    """
    counts = Counter()
    for file in frequency_files:
        for line in file:
            counts.update(line.split())
    return [token for token, _ in counts.most_common()]
def filter_by_frequency(unfiltered, freq_list, n):
    """Return (alphabetically sorted) the first *n* tokens of *freq_list*
    that are also present in *unfiltered*."""
    matching = [token for token in freq_list if token in unfiltered]
    return sorted(matching[:n])
def create_raw_vocab(freq_list, output_dir, output_prefix):
    """Write the frequency-ordered vocabulary to ``<prefix>.raw`` in
    *output_dir*, one piece per line."""
    out_path = output_dir / "{}.raw".format(output_prefix)
    with open(out_path, "w") as outfile:
        outfile.writelines(piece + "\n" for piece in freq_list)
def create_filtered_vocab(freq_list, output_dir, output_prefix):
    """Write the unicode-filtered vocabulary to ``<prefix>.filtered`` in
    *output_dir*, one piece per line."""
    pieces = filter_foreign_characters(freq_list)
    out_path = output_dir / "{}.filtered".format(output_prefix)
    with open(out_path, "w") as outfile:
        outfile.writelines(piece + "\n" for piece in pieces)
def main(args: argparse.Namespace):
    """Emit raw / unicode-filtered / size-capped vocabularies as requested."""
    # Token list ordered by descending frequency across the filter files.
    freq_list = get_freq_list(args.filter_files)
    if args.raw and args.output_dir and args.output_prefix:
        create_raw_vocab(freq_list, args.output_dir, args.output_prefix)
    if args.filtered and args.output_dir and args.output_prefix:
        create_filtered_vocab(freq_list, args.output_dir, args.output_prefix)
    if args.vocab_sizes and args.complete_vocab and args.output_dir and args.output_prefix:
        complete = args.complete_vocab.readlines()
        # Restrict the complete vocabulary to allowed (non-foreign) tokens.
        unfiltered = filter_foreign_characters(complete, return_set=True)
        for n in args.vocab_sizes:
            # One output per requested size, named e.g. "<prefix>.32k".
            filtered = filter_by_frequency(unfiltered, freq_list, n)
            outfilename = args.output_dir / '{}.{}k'.format(args.output_prefix, int(n/1000))
            with open(outfilename, 'w') as outfile:
                for token in filtered:
                    outfile.write(token + '\n')
# Script entry point.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| StarcoderdataPython |
1705 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .available_operation_display import AvailableOperationDisplay
from .error_details_model import ErrorDetailsModel
from .error_error_model import ErrorErrorModel
from .error_model import ErrorModel, ErrorModelException
from .operation_result import OperationResult
from .provisioned_resource_properties import ProvisionedResourceProperties
from .proxy_resource import ProxyResource
from .managed_proxy_resource import ManagedProxyResource
from .resource import Resource
from .tracked_resource import TrackedResource
from .secret_resource_properties import SecretResourceProperties
from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties
from .secret_resource_properties_base import SecretResourcePropertiesBase
from .secret_resource_description import SecretResourceDescription
from .secret_value import SecretValue
from .secret_value_properties import SecretValueProperties
from .secret_value_resource_description import SecretValueResourceDescription
from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile
from .volume_properties import VolumeProperties
from .volume_reference import VolumeReference
from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters
from .application_scoped_volume import ApplicationScopedVolume
from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk
from .volume_resource_description import VolumeResourceDescription
from .network_resource_properties import NetworkResourceProperties
from .local_network_resource_properties import LocalNetworkResourceProperties
from .endpoint_ref import EndpointRef
from .network_ref import NetworkRef
from .network_resource_properties_base import NetworkResourcePropertiesBase
from .network_resource_description import NetworkResourceDescription
from .gateway_destination import GatewayDestination
from .tcp_config import TcpConfig
from .http_route_match_path import HttpRouteMatchPath
from .http_route_match_header import HttpRouteMatchHeader
from .http_route_match_rule import HttpRouteMatchRule
from .http_route_config import HttpRouteConfig
from .http_host_config import HttpHostConfig
from .http_config import HttpConfig
from .gateway_properties import GatewayProperties
from .gateway_resource_description import GatewayResourceDescription
from .image_registry_credential import ImageRegistryCredential
from .environment_variable import EnvironmentVariable
from .setting import Setting
from .container_label import ContainerLabel
from .endpoint_properties import EndpointProperties
from .resource_requests import ResourceRequests
from .resource_limits import ResourceLimits
from .resource_requirements import ResourceRequirements
from .diagnostics_ref import DiagnosticsRef
from .reliable_collections_ref import ReliableCollectionsRef
from .container_state import ContainerState
from .container_event import ContainerEvent
from .container_instance_view import ContainerInstanceView
from .container_code_package_properties import ContainerCodePackageProperties
from .auto_scaling_trigger import AutoScalingTrigger
from .auto_scaling_mechanism import AutoScalingMechanism
from .auto_scaling_policy import AutoScalingPolicy
from .service_resource_description import ServiceResourceDescription
from .diagnostics_sink_properties import DiagnosticsSinkProperties
from .diagnostics_description import DiagnosticsDescription
from .application_properties import ApplicationProperties
from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription
from .application_resource_description import ApplicationResourceDescription
from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism
from .auto_scaling_metric import AutoScalingMetric
from .auto_scaling_resource_metric import AutoScalingResourceMetric
from .service_properties import ServiceProperties
from .service_replica_properties import ServiceReplicaProperties
from .service_replica_description import ServiceReplicaDescription
from .average_load_scaling_trigger import AverageLoadScalingTrigger
from .container_logs import ContainerLogs
from .operation_result_paged import OperationResultPaged
from .secret_resource_description_paged import SecretResourceDescriptionPaged
from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged
from .volume_resource_description_paged import VolumeResourceDescriptionPaged
from .network_resource_description_paged import NetworkResourceDescriptionPaged
from .gateway_resource_description_paged import GatewayResourceDescriptionPaged
from .application_resource_description_paged import ApplicationResourceDescriptionPaged
from .service_resource_description_paged import ServiceResourceDescriptionPaged
from .service_replica_description_paged import ServiceReplicaDescriptionPaged
from .service_fabric_mesh_management_client_enums import (
ResourceStatus,
HealthState,
SecretKind,
VolumeProvider,
SizeTypes,
ApplicationScopedVolumeKind,
NetworkKind,
HeaderMatchType,
OperatingSystemType,
DiagnosticsSinkKind,
AutoScalingMechanismKind,
AutoScalingMetricKind,
AutoScalingResourceMetricName,
AutoScalingTriggerKind,
)
__all__ = [
'AvailableOperationDisplay',
'ErrorDetailsModel',
'ErrorErrorModel',
'ErrorModel', 'ErrorModelException',
'OperationResult',
'ProvisionedResourceProperties',
'ProxyResource',
'ManagedProxyResource',
'Resource',
'TrackedResource',
'SecretResourceProperties',
'InlinedValueSecretResourceProperties',
'SecretResourcePropertiesBase',
'SecretResourceDescription',
'SecretValue',
'SecretValueProperties',
'SecretValueResourceDescription',
'VolumeProviderParametersAzureFile',
'VolumeProperties',
'VolumeReference',
'ApplicationScopedVolumeCreationParameters',
'ApplicationScopedVolume',
'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk',
'VolumeResourceDescription',
'NetworkResourceProperties',
'LocalNetworkResourceProperties',
'EndpointRef',
'NetworkRef',
'NetworkResourcePropertiesBase',
'NetworkResourceDescription',
'GatewayDestination',
'TcpConfig',
'HttpRouteMatchPath',
'HttpRouteMatchHeader',
'HttpRouteMatchRule',
'HttpRouteConfig',
'HttpHostConfig',
'HttpConfig',
'GatewayProperties',
'GatewayResourceDescription',
'ImageRegistryCredential',
'EnvironmentVariable',
'Setting',
'ContainerLabel',
'EndpointProperties',
'ResourceRequests',
'ResourceLimits',
'ResourceRequirements',
'DiagnosticsRef',
'ReliableCollectionsRef',
'ContainerState',
'ContainerEvent',
'ContainerInstanceView',
'ContainerCodePackageProperties',
'AutoScalingTrigger',
'AutoScalingMechanism',
'AutoScalingPolicy',
'ServiceResourceDescription',
'DiagnosticsSinkProperties',
'DiagnosticsDescription',
'ApplicationProperties',
'AzureInternalMonitoringPipelineSinkDescription',
'ApplicationResourceDescription',
'AddRemoveReplicaScalingMechanism',
'AutoScalingMetric',
'AutoScalingResourceMetric',
'ServiceProperties',
'ServiceReplicaProperties',
'ServiceReplicaDescription',
'AverageLoadScalingTrigger',
'ContainerLogs',
'OperationResultPaged',
'SecretResourceDescriptionPaged',
'SecretValueResourceDescriptionPaged',
'VolumeResourceDescriptionPaged',
'NetworkResourceDescriptionPaged',
'GatewayResourceDescriptionPaged',
'ApplicationResourceDescriptionPaged',
'ServiceResourceDescriptionPaged',
'ServiceReplicaDescriptionPaged',
'ResourceStatus',
'HealthState',
'SecretKind',
'VolumeProvider',
'SizeTypes',
'ApplicationScopedVolumeKind',
'NetworkKind',
'HeaderMatchType',
'OperatingSystemType',
'DiagnosticsSinkKind',
'AutoScalingMechanismKind',
'AutoScalingMetricKind',
'AutoScalingResourceMetricName',
'AutoScalingTriggerKind',
]
| StarcoderdataPython |
1651319 | from opyapi.schema.types.type import Type
def is_optional(type_definition) -> bool:
    """Return True when *type_definition* may hold ``None``.

    Two cases are supported:
    - an opyapi schema ``Type`` instance, whose own ``nullable`` flag is
      authoritative;
    - a typing construct such as ``Optional[int]`` / ``Union[int, None]``
      (including PEP 604 ``int | None``, which exposes ``__args__`` but not
      ``__origin__``), detected by ``NoneType`` appearing in ``__args__``.
    """
    if isinstance(type_definition, Type):
        return type_definition.nullable
    # getattr with a default avoids the AttributeError the old code could hit
    # when __origin__ existed without __args__, and also covers plain classes
    # such as ``int`` that carry neither attribute.
    args = getattr(type_definition, "__args__", ())
    return type(None) in args
| StarcoderdataPython |
3363250 | import discord
import os
import time
import re
import datetime
from PIL import Image, ImageFont, ImageDraw
from finnhub import client as Finnhub # api docs: https://finnhub.io/docs/api
import requests
import matplotlib
import mplfinance
import stocks
import pandas as pd
# Two separate Finnhub API keys / clients, read from the environment.
# NOTE(review): presumably the split spreads calls across per-key rate
# limits - confirm against the deployment's key configuration.
FINNHUB_CHART_API_TOKEN_2 = os.environ.get('FINNHUB_API_TOKEN_2')
FINNHUB_CRYPTO_API_TOKEN_3 = os.environ.get('FINNHUB_API_TOKEN_3')
finnhub_chart_client = Finnhub.Client(api_key=FINNHUB_CHART_API_TOKEN_2)
finnhub_other_crypto_client = Finnhub.Client(api_key=FINNHUB_CRYPTO_API_TOKEN_3)
async def chart(ctx, ticker, timeframe, chart_type):
    """Build a stock/crypto chart and post it to the invoking Discord channel.

    Dispatched from stock_candle() and stock_line() in bot.py.

    Parameters
    ----------
    ctx : discord.ext.commands.Context
        context of the invoking command
    ticker : string
        stock ticker (ex. AAPL, MSFT, TSLA, ^DJI, BTCUSDT)
    timeframe : string
        window of history to plot (D, 5D, 2W, 3M, 1Y, MAX, ...)
    chart_type : string
        'line' or 'candle' - which chart style to render
    """
    ticker = ticker.upper()
    timeframe = timeframe.upper()
    quote, dec = await stocks.get_finnhub_quote(ticker, finnhub_other_crypto_client)
    latest_price = quote['c']
    # Prefer the company's full name for the chart title; fall back to the
    # raw ticker when the profile lookup fails (unknown symbol, crypto, ...).
    try:
        company_name = finnhub_chart_client.company_profile(symbol=ticker)['name']
    except:
        company_name = ticker
    num_days = get_num_days(timeframe)
    if num_days == -1:
        await ctx.send(embed=discord.Embed(description='Invalid timeframe specified!', color=discord.Color.dark_red()))
        return
    # Render the requested chart style to a PNG on disk.
    if chart_type == 'candle':
        filename, start_price = candlestick(ticker, num_days, quote)
    elif chart_type == 'line':
        filename, start_price = line(ticker, num_days, quote)
    if start_price == -1:
        await ctx.send(embed=discord.Embed(description='Invalid ticker', color=discord.Color.dark_red()))
        return
    await crop_chart(filename, company_name + ', ' + timeframe, ticker + ', ' + timeframe, start_price, latest_price, )
    # Upload the finished image, then clean up the temporary file.
    await ctx.send(file=discord.File(filename))
    os.remove(filename)
def get_crypto_candle_data(ticker, to_time, from_time, res):
    """Gets the json for the crypto candle data for the ticker
    If ticker doesn't exist then it will return a json block
    where s : 'no data'
    Parameters
    ----------
    ticker : string
        crypto ticker
    to_time : timestamp
        timestamp to mark the most recent data to be fetched
    from_time : timestamp
        timestamp to mark the oldest data to be fetched
    res : int or string (must be: 1, 5, 15, 30, 60, D, W, M)
        resolution, frequency of data points
    Returns
    -------
    candle_crypto : dictionary
        candle data for the specified crypto ticker
    """
    # Try the two most liquid exchanges first; each response carries a status
    # field 's' that is 'ok' only when data came back.
    candle_crypto = finnhub_chart_client.crypto_candle(symbol = 'BINANCE:'+ ticker, resolution=res, **{'from':str(from_time), 'to': str(to_time)})
    status = candle_crypto['s']
    if status == 'ok':
        return candle_crypto
    candle_crypto = finnhub_chart_client.crypto_candle(symbol = 'COINBASE:'+ ticker, resolution=res, **{'from':str(from_time), 'to': str(to_time)})
    status = candle_crypto['s']
    if status == 'ok':
        return candle_crypto
    # Iterate through remaining exchanges
    # NOTE(review): the exclusion list mixes 'Binance' and 'COINBASE' while
    # the symbol prefixes above are 'BINANCE:' / 'COINBASE:'. If
    # crypto_exchange() returns upper-case names, 'Binance' never matches and
    # BINANCE is queried twice - verify against the Finnhub /crypto/exchange
    # response.
    crypto_exchanges = finnhub_chart_client.crypto_exchange()
    for exchange in [i for i in crypto_exchanges if i not in ['Binance', 'COINBASE']]:
        # The secondary-key client is used here, spreading API usage across keys.
        candle_crypto = finnhub_other_crypto_client.crypto_candle(symbol = exchange + ':'+ ticker, resolution=res, **{'from':str(from_time), 'to': str(to_time)})
        status = candle_crypto['s']
        if status == 'ok':
            return candle_crypto
    # status is never 'ok' returns { s: 'no_data'}
    return candle_crypto
def get_num_days(timeframe):
    """Translate a timeframe token into a number of calendar days.

    Accepted forms (upper-case): 'D', 'W', 'M', 'Y', optionally prefixed by
    a count (e.g. '5D', '2W', '6M', '10Y'), or 'MAX' for all available
    history.

    Parameters
    ----------
    timeframe : string
        the timeframe from which to fetch the stock data

    Returns
    -------
    num_days : int
        number of days in the timeframe, or -1 when the token is invalid
    """
    if timeframe == 'MAX':
        return 15000
    # fullmatch (rather than the old unanchored match) rejects trailing
    # garbage such as '5DAYS', which previously slipped through the prefix
    # match and crashed on int().
    parsed = re.fullmatch(r'(\d*)([DWMY])', timeframe)
    if parsed is None:
        return -1
    count = int(parsed.group(1)) if parsed.group(1) else 1
    days_per_unit = {'D': 1, 'W': 7, 'M': 30, 'Y': 365}
    return count * days_per_unit[parsed.group(2)]
async def crop_chart(filename, title, alt_title, start_price, current_price):
    """Crops the chart and adds the enlarged title, current price,
    price change and percent change
    Parameters
    ----------
    filename : string
        the filename of the chart created with mplfinance
    title : string
        the title of the chart (the company name)
    alt_title : string
        the alternative title of the chart (company ticker)
    start_price : float
        previous close share price
    current_price : float
        the current share price
    """
    im = Image.open(filename)
    font = ImageFont.truetype('fonts/timesbd.ttf', size=30)
    # NOTE(review): assumes start_price != 0; a zero previous close would
    # raise ZeroDivisionError below - confirm upstream guarantees.
    price_change = current_price - start_price
    percent_change = ((current_price / start_price)-1) * 100
    # Formatted strings for price / change, plus a green/red Color object.
    ccp, cpc, cpercentc, color = await stocks.get_string_change(current_price, price_change, percent_change, '{:,.2f}')
    # Translate the discord Color into a hex value PIL can draw with.
    color = '#00ff00' if color == discord.Color.green() else '#ed2121'
    # get image width and height
    width, height = im.size
    # Crop margins (pixels) around the mplfinance-rendered figure.
    left = 50
    top = 50
    right = width - 130
    bottom = height - 55
    # Overlay patch hiding unwanted figure artwork before cropping.
    blackout = Image.open("media/blackout.png")
    im.paste(blackout, (right-18, top+30))
    # crop
    im = im.crop((left, top, right, bottom))
    draw = ImageDraw.Draw(im)
    # get new width and height
    width, height = im.size
    # NOTE(review): ImageDraw.textsize was removed in Pillow 10 - verify the
    # pinned Pillow version, or migrate to textbbox/textlength on upgrade.
    title_width, title_height = draw.textsize(title, font=font)
    # if company name too long then use ticker
    if title_width > 400:
        title = alt_title
        title_width, title_height = draw.textsize(title, font=font)
    location = ((width-title_width)/2, 10)
    # draw title (Company Name, timeframe)
    draw.text(location, title ,fill='white',font=font)
    # draw current price
    draw.text((100, 10), ccp, fill='#3ec2fa', font=font)
    # Use smaller font size
    font = ImageFont.truetype('fonts/timesbd.ttf', size=20)
    # price change and percent change
    pcpc = cpc + ' (' + cpercentc + ')'
    # get price change and percent change width and height
    pc_width, pc_height = draw.textsize(pcpc, font=font)
    #draw price change and percent change
    draw.text((width-17-pc_width, 20), cpc + ' (' + cpercentc + ')', fill=color, font=font)
    im.save(filename)
def create_close_line(dates, close):
    """Build a flat 'previous close' reference line for the chart.

    Produces a one-column DataFrame (indexed by date) holding the same
    previous-close price at every chart date, so mplfinance can overlay it
    as a horizontal guide line.

    Parameters
    ----------
    dates : list(datetime)
        dates in the chart
    close : float
        previous close share price

    Returns
    -------
    previous_close : DataFrame
        horizontal previous close line
    """
    # One entry per chart date; list multiplication replaces the original
    # append loop and also behaves correctly for an empty date list (the old
    # code produced mismatched column lengths there).
    data = {'Date': dates, 'Close': [close] * len(dates)}
    # Create the dataframe from dictionary
    previous_close = pd.DataFrame.from_dict(data)
    # Set date as the index
    previous_close.set_index('Date', inplace=True)
    # Convert date to correct format
    previous_close.index = pd.to_datetime(previous_close.index)
    return previous_close
def add_line_at_date(date, dates):
    """Build a Close series that is NaN everywhere except at *date*.

    When *date* is found in *dates*, the pair (0, 999999) is emitted for the
    matching date and the slot right after it, producing a near-vertical
    line once plotted; every other position holds NaN so nothing is drawn.

    Parameters
    ----------
    date : datetime object
        the date to create the verticle line
    dates : list(datetime)
        dates in the chart

    Returns
    -------
    closes : list
        close price list (all nan values except 0, 999999 if date in dates)
    closing_time_exists : boolean
        true if date in dates and 0, 999999 added to closes
    """
    closes = []
    found = False
    idx = 0
    total = len(dates)
    while idx < total:
        if dates[idx] == date:
            found = True
            # The spike consumes two slots: this date and the one after it.
            closes.extend([0, 999999])
            idx += 2
        else:
            closes.append(float('nan'))
            idx += 1
    return closes, found
def create_endtrading_line(dates):
    """Creates the verticle line that represents the end
    of the trading period (4pm EST)
    Parameters
    ----------
    dates : list(datetime)
        dates in the chart
    Returns
    -------
    end_trading : DataFrame
        verticle end trading line
    """
    # Target the 16:00 sample of the chart's last day.
    today = dates[-1]
    date = datetime.datetime(today.year, today.month, today.day, 16)
    data = dict()
    # These initial values are overwritten immediately below.
    data['Close'] = []
    data['Date'] = dates
    alternate = True
    iteration = 1
    data['Close'], successful = add_line_at_date(date, dates)
    # If 16:00 itself has no sample, probe alternating minutes before
    # (15:59, 15:58, ...) and after (16:01, 16:02, ...) until one matches.
    # NOTE(review): if no data point lies within roughly an hour of the
    # close, iteration grows past 60 and datetime() raises ValueError
    # (minute out of range) - confirm intraday data always brackets 16:00.
    while not successful:
        data['Close'] = []
        if alternate:
            date = datetime.datetime(today.year, today.month, today.day, 15, 60-iteration)
        else:
            date = datetime.datetime(today.year, today.month, today.day, 16, iteration)
        iteration += 1
        alternate = not alternate
        data['Close'], successful = add_line_at_date(date, dates)
    # Create the dataframe from dictionary
    end_trading = pd.DataFrame.from_dict(data)
    # Set date as the index
    end_trading.set_index('Date', inplace=True)
    # Convert date to correct format
    end_trading.index = pd.to_datetime(end_trading.index)
    return end_trading
def candlestick(ticker, days, quote):
    """Render a candlestick chart PNG for *ticker* covering *days* days.

    Parameters
    ----------
    ticker : string
        stock ticker (ex. AAPL, MSFT, TSLA, ^DJI, BTCUSDT)
    days : int
        number of days of data to fetch
    quote : dictionary
        quote for the ticker - for more info see get_finnhub_quote()

    Returns
    -------
    filename : string
        name of the image file created by mplfinance.plot(), '' on failure
    start_price : float
        previous close share price, or -1 for an invalid ticker
    """
    df, dates, create_vert_line, start_price = create_dataframe(ticker, days, 5, quote['pc'])
    # Bail out on an invalid ticker. The df guard fixes a crash: when the
    # candle lookup fails, create_dataframe returns (None, ..., -1) even
    # though quote['t'] may be non-zero, and the old code fell through with
    # df=None into create_close_line().
    if quote['t'] == 0 or df is None:  # invalid ticker / no candle data
        return '', -1
    # define kwargs
    kwargs = dict(type='candle', ylabel='Share Price', volume = True, figratio=(10,8))
    # Custom market colors on top of the `nightclouds` dark style.
    mc = mplfinance.make_marketcolors(up='#00ff00',down='#ed2121',inherit=True)
    # Horizontal dashed guide line at the previous close.
    previous_close_line = create_close_line(dates, start_price)
    guide_lines = [
        mplfinance.make_addplot(previous_close_line, color='#3ec2fa', linestyle='dashdot')
    ]
    today = dates[-1]
    closing = datetime.datetime(today.year, today.month, today.day, 16)  # closing time object
    day_of_the_week = datetime.datetime.today().weekday()
    # After hours (or on weekends) add a vertical end-of-trading marker.
    if create_vert_line and (today > closing or day_of_the_week > 4):
        endtrading_line = create_endtrading_line(dates)
        guide_lines.append(mplfinance.make_addplot(endtrading_line, color='#fcfc03'))
    # Create a new style based on `nightclouds` but with my own `marketcolors`:
    s = mplfinance.make_mpf_style(base_mpf_style='nightclouds',marketcolors=mc)
    # Plot the candlestick chart and save to ticker-candle.png
    filename = ticker.upper() + '-candle.png'
    save = dict(fname=filename, dpi = 100, pad_inches=0.25)
    mplfinance.plot(df, addplot=guide_lines, **kwargs, style=s, savefig=save)
    return filename, start_price
def line(ticker, days, quote):
    """Render a line chart PNG for *ticker* covering *days* days.

    Parameters
    ----------
    ticker : string
        stock ticker (ex. AAPL, MSFT, TSLA, ^DJI, BTCUSDT)
    days : int
        number of days of data to fetch
    quote : dictionary
        quote for the ticker - for more info see get_finnhub_quote()

    Returns
    -------
    filename : string
        name of the image file created by mplfinance.plot(), '' on failure
    start_price : float
        previous close share price, or -1 for an invalid ticker
    """
    df, dates, create_vert_line, start_price = create_dataframe(ticker, days, 1, quote['pc'])
    # Bail out on an invalid ticker. The df guard fixes a crash: when the
    # candle lookup fails, create_dataframe returns (None, ..., -1) even
    # though quote['t'] may be non-zero, and the old code fell through with
    # df=None into create_close_line().
    if quote['t'] == 0 or df is None:  # invalid ticker / no candle data
        return '', -1
    # define kwargs
    kwargs = dict(type='line', ylabel='Share Price', volume = True, figratio=(10,8))
    # Custom market colors on top of the `nightclouds` dark style.
    mc = mplfinance.make_marketcolors(up='#00ff00',down='#ed2121', inherit=True)
    # Horizontal dashed guide line at the previous close.
    previous_close_line = create_close_line(dates, start_price)
    guide_lines = [
        mplfinance.make_addplot(previous_close_line, color='#3ec2fa', linestyle='dashdot')
    ]
    today = dates[-1]
    closing = datetime.datetime(today.year, today.month, today.day, 16)  # closing time object
    day_of_the_week = datetime.datetime.today().weekday()
    # After hours (or on weekends) add a vertical end-of-trading marker.
    if create_vert_line and (today > closing or day_of_the_week > 4):
        endtrading_line = create_endtrading_line(dates)
        guide_lines.append(mplfinance.make_addplot(endtrading_line, color='#fcfc03'))
    # Create a new style based on `nightclouds` but with my own `marketcolors`:
    s = mplfinance.make_mpf_style(base_mpf_style = 'nightclouds',marketcolors = mc)
    # Plot the line chart and save to ticker-line.png; red line when down on
    # the day, green when up.
    filename = ticker.upper() + '-line.png'
    save = dict(fname=filename, dpi = 100, pad_inches=0.25)
    mplfinance.plot(df, addplot=guide_lines, **kwargs, linecolor='#ed2121' if start_price > quote['c'] else '#00ff00', style=s, savefig=save)
    return filename, start_price
def get_from_time(days):
    """Return the 'from' timestamp for a candle query ending now.

    For intraday (days == 1) the window starts at today's 9:30 open when
    trading has begun on a weekday, at yesterday's 9:30 before the open, or
    at Friday's 9:30 on weekends. For longer windows it starts days+1 days
    before today's open.

    Parameters
    ----------
    days : int
        number of days from the to_time to get the timestamp

    Returns
    -------
    from_time : timestamp
        timestamp 'days' away from the current time
    """
    now = datetime.datetime.now()
    opening = datetime.datetime(now.year, now.month, now.day, 9, 30)  # today's open
    weekday = datetime.datetime.today().weekday()
    if days != 1:
        # Multi-day window: back up days+1 whole days from today's open.
        return int((opening - datetime.timedelta(days=days + 1)).timestamp())
    if weekday < 5:
        # Weekday: today's open once trading started, else yesterday's.
        start = opening if now > opening else opening - datetime.timedelta(days=1)
    else:
        # Weekend: roll back to Friday's open.
        start = opening - datetime.timedelta(days=(weekday - 4))
    return int(start.timestamp())
def get_candle_data(ticker, res, days):
    """Fetch candle data, trying stocks first then falling back to crypto.

    Parameters
    ----------
    ticker : string
        stock ticker (ex. AAPL, MSFT, TSLA, ^DJI, BTCUSDT)
    res : int or string (must be: 1, 5, 15, 30, 60, D, W, M)
        resolution, frequency of data points
    days : int
        number of days of data to fetch

    Returns
    -------
    candle : dictionary
        candle data for the specified ticker
    is_not_crypto : boolean
        false if the ticker was resolved via the crypto fallback
    """
    now = datetime.datetime.now()
    window_start = get_from_time(days)
    window_end = int(datetime.datetime.now().timestamp())
    candle = finnhub_chart_client.stock_candle(symbol=ticker, resolution=res, **{'from': str(window_start), 'to': str(window_end)})
    if candle['s'] == 'ok':
        return candle, True
    # Stock lookup failed - retry against crypto exchanges. Crypto trades
    # around the clock, so for intraday widen the window to the last 24h.
    if days == 1:
        window_start = int((now - datetime.timedelta(days=1)).timestamp())
    return get_crypto_candle_data(ticker, window_end, window_start, res), False
def create_dataframe(ticker, days, res, previous_close):
    """Create the OHLCV dataframe consumed by mplfinance.plot().

    Parameters
    ----------
    ticker : string
        stock ticker (ex. AAPL, MSFT, TSLA, ^DJI, BTCUSDT)
    days : int
        number of days of data to fetch
    res : int or string (must be: 1, 5, 15, 30, 60, D, W, M)
        resolution, frequency of data points
    previous_close : float
        used only for intraday - previous days close share price

    Returns
    -------
    stockdata_df : DataFrame
        dataframe created with the candle data of the passed in ticker
        (None when the ticker is invalid)
    dates : list(datetime)
        dates in the data (None when the ticker is invalid)
    is_intraday_not_crypto : boolean
        only true if the ticker is not crypto and days is 1
    current_price : float
        the most recently fetched share price (-1 when the ticker is invalid)
    """
    # api docs for financialmodelingprep.com: https://financialmodelingprep.com/developer/docs/
    # Pick a resolution appropriate to the window, then fetch the candles.
    # (Leftover debug print() calls from the original have been removed.)
    if days == 1:  # intraday - keep the caller-supplied resolution
        stockdata, is_intraday_not_crypto = get_candle_data(ticker, res, days)
        if stockdata['s'] != 'ok':  # invalid ticker
            return None, None, True, -1
    elif days < 6:
        # Short multi-day windows: coarsen 5-minute to hourly, else 30-minute.
        res = 60 if res == 5 else 30
        stockdata, is_intraday_not_crypto = get_candle_data(ticker, res, days)
        is_intraday_not_crypto = False  # multi-day is never 'intraday'
        if stockdata['s'] != 'ok':  # invalid ticker
            return None, None, False, -1
    else:
        stockdata, is_intraday_not_crypto = get_candle_data(ticker, 'D', days)
        is_intraday_not_crypto = False  # multi-day is never 'intraday'
        if stockdata['s'] != 'ok':  # invalid ticker
            return None, None, False, -1
    reformatted_stockdata = dict()
    reformatted_stockdata['Date'] = []
    reformatted_stockdata['Open'] = []
    reformatted_stockdata['High'] = []
    reformatted_stockdata['Low'] = []
    reformatted_stockdata['Close'] = []
    reformatted_stockdata['Volume'] = []
    if days == 1:
        # Seed intraday charts with yesterday's close so the plot (and the
        # change calculations) start at the previous-close reference level.
        reformatted_stockdata['Date'].append(datetime.datetime.fromtimestamp(stockdata['t'][0]) - datetime.timedelta(days=1))
        reformatted_stockdata['Open'].append(previous_close)
        reformatted_stockdata['High'].append(previous_close)
        reformatted_stockdata['Low'].append(previous_close)
        reformatted_stockdata['Close'].append(previous_close)
        reformatted_stockdata['Volume'].append(0)
    # Transpose Finnhub's column-oriented payload into per-field lists.
    for index in range(len(stockdata['t'])):
        reformatted_stockdata['Date'].append(datetime.datetime.fromtimestamp(stockdata['t'][index]))
        reformatted_stockdata['Open'].append(stockdata['o'][index])
        reformatted_stockdata['High'].append(stockdata['h'][index])
        reformatted_stockdata['Low'].append(stockdata['l'][index])
        reformatted_stockdata['Close'].append(stockdata['c'][index])
        reformatted_stockdata['Volume'].append(stockdata['v'][index])
    # Convert to dataframe
    stockdata_df = pd.DataFrame.from_dict(reformatted_stockdata)
    # Set date as the index
    stockdata_df.set_index('Date', inplace=True)
    # Convert date to correct format
    stockdata_df.index = pd.to_datetime(stockdata_df.index)
    return stockdata_df, reformatted_stockdata['Date'], is_intraday_not_crypto, reformatted_stockdata['Close'][0]
1798990 | <reponame>pytexas/PyTexas<filename>conference/profiles/admin.py
import traceback
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.template.response import TemplateResponse
from django.conf import settings
from django import http
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django import forms
from django.views.decorators.csrf import csrf_exempt
from .models import User, SocialHandle
class SocialInline(admin.TabularInline):
    # Edit a user's SocialHandle rows inline on the User admin page.
    model = SocialHandle
class User2CreationForm(UserCreationForm):
    """Admin user-creation form bound to the custom profiles User model."""
    class Meta:
        model = User
        fields = ("username", "email", "verified_email")
    def clean_username(self):
        # EAFP uniqueness check: the username is valid only when no existing
        # user already holds it.
        username = self.cleaned_data["username"]
        try:
            User._default_manager.get(username=username)
        except User.DoesNotExist:
            return username
        # NOTE(review): relies on self.error_messages containing a
        # 'duplicate_username' key - verify that key exists on the Django
        # version in use (it was removed from UserCreationForm in newer
        # releases).
        raise forms.ValidationError(
            self.error_messages['duplicate_username'], code='duplicate_username')
class User2ChangeForm(UserChangeForm):
    """Admin change form bound to the custom profiles User model."""
    class Meta:
        model = User
        fields = '__all__'
permission_required('profiles.change_user')
class CurrentSpeakerFilter(admin.SimpleListFilter):
    """Admin sidebar filter narrowing users to current-conference speakers.

    Currently unreferenced: the list_filter line on User2Admin below is
    commented out.
    """
    title = 'Current Speaker'
    parameter_name = 'current'
    def lookups(self, request, model_admin):
        # Single filter option.
        return (('1', 'Current Speakers'),)
    def queryset(self, request, queryset):
        # Users with an accepted, non-lightning session in the default
        # conference; any other value leaves the queryset unfiltered.
        if self.value() == '1':
            return queryset.filter(
                session__status='accepted',
                session__conference__slug=settings.DEFAULT_CONF).exclude(
                    session__stype='lightning')
@admin.register(User)
class User2Admin(UserAdmin):
    """Custom admin for the profiles User model."""
    list_display = ('username', 'email', 'name', 'phone', 'current_speaker',
                    'is_staff', 'is_superuser')
    #list_filter = (CurrentSpeakerFilter,)
    search_fields = ('name', 'email', 'phone')
    inlines = (SocialInline,)
    form = User2ChangeForm
    add_form = User2CreationForm
    fieldsets = (
        (None, {
            'fields': ('username', 'password')
        }),
        ('Personal info', {
            'fields': ('name', 'title', 'location', 'email', 'verified_email',
                       'phone', 'website', 'avatar', 'biography')
        }),
        ('Permissions', {
            'fields': ('is_active', 'is_staff', 'is_superuser', 'groups')
        }),
        ('Important dates', {
            'fields': ('last_login', 'date_joined', 'from_import')
        }),
    )
    readonly_fields = ('last_login', 'date_joined', 'from_import')
    # NOTE(review): the '<PASSWORD>' entries below look like dataset
    # redaction placeholders (plausibly 'password1'/'password2' originally) -
    # confirm against the real source before relying on this form.
    add_fieldsets = ((None, {
        'classes': ('wide',),
        'fields': ('username', 'email', 'verified_email', '<PASSWORD>',
                   '<PASSWORD>')
    }),)
    def current_speaker(self, obj):
        # The early return disables the real check; everything after it is
        # unreachable dead code, apparently kept for later reinstatement.
        return 'coming soon'
        if obj.session_set.filter(
                status='accepted', conference__slug=settings.DEFAULT_CONF).exclude(
                stype='lightning').count() > 0:
            return '<strong style="color: green;">✓</strong>'
        return '<strong style="color: red;">×</strong>'
    current_speaker.allow_tags = True
| StarcoderdataPython |
1655576 | # Generated by Django 3.2.7 on 2021-09-16 15:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.2.7): adds the Succession_Casts
    model, renames two fields, and drops the old Succession_Cast model.
    Generated code - avoid hand edits."""
    dependencies = [
        ('django_world', '0002_auto_20210913_1528'),
    ]
    operations = [
        migrations.CreateModel(
            name='Succession_Casts',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('description', models.TextField()),
                ('birthday', models.DateField()),
                # PROTECT: a Succession cannot be deleted while casts reference it.
                ('succession', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='django_world.succession')),
            ],
        ),
        migrations.RenameField(
            model_name='succession_season_episodes',
            old_name='episode',
            new_name='succession_seasons',
        ),
        migrations.RenameField(
            model_name='succession_seasons',
            old_name='season',
            new_name='succession',
        ),
        migrations.DeleteModel(
            name='Succession_Cast',
        ),
    ]
| StarcoderdataPython |
1620891 | # Copyright (c) Microsoft Corporation.
# Licensed under the Apache License 2.0.
import random
import os
from base64 import b64decode
import azext_aro.vendored_sdks.azure.mgmt.redhatopenshift.v2022_04_01.models as openshiftcluster
from azure.cli.command_modules.role import GraphError
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import sdk_no_wait
from azure.cli.core.azclierror import FileOperationError, ResourceNotFoundError, UnauthorizedError
from azext_aro._aad import AADManager
from azext_aro._rbac import assign_role_to_resource, \
has_role_assignment_on_resource
from azext_aro._rbac import ROLE_NETWORK_CONTRIBUTOR, ROLE_READER
from azext_aro._validators import validate_subnets
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, parse_resource_id
from msrest.exceptions import HttpOperationError
logger = get_logger(__name__)
FP_CLIENT_ID = 'f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875'
def aro_create(cmd, # pylint: disable=too-many-locals
               client,
               resource_group_name,
               resource_name,
               master_subnet,
               worker_subnet,
               vnet=None, # pylint: disable=unused-argument
               vnet_resource_group_name=None, # pylint: disable=unused-argument
               location=None,
               pull_secret=None,
               domain=None,
               cluster_resource_group=None,
               fips_validated_modules=None,
               client_id=None,
               client_secret=None,
               pod_cidr=None,
               service_cidr=None,
               software_defined_network=None,
               disk_encryption_set=None,
               master_encryption_at_host=False,
               master_vm_size=None,
               worker_encryption_at_host=False,
               worker_vm_size=None,
               worker_vm_disk_size_gb=None,
               worker_count=None,
               apiserver_visibility=None,
               ingress_visibility=None,
               tags=None,
               no_wait=False):
    """Create an Azure Red Hat OpenShift cluster.

    Validates the RP registration and subnets, provisions (or reuses) the
    cluster's AAD application/service principal, grants the cluster and RP
    service principals the permissions they need, builds the
    OpenShiftCluster payload (unset options fall back to the defaults
    visible below), and submits the create/update operation.
    """
    # Outside local RP development mode, fail fast if the resource provider
    # is not registered in the subscription.
    if not rp_mode_development():
        resource_client = get_mgmt_service_client(
            cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
        provider = resource_client.providers.get('Microsoft.RedHatOpenShift')
        if provider.registration_state != 'Registered':
            raise UnauthorizedError('Microsoft.RedHatOpenShift provider is not registered.',
                                    'Run `az provider register -n Microsoft.RedHatOpenShift --wait`.')
    validate_subnets(master_subnet, worker_subnet)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Random suffix shared by the default cluster resource group and domain.
    random_id = generate_random_id()
    aad = AADManager(cmd.cli_ctx)
    # Create an AAD application for the cluster when the caller did not
    # bring their own client_id/client_secret.
    if client_id is None:
        client_id, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
    # Ensure a service principal exists for the cluster application.
    client_sp_id = aad.get_service_principal_id(client_id)
    if not client_sp_id:
        client_sp_id = aad.create_service_principal(client_id)
    # The resource provider's own service principal must already exist.
    rp_client_sp_id = aad.get_service_principal_id(resolve_rp_client_id())
    if not rp_client_sp_id:
        raise ResourceNotFoundError("RP service principal not found.")
    # Smaller default worker size in development mode.
    if rp_mode_development():
        worker_vm_size = worker_vm_size or 'Standard_D2s_v3'
    else:
        worker_vm_size = worker_vm_size or 'Standard_D4s_v3'
    # The service expects 'Public'/'Private' capitalization.
    if apiserver_visibility is not None:
        apiserver_visibility = apiserver_visibility.capitalize()
    if ingress_visibility is not None:
        ingress_visibility = ingress_visibility.capitalize()
    oc = openshiftcluster.OpenShiftCluster(
        location=location,
        tags=tags,
        cluster_profile=openshiftcluster.ClusterProfile(
            pull_secret=pull_secret or "",
            domain=domain or random_id,
            resource_group_id=(f"/subscriptions/{subscription_id}"
                               f"/resourceGroups/{cluster_resource_group or 'aro-' + random_id}"),
            fips_validated_modules='Enabled' if fips_validated_modules else 'Disabled',
        ),
        service_principal_profile=openshiftcluster.ServicePrincipalProfile(
            client_id=client_id,
            client_secret=client_secret,
        ),
        network_profile=openshiftcluster.NetworkProfile(
            pod_cidr=pod_cidr or '10.128.0.0/14',
            service_cidr=service_cidr or '172.30.0.0/16',
            software_defined_network=software_defined_network or 'OpenShiftSDN'
        ),
        master_profile=openshiftcluster.MasterProfile(
            vm_size=master_vm_size or 'Standard_D8s_v3',
            subnet_id=master_subnet,
            encryption_at_host='Enabled' if master_encryption_at_host else 'Disabled',
            disk_encryption_set_id=disk_encryption_set,
        ),
        worker_profiles=[
            openshiftcluster.WorkerProfile(
                name='worker', # TODO: 'worker' should not be hard-coded
                vm_size=worker_vm_size,
                disk_size_gb=worker_vm_disk_size_gb or 128,
                subnet_id=worker_subnet,
                count=worker_count or 3,
                encryption_at_host='Enabled' if worker_encryption_at_host else 'Disabled',
                disk_encryption_set_id=disk_encryption_set,
            )
        ],
        apiserver_profile=openshiftcluster.APIServerProfile(
            visibility=apiserver_visibility or 'Public',
        ),
        ingress_profiles=[
            openshiftcluster.IngressProfile(
                name='default', # TODO: 'default' should not be hard-coded
                visibility=ingress_visibility or 'Public',
            )
        ],
    )
    # Grant both the cluster SP and the RP SP the role assignments they
    # need on the cluster's resources before creating.
    sp_obj_ids = [client_sp_id, rp_client_sp_id]
    ensure_resource_permissions(cmd.cli_ctx, oc, True, sp_obj_ids)
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=oc)
def aro_delete(cmd, client, resource_group_name, resource_name, no_wait=False):
    """Delete an ARO cluster.

    Before issuing the delete, makes a best-effort attempt to repair the RP
    service principal's role assignments on the cluster's resources, since
    customers frequently remove them.
    """
    # TODO: clean up rbac
    rp_client_sp_id = None
    oc = None  # stays None when the cluster resource cannot be fetched
    try:
        oc = client.get(resource_group_name, resource_name)
    except CloudError as e:
        if e.status_code == 404:
            raise ResourceNotFoundError(e.message) from e
        logger.info(e.message)
    except HttpOperationError as e:
        logger.info(e.message)
    aad = AADManager(cmd.cli_ctx)
    # Best effort - assume the role assignments on the SP exist if exception raised
    try:
        rp_client_sp_id = aad.get_service_principal_id(resolve_rp_client_id())
        if not rp_client_sp_id:
            raise ResourceNotFoundError("RP service principal not found.")
    except GraphError as e:
        logger.info(e.message)
    # Customers frequently remove the Cluster or RP's service principal permissions.
    # Attempt to fix this before performing any action against the cluster.
    # BUGFIX: also guard on `oc` - it is unbound in the original code when the
    # initial GET failed with a non-404 error, causing a NameError here.
    if rp_client_sp_id and oc is not None:
        ensure_resource_permissions(cmd.cli_ctx, oc, False, [rp_client_sp_id])
    return sdk_no_wait(no_wait, client.begin_delete,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name)
def aro_list(client, resource_group_name=None):
    """List ARO clusters, optionally scoped to a single resource group."""
    if not resource_group_name:
        return client.list()
    return client.list_by_resource_group(resource_group_name)
def aro_show(client, resource_group_name, resource_name):
    """Return the ARO cluster resource for the given group and name."""
    cluster = client.get(resource_group_name, resource_name)
    return cluster
def aro_list_credentials(client, resource_group_name, resource_name):
    """Return the kubeadmin credentials for the given ARO cluster."""
    credentials = client.list_credentials(resource_group_name, resource_name)
    return credentials
def aro_list_admin_credentials(cmd, client, resource_group_name, resource_name, file="kubeconfig"):
    """Fetch the cluster's admin kubeconfig and write it to *file*.

    Warns (but does not abort) when the AdminKubeconfig preview feature is not
    registered on the subscription; raises FileOperationError if *file* exists.
    """
    # check for the presence of the feature flag and warn
    # the check shouldn't block the API call - ARM can cache a feature state for several minutes
    feature_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_FEATURES)
    feature = feature_client.features.get(resource_provider_namespace="Microsoft.RedHatOpenShift",
                                          feature_name="AdminKubeconfig")
    accepted_states = ["Registered",
                       "Registering"]
    if feature.properties.state not in accepted_states:
        logger.warning("This operation requires the Microsoft.RedHatOpenShift/AdminKubeconfig feature to be registered")
        logger.warning("To register run: az feature register --namespace Microsoft.RedHatOpenShift -n AdminKubeconfig")
    query_result = client.list_admin_credentials(resource_group_name, resource_name)
    # "x" = exclusive create: never overwrite an existing kubeconfig
    file_mode = "x"
    # the service returns the kubeconfig base64-encoded
    yaml_data = b64decode(query_result.kubeconfig).decode('UTF-8')
    try:
        with open(file, file_mode, encoding="utf-8") as f:
            f.write(yaml_data)
    except FileExistsError as e:
        raise FileOperationError(f"File {file} already exists.") from e
    logger.info("Kubeconfig written to file: %s", file)
def aro_update(cmd,
               client,
               resource_group_name,
               resource_name,
               refresh_cluster_credentials=False,
               client_id=None,
               client_secret=None,
               no_wait=False):
    """Update an ARO cluster's service principal credentials.

    Builds an OpenShiftClusterUpdate payload containing only the credential
    fields that actually changed, then submits a long-running update.
    """
    # if we can't read cluster spec, we will not be able to do much. Fail.
    oc = client.get(resource_group_name, resource_name)
    ocUpdate = openshiftcluster.OpenShiftClusterUpdate()
    # may create/rotate the SP application and repair RBAC as a side effect
    client_id, client_secret = cluster_application_update(cmd.cli_ctx, oc, client_id, client_secret, refresh_cluster_credentials) # pylint: disable=line-too-long
    if client_id is not None or client_secret is not None:
        # construct update payload
        ocUpdate.service_principal_profile = openshiftcluster.ServicePrincipalProfile()
        if client_secret is not None:
            ocUpdate.service_principal_profile.client_secret = client_secret
        if client_id is not None:
            ocUpdate.service_principal_profile.client_id = client_id
    return sdk_no_wait(no_wait, client.begin_update,
                       resource_group_name=resource_group_name,
                       resource_name=resource_name,
                       parameters=ocUpdate)
def rp_mode_development():
    """Return True when the RP_MODE env var selects development mode."""
    mode = os.environ.get('RP_MODE', '')
    return mode.lower() == 'development'
def rp_mode_production():
    """Return True when RP_MODE is unset or empty (the production default)."""
    return not os.environ.get('RP_MODE')
def generate_random_id():
    """Return an 8-character id: one lowercase letter followed by 7 lowercase alphanumerics."""
    first = random.choice('abcdefghijklmnopqrstuvwxyz')
    rest = [random.choice('abcdefghijklmnopqrstuvwxyz1234567890') for _ in range(7)]
    return first + ''.join(rest)
def get_route_tables_from_subnets(cli_ctx, subnets):
    """Return the set of route table IDs attached to the given subnet resource IDs."""
    network_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK)
    route_tables = set()
    for sn in subnets:
        sid = parse_resource_id(sn)
        # for a subnet resource ID, 'name' is the vnet and 'resource_name' the subnet
        subnet = network_client.subnets.get(resource_group_name=sid['resource_group'],
                                            virtual_network_name=sid['name'],
                                            subnet_name=sid['resource_name'])
        if subnet.route_table is not None:
            route_tables.add(subnet.route_table.id)
    return route_tables
def get_cluster_network_resources(cli_ctx, oc):
    """Return the network resource IDs (vnet + route tables) backing cluster *oc*."""
    master_subnet = oc.master_profile.subnet_id
    # worker_profiles is not returned when the cluster's resources no longer exist
    profiles = oc.worker_profiles
    worker_subnets = {w.subnet_id for w in profiles} if profiles is not None else set()
    master_parts = parse_resource_id(master_subnet)
    vnet = resource_id(
        subscription=master_parts['subscription'],
        resource_group=master_parts['resource_group'],
        namespace='Microsoft.Network',
        type='virtualNetworks',
        name=master_parts['name'],
    )
    return get_network_resources(cli_ctx, worker_subnets | {master_subnet}, vnet)
def get_network_resources(cli_ctx, subnets, vnet):
    """Return a set holding *vnet* plus all route tables attached to *subnets*."""
    return {vnet} | get_route_tables_from_subnets(cli_ctx, subnets)
def get_disk_encryption_resources(oc):
    """Return the cluster's disk encryption set ID as a (possibly empty) set."""
    des_id = oc.master_profile.disk_encryption_set_id
    return {des_id} if des_id else set()
# cluster_application_update manages cluster application & service principal update
# If called without parameters it should be best-effort
# If called with parameters it fails if something is not possible
# Flow:
# 1. Set fail - if we are in fail mode or best effort.
# 2. Sort out client_id, rp_client_sp, resources we care for RBAC.
# 3. If we are in refresh_cluster_credentials mode - attempt to reuse/recreate
# cluster service principal application and acquire client_id, client_secret
# 4. Reuse/Recreate service principal.
# 5. Sort out required rbac
def cluster_application_update(cli_ctx,
                               oc,
                               client_id,
                               client_secret,
                               refresh_cluster_credentials):
    """Manage cluster application & service principal update (see flow comment above).

    Best-effort when called without parameters; fails loudly (logs and re-raises)
    when any of client_id / client_secret / refresh_cluster_credentials is set.
    Returns the (possibly refreshed) ``(client_id, client_secret)`` pair.
    """
    # QUESTION: is there possible unification with the create path?
    rp_client_sp_id = None
    client_sp_id = None
    random_id = generate_random_id()
    # if any of these are set - we expect users to have access to fix rbac so we fail
    # common for 1 and 2 flows
    fail = client_id or client_secret or refresh_cluster_credentials
    aad = AADManager(cli_ctx)
    # check if we can see if RP service principal exists
    try:
        rp_client_sp_id = aad.get_service_principal_id(resolve_rp_client_id())
        if not rp_client_sp_id:
            # NOTE(review): ResourceNotFoundError is presumably not a GraphError,
            # so this raise escapes the except below - confirm that is intended
            raise ResourceNotFoundError("RP service principal not found.")
    except GraphError as e:
        if fail:
            logger.error(e.message)
            raise
        logger.info(e.message)
    # refresh_cluster_credentials refreshes cluster SP application.
    # At first it tries to re-use the existing application and generate a new password.
    # If the application does not exist - creates a new one
    if refresh_cluster_credentials:
        try:
            app = aad.get_application_object_id_by_client_id(client_id or oc.service_principal_profile.client_id)
            if not app:
                # we were not able to find an application, create a new one
                parts = parse_resource_id(oc.cluster_profile.resource_group_id)
                cluster_resource_group = parts['resource_group']
                client_id, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
            else:
                client_secret = aad.add_password(app)
        except GraphError as e:
            logger.error(e.message)
            raise
    # attempt to get/create SP if one was not found.
    try:
        client_sp_id = aad.get_service_principal_id(client_id or oc.service_principal_profile.client_id)
    except GraphError as e:
        if fail:
            logger.error(e.message)
            raise
        logger.info(e.message)
    if fail and not client_sp_id:
        client_sp_id = aad.create_service_principal(client_id or oc.service_principal_profile.client_id)
    # only pass along the service principals we actually resolved
    sp_obj_ids = [sp for sp in [rp_client_sp_id, client_sp_id] if sp]
    ensure_resource_permissions(cli_ctx, oc, fail, sp_obj_ids)
    return client_id, client_secret
def resolve_rp_client_id():
    """Return the RP first-party client id (env override allowed outside production)."""
    if not rp_mode_production():
        return os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID)
    return FP_CLIENT_ID
def ensure_resource_permissions(cli_ctx, oc, fail, sp_obj_ids):
    """Ensure each service principal in *sp_obj_ids* has the required role
    assignments on the cluster's network and disk-encryption resources.

    When *fail* is falsy this is best-effort: lookup errors are logged and the
    role assignment is assumed to already exist.
    """
    try:
        # Get cluster resources we need to assign permissions on, sort to ensure the same order of operations
        resources = {ROLE_NETWORK_CONTRIBUTOR: sorted(get_cluster_network_resources(cli_ctx, oc)),
                     ROLE_READER: sorted(get_disk_encryption_resources(oc))}
    except (CloudError, HttpOperationError) as e:
        if fail:
            logger.error(e.message)
            raise
        logger.info(e.message)
        return
    for sp_id in sp_obj_ids:
        for role in sorted(resources):
            for resource in resources[role]:
                # Create the role assignment if it doesn't exist
                # Assume that the role assignment exists if we fail to look it up
                resource_contributor_exists = True
                try:
                    resource_contributor_exists = has_role_assignment_on_resource(cli_ctx, resource, sp_id, role)
                except CloudError as e:
                    if fail:
                        logger.error(e.message)
                        raise
                    logger.info(e.message)
                if not resource_contributor_exists:
                    assign_role_to_resource(cli_ctx, resource, sp_id, role)
| StarcoderdataPython |
3382215 | from typing import Tuple, List, Dict
from matplotlib import pyplot
from tqdm import tqdm
from utils.argument_parser import parse_arguments
from utils.file_utils import get_files_to_be_processed, get_absolute_path, \
extract_judgements_from_given_year_from_file, extract_from_judgement, OUTPUT_DIRECTORY_PATH, save_data, \
create_output_dir
from utils.regex_utils import find_pattern_in_string, MONEY_PATTERN, convert_money_string_to_int, \
DETRIMENT_PATTERN, LAW_NAME, find_pattern_once_in_string, ARTICLE_PATTERN
def main() -> None:
    """Entry point: parse CLI arguments, process judgement files, emit all reports."""
    input_dir, judgement_year, dividing_point = parse_arguments()
    files = get_files_to_be_processed(input_dir)
    amounts, reference_count, detriment_count = process_files(files, input_dir, judgement_year)
    exercise1(amounts, judgement_year)
    exercise2(amounts, dividing_point, judgement_year)
    exercise3(reference_count, judgement_year)
    exercise4(detriment_count, judgement_year)
def process_files(files_to_be_processed: List[str], input_dir: str, judgement_year: int) -> Tuple[List[int], int, int]:
    """Scan all judgement files from *judgement_year* and gather statistics.

    Returns a tuple of:
      - all money amounts found (exercises 1 & 2),
      - count of judgements referencing the configured law article (exercise 3),
      - count of judgements containing the detriment word (exercise 4).
    """
    numbers, references_number, judgements_with_detriment_word = [], 0, 0
    for filename in tqdm(files_to_be_processed, mininterval=10, unit='files'):
        file = get_absolute_path(input_dir, filename)
        judgements_to_be_processed = extract_judgements_from_given_year_from_file(file, judgement_year)
        for judgement in judgements_to_be_processed:
            content = extract_from_judgement(judgement, 'textContent')
            find_money_in_content(content, numbers)  # collect data for exercise 1 and 2
            regulations = extract_from_judgement(judgement, 'referencedRegulations')  # collect data for exercise 3
            for regulation in regulations:
                if references_given_law_article(regulation, LAW_NAME, ARTICLE_PATTERN):
                    # count each judgement at most once, however many regulations match
                    references_number += 1
                    break
            match = find_pattern_once_in_string(DETRIMENT_PATTERN, content)  # collect data for exercise 4
            if match is not None:
                judgements_with_detriment_word += 1
    return numbers, references_number, judgements_with_detriment_word
def find_money_in_content(content: str, numbers: List[int]):
    """Append every money amount found in *content* (converted to int) onto *numbers*."""
    numbers.extend(
        convert_money_string_to_int(match)
        for match in find_pattern_in_string(MONEY_PATTERN, content)
    )
def references_given_law_article(regulation: Dict[str, str], law_name: str, article_pattern: str) -> bool:
    """Return True when *regulation* cites *law_name* and its text matches *article_pattern*."""
    if regulation['journalTitle'] != law_name:
        return False
    return find_pattern_once_in_string(article_pattern, regulation['text']) is not None
def exercise1(numbers: List[int], judgement_year: int):
    """Plot a histogram of all money amounts and persist their count."""
    create_histogram(numbers, "exercise-1", judgement_year)
    message = f'There have been {len(numbers)} money amounts in judgements from year {judgement_year}.'
    save_data(message, 'exercise-1.txt')
def create_histogram(numbers: List[int], filename: str, year: int):
    """Render a histogram of *numbers* and save it as a PNG named *filename*
    in the output directory (created on demand)."""
    output_file = get_absolute_path(OUTPUT_DIRECTORY_PATH, filename)
    create_output_dir()
    pyplot.hist(numbers)
    pyplot.title('Number of occurrences of given money\namount across judgements from year {}'.format(str(year)))
    pyplot.xlabel('Money amounts [pln]')
    pyplot.ylabel('Number of occurrences [-]')
    pyplot.grid(True)
    pyplot.savefig(output_file, dpi=150)
    # close the figure so successive calls don't draw onto the same axes
    pyplot.close()
def exercise2(numbers: List[int], dividing_point: int, judgement_year: int):
    """Split amounts around *dividing_point*, plot both halves, and persist counts."""
    upto, above = split_list_by_value(numbers, dividing_point)
    create_histogram(upto, f'exercise-2-upto-{dividing_point}', judgement_year)
    create_histogram(above, f'exercise-2-above-{dividing_point}', judgement_year)
    message = (f'There have been {len(upto)} numbers up to {dividing_point} '
               f'and {len(above)} above that amount in year {judgement_year}.')
    save_data(message, 'exercise-2.txt')
def split_list_by_value(numbers: List[int], value: int) -> Tuple[List[int], List[int]]:
    """Partition *numbers* into (<= value, > value) lists, preserving order."""
    upto = [n for n in numbers if n <= value]
    above = [n for n in numbers if n > value]
    return upto, above
def exercise3(occurrences: int, judgement_year: int):
    """Persist the count of judgements referencing the configured article."""
    message = f'There have been {occurrences} judgements referencing given article in {judgement_year}.'
    save_data(message, 'exercise-3.txt')
def exercise4(occurrences: int, judgement_year: int):
    """Persist the count of judgements containing the detriment word."""
    message = f'There have been {occurrences} judgements containing given word in {judgement_year}.'
    save_data(message, 'exercise-4.txt')
# Run the pipeline only when executed directly (not on import).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1689142 | # -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
''' Contains tests for the FleurBandDosWorkChain '''
from __future__ import absolute_import
from __future__ import print_function
import pytest
import aiida_fleur
import os
# Root directory of the installed aiida_fleur package (used to locate test data).
aiida_path = os.path.dirname(aiida_fleur.__file__)
# Si inp.xml shipped with the package's test fixtures.
TEST_INP_XML_PATH = os.path.join(aiida_path, 'tests/files/inpxml/Si/inp.xml')
# AiiDA entry point name of the FLEUR calculation plugin.
CALC_ENTRY_POINT = 'fleur.fleur'
# tests
@pytest.mark.usefixtures('aiida_profile', 'clear_database')
class Test_BandDosWorkChain():
    """
    Regression tests for the FleurBandDosWorkChain
    """
    # NOTE: all tests below are currently skipped placeholders; the bodies are
    # either sketches of the intended setup or bare `assert False` stubs.
    @pytest.mark.skip(reason='Test is not implemented')
    @pytest.mark.timeout(500, method='thread')
    def test_fleur_band_converged_Si(self, run_with_cache, mock_code_factory, create_remote_fleur):
        """
        full example using the band dos workchain with just a fleurinp data as input.
        Calls scf, Several fleur runs needed till convergence
        """
        from aiida.orm import Code, load_node, Dict, StructureData
        from numpy import array
        from aiida_fleur.workflows.banddos import FleurBandDosWorkChain
        # scheduler options for the (mocked) FLEUR calculation
        options = {
            'resources': {
                'num_machines': 1
            },
            'max_wallclock_seconds': 5 * 60,
            'withmpi': False,
            'custom_scheduler_commands': ''
        }
        FleurCode = mock_code = mock_code_factory(
            label='fleur',
            data_dir_abspath=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data_dir_calcs/'),
            entry_point=CALC_ENTRY_POINT,
            ignore_files=['_aiidasubmit.sh', 'cdnc', 'out', 'FleurInputSchema.xsd', 'cdn.hdf', 'usage.json', 'cdn??'])
        # create process builder to set parameters
        builder = FleurBandDosWorkChain.get_builder()
        builder.metadata.description = 'Simple Fleur Band Dos calculation ontop converged fleur calc'
        builder.metadata.label = 'FleurBandDos_test'
        #builder.fleurinp = create_fleurinp(TEST_INP_XML_PATH)
        builder.remote = create_remote_fleur()
        builder.options = Dict(dict=options)
        builder.fleur = FleurCode
        # now run calculation
        out, node = run_with_cache(builder)
        # check output
        # check if BandDos file was parsed. success and all output nodes there.
    @pytest.mark.skip(reason='Test is not implemented')
    @pytest.mark.timeout(500, method='thread')
    def test_fleur_band_fleurinp_Si(self, run_with_cache, mock_code_factory, create_fleurinp):
        """
        full example using the band dos workchain with just a fleurinp data as input.
        Calls scf, Several fleur runs needed till convergence
        """
        assert False
    @pytest.mark.skip(reason='Test is not implemented')
    @pytest.mark.timeout(500, method='thread')
    def test_fleur_band_structure_Si(self, run_with_cache, mock_code_factory):
        """
        Full regression test of the band dos workchain starting with a crystal structure and parameters
        """
        assert False
    @pytest.mark.skip(reason='Test is not implemented')
    @pytest.mark.timeout(500, method='thread')
    def test_fleur_band_validation_wrong_inputs(self, run_with_cache, mock_code_factory):
        """
        Test the validation behavior of band dos workchain if wrong input is provided it should throw
        an exitcode and not start a Fleur run or crash
        """
        assert False
    # needed?
    @pytest.mark.skip(reason='Test is not implemented')
    @pytest.mark.timeout(500, method='thread')
    def test_fleur_band_seekpath(self, run_with_cache, mock_code_factory):
        """
        Tests if the band dos workchain is capable of running without a specified path
        """
        assert False
    @pytest.mark.skip(reason='Test is not implemented')
    @pytest.mark.timeout(500, method='thread')
    def test_fleur_band_no_seekpath(self, run_with_cache, mock_code_factory):
        """
        Tests if the band dos workchain is capable of running with a specified path
        """
        assert False
| StarcoderdataPython |
1753803 | <reponame>waverDeep/WaveBYOL<filename>src/utils/make_dataset.py<gh_stars>1-10
import pandas as pd
import src.utils.interface_file_io as file_io
from tqdm import tqdm
from sklearn.model_selection import train_test_split
def main(metadata_path):
    """Build IEMOCAP train/test/label list files from the dataset's metadata CSV.

    Reads (emotion, path) rows, groups file paths by emotion label, performs a
    75/25 per-label split, and writes three text files next to the dataset.
    """
    file_list = []
    label = []
    dataset = pd.read_csv(metadata_path)
    dataset = dataset[['emotion', 'path']]
    for data in dataset.values:
        label.append(data[0])
        # data[1] is relative to the IEMOCAP release root
        file_list.append([data[0], '../../dataset/IEMOCAP_full_release/{}'.format(data[1])])
    # deduplicate into the set of distinct emotion labels
    label = set(label)
    print(label)
    # bucket file paths by emotion label
    packed = {tick: [] for tick in label}
    for file in file_list:
        file_label = file[0]
        packed[file_label].append(file[1])
    train_dataset = []
    test_dataset = []
    for key, value in packed.items():
        if len(value) > 0:
            # per-label split keeps the class distribution (stratified by construction);
            # fixed random_state makes the split reproducible
            train, test = train_test_split(value, test_size=0.25, random_state=777)
            for temp in train:
                train_dataset.append('{} {}'.format(temp, key))
            for temp in test:
                test_dataset.append('{} {}'.format(temp, key))
    file_io.make_list2txt(train_dataset, '../../dataset/iemocap-train.txt')
    file_io.make_list2txt(test_dataset, '../../dataset/iemocap-test.txt')
    file_io.make_list2txt(label, '../../dataset/iemocap-label.txt')
if __name__ == '__main__':
    # Default IEMOCAP metadata location relative to this script.
    path = '../../dataset/IEMOCAP_full_release/iemocap_full_dataset.csv'
    main(path)
3217809 | import torch
import math
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
from torch.utils.data.dataset import Dataset
from typing import Optional, Iterator, Callable
from collections import OrderedDict
__all__ = ["LoadBalancingDistributedSampler", "LoadBalancingDistributedBatchSampler"]
class LoadBalancingDistributedSampler(Sampler):
    r"""Sampler that restricts data loading to a subset of the dataset.

    This sampler uses a ``complexity_fn`` to calculate each sample's computational
    complexity and makes each batch get similar computational complexity.
    This is useful in scenarios like speech and NLP, where each batch has variable
    length and distributed training suffers from the straggler problem.

    The usage is similar to :class:`torch.utils.data.DistributedSampler`, where each
    process loads a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Args:
        dataset: Dataset used for sampling.
        complexity_fn(Callable): A function whose input is a sample and output is an
            integer as a measure of the computational complexity of the sample.
        num_replicas (int, optional): Number of processes participating in
            distributed training. By default, retrieved from the current
            distributed group.
        rank (int, optional): Rank of the current process within :attr:`num_replicas`.
            By default, retrieved from the current distributed group.
        shuffle (bool, optional): If ``True`` (default), sampler will shuffle the
            indices.
        seed (int, optional): random seed used to shuffle the sampler if
            :attr:`shuffle=True`. This number should be identical across all
            processes in the distributed group. Default: ``0``.
        drop_last (bool, optional): if ``True``, then the sampler will drop the
            tail of the data to make it evenly divisible across the number of
            replicas. If ``False``, the sampler will add extra indices to make
            the data evenly divisible across the replicas. Default: ``False``.
        random_level (float, optional): A float in ``[0, 1]`` that controls the
            extent of load balance. ``0`` means the best load balance, while
            ``1`` means the opposite.

    .. warning::
        In distributed mode, calling the :meth:`set_epoch` method at
        the beginning of each epoch **before** creating the :class:`DataLoader`
        iterator is necessary to make shuffling work properly across multiple
        epochs. Otherwise, the same ordering will be always used.

    Example::
        >>> sampler = LoadBalancingDistributedSampler(dataset, complexity_fn=complexity_fn)
        >>> loader = torch.utils.data.DataLoader(dataset, shuffle=(sampler is None), sampler=sampler)
        >>> for epoch in range(start_epoch, n_epochs):
        ...     sampler.set_epoch(epoch)
        ...     train(loader)
    """

    def __init__(
        self,
        dataset: Dataset,
        complexity_fn: Callable[..., int],
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        shuffle: bool = True,
        seed: int = 0,
        drop_last: bool = False,
        random_level: float = 0,
    ) -> None:
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        if rank >= num_replicas or rank < 0:
            raise ValueError(
                "Invalid rank {}, rank should be in the interval"
                " [0, {}]".format(rank, num_replicas - 1)
            )
        # Validate random_level up front (fail fast, before the O(n) complexity
        # scan below). Also fixes the "shoule" typo in the error message.
        if random_level < 0.0 or random_level > 1.0:
            raise ValueError(
                "Invalid random level {}, should be in the range [0.0, 1.0]".format(
                    random_level
                )
            )
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.drop_last = drop_last
        # If the dataset length is evenly divisible by # of replicas, then there
        # is no need to drop any data, since the dataset will be split equally.
        dataset_len = len(self.dataset)  # type: ignore
        if self.drop_last and dataset_len % self.num_replicas != 0:  # type: ignore
            # Split to nearest available length that is evenly divisible.
            # This is to ensure each rank receives the same amount of data when
            # using this Sampler.
            self.num_samples = math.ceil(
                # `type:ignore` is required because Dataset cannot provide a default __len__
                # see NOTE in pytorch/torch/utils/data/sampler.py
                (dataset_len - self.num_replicas)
                / self.num_replicas
            )
        else:
            self.num_samples = math.ceil(dataset_len / self.num_replicas)  # type: ignore
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
        self.seed = seed
        # Pre-compute every sample's complexity once; the dataset is assumed
        # constant, so this map never changes afterwards.
        self.item_complexity_map = dict()
        for item_index in range(dataset_len):
            self.item_complexity_map[item_index] = complexity_fn(
                self.dataset[item_index]
            )
        # Indices ordered by ascending complexity; consecutive indices form
        # chunks of similar cost, one element per replica.
        self.ordered_item_complexity_map = OrderedDict(
            sorted(self.item_complexity_map.items(), key=lambda t: t[1])
        )
        max_complexity = max(self.item_complexity_map.values())
        min_complexity = min(self.item_complexity_map.values())
        # Magnitude of the random perturbation added to complexities when
        # shuffling; scales with the complexity spread and random_level.
        self.random_number = int((max_complexity - min_complexity) * random_level + 1)

    def shuffle_chunks(self):
        """Return ``(index_chunks, chunk_indices)``: complexity-sorted chunks of
        ``num_replicas`` indices each, and the (possibly shuffled and padded or
        truncated) order in which to visit them."""

        def chunks_wrap_padding(lst, n):
            """Yield ``num_samples`` chunks of size *n* from *lst*, wrapping
            around to the start of *lst* when it runs out of elements."""
            num_chunks = max(1, self.num_samples)
            num_elements = num_chunks * n
            current_lst = []
            for i in range(num_elements):
                current_lst.append(lst[i % len(lst)])
                if len(current_lst) == n:
                    yield current_lst
                    current_lst = []

        if self.shuffle:
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            if self.random_number > 0:
                # Perturb each complexity by a random int in [0, random_number)
                # so the sort (and hence chunking) varies between epochs.
                item_complexity_map = self.item_complexity_map.copy()
                complexity_random_ints = torch.randint(
                    self.random_number, (len(item_complexity_map),), generator=g
                ).tolist()
                for k, v in zip(item_complexity_map, complexity_random_ints):
                    item_complexity_map[k] += v
                ordered_item_complexity_map = OrderedDict(
                    sorted(item_complexity_map.items(), key=lambda t: t[1])
                )
            else:
                ordered_item_complexity_map = self.ordered_item_complexity_map
            index_chunks = list(
                chunks_wrap_padding(
                    list(ordered_item_complexity_map.keys()), self.num_replicas
                )
            )
            chunk_indices = torch.randperm(len(index_chunks), generator=g).tolist()  # type: ignore
        else:
            index_chunks = list(
                chunks_wrap_padding(
                    list(self.ordered_item_complexity_map.keys()), self.num_replicas
                )
            )
            chunk_indices = list(range(len(index_chunks)))  # type: ignore
        if not self.drop_last:
            # add extra samples to make it evenly divisible
            padding_size = self.num_samples - len(chunk_indices)
            if padding_size <= len(chunk_indices):
                chunk_indices += chunk_indices[:padding_size]
            else:
                chunk_indices += (
                    chunk_indices * math.ceil(padding_size / len(chunk_indices))
                )[:padding_size]
        else:
            # remove tail of data to make it evenly divisible.
            chunk_indices = chunk_indices[: self.num_samples]
        assert len(chunk_indices) == self.num_samples
        return index_chunks, chunk_indices

    def __iter__(self) -> Iterator:
        index_chunks, chunk_indices = self.shuffle_chunks()
        # subsample: this rank takes its own column from each selected chunk
        indices = [index_chunks[i][self.rank] for i in chunk_indices]
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self) -> int:
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        r"""
        Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
        use a different random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.epoch = epoch
class LoadBalancingDistributedBatchSampler(Sampler):
    r"""Wraps another load balance sampler to yield variable sized mini-batches.

    Args:
        sampler (LoadBalancingDistributedSampler): Load balance sampler.
        batch_fn (Callable): Callable to yield mini-batch indices.
        drop_last (bool): If ``True``, the sampler will drop the last few batches exceeding
            the least number of batches among replicas, otherwises, the number of batches
            on each replica will be padded to the same.

    `batch_fn` will have the signature of
    ``def batch_fn(indices: List[int]) -> List[List[int]]``.

    Example::
        >>> from bagua.torch_api.contrib import LoadBalancingDistributedSampler, \
        ...     LoadBalancingDistributedBatchSampler
        >>>
        >>> sampler = LoadBalancingDistributedSampler(dataset, complexity_fn=complexity_fn)
        >>> batch_sampler = LoadBalancingDistributedBatchSampler(sampler, batch_fn=batch_fn)
        >>> loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)
        >>>
        >>> for epoch in range(start_epoch, n_epochs):
        ...     batch_sampler.set_epoch(epoch)
        ...     train(loader)
    """
    def __init__(
        self,
        sampler: LoadBalancingDistributedSampler,
        batch_fn,
        drop_last: bool = False,
    ) -> None:
        if not isinstance(sampler, LoadBalancingDistributedSampler):
            raise ValueError(
                "sampler should be of LoadBalancingDistributedSampler type."
            )
        # the wrapped sampler must keep all chunks so every rank sees the same
        # chunk layout; dropping is handled here at batch granularity instead
        if sampler.drop_last:
            raise ValueError("drop_last of sampler should be False")
        self.sampler = sampler
        self.batch_fn = batch_fn
        self.drop_last = drop_last
        self.num_replicas = self.sampler.num_replicas
        self.rank = self.sampler.rank
        self.generate_batches()
    def generate_batches(self):
        """Recompute every replica's batch list from the wrapped sampler's
        current epoch ordering, then pad (or truncate) so all replicas yield
        the same number of batches."""
        index_chunks, chunk_indices = self.sampler.shuffle_chunks()
        batches = []
        # build each rank's batches so every rank agrees on the common length
        for rank in range(self.num_replicas):
            sub_indices = [index_chunks[i][rank] for i in chunk_indices]
            batches.append(self.batch_fn(sub_indices))
        self.total_batch = (
            max([len(b) for b in batches])
            if not self.drop_last
            else min([len(b) for b in batches])
        )
        # here {len(batches[self.rank]) - self.total_batch} batches dropped for
        # rank {self.rank}
        if self.total_batch < len(batches[self.rank]):
            pass
        # pad shorter batch lists by repeating their first batches; when
        # drop_last is True the slice below is empty and lists are truncated
        # implicitly by total_batch in __iter__/__len__
        self.padded_batches = [
            batch + batch[: self.total_batch - len(batch)] for batch in batches
        ]
    def __iter__(self):
        return iter(self.padded_batches[self.rank])
    def __len__(self):
        return self.total_batch
    def set_epoch(self, epoch: int) -> None:
        r"""
        Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
        use a different random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.sampler.set_epoch(epoch)
        # re-materialize batches for the new epoch's shuffle
        self.generate_batches()
| StarcoderdataPython |
3265857 | <reponame>abretaud/biomaj2galaxy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import click
from future import standard_library
from .config import read_global_config
standard_library.install_aliases()
# Package version string, reported by the CLI.
__version__ = '2.1.0'
# Click settings shared by all commands: BM2G_* env-var prefix and -h/--help flags.
CONTEXT_SETTINGS = dict(auto_envvar_prefix='BM2G', help_option_names=['-h', '--help'])
class Context(object):
    """Shared state object passed between biomaj2galaxy CLI commands."""

    def __init__(self):
        self.verbose = False
        self.home = os.getcwd()
        self._global_config = None

    @property
    def global_config(self):
        """Lazily load and cache the global configuration."""
        if self._global_config is None:
            self._global_config = read_global_config()
        return self._global_config

    def log(self, msg, *args):
        """Logs a message to stderr."""
        formatted = msg % args if args else msg
        click.echo(formatted, file=sys.stderr)

    def vlog(self, msg, *args):
        """Logs a message to stderr only if verbose is enabled."""
        if not self.verbose:
            return
        self.log(msg, *args)

    def exit(self, exit_code):
        """Log the exit code (verbose only) and terminate the process."""
        self.vlog("Exiting biomaj2galaxy with exit code [%d]" % exit_code)
        sys.exit(exit_code)
# Decorator injecting a shared Context instance into click command callbacks.
pass_context = click.make_pass_decorator(Context, ensure=True)
| StarcoderdataPython |
34482 | <reponame>horacexd/clist
# Generated by Django 2.2.10 on 2020-04-03 19:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.10: adds Event.team_size with default 3.
    # Do not edit the operations by hand once applied to a shared database.
    dependencies = [
        ('events', '0039_event_limits'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='team_size',
            field=models.IntegerField(default=3),
        ),
    ]
| StarcoderdataPython |
60829 | <filename>test/test_parameters.py<gh_stars>1000+
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from test.unittest_base import UnittestBase
class TestParameters(UnittestBase, unittest.TestCase):
    def float_unittest(self, exploration):
        """Run two act/observe steps and check whether the exploration parameter
        stays constant (constant/plain-float spec) or changes between timesteps
        (any decaying spec)."""
        agent, environment = self.prepare(exploration=exploration)
        states = environment.reset()
        actions = agent.act(states=states)
        # snapshot exploration after the first act
        exploration1 = agent.model.exploration.value().numpy().item()
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)
        actions = agent.act(states=states)
        # snapshot again after the second act
        exploration2 = agent.model.exploration.value().numpy().item()
        if not isinstance(exploration, dict) or exploration['type'] == 'constant':
            self.assertEqual(exploration2, exploration1)
        else:
            self.assertNotEqual(exploration2, exploration1)
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)
        agent.close()
        environment.close()
        self.finished_test()
def int_unittest(self, horizon):
agent, environment = self.prepare(reward_estimation=dict(horizon=horizon))
states = environment.reset()
actions = agent.act(states=states)
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
horizon1 = agent.model.reward_horizon.value().numpy().item()
actions = agent.act(states=states)
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
horizon2 = agent.model.reward_horizon.value().numpy().item()
if not isinstance(horizon, dict) or horizon['type'] == 'constant':
self.assertEqual(horizon2, horizon1)
else:
self.assertNotEqual(horizon2, horizon1)
agent.close()
environment.close()
self.finished_test()
def test_constant(self):
self.start_tests(name='constant')
exploration = 0.1
self.float_unittest(exploration=exploration)
horizon = 4
self.int_unittest(horizon=horizon)
def test_decaying(self):
self.start_tests(name='decaying')
exploration = dict(
type='decaying', decay='exponential', unit='timesteps', num_steps=5, initial_value=0.1,
decay_rate=0.5
)
self.float_unittest(exploration=exploration)
horizon = dict(
type='polynomial', unit='timesteps', num_steps=1, initial_value=2, final_value=4,
power=2
)
self.int_unittest(horizon=horizon)
def test_exponential(self):
self.start_tests(name='exponential')
# SPECIFICATION.MD
exploration = dict(
type='exponential', unit='timesteps', num_steps=5, initial_value=0.1, decay_rate=0.5
)
self.float_unittest(exploration=exploration)
def test_linear(self):
self.start_tests(name='linear')
exploration = dict(
type='linear', unit='timesteps', num_steps=5, initial_value=0.1, final_value=0.5
)
self.float_unittest(exploration=exploration)
# SPECIFICATION.MD
horizon = dict(type='linear', unit='timesteps', num_steps=1, initial_value=2, final_value=4)
self.int_unittest(horizon=horizon)
def test_ornstein_uhlenbeck(self):
self.start_tests(name='ornstein-uhlenbeck')
exploration = dict(type='ornstein_uhlenbeck', absolute=True)
self.float_unittest(exploration=exploration)
def test_piecewise_constant(self):
self.start_tests(name='piecewise-constant')
exploration = dict(
type='piecewise_constant', unit='timesteps', boundaries=[1], values=[0.1, 0.0]
)
self.float_unittest(exploration=exploration)
horizon = dict(
type='piecewise_constant', dtype='int', unit='timesteps', boundaries=[1], values=[1, 2]
)
self.int_unittest(horizon=horizon)
def test_random(self):
self.start_tests(name='random')
exploration = dict(type='random', distribution='uniform')
self.float_unittest(exploration=exploration)
| StarcoderdataPython |
3231124 | # -*- coding: utf8 -*-
# Maximum number of connection attempts before giving up.
CONNECT_MAX_TRY = 5
| StarcoderdataPython |
3394559 | import os
import sys
sys.path.append("../../../monk/");
import psutil
from keras_prototype import prototype
################################################### Foldered - Train Dataset #################################################################
# Reload a previously trained experiment in inference mode and evaluate it on
# a foldered dataset (one sub-directory per class).
ktf = prototype(verbose=1);
ktf.Prototype("sample-project-1", "sample-experiment-1", eval_infer=True);
ktf.Dataset_Params(dataset_path="../../../monk/system_check_tests/datasets/dataset_cats_dogs_eval");
ktf.Dataset();
accuracy, class_based_accuracy = ktf.Evaluate();
###############################################################################################################################################
######################################################### CSV - Train Dataset #################################################################
# Same evaluation, but labels come from a CSV file instead of folder names.
ktf = prototype(verbose=1);
ktf.Prototype("sample-project-1", "sample-experiment-1", eval_infer=True);
ktf.Dataset_Params(dataset_path="../../../monk/system_check_tests/datasets/dataset_csv_id/train",
                    path_to_csv="../../../monk/system_check_tests/datasets/dataset_csv_id/train.csv");
ktf.Dataset();
accuracy, class_based_accuracy = ktf.Evaluate();
###############################################################################################################################################
| StarcoderdataPython |
123041 | # Copyright 2016 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from diskimage_builder.block_device.utils import parse_abs_size_spec
class TestLoggingConfig(testtools.TestCase):
    """Unit tests for diskimage_builder's absolute-size-spec parser."""
    def test_parse_size_spec(self):
        """parse_abs_size_spec converts human-readable size strings to bytes.

        Bug fix: the original wrapped the assertions in map(), but map() is
        lazy in Python 3 and the iterator was never consumed, so none of the
        assertions actually ran.  A plain loop executes every case.
        """
        cases = [
            ("20TiB", 20 * 1024**4),     # binary (IEC) suffix
            ("1024KiB", 1024 * 1024),
            ("1.2TB", 1.2 * 1000**4),    # decimal (SI) suffix
            ("2.4T", 2.4 * 1000**4),
            ("512B", 512),
            ("364", 364),                # bare number defaults to bytes
        ]
        for spec, expected in cases:
            self.assertEqual(parse_abs_size_spec(spec), expected)
| StarcoderdataPython |
190627 | <reponame>ar90n/yolact
from . import backbone, data, layers, utils, web, yolact, eval
| StarcoderdataPython |
def name_func(func, _, params):
    """Build a parameterized-test name: "<func name>_<arg1>_<arg2>_...".

    *params* is expected to expose an ``args`` iterable (parameterized style).
    """
    arg_part = "_".join(str(value) for value in params.args)
    return "{}_{}".format(func.__name__, arg_part)
def get_enc_params(dtype):
    """Map a numpy-style dtype name to its (PCM encoding, bits per sample) pair.

    Raises ValueError for any dtype outside the supported set.
    """
    table = {
        "float32": ("PCM_F", 32),
        "int32": ("PCM_S", 32),
        "int16": ("PCM_S", 16),
        "uint8": ("PCM_U", 8),
    }
    if dtype in table:
        return table[dtype]
    raise ValueError(f"Unexpected dtype: {dtype}")
| StarcoderdataPython |
29642 | import ConfigParser
from datetime import datetime
import os
import sys
import numpy as np
import pandas as pd
import utils.counts
import utils.counts_deviation
__author__ = '<NAME>'
# This script finds the days with the greatest deviation from some reference value (such as hourly means or medians)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'ERROR: need to supply the path to the conifg file'
config_path = sys.argv[1]
conf = ConfigParser.ConfigParser()
conf.read(config_path)
# Paths
station_TS_dir = conf.get('Paths', 'station_TS_dir') # Path to station Time Series
ref_counts_file = conf.get('Paths', 'ref_counts_file')
out_file = conf.get('Paths', 'out_file') # Where to write the counts file
# Parameters
start_date = conf.get('Params', 'start_date')
end_date = conf.get('Params', 'end_date')
days = [int(d.strip()) for d in conf.get('Params', 'days').split(',')]
measure = conf.get('Params', 'measure')
# Get target dates
targ_dates = utils.counts.date_string_list(start_date, end_date, days)
# Create the counts file
ref = utils.counts.df_from_counts(ref_counts_file) # DF w/ mean flow for each link
measures = []
keepers = []
for i, stat in enumerate(ref.columns):
# Get path to stat ts file
print 'Processings station: %s' % str(stat)
print 'Number %d of %d' % (i, ref.shape[1])
ts_path = os.path.join(station_TS_dir, str(stat), 'time_series.csv')
c_dev = utils.counts_deviation.CountsDeviation(ts_path, targ_dates)
if c_dev.missing: # if there is missing data, we skip the whole station
print "Missing data. Skipping station: %s" % str(stat)
continue
c_dev.calc_measure(measure, reference=ref[stat])
measures.append(c_dev.measures[measure])
keepers.append(stat)
df = pd.DataFrame(measures).transpose()
df.columns = keepers
df.index = targ_dates
df.dropna(axis=1)
df['Max_Dev'] = df.apply(np.sum, axis=1)
df.to_csv(out_file)
| StarcoderdataPython |
3332558 | <filename>module2/Bot/bot_user_session.py<gh_stars>0
import cherrypy
import aiml
class Response(object):
    """CherryPy app exposing an AIML chatbot with a per-user saved brain file."""
    def __init__(self):
        self.kernel = aiml.Kernel()
        self.kernel.learn("startup.xml")
        self.kernel.respond("load aiml")
        self.question = Question()
    def _cp_dispatch(self, vpath):
        # /<uid>            -> self.index(uid=...)
        # /<ignored>/<text> -> self.question.index(question=<text>)
        if len(vpath) == 1:
            cherrypy.request.params['uid'] = vpath.pop()
            return self
        if len(vpath) == 2:
            vpath.pop(0)
            cherrypy.request.params['question'] = vpath.pop(0)
            return self.question
        return vpath
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def index(self, question, uid):
        """Answer *question* for user *uid*, loading/creating their brain file."""
        import os  # fix: 'os' was referenced below but never imported in this module
        if os.path.isfile(str(uid) + ".brn"):
            self.kernel.bootstrap(brainFile=str(uid) + ".brn")
        else:
            self.kernel.bootstrap(learnFiles="startup.xml", commands="load aiml")
            self.kernel.saveBrain(str(uid) + ".brn")
        return {'response': self.kernel.respond(question, uid)}
class Question(object):
    """Stateless AIML endpoint: answers a question with a fresh shared kernel."""
    def __init__(self):
        self.kernel = aiml.Kernel()
        self.kernel.learn("startup.xml")
        self.kernel.respond("load aiml")
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def index(self, question):
        """Return the kernel's JSON response for *question* (no user session)."""
        return {'response': self.kernel.respond(question)}
if __name__ == '__main__':
    # NOTE(review): quickstart() blocks until the server is shut down, so the
    # config construction and tree.mount() below only execute afterwards and
    # appear to be dead code — confirm the intended startup path (likely the
    # config was meant to be passed to quickstart()).
    cherrypy.quickstart(Response())
    config = {'/':
        {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.trailing_slash.on': False,
        }
    }
    cherrypy.tree.mount(Response(), config=config)
1769375 | <gh_stars>100-1000
# pylint: disable=missing-function-docstring, missing-module-docstring, pointless-statement
def sum_two_numbers(x : 'int', y : 'int'):
    """Return the sum of *x* and *y*.

    Bug fix: the original evaluated ``x + y`` as a bare expression and
    implicitly returned None (the pylint pragma above even silenced the
    pointless-statement warning).  If this file was a lint fixture, revert.
    """
    return x + y
| StarcoderdataPython |
3371131 | import pytest
from harvey.heap import InMemoryHeap
def test_in_memory_heap_push_and_pop(monkeypatch):
    """Elements come out in ascending priority order."""
    heap = InMemoryHeap()
    heap.push(1, 'cat')
    heap.push(10, 'dog')
    assert heap.pop() == 'cat'
    assert heap.pop() == 'dog'
def test_in_memory_heap_upsert_element(monkeypatch):
    """Pushing an existing element updates its priority instead of duplicating it."""
    exhausted = InMemoryHeap()
    exhausted.push(1, 'cat')
    exhausted.push(10, 'dog')
    exhausted.push(100, 'cat')
    exhausted.pop()
    exhausted.pop()
    # only two distinct elements exist, so a third pop must fail
    with pytest.raises(IndexError):
        exhausted.pop()
    reordered = InMemoryHeap()
    reordered.push(1, 'cat')
    reordered.push(10, 'dog')
    reordered.push(100, 'cat')
    # 'cat' was re-prioritized to 100, so 'dog' (10) now comes out first
    assert reordered.pop() == 'dog'
    assert reordered.pop() == 'cat'
99156 | <filename>sequana_pipelines/bioconvert/main.py
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2021 - Sequana Development Team
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import sys
import os
import argparse
import subprocess
from sequana_pipetools.options import *
from sequana_pipetools.options import before_pipeline
from sequana_pipetools.misc import Colors
from sequana_pipetools.info import sequana_epilog, sequana_prolog
from sequana_pipetools import SequanaManager
# shared color helper for terminal output, and the pipeline's canonical name
col = Colors()
NAME = "bioconvert"
class Options(argparse.ArgumentParser):
    """Argument parser for the sequana bioconvert pipeline.

    Combines the shared sequana option groups (slurm, snakemake, general)
    with the bioconvert-specific pipeline options.
    """
    def __init__(self, prog=NAME, epilog=None):
        usage = col.purple(sequana_prolog.format(**{"name": NAME}))
        super(Options, self).__init__(usage=usage, prog=prog, description="",
            epilog=epilog,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter
        )
        # add a new group of options to the parser
        so = SlurmOptions()
        so.add_options(self)
        # add a snakemake group of options to the parser
        so = SnakemakeOptions(working_directory=NAME)
        so.add_options(self)
        so = GeneralOptions()
        so.add_options(self)
        # bioconvert-specific options: input selection and conversion command
        pipeline_group = self.add_argument_group("pipeline")
        pipeline_group.add_argument("--input-pattern", dest="input_pattern",
            required=True, type=str)
        pipeline_group.add_argument("--input-directory", dest="input_directory",
            required=True, type=str)
        pipeline_group.add_argument("--input-ext", dest="input_extension",
            required=True, type=str)
        pipeline_group.add_argument("--output-ext", dest="output_extension",
            required=True, type=str)
        pipeline_group.add_argument("--command", dest="command",
            required=True, type=str)
        pipeline_group.add_argument("--method", dest="method",
            type=str,
            default=None,
            help="If you know bioconvert and method's name, you can set it here")
def main(args=None):
    """Entry point: parse options, fill the pipeline config, and write it out.

    *args* defaults to sys.argv; args[0] is skipped when parsing.
    """
    if args is None:
        args = sys.argv
    # whatever needs to be called by all pipeline before the options parsing
    before_pipeline(NAME)
    # option parsing including common epilog
    options = Options(NAME, epilog=sequana_epilog).parse_args(args[1:])
    # the real stuff is here
    manager = SequanaManager(options, NAME)
    # create the beginning of the command and the working directory
    manager.setup()
    # fill the config file with input parameters
    cfg = manager.config.config
    # EXAMPLE TOREPLACE WITH YOUR NEEDS
    cfg.input_directory = os.path.abspath(options.input_directory)
    cfg.input_pattern = options.input_pattern
    cfg.bioconvert.method = options.method
    cfg.bioconvert.command = options.command
    cfg.bioconvert.input_extension = options.input_extension
    cfg.bioconvert.output_extension = options.output_extension
    # finalise the command and save it; copy the snakemake. update the config
    # file and save it.
    manager.teardown(check_input_files=False)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
# Original code (pre-challenge version, kept for reference):
# x = float(input('Digite a nota 1: '))
# y = float(input('Digite a nota 2: '))
# print (f'a media entre a nota {x} e a nota {y} é {(x+y)/2}')
# Lesson 11 challenge: same average of two grades, but with the input
# prompts colored yellow (\033[33m is the ANSI escape for yellow; note
# the color is never reset afterwards).
x = float(input('\033[33mDigite a nota 1: '))
y = float(input('\033[33mDigite a nota 2: '))
print(f'a media entre a nota {x} e a nota {y} é {(x+y)/2}')
| StarcoderdataPython |
class Solution:
    def isValidSerialization(self, preorder: str) -> bool:
        """Return True if *preorder* is a valid preorder serialization of a
        binary tree, where '#' marks a null node (LeetCode 331).

        Bug fix: the original method body was missing entirely.  Uses slot
        counting: start with one open slot; every node consumes a slot and a
        non-null node opens two more.  Valid iff slots never run out early
        and exactly zero remain at the end.
        """
        slots = 1
        for node in preorder.split(','):
            if slots == 0:
                # more nodes than available slots -> invalid
                return False
            slots -= 1
            if node != '#':
                slots += 2
        return slots == 0
34033 | import pandas as pd
import click
import collections
def kmer_suffix(kmer):
    """Return *kmer* without its first character (the (k-1)-suffix)."""
    without_head = kmer[1:]
    return without_head
def kmer_prefix(kmer):
    """Return *kmer* without its last character (the (k-1)-prefix)."""
    without_tail = kmer[:-1]
    return without_tail
def chunks(l, n):
    """Yield successive n-sized chunks from l (last chunk may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def build_graph(kmers):
    """Build a de Bruijn adjacency map: each k-mer adds an edge from its
    (k-1)-prefix to its (k-1)-suffix."""
    adjacency = collections.defaultdict(list)
    for mer in kmers:
        # inline of kmer_prefix / kmer_suffix: drop last / first character
        adjacency[mer[:-1]].append(mer[1:])
    return adjacency
def find_start_vertex(graph):
    """Choose a start vertex for the Eulerian walk.

    A vertex with no outgoing edges is returned immediately; otherwise the
    vertex with the fewest incoming edges wins (ties broken by insertion
    order, since sorted() is stable).
    """
    in_degree = collections.defaultdict(lambda: 0)
    for vertex, neighbours in graph.items():
        in_degree[vertex] += 0  # make sure every source vertex is counted
        if not neighbours:
            return vertex
        for neighbour in neighbours:
            in_degree[neighbour] += 1
    by_count = sorted(in_degree.items(), key=lambda item: item[1])
    return by_count[0][0]
def find_eulerian_tour(graph):
    """Iterative Hierholzer's algorithm (docstring translated from Russian):

    stack St;
    push any vertex onto St (the start vertex);
    while St is not empty:
        let V be the value on top of St;
        if degree(V) == 0:
            append V to the answer;
            pop V from St;
        else:
            take any edge leaving V;
            remove it from the graph;
            push the other end of that edge onto St.

    Note: this consumes *graph* in place (edges are pop()ed), and the
    vertex order is reversed at the end to yield the tour start-to-finish.
    """
    ans = []
    stack = [find_start_vertex(graph)]
    while stack:
        curr_v = stack[-1]
        if len(graph[curr_v]) == 0:
            ans.append(curr_v)
            stack.pop()
        else:
            next_v = graph[curr_v].pop()
            stack.append(next_v)
    return list(reversed(ans))
def dna_reconstruction(k, dna):
    """Rebuild a string from its k-mers via an Eulerian walk over the
    de Bruijn graph.

    NOTE(review): chunks() yields non-overlapping k-sized pieces, so this
    assumes *dna* is a flat concatenation of the input k-mers — confirm
    against the expected input format.
    """
    kmers = [x for x in chunks(dna, k)]
    graph = build_graph(kmers)
    path = find_eulerian_tour(graph)
    # each path vertex contributes its first character; the final vertex
    # contributes all of its remaining characters
    result = [x[0] for x in path] + [path[-1][1:]]
    return "".join(result)
@click.command()
@click.option(
    "--fin",
    type=str,
    default="problem11_input.tsv")
def main(fin):
    """Read (k, dna) rows from a TSV file and print each reconstruction."""
    df = pd.read_csv(fin, sep="\t")
    # the input must provide both columns used below
    assert all(x in df.columns.values.tolist() for x in ["k", "dna"])
    for i, row in df.iterrows():
        print(dna_reconstruction(row["k"], row["dna"]))
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1791502 | <reponame>MelkiyHondavod/computations
# Task (translated from Russian): given a natural number N > 1, check
# whether it is prime.  The program must output YES for a prime number
# and NO for a composite one.
def is_prime(N):
    """Return "YES" if N is prime, "NO" otherwise.

    Bug fix: the original unconditionally returned "YES".  Trial division
    up to sqrt(N) correctly classifies the number.
    """
    if N < 2:
        return "NO"
    d = 2
    while d * d <= N:
        if N % d == 0:
            return "NO"
        d += 1
    return "YES"
18284 | <gh_stars>0
#!/usr/bin/env python
import asyncio
from abc import abstractmethod, ABC
from enum import Enum
import logging
from typing import (
Optional,
List,
Deque
)
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.kline_stream_tracker_data_source import \
KlineStreamTrackerDataSource
from hummingbot.core.data_type.kline import Kline
import numpy as np
import talib
from collections import deque
class KlineStreamTrackerDataSourceType(Enum):
    """Where kline (candlestick) stream data is sourced from."""
    # LOCAL_CLUSTER = 1 deprecated
    REMOTE_API = 2
    EXCHANGE_API = 3
class KlineStreamTracker(ABC):
    """Abstract tracker for a kline (candlestick) stream.

    Keeps the most recent 200 klines and derives technical indicators
    (EMA 7/20 and a MACD(7, 20, 9) histogram tail) from their close prices.
    Subclasses must provide the data source and the start() coroutine.
    """
    # class-level cached logger instance
    _ust_logger: Optional[HummingbotLogger] = None
    @classmethod
    def logger(cls) -> HummingbotLogger:
        """Lazily create and cache the module logger."""
        if cls._ust_logger is None:
            cls._ust_logger = logging.getLogger(__name__)
        return cls._ust_logger
    def __init__(self):
        self._kline_stream: asyncio.Queue = asyncio.Queue()
        self._ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
        # bounded history: only the latest 200 klines are retained
        self._klines: Deque[Kline] = deque([], maxlen=200)
        self._ema_short = float("Nan")
        self._ema_long = float("Nan")
        self._macd_histograms: List[float] = []
    @property
    @abstractmethod
    def data_source(self) -> KlineStreamTrackerDataSource:
        raise NotImplementedError
    @property
    def last_recv_time(self) -> float:
        # delegated to the concrete data source
        return self.data_source.last_recv_time
    @abstractmethod
    async def start(self):
        raise NotImplementedError
    @property
    def kline_stream(self) -> asyncio.Queue:
        return self._kline_stream
    @property
    def ema_short(self) -> float:
        return self._ema_short
    @property
    def ema_long(self) -> float:
        return self._ema_long
    @property
    def macd_histograms(self) -> List[float]:
        return self._macd_histograms
    @property
    def klines(self) -> List[Kline]:
        return self._klines
    def add_kline(self, kline: Kline):
        """Append a new kline; the deque drops the oldest beyond 200."""
        self._klines.append(kline)
    def calc_tech_indicators(self):
        """Recompute EMA/MACD indicators from the stored close prices.

        NOTE(review): talib returns NaN for the warm-up period, so the
        values are NaN until enough klines have accumulated — confirm
        callers tolerate that.
        """
        array = [float(kline.close_price) for kline in self._klines]
        # self.logger().info(f"HAHA array is {array}")
        np_closes = np.array(array)
        ema_short = talib.EMA(np_closes, timeperiod=7)
        ema_long = talib.EMA(np_closes, timeperiod=20)
        macd = talib.MACD(np_closes, fastperiod=7, slowperiod=20,
                          signalperiod=9)
        self._ema_short = ema_short[-1]
        self._ema_long = ema_long[-1]
        # MACD output 3 lists. We only need last list(histogram). We only
        # copy the last 10 histograms.
        self._macd_histograms = macd[-1][-10:]
        self.logger().info(
            f"(Classic) EMA_7 is {self._ema_short}, EMA_20 is {self._ema_long}, MACD(7, 20, 9) Histogram is {macd[-1][-1]} Histogram list is {self._macd_histograms}")
| StarcoderdataPython |
4808803 | import commands
import datetime
import json
import logging
import math
import os
import shutil
import sys
import time
import traceback
import threading
import pickle
import signal
from os.path import abspath as _abspath, join as _join
# logging.basicConfig(filename='Yoda.log', level=logging.DEBUG)
import Interaction,Database,Logger
from signal_block.signal_block import block_sig, unblock_sig
#from HPC import EventServer
# main Yoda class
class Yoda(threading.Thread):
    class HelperThread(threading.Thread):
        """Background thread that invokes *helperFunc* roughly once per minute
        until stop() is called."""
        def __init__(self, logger, helperFunc, **kwds):
            threading.Thread.__init__(self, **kwds)
            self.__log = logger
            self.__func = helperFunc
            # NOTE(review): the name '_stop' shadows an internal attribute of
            # threading.Thread on Python 2 — confirm this causes no interference.
            self._stop = threading.Event()
            self.__log.debug("HelperThread initialized.")
        def stop(self):
            # Signal the run() loop to exit.
            self._stop.set()
        def stopped(self):
            return self._stop.isSet()
        def run(self):
            try:
                exec_time = None
                while True:
                    if self.stopped():
                        break
                    # run the helper at most once every 60 seconds
                    if exec_time is None or exec_time < time.time() - 60:
                        self.__func()
                        exec_time = time.time()
                    time.sleep(1)
            except:
                self.__log.debug("Exception: HelperThread failed: %s" % traceback.format_exc())
# constructor
    def __init__(self, globalWorkingDir, localWorkingDir, pilotJob=None, rank=None, nonMPIMode=False, outputDir=None, dumpEventOutputs=False):
        """Set up the Yoda master: database, logger, MPI receiver, per-rank
        working directory, scheduling queues, and signal handlers.

        globalWorkingDir is the shared directory holding job/event JSON files;
        localWorkingDir gets a per-rank subdirectory (see initWorkingDir).
        """
        threading.Thread.__init__(self)
        self.globalWorkingDir = globalWorkingDir
        self.localWorkingDir = localWorkingDir
        self.currentDir = None
        # database backend
        self.db = Database.Backend(self.globalWorkingDir)
        # logger
        self.tmpLog = Logger.Logger(filename='Yoda.log')
        # communication channel
        self.comm = Interaction.Receiver(rank=rank, nonMPIMode=nonMPIMode, logger=self.tmpLog)
        self.rank = self.comm.getRank()
        self.tmpLog.info("Global working dir: %s" % self.globalWorkingDir)
        self.initWorkingDir()
        self.tmpLog.info("Current working dir: %s" % self.currentDir)
        self.failed_updates = []
        self.outputDir = outputDir
        self.dumpEventOutputs = dumpEventOutputs
        self.pilotJob = pilotJob
        # default core count; overwritten per job from ATHENA_PROC_NUMBER
        self.cores = 10
        self.jobs = []
        # jobs which needs more than one rank
        self.jobRanks = []
        self.totalJobRanks = 0
        # jobs which needs less than one rank
        self.jobRanksSmallPiece = []
        self.totalJobRanksSmallPiece = 0
        self.rankJobsTries = {}
        # scheduler policy: ranks below this threshold take big jobs first
        self.bigJobFirst = True
        self.lastRankForBigJobFirst = int(self.getTotalRanks() * 0.9)
        self.readyEventRanges = []
        self.runningEventRanges = {}
        self.finishedEventRanges = []
        self.readyJobsEventRanges = {}
        self.runningJobsEventRanges = {}
        self.finishedJobsEventRanges = {}
        self.stagedOutJobsEventRanges = {}
        self.updateEventRangesToDBTime = None
        self.jobMetrics = {}
        self.jobsTimestamp = {}
        self.jobsRuningRanks = {}
        # remember original handlers, then install ours
        # (self.stop / self.stopYoda are defined later in this class)
        self.originSigHandler = {}
        for sig in [signal.SIGTERM, signal.SIGQUIT, signal.SIGSEGV, signal.SIGXCPU, signal.SIGUSR1, signal.SIGBUS]:
            self.originSigHandler[sig] = signal.getsignal(sig)
        signal.signal(signal.SIGTERM, self.stop)
        signal.signal(signal.SIGQUIT, self.stop)
        signal.signal(signal.SIGSEGV, self.stop)
        signal.signal(signal.SIGXCPU, self.stopYoda)
        signal.signal(signal.SIGUSR1, self.stopYoda)
        signal.signal(signal.SIGBUS, self.stopYoda)
    def getTotalRanks(self):
        """Return the total number of MPI ranks known to the communicator."""
        return self.comm.getTotalRanks()
    def initWorkingDir(self):
        """Create (if needed) and chdir into localWorkingDir/rank_<rank>."""
        # Create separate working directory for each rank
        curdir = _abspath (self.localWorkingDir)
        wkdirname = "rank_%s" % str(self.rank)
        wkdir = _abspath (_join(curdir,wkdirname))
        if not os.path.exists(wkdir):
            os.makedirs (wkdir)
        os.chdir (wkdir)
        self.currentDir = wkdir
    def postExecJob(self):
        """Move the per-rank working dir back into the global (shared) dir.

        No-op when local and global working dirs are the same path.
        """
        if self.globalWorkingDir != self.localWorkingDir:
            # shell out to 'mv'; status/output are only logged, not acted on
            command = "mv " + self.currentDir + " " + self.globalWorkingDir
            self.tmpLog.debug("Rank %s: copy files from local working directory to global working dir(cmd: %s)" % (self.rank, command))
            status, output = commands.getstatusoutput(command)
            self.tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.rank, status, output))
# setup
def setupJob(self, job):
try:
self.job = job
jobFile = os.path.join(self.globalWorkingDir, 'HPCJob.json')
tmpFile = open(jobFile, "w")
#pickle.dump(self.job, tmpFile)
json.dump(self.job, tmpFile)
return True, None
except:
errtype,errvalue = sys.exc_info()[:2]
errMsg = 'failed to dump job with {0}:{1}'.format(errtype.__name__,errvalue)
return False,errMsg
# load job
    def loadJob(self):
        """Load the single-job description from HPCJob.json in the global dir.

        Returns (True, job) on success or (False, errMsg) on failure.
        """
        try:
            # load job
            tmpFile = open(os.path.join(self.globalWorkingDir, 'HPCJob.json'))
            #self.job = pickle.load(tmpFile)
            self.job = json.load(tmpFile)
            tmpFile.close()
            return True,self.job
        except:
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to load job with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
# load jobs
    def loadJobs(self):
        """Load the multi-job mapping from HPCJobs.json in the global dir.

        Returns (True, jobs) on success or (False, errMsg) on failure.
        """
        try:
            # load jobs
            tmpFile = open(os.path.join(self.globalWorkingDir, 'HPCJobs.json'))
            #self.job = pickle.load(tmpFile)
            self.jobs = json.load(tmpFile)
            tmpFile.close()
            return True,self.jobs
        except:
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to load job with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
# init job ranks
def initJobRanks(self):
try:
# sort by needed ranks
neededRanks = {}
for jobId in self.jobs:
job = self.jobs[jobId]
try:
self.cores = int(job.get('ATHENA_PROC_NUMBER', 10))
if self.cores < 1:
self.cores = 10
except:
self.tmpLog.debug("Rank %s: failed to get core count" % (self.rank, traceback.format_exc()))
if job['neededRanks'] not in neededRanks:
neededRanks[job['neededRanks']] = []
neededRanks[job['neededRanks']].append(jobId)
keys = neededRanks.keys()
keys.sort(reverse=True)
for key in keys:
self.tmpLog.debug("Rank %s: Needed ranks %s" % (self.rank, key))
if key < 1:
for jobId in neededRanks[key]:
self.tmpLog.debug("Rank %s: Adding %s to small piece queue" % (self.rank, jobId))
self.totalJobRanksSmallPiece += key
self.jobRanksSmallPiece.append(jobId)
else:
for jobId in neededRanks[key]:
# for i in range(int(math.ceil(key))):
for i in range(int(key)):
self.tmpLog.debug("Rank %s: Adding %s to full rank queue" % (self.rank, jobId))
self.jobRanks.append(jobId)
self.totalJobRanks = len(self.jobRanks)
self.tmpLog.debug("Rank %s: Jobs in small piece queue(one job is not enough to take the full rank) %s, total needed ranks %s" % (self.rank, self.jobRanksSmallPiece, self.totalJobRanksSmallPiece))
self.tmpLog.debug("Rank %s: Jobs in full rank queue(one job is long enough to take the full rank) %s, total needed ranks %s" % (self.rank, self.jobRanks, self.totalJobRanks))
return True,self.jobRanks
except:
self.tmpLog.debug("Rank %s: %s" % (self.rank, traceback.format_exc()))
errtype,errvalue = sys.exc_info()[:2]
errMsg = 'failed to load job with {0}:{1}'.format(errtype.__name__,errvalue)
return False,errMsg
    def createEventTable(self):
        """Create the event table in the database backend.

        Returns (True, None) on success or (False, errMsg) on failure.
        """
        try:
            self.db.createEventTable()
            return True,None
        except:
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to create event table with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
    def insertEventRanges(self, eventRanges):
        """Insert *eventRanges* into the single-job event table.

        Returns (True, None) on success or (False, errMsg) on failure.
        """
        try:
            self.db.insertEventRanges(eventRanges)
            return True,None
        except:
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to insert event range to table with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
    def insertJobsEventRanges(self, eventRanges):
        """Insert *eventRanges* into the multi-job event table.

        Returns (True, None) on success or (False, errMsg) on failure.
        """
        try:
            self.db.insertJobsEventRanges(eventRanges)
            return True,None
        except:
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to insert event range to table with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
# make event table
    def makeEventTable(self):
        """Load EventRanges.json and seed the database event table for self.job.

        Also keeps the loaded ranges as self.readyJobsEventRanges.
        Returns (True, None) on success or (False, errMsg) on failure.
        """
        try:
            # load event ranges
            tmpFile = open(os.path.join(self.globalWorkingDir, 'EventRanges.json'))
            eventRangeList = json.load(tmpFile)
            tmpFile.close()
            # setup database
            self.db.setupEventTable(self.job,eventRangeList)
            self.readyJobsEventRanges = eventRangeList
            return True,None
        except:
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to make event table with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
# make event table
    def makeJobsEventTable(self):
        """Load JobsEventRanges.json and initialise per-job tracking maps.

        For every job, running/finished/staged-out bookkeeping structures are
        created empty.  The DB table setup call is intentionally commented out.
        Returns (True, None) on success or (False, errMsg) on failure.
        """
        try:
            # load event ranges
            tmpFile = open(os.path.join(self.globalWorkingDir, 'JobsEventRanges.json'))
            eventRangeList = json.load(tmpFile)
            tmpFile.close()
            # setup database
            # self.db.setupJobsEventTable(self.jobs,eventRangeList)
            self.readyJobsEventRanges = eventRangeList
            for jobId in self.readyJobsEventRanges:
                self.runningJobsEventRanges[jobId] = {}
                self.finishedJobsEventRanges[jobId] = []
                self.stagedOutJobsEventRanges[jobId] = []
            return True,None
        except:
            self.tmpLog.debug("Rank %s: %s" % (self.rank, traceback.format_exc()))
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to make event table with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
    def printEventStatus(self):
        """Log ready-event counts and needed ranks per job, plus both queues.

        Returns (True, None) on success or (False, errMsg) on failure.
        """
        try:
            for jobId in self.jobs:
                job = self.jobs[jobId]
                neededRanks = job['neededRanks']
                readyEvents = len(self.readyJobsEventRanges[jobId]) if jobId in self.readyJobsEventRanges else 0
                self.tmpLog.debug("Rank %s: Job %s has %s events, needs %s ranks" % (self.rank, jobId, readyEvents, neededRanks))
            self.tmpLog.debug("Rank %s: Job full rank queue: %s" % (self.rank, self.jobRanks))
            self.tmpLog.debug("Rank %s: Job small piece queue: %s" % (self.rank, self.jobRanksSmallPiece))
            return True, None
        except:
            self.tmpLog.debug("Rank %s: %s" % (self.rank, traceback.format_exc()))
            errtype,errvalue = sys.exc_info()[:2]
            # NOTE(review): message copied from makeEventTable; misleading here
            errMsg = 'failed to make event table with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
# inject more events
    def injectEvents(self):
        """Scan the global dir for new '*EventRanges.json' files and ingest them.

        The base 'EventRanges.json' file itself is skipped (already loaded).
        New ranges go to the DB and onto self.readyEventRanges.
        Returns (True, None) on success or (False, errMsg) on failure.
        """
        try:
            # scan new event ranges json files
            all_files = os.listdir(self.globalWorkingDir)
            for file in all_files:
                if file != 'EventRanges.json' and file.endswith("EventRanges.json"):
                    tmpFile = open(os.path.join(self.globalWorkingDir, file))
                    eventRangeList = json.load(tmpFile)
                    tmpFile.close()
                    self.insertEventRanges(eventRangeList)
                    for eventRange in eventRangeList:
                        self.readyEventRanges.append(eventRange)
            return True,None
        except:
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to inject more event range to table with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
# inject more events
    def injectJobsEvents(self):
        """Scan for new '*JobsEventRanges.json' files and ingest them per job.

        The base 'JobsEventRanges.json' file itself is skipped (already loaded).
        New ranges go to the DB and onto readyJobsEventRanges[jobId].
        Returns (True, None) on success or (False, errMsg) on failure.
        """
        try:
            # scan new event ranges json files
            all_files = os.listdir(self.globalWorkingDir)
            for file in all_files:
                if file != 'JobsEventRanges.json' and file.endswith("JobsEventRanges.json"):
                    tmpFile = open(os.path.join(self.globalWorkingDir, file))
                    eventRangeList = json.load(tmpFile)
                    tmpFile.close()
                    self.insertJobsEventRanges(eventRangeList)
                    for jobId in eventRangeList:
                        for eventRange in eventRangeList[jobId]:
                            self.readyJobsEventRanges[jobId].append(eventRange)
            return True,None
        except:
            errtype,errvalue = sys.exc_info()[:2]
            errMsg = 'failed to inject more event range to table with {0}:{1}'.format(errtype.__name__,errvalue)
            return False,errMsg
def rescheduleJobRanks(self):
try:
self.tmpLog.debug("Rank %s: rescheduleJobRanks" % (self.rank))
numEvents = {}
for jobId in self.readyJobsEventRanges:
no = len(self.readyJobsEventRanges[jobId])
self.tmpLog.debug("Rank %s: Job %s ready events %s" % (self.rank, jobId, no))
if no not in numEvents:
numEvents[len] = []
numEvents.append(jobId)
keys = numEvents.keys()
keys.sort(reverse=True)
for key in keys:
if key < self.cores * 2:
continue
for jobId in numEvents[key]:
#for i in range(key/self.cores):
self.tmpLog.debug("Rank %s: Adding job %s to small piece queue" % (self.rank, jobId))
self.jobRanksSmallPiece.append(jobId)
self.tmpLog.debug("Rank %s: Jobs in small piece queue(one job is not enough to take the full rank) %s" % (self.rank, self.jobRanksSmallPiece))
self.tmpLog.debug("Rank %s: Jobs in full rank queue(one job is long enough to take the full rank, should be empty if reaching here) %s" % (self.rank, self.jobRanks))
self.printEventStatus()
except:
errtype,errvalue = sys.exc_info()[:2]
errMsg = 'failed to reschedule job ranks with {0}:{1}'.format(errtype.__name__,errvalue)
return False,errMsg
# get job
    def getJobScheduler(self,params):
        """Pick a job for the requesting rank from the scheduling queues.

        Ranks <= lastRankForBigJobFirst drain the full-rank queue from the
        front, falling back to the small-piece queue; higher ranks do the
        mirror image (small pieces first, popping from the back).  A job a
        rank has already tried is never offered to it again.
        Returns (jobId, job); job is None when nothing is available.
        NOTE(review): when a queue is exhausted without a match, jobId keeps
        the last popped id while job stays None — confirm callers handle that.
        """
        rank = params['rank']
        job = None
        jobId = None
        if int(rank) <= self.lastRankForBigJobFirst:
            self.tmpLog.debug("Rank %s: Big jobs first for rank %s(<=%s the last rank for big job first)" % (self.rank, rank, self.lastRankForBigJobFirst))
            while len(self.jobRanks):
                jobId = self.jobRanks.pop(0)
                if rank in self.rankJobsTries and jobId in self.rankJobsTries[rank]:
                    self.tmpLog.debug("Rank %s: Job %s already tried on rank %s, will not scheduled to it again." % (self.rank, jobId, rank))
                    continue
                if len(self.readyJobsEventRanges[jobId]) > 0:
                    job = self.jobs[jobId]
                    break
            if job is None:
                self.tmpLog.debug("Rank %s: no available jobs in full rank queue, try to get job from small piece queue" % (self.rank))
                while len(self.jobRanksSmallPiece):
                    jobId = self.jobRanksSmallPiece.pop(0)
                    if rank in self.rankJobsTries and jobId in self.rankJobsTries[rank]:
                        self.tmpLog.debug("Rank %s: Job %s already tried on rank %s, will not scheduled to it again." % (self.rank, jobId, rank))
                        continue
                    if len(self.readyJobsEventRanges[jobId]) > 0:
                        job = self.jobs[jobId]
                        break
        else:
            self.tmpLog.debug("Rank %s: Small jobs first for rank %s(>%s the last rank for big job first)" % (self.rank, rank, self.lastRankForBigJobFirst))
            while len(self.jobRanksSmallPiece):
                jobId = self.jobRanksSmallPiece.pop()
                if rank in self.rankJobsTries and jobId in self.rankJobsTries[rank]:
                    self.tmpLog.debug("Rank %s: Job %s already tried on rank %s, will not scheduled to it again." % (self.rank, jobId, rank))
                    continue
                if len(self.readyJobsEventRanges[jobId]) > 0:
                    job = self.jobs[jobId]
                    break
            if job is None:
                while len(self.jobRanks):
                    jobId = self.jobRanks.pop()
                    if rank in self.rankJobsTries and jobId in self.rankJobsTries[rank]:
                        self.tmpLog.debug("Rank %s: Job %s already tried on rank %s, will not scheduled to it again." % (self.rank, jobId, rank))
                        continue
                    if len(self.readyJobsEventRanges[jobId]) > 0:
                        job = self.jobs[jobId]
                        break
        return jobId, job
# get job
    def getJob(self,params):
        """Handle a rank's job request: schedule a job and send the response.

        Falls back to rescheduleJobRanks() once if nothing was available.
        Bookkeeping: records the rank as running the job, stamps the job's
        start time, and remembers the (rank, jobId) try so the same job is
        not offered to that rank again.
        """
        rank = params['rank']
        jobId, job = self.getJobScheduler(params)
        if job is None:
            ##### not disable reschedule job ranks, it will split jobs to additional ranks
            ##### instead, pilot will download more events then expected
            self.rescheduleJobRanks()
            jobId, job = self.getJobScheduler(params)
        res = {'StatusCode':0,
               'job': job}
        self.tmpLog.debug('res={0}'.format(str(res)))
        if jobId:
            if jobId not in self.jobsRuningRanks:
                self.jobsRuningRanks[jobId] = []
            self.jobsRuningRanks[jobId].append(rank)
            if jobId not in self.jobsTimestamp:
                self.jobsTimestamp[jobId] = {'startTime': time.time(), 'endTime': None}
            if rank not in self.rankJobsTries:
                self.rankJobsTries[rank] = []
            self.rankJobsTries[rank].append(jobId)
        self.comm.returnResponse(res)
        self.tmpLog.debug('return response')
# update job
    def updateJob(self,params):
        """Acknowledge a job heartbeat; final states currently trigger nothing."""
        # final heartbeat
        if params['state'] in ['finished','failed']:
            # self.comm.decrementNumRank()
            pass
        # make response
        res = {'StatusCode':0,
               'command':'NULL'}
        # return
        self.tmpLog.debug('res={0}'.format(str(res)))
        self.comm.returnResponse(res)
        self.tmpLog.debug('return response')
# finish job
def finishJob(self,params):
# final heartbeat
jobId = params['jobId']
rank = params['rank']
if params['state'] in ['finished','failed']:
# self.comm.decrementNumRank()
self.jobsRuningRanks[jobId].remove(rank)
endTime = time.time()
if self.jobsTimestamp[params['jobId']]['endTime'] is None or self.jobsTimestamp[params['jobId']]['endTime'] < endTime:
self.jobsTimestamp[params['jobId']]['endTime'] = endTime
# make response
res = {'StatusCode':0,
'command':'NULL'}
# return
self.tmpLog.debug('res={0}'.format(str(res)))
self.comm.returnResponse(res)
self.tmpLog.debug('return response')
# finish droid
def finishDroid(self,params):
# final heartbeat
if params['state'] in ['finished','failed']:
self.comm.decrementNumRank()
# make response
res = {'StatusCode':0,
'command':'NULL'}
# return
self.tmpLog.debug('res={0}'.format(str(res)))
self.comm.returnResponse(res)
self.tmpLog.debug('return response')
# get event ranges
    def getEventRanges_old(self,params):
        """Legacy handler: serve event ranges directly from the database.
        Superseded by getEventRanges(), which uses the in-memory per-job
        bookkeeping instead of the DB.
        """
        # number of event ranges requested by the pilot (default 1)
        if 'nRanges' in params:
            nRanges = int(params['nRanges'])
        else:
            nRanges = 1
        # get event ranges from DB
        try:
            eventRanges = self.db.getEventRanges(nRanges)
        except Exception as e:
            self.tmpLog.debug('db.getEventRanges failed: %s' % str(e))
            res = {'StatusCode':-1,
                   'eventRanges':None}
        else:
            # make response
            res = {'StatusCode':0,
                   'eventRanges':eventRanges}
        # return response
        self.tmpLog.debug('res={0}'.format(str(res)))
        self.comm.returnResponse(res)
        # dump updated records (best effort)
        try:
            self.db.dumpUpdates()
        except Exception as e:
            self.tmpLog.debug('db.dumpUpdates failed: %s' % str(e))
# get event ranges
def getEventRanges(self,params):
jobId = params['jobId']
# number of event ranges
if 'nRanges' in params:
nRanges = int(params['nRanges'])
else:
nRanges = 1
eventRanges = []
try:
for i in range(nRanges):
if len(self.readyJobsEventRanges[jobId]) > 0:
eventRange = self.readyJobsEventRanges[jobId].pop(0)
eventRanges.append(eventRange)
self.runningJobsEventRanges[jobId][eventRange['eventRangeID']] = eventRange
else:
break
except:
self.tmpLog.warning("Failed to get event ranges: %s" % traceback.format_exc())
print self.readyJobsEventRanges
print self.runningJobsEventRanges
# make response
res = {'StatusCode':0,
'eventRanges':eventRanges}
# return response
self.tmpLog.debug('res={0}'.format(str(res)))
self.comm.returnResponse(res)
self.tmpLog.debug('return response')
# update event range
    def updateEventRange_old(self,params):
        """Legacy handler: write a single event-range update straight to the DB.
        Failed updates are queued in self.failed_updates for a later retry by
        updateFailedEventRanges().
        """
        # extract parameters
        eventRangeID = params['eventRangeID']
        eventStatus = params['eventStatus']
        output = params['output']
        # update database
        try:
            self.db.updateEventRange(eventRangeID,eventStatus, output)
        except Exception as e:
            self.tmpLog.debug('db.updateEventRange failed: %s' % str(e))
            self.failed_updates.append([eventRangeID,eventStatus, output])
        # make response
        res = {'StatusCode':0}
        # return
        self.tmpLog.debug('res={0}'.format(str(res)))
        self.comm.returnResponse(res)
        # dump updated records (best effort)
        try:
            self.db.dumpUpdates()
        except Exception as e:
            self.tmpLog.debug('db.dumpUpdates failed: %s' % str(e))
# update event range
def updateEventRange(self,params):
# extract parameters
jobId = params['jobId']
eventRangeID = params['eventRangeID']
eventStatus = params['eventStatus']
output = params['output']
if eventRangeID in self.runningJobsEventRanges[jobId]:
# eventRange = self.runningEventRanges[eventRangeID]
del self.runningJobsEventRanges[jobId][eventRangeID]
if eventStatus == 'stagedOut':
self.stagedOutJobsEventRanges[jobId].append((eventRangeID, eventStatus, output))
else:
self.finishedJobsEventRanges[jobId].append((eventRangeID, eventStatus, output))
# make response
res = {'StatusCode':0}
# return
self.tmpLog.debug('res={0}'.format(str(res)))
self.comm.returnResponse(res)
self.tmpLog.debug('return response')
# update event ranges
def updateEventRanges(self,params):
for param in params:
# extract parameters
jobId = param['jobId']
eventRangeID = param['eventRangeID']
eventStatus = param['eventStatus']
output = param['output']
if eventRangeID in self.runningJobsEventRanges[jobId]:
# eventRange = self.runningEventRanges[eventRangeID]
del self.runningJobsEventRanges[jobId][eventRangeID]
if eventStatus == 'stagedOut':
self.stagedOutJobsEventRanges[jobId].append((eventRangeID, eventStatus, output))
else:
self.finishedJobsEventRanges[jobId].append((eventRangeID, eventStatus, output))
# make response
res = {'StatusCode':0}
# return
self.tmpLog.debug('res={0}'.format(str(res)))
self.comm.returnResponse(res)
self.tmpLog.debug('return response')
def updateFailedEventRanges(self):
for failed_update in self.failed_updates:
eventRangeID,eventStatus, output = failed_update
try:
self.db.updateEventRange(eventRangeID,eventStatus, output)
except Exception as e:
self.tmpLog.debug('db.updateEventRange failed: %s' % str(e))
def updateRunningEventRangesToDB(self):
try:
runningEvents = []
for jobId in self.runningJobsEventRanges:
for eventRangeID in self.runningJobsEventRanges[jobId]:
# self.tmpLog.debug(self.runningEventRanges[eventRangeID])
status = 'running'
output = None
runningEvents.append((eventRangeID, status, output))
if len(runningEvents):
self.db.updateEventRanges(runningEvents)
except Exception as e:
self.tmpLog.debug('updateRunningEventRangesToDB failed: %s, %s' % (str(e), traceback.format_exc()))
    def dumpUpdates(self, jobId, outputs, type=''):
        """Dump event-range results for one job to a status file and a
        POOL-style XML metadata catalog.
        Both files are written as '<name>.new' and then moved over the real
        name, so readers never see a partially written file.
        :param jobId: PanDA job id the outputs belong to
        :param outputs: iterable of (eventRangeID, status, output) tuples
        :param type: suffix appended to the dump file name (e.g. '.stagedOut');
                     NOTE(review): shadows the builtin 'type', kept for caller
                     compatibility (called with type='.stagedOut')
        """
        #if self.dumpEventOutputs == False:
        #    return
        timeNow = datetime.datetime.utcnow()
        #outFileName = str(jobId) + "_" + timeNow.strftime("%Y-%m-%d-%H-%M-%S-%f") + '.dump' + type
        outFileName = str(jobId) + "_event_status.dump" + type
        outFileName = os.path.join(self.globalWorkingDir, outFileName)
        outFile = open(outFileName + ".new", 'w')
        self.tmpLog.debug("dumpUpdates: dumpFileName %s" % (outFileName))
        metadataFileName = None
        metafd = None
        # if self.dumpEventOutputs:
        if True:
            # metadata goes to outputDir when configured, else the working dir
            metadataFileName = 'metadata-' + os.path.basename(outFileName).split('.dump')[0] + '.xml'
            if self.outputDir:
                metadataFileName = os.path.join(self.outputDir, metadataFileName)
            else:
                metadataFileName = os.path.join(self.globalWorkingDir, metadataFileName)
            self.tmpLog.debug("dumpUpdates: outputDir %s, metadataFileName %s" % (self.outputDir, metadataFileName))
            metafd = open(metadataFileName + ".new", "w")
            metafd.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
            metafd.write("<!-- Edited By POOL -->\n")
            metafd.write('<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n')
            metafd.write("<POOLFILECATALOG>\n")
        for eventRangeID,status,output in outputs:
            outFile.write('{0} {1} {2} {3}\n'.format(str(jobId), str(eventRangeID), str(status), str(output)))
            # normalise error statuses for the XML catalog
            if status.startswith("ERR"):
                status = 'failed'
            if metafd:
                metafd.write(' <File EventRangeID="%s" Status="%s">\n' % (eventRangeID, status))
                metafd.write(" <physical>\n")
                if isinstance(output, (list, tuple)):
                    for output1 in output:
                        metafd.write(' <pfn filetype="ROOT_All" name="%s"/>\n' % (str(output1)))
                else:
                    # comma-separated string: the last three fields are not
                    # file names — presumably metadata; TODO confirm format
                    for output1 in output.split(",")[:-3]:
                        metafd.write(' <pfn filetype="ROOT_All" name="%s"/>\n' % (str(output1)))
                metafd.write(" </physical>\n")
                metafd.write(" </File>\n")
        outFile.close()
        if metafd:
            metafd.write("</POOLFILECATALOG>\n")
            metafd.close()
        # mv the new file to overwrite the current one
        command = "mv %s.new %s" % (outFileName, outFileName)
        retS, retOut = commands.getstatusoutput(command)
        if retS:
            self.tmpLog.debug('Failed to execute %s: %s' % (command, retOut))
        if metadataFileName:
            command = "mv %s.new %s" % (metadataFileName, metadataFileName)
            retS, retOut = commands.getstatusoutput(command)
            if retS:
                self.tmpLog.debug('Failed to execute %s: %s' % (command, retOut))
    def updateFinishedEventRangesToDB(self):
        """Dump staged-out and finished event ranges to their status files.
        The per-job lists are intentionally not cleared (see the commented-out
        removal code below): each call re-dumps the complete state, and the
        mv-over-the-old-file trick in dumpUpdates keeps the files consistent.
        """
        try:
            self.tmpLog.debug('start to updateFinishedEventRangesToDB')
            for jobId in self.stagedOutJobsEventRanges:
                if len(self.stagedOutJobsEventRanges[jobId]):
                    self.dumpUpdates(jobId, self.stagedOutJobsEventRanges[jobId], type='.stagedOut')
                    #for i in self.stagedOutJobsEventRanges[jobId]:
                    #    self.stagedOutJobsEventRanges[jobId].remove(i)
                    #self.stagedOutJobsEventRanges[jobId] = []
            for jobId in self.finishedJobsEventRanges:
                if len(self.finishedJobsEventRanges[jobId]):
                    self.dumpUpdates(jobId, self.finishedJobsEventRanges[jobId])
                    #self.db.updateEventRanges(self.finishedEventRanges)
                    #for i in self.finishedJobsEventRanges[jobId]:
                    #    self.finishedJobsEventRanges[jobId].remove(i)
                    #self.finishedJobsEventRanges[jobId] = []
            self.tmpLog.debug('finished to updateFinishedEventRangesToDB')
        except Exception as e:
            self.tmpLog.debug('updateFinishedEventRangesToDB failed: %s, %s' % (str(e), traceback.format_exc()))
def updateEventRangesToDB(self, force=False, final=False):
timeNow = time.time()
# forced or first dump or enough interval
if force or self.updateEventRangesToDBTime == None or \
((timeNow - self.updateEventRangesToDBTime) > 60 * 5):
self.tmpLog.debug('start to updateEventRangesToDB')
self.updateEventRangesToDBTime = time.time()
#if not final:
# self.updateRunningEventRangesToDB()
self.updateFinishedEventRangesToDB()
self.tmpLog.debug('finished to updateEventRangesToDB')
def finishDroids(self):
self.tmpLog.debug('finish Droids')
# make message
res = {'StatusCode':0, 'State': 'finished'}
self.tmpLog.debug('res={0}'.format(str(res)))
self.comm.sendMessage(res)
#self.comm.disconnect()
def collectMetrics(self, ranks):
metrics = {}
metricsReport = {}
for rank in ranks.keys():
for key in ranks[rank].keys():
if key in ["setupTime", "runningTime", 'totalTime', "cores", "queuedEvents", "processedEvents", "cpuConsumptionTime", 'avgTimePerEvent']:
if key not in metrics:
metrics[key] = 0
metrics[key] += ranks[rank][key]
setupTime = []
runningTime = []
totalTime = []
stageoutTime = []
for rank in ranks.keys():
setupTime.append(ranks[rank]['setupTime'])
runningTime.append(ranks[rank]['runningTime'])
totalTime.append(ranks[rank]['totalTime'])
stageoutTime.append(ranks[rank]['totalTime'] - ranks[rank]['setupTime'] - ranks[rank]['runningTime'])
num_ranks = len(ranks.keys())
if num_ranks < 1:
num_ranks = 1
processedEvents = metrics['processedEvents']
if processedEvents < 1:
processedEvents = 1
metricsReport['avgYodaSetupTime'] = metrics['setupTime']/num_ranks
metricsReport['avgYodaRunningTime'] = metrics['runningTime']/num_ranks
metricsReport['avgYodaStageoutTime'] = (metrics['totalTime'] - metrics['setupTime'] - metrics['runningTime'])/num_ranks
metricsReport['avgYodaTotalTime'] = metrics['totalTime']/num_ranks
metricsReport['maxYodaSetupTime'] = max(setupTime)
metricsReport['maxYodaRunningTime'] = max(runningTime)
metricsReport['maxYodaStageoutTime'] = max(stageoutTime)
metricsReport['maxYodaTotalTime'] = max(totalTime)
metricsReport['minYodaSetupTime'] = min(setupTime)
metricsReport['minYodaRunningTime'] = min(runningTime)
metricsReport['minYodaStageoutTime'] = min(stageoutTime)
metricsReport['minYodaTotalTime'] = min(totalTime)
metricsReport['cores'] = metrics['cores']
metricsReport['cpuConsumptionTime'] = metrics['cpuConsumptionTime']
metricsReport['totalQueuedEvents'] = metrics['queuedEvents']
metricsReport['totalProcessedEvents'] = metrics['processedEvents']
metricsReport['avgTimePerEvent'] = metrics['avgTimePerEvent']/ num_ranks
for key in metricsReport:
metricsReport[key] = int(metricsReport[key])
return metricsReport
    def heartbeat(self, params):
        """Record a per-rank metrics heartbeat and refresh the job summary.
        Expected payload:
        {"jobId": , "rank": , "startTime": ,"readyTime": , "endTime": , "setupTime": , "totalTime": , "cores": , "processCPUHour": , "totalCPUHour": , "queuedEvents": , "processedEvents": , "cpuConsumptionTime": }
        """
        self.tmpLog.debug('heartbeat')
        jobId = params['jobId']
        rank = params['rank']
        # make response
        res = {'StatusCode':0}
        # reply first so the rank is not blocked while metrics are aggregated
        self.tmpLog.debug('res={0}'.format(str(res)))
        self.comm.returnResponse(res)
        self.tmpLog.debug('return response')
        # store the raw per-rank report and recompute the aggregated view
        if jobId not in self.jobMetrics:
            self.jobMetrics[jobId] = {'ranks': {}, 'collect': {}}
        self.jobMetrics[jobId]['ranks'][rank] = params
        self.jobMetrics[jobId]['collect'] = self.collectMetrics(self.jobMetrics[jobId]['ranks'])
        #self.dumpJobMetrics()
    def dumpJobMetrics(self):
        """Write the aggregated job metrics to jobMetrics-yoda.json.
        Writes to a '.new' file first and then moves it over the target so
        readers never see a partially written JSON document.
        """
        jobMetricsFileName = "jobMetrics-yoda.json"
        try:
            #outputDir = self.jobs[jobId]["GlobalWorkingDir"]
            outputDir = self.globalWorkingDir
        except:
            self.tmpLog.debug("Failed to get job's global working dir: %s" % (traceback.format_exc()))
            outputDir = self.globalWorkingDir
        jobMetrics = os.path.join(outputDir, jobMetricsFileName)
        self.tmpLog.debug("JobMetrics file: %s" % (jobMetrics + ".new"))
        tmpFile = open(jobMetrics+ ".new", "w")
        json.dump(self.jobMetrics, tmpFile)
        tmpFile.close()
        command = "mv %s.new %s" % (jobMetrics, jobMetrics)
        retS, retOut = commands.getstatusoutput(command)
        if retS:
            self.tmpLog.debug('Failed to execute %s: %s' % (command, retOut))
def dumpJobsStartTime(self):
jobsTimestampFileName = "jobsTimestamp-yoda.json"
outputDir = self.globalWorkingDir
jobsTimestampFile = os.path.join(outputDir, jobsTimestampFileName)
self.tmpLog.debug("JobsStartTime file: %s" % (jobsTimestampFile + ".new"))
tmpFile = open(jobsTimestampFile + ".new", "w")
json.dump(self.jobsTimestamp, tmpFile)
tmpFile.close()
command = "mv %s.new %s" % (jobsTimestampFile, jobsTimestampFile)
retS, retOut = commands.getstatusoutput(command)
if retS:
self.tmpLog.debug('Failed to execute %s: %s' % (command, retOut))
    def helperFunction(self):
        """Periodic maintenance run by the helper thread: flush event ranges
        and dump metrics/timestamp files."""
        # flush the updated event ranges to their dump files
        self.updateEventRangesToDB(force=True)
        self.dumpJobMetrics()
        self.dumpJobsStartTime()
# main yoda
    def runYoda(self):
        """Main Yoda loop: load jobs, build the event table, then serve
        Droid requests until no ranks remain active.
        Requests are dispatched by name: each received 'method' string is
        looked up as an attribute on self and invoked with the params dict,
        so any public handler method (getJob, getEventRanges, ...) is
        callable by the Droids.
        """
        # get logger
        self.tmpLog.info('start')
        # load job
        self.tmpLog.info('loading job')
        tmpStat,tmpOut = self.loadJobs()
        self.tmpLog.info("loading jobs: (status: %s, output: %s)" %(tmpStat, tmpOut))
        if not tmpStat:
            self.tmpLog.error(tmpOut)
            raise Exception(tmpOut)
        self.tmpLog.info("init job ranks")
        tmpStat,tmpOut = self.initJobRanks()
        self.tmpLog.info("initJobRanks: (status: %s, output: %s)" %(tmpStat, tmpOut))
        if not tmpStat:
            self.tmpLog.error(tmpOut)
            raise Exception(tmpOut)
        # make event table
        self.tmpLog.info('making JobsEventTable')
        tmpStat,tmpOut = self.makeJobsEventTable()
        if not tmpStat:
            self.tmpLog.error(tmpOut)
            raise Exception(tmpOut)
        # print event status
        self.tmpLog.info('print event status')
        tmpStat,tmpOut = self.printEventStatus()
        # background thread periodically flushing dumps (helperFunction)
        self.tmpLog.info('Initialize Helper thread')
        helperThread = Yoda.HelperThread(self.tmpLog, self.helperFunction)
        helperThread.start()
        # main loop
        self.tmpLog.info('main loop')
        # NOTE(review): unused leftover variable (typo of 'dumpJobMetrics')
        time_dupmJobMetrics = time.time()
        while self.comm.activeRanks():
            #self.injectEvents()
            # get request
            self.tmpLog.info('waiting requests')
            tmpStat,method,params = self.comm.receiveRequest()
            self.tmpLog.debug("received request: (rank: %s, status: %s, method: %s, params: %s)" %(self.comm.getRequesterRank(),tmpStat,method,params))
            if not tmpStat:
                self.tmpLog.error(method)
                raise Exception(method)
            # execute the requested handler; apply() is Python 2 only
            self.tmpLog.debug('rank={0} method={1} param={2}'.format(self.comm.getRequesterRank(),
                              method,str(params)))
            if hasattr(self,method):
                methodObj = getattr(self,method)
                try:
                    apply(methodObj,[params])
                except:
                    # keep serving other ranks even if one handler fails
                    self.tmpLog.debug("Failed to run function %s: %s" % (method, traceback.format_exc()))
            else:
                self.tmpLog.error('unknown method={0} was requested from rank={1} '.format(method,
                                  self.comm.getRequesterRank()))
        # all ranks finished: drain remaining messages and do the final dumps
        helperThread.stop()
        self.flushMessages()
        #self.updateFailedEventRanges()
        self.updateEventRangesToDB(force=True)
        self.dumpJobMetrics()
        self.dumpJobsStartTime()
        # final dump
        #self.tmpLog.info('final dumping')
        #self.db.dumpUpdates(True)
        self.tmpLog.info("post Exec job")
        self.postExecJob()
        self.finishDroids()
        self.tmpLog.info('done')
# main
    def run(self):
        """Entry point: run the Yoda main loop, logging any failure.
        The bare except is deliberate so every exception type is logged
        before being re-raised to the caller.
        NOTE(review): 'Excpetion' typo lives in the runtime log string and is
        intentionally left unchanged here.
        """
        try:
            self.runYoda()
        except:
            self.tmpLog.info("Excpetion to run Yoda: %s" % traceback.format_exc())
            raise
    def flushMessages(self):
        """Drain and dispatch any requests still pending from active ranks.
        Same name-based dispatch as runYoda(), but without the per-request
        try/except: a failing handler aborts the flush.
        """
        self.tmpLog.info('flush messages')
        while self.comm.activeRanks():
            # get request
            tmpStat,method,params = self.comm.receiveRequest()
            self.tmpLog.debug("received request: (rank: %s, status: %s, method: %s, params: %s)" %(self.comm.getRequesterRank(),tmpStat,method,params))
            if not tmpStat:
                self.tmpLog.error(method)
                raise Exception(method)
            # execute the requested handler; apply() is Python 2 only
            self.tmpLog.debug('rank={0} method={1} param={2}'.format(self.comm.getRequesterRank(),
                              method,str(params)))
            if hasattr(self,method):
                methodObj = getattr(self,method)
                apply(methodObj,[params])
            else:
                self.tmpLog.error('unknown method={0} was requested from rank={1} '.format(method, self.comm.getRequesterRank()))
    def stopYoda(self, signum=None, frame=None):
        """Signal handler: notify Droids of the signal and dump all state.
        :param signum: received signal number (forwarded to the Droids)
        :param frame: current stack frame (unused; signal-handler signature)
        """
        self.tmpLog.info('stopYoda signal %s received' % signum)
        #signal.signal(signum, self.originSigHandler[signum])
        # make message
        res = {'StatusCode':0, 'State': 'signal', 'signum': signum}
        self.tmpLog.debug('res={0}'.format(str(res)))
        self.comm.sendMessage(res)
        self.dumpJobMetrics()
        # close out timestamps for jobs that never reported an end time or
        # still have ranks running at shutdown
        for jobId in self.jobsTimestamp:
            if self.jobsTimestamp[jobId]['endTime'] is None:
                self.jobsTimestamp[jobId]['endTime'] = time.time()
            if len(self.jobsRuningRanks[jobId]) > 0:
                self.jobsTimestamp[jobId]['endTime'] = time.time()
        self.dumpJobsStartTime()
        self.updateEventRangesToDB(force=True, final=True)
    def stop(self, signum=None, frame=None):
        """Signal handler: block the signal, dump all state and run the
        post-exec step before unblocking.
        :param signum: received signal number
        :param frame: current stack frame (unused; signal-handler signature)
        """
        self.tmpLog.info('stop signal %s received' % signum)
        # keep the signal from interrupting the dump sequence below
        block_sig(signum)
        signal.siginterrupt(signum, False)
        self.dumpJobMetrics()
        # close out timestamps for jobs that never reported an end time or
        # still have ranks running at shutdown
        for jobId in self.jobsTimestamp:
            if self.jobsTimestamp[jobId]['endTime'] is None:
                self.jobsTimestamp[jobId]['endTime'] = time.time()
            if len(self.jobsRuningRanks[jobId]) > 0:
                self.jobsTimestamp[jobId]['endTime'] = time.time()
        self.dumpJobsStartTime()
        #self.flushMessages()
        #self.updateFailedEventRanges()
        # final dump
        self.tmpLog.info('final dumping')
        self.updateEventRangesToDB(force=True, final=True)
        #self.db.dumpUpdates(True)
        self.tmpLog.info("post Exec job")
        self.postExecJob()
        self.tmpLog.info('stop')
        #signal.siginterrupt(signum, True)
        unblock_sig(signum)
    def getOutputs(self):
        """Placeholder for output collection; intentionally a no-op."""
        pass
    def __del_not_use__(self):
        """Disabled destructor (renamed from __del__ so it never runs).
        Kept for reference: it performed the same final flush/dump sequence
        as stop(), which proved unsafe during interpreter teardown.
        """
        self.tmpLog.info('__del__ function')
        self.flushMessages()
        self.updateFailedEventRanges()
        # final dump
        self.tmpLog.info('final dumping')
        self.updateEventRangesToDB(force=True, final=True)
        #self.db.dumpUpdates(True)
        self.tmpLog.info("post Exec job")
        self.postExecJob()
        self.tmpLog.info('__del__ function')
| StarcoderdataPython |
1685289 | import gym
import matplotlib
import torch
import numpy as np
from sac.model import GaussianPolicy, QNetwork, DeterministicPolicy
# from core.notebook_utils import animate
# from core.notebook_utils import gen_video
# Evaluation configuration for a pre-trained SAC Gaussian policy.
seed = 123456
hidden_size = 256
device = 'cpu'
# env_name = 'Hopper-v2'
env_name = 'Walker2d-v2'
# env_name = 'HalfCheetah'
model_path = './model_last.pt'
env = gym.make(env_name)
env.seed(seed)
# build the policy with the env's observation/action dimensions, then load
# the saved weights (checkpoint stores them under the 'Policy' key)
policy = GaussianPolicy(env.observation_space.shape[0], env.action_space.shape[0], hidden_size, env.action_space).to(device)
policy.load_state_dict(torch.load(model_path, map_location=torch.device(device))['Policy'])
def select_action(state, policy, device):
    """Sample a stochastic action from the policy for one observation.
    The observation is given a size-1 batch dimension before being fed to
    the policy; the sampled action is returned as a plain NumPy array.
    """
    batched = torch.FloatTensor(state).to(device).unsqueeze(0)
    action, _log_prob, _mean = policy.sample(batched)
    return action.detach().cpu().numpy()[0]
# Roll out one episode (up to 1000 steps) and accumulate rewards.
state = env.reset()
# NOTE(review): this adds a batch dim, but select_action() unsqueezes again,
# so the first policy call sees an extra leading dimension — verify intended
state = np.expand_dims(state, axis=0)
frames = []
rewards = []
for i in range(1000):
    # frame = env.render(mode='rgb_array')
    state, reward, done, info = env.step(select_action(state, policy, device))
    # frames.append(frame.copy())
    rewards.append(reward)
    if i % 100 == 0:
        print("Step: {}, reward: {}".format(i, reward) )
    if done:
        break
print('total reward: {}'.format(np.sum(rewards))) | StarcoderdataPython |
3390861 | <filename>08_multi_processing/mapPool.py<gh_stars>10-100
from multiprocessing import Pool
import time
def myTask(n):
    """Simulate a unit of work: sleep for n+2 seconds and return n+2."""
    duration = n + 2
    time.sleep(duration)
    return duration
def main():
    """Run myTask over a small workload on a 4-worker process pool,
    printing each result as soon as it completes (in any order)."""
    with Pool(4) as pool:
        for result in pool.imap_unordered(myTask, [1, 3, 2, 1]):
            print(result)
# Entry-point guard: required for multiprocessing on spawn-based platforms
# (Windows/macOS), where child interpreters re-import this module.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1682802 | <reponame>binti59/LV<filename>tests/test_transformers.py
# tests to apply to all transformers
import pytest
import tubular.base as base
import tubular.capping as capping
import tubular.dates as dates
import tubular.imputers as imputers
import tubular.mapping as mapping
import tubular.misc as misc
import tubular.nominal as nominal
import tubular.numeric as numeric
import tubular.strings as strings
import sklearn.base as b
class TestInit(object):
    """Generic smoke tests applied to every transformer in tubular:
    each instance must be printable and sklearn-clonable."""
    # NOTE: defined without 'self'/@staticmethod on purpose — it is called
    # once at class-body evaluation time by the parametrize decorators below,
    # where it behaves as a plain function.
    def ListOfTransformers():
        """List of transformers in tubular to be used in subsequent tests."""
        # one minimally-configured instance per transformer class; the
        # constructor arguments must match the tubular API exactly
        list_of_transformers = [
            base.BaseTransformer(columns=["a"]),
            base.DataFrameMethodTransformer(
                new_column_name="a", pd_method_name="sum", columns="b"
            ),
            capping.CappingTransformer(capping_values={"a": [0.1, 0.2]}),
            capping.OutOfRangeNullTransformer(capping_values={"a": [0.1, 0.2]}),
            dates.DateDiffLeapYearTransformer(
                column_lower="a", column_upper="b", new_column_name="c", drop_cols=True
            ),
            dates.DateDifferenceTransformer(
                column_lower="a", column_upper="b", new_column_name="c", units="D"
            ),
            dates.ToDatetimeTransformer(column="a", new_column_name="b"),
            dates.SeriesDtMethodTransformer(
                new_column_name="a", pd_method_name="month", column="b"
            ),
            dates.BetweenDatesTransformer(
                column_lower="a",
                column_upper="b",
                column_between="c",
                new_column_name="c",
            ),
            imputers.BaseImputer(),
            imputers.ArbitraryImputer(impute_value=1, columns="a"),
            imputers.MedianImputer(columns="a"),
            imputers.MeanImputer(columns="a"),
            imputers.ModeImputer(columns="a"),
            imputers.NearestMeanResponseImputer(response_column="a"),
            imputers.NullIndicator(columns="a"),
            mapping.BaseMappingTransformer(mappings={"a": {1: 2, 3: 4}}),
            mapping.BaseMappingTransformMixin(),
            mapping.MappingTransformer(mappings={"a": {1: 2, 3: 4}}),
            mapping.CrossColumnMappingTransformer(
                adjust_column="b", mappings={"a": {1: 2, 3: 4}}
            ),
            mapping.CrossColumnMultiplyTransformer(
                adjust_column="b", mappings={"a": {1: 2, 3: 4}}
            ),
            mapping.CrossColumnAddTransformer(
                adjust_column="b", mappings={"a": {1: 2, 3: 4}}
            ),
            misc.SetValueTransformer(columns="a", value=1),
            nominal.BaseNominalTransformer(),
            nominal.NominalToIntegerTransformer(columns="a"),
            nominal.GroupRareLevelsTransformer(columns="a"),
            nominal.MeanResponseTransformer(columns="a", response_column="b"),
            nominal.OrdinalEncoderTransformer(columns="a", response_column="b"),
            nominal.OneHotEncodingTransformer(columns="a"),
            numeric.LogTransformer(columns="a"),
            numeric.CutTransformer(column="a", new_column_name="b"),
            numeric.ScalingTransformer(columns="a", scaler_type="standard"),
            strings.SeriesStrMethodTransformer(
                new_column_name="a",
                pd_method_name="find",
                columns="b",
                pd_method_kwargs={"sub": "a"},
            ),
        ]
        return list_of_transformers
    @pytest.mark.parametrize("transformer", ListOfTransformers())
    def test_print(self, transformer):
        """
        Test that transformer can be printed.
        If an error is raised in this test it will not prevent the transformer from working correctly,
        but will stop other unit tests passing.
        """
        print(transformer)
    @pytest.mark.parametrize("transformer", ListOfTransformers())
    def test_clone(self, transformer):
        """
        Test that transformer can be used in sklearn.base.clone function.
        """
        b.clone(transformer)
| StarcoderdataPython |
36507 | from m5stack import *
from m5stack_ui import *
from uiflow import *
from ble import ble_uart
import face
# Screen and UI setup for an M5Stack "mouse wheel" BLE controller.
screen = M5Screen()
screen.clean_screen()
screen.set_screen_bg_color(0x000000)
# button toggle states (M/R/L), value to send, current mode, step size,
# previous encoder reading — all initialised in the startup block below
mb_click = None
rb_click = None
lb_click = None
snd_val = None
st_mode = None
stval = None
prval = None
# Faces encoder unit: rotary value + RGB LED feedback
faces_encode = face.get(face.ENCODE)
# status line and on-screen buttons (labels: mouse buttons, wheel axes, modes)
direction = M5Label('M5MouseWheel - Please dont touch for processing...', x=0, y=228, color=0xc7c7c7, font=FONT_MONT_12, parent=None)
LBtn = M5Btn(text='L', x=170, y=6, w=65, h=100, bg_c=0x000000, text_c=0xbcbcbc, font=FONT_UNICODE_24, parent=None)
RBtn = M5Btn(text='R', x=240, y=6, w=70, h=48, bg_c=0x000000, text_c=0xbebebe, font=FONT_UNICODE_24, parent=None)
d_w_x = M5Btn(text='WX', x=0, y=162, w=48, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
MBtn = M5Btn(text='M', x=240, y=58, w=70, h=48, bg_c=0x000000, text_c=0xbebebe, font=FONT_UNICODE_24, parent=None)
d_w_y = M5Btn(text='WY', x=52, y=162, w=48, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
b_step = M5Btn(text='STEP', x=0, y=6, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
d_y = M5Btn(text='Y', x=220, y=110, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_MONT_48, parent=None)
d_scr = M5Btn(text='SCR', x=0, y=110, w=100, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
d_x = M5Btn(text='X', x=110, y=110, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_MONT_48, parent=None)
# step-size readout used by the STEP mode
v_step = M5Label('1', x=121, y=38, color=0xc7c7c7, font=FONT_MONT_24, parent=None)
# Change Mode: reset the outgoing value and announce the new mode over BLE
def changeMode():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  snd_val = 0
  # message format: one mode letter followed by the value
  uart_ble.write((str(st_mode) + str(str(snd_val))))
  direction.set_text(str((str(st_mode) + str(str(snd_val)))))
# Reset Mode: clear the mode and un-highlight every mode button
def resetMode():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  st_mode = ''
  b_step.set_bg_color(0x000000)
  d_y.set_bg_color(0x000000)
  d_scr.set_bg_color(0x000000)
  d_w_x.set_bg_color(0x000000)
  d_w_y.set_bg_color(0x000000)
  d_x.set_bg_color(0x000000)
# Mouse-button callbacks: toggle the click state, send '<letter><0|1>' over
# BLE, and highlight the on-screen button while the state is 1.
def MBtn_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  mb_click = 0 if mb_click == 1 else 1
  uart_ble.write((str('M') + str(str(mb_click))))
  if mb_click == 1:
    MBtn.set_bg_color(0x666666)
  else:
    MBtn.set_bg_color(0x000000)
  direction.set_text(str((str('M') + str(str(mb_click)))))
  pass
MBtn.pressed(MBtn_pressed)
def LBtn_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  lb_click = 0 if lb_click == 1 else 1
  uart_ble.write((str('L') + str(str(lb_click))))
  if lb_click == 1:
    LBtn.set_bg_color(0x666666)
  else:
    LBtn.set_bg_color(0x000000)
  direction.set_text(str((str('L') + str(str(lb_click)))))
  pass
LBtn.pressed(LBtn_pressed)
def RBtn_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  rb_click = 0 if rb_click == 1 else 1
  uart_ble.write((str('R') + str(str(rb_click))))
  if rb_click == 1:
    RBtn.set_bg_color(0x666666)
  else:
    RBtn.set_bg_color(0x000000)
  direction.set_text(str((str('R') + str(str(rb_click)))))
  pass
RBtn.pressed(RBtn_pressed)
# Mode-button callbacks: each selects one encoder mode (T=step size, S=scroll,
# X/Y=pointer axes, U/V=wheel axes), highlights its button, sets a distinctive
# LED colour and notifies the peer via changeMode().
def b_step_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  if st_mode != 'T':
    resetMode()
    st_mode = 'T'
    b_step.set_bg_color(0x666666)
    faces_encode.setLed(0, 0xffffff)
    changeMode()
  pass
b_step.pressed(b_step_pressed)
def d_scr_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  if st_mode != 'S':
    resetMode()
    st_mode = 'S'
    d_scr.set_bg_color(0x666666)
    faces_encode.setLed(0, 0xff9900)
    changeMode()
  pass
d_scr.pressed(d_scr_pressed)
def d_x_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  if st_mode != 'X':
    resetMode()
    st_mode = 'X'
    d_x.set_bg_color(0x666666)
    faces_encode.setLed(0, 0xff0000)
    changeMode()
  pass
d_x.pressed(d_x_pressed)
def d_y_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  if st_mode != 'Y':
    resetMode()
    st_mode = 'Y'
    d_y.set_bg_color(0x666666)
    faces_encode.setLed(0, 0x3333ff)
    changeMode()
  pass
d_y.pressed(d_y_pressed)
def d_w_x_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  if st_mode != 'U':
    resetMode()
    st_mode = 'U'
    d_w_x.set_bg_color(0x666666)
    faces_encode.setLed(0, 0x33ff33)
    changeMode()
  pass
d_w_x.pressed(d_w_x_pressed)
def d_w_y_pressed():
  global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
  if st_mode != 'V':
    resetMode()
    st_mode = 'V'
    d_w_y.set_bg_color(0x666666)
    faces_encode.setLed(0, 0x00cccc)
    changeMode()
  pass
d_w_y.pressed(d_w_y_pressed)
# Startup: default to scroll mode ('S'), step size 1, advertise over BLE.
resetMode()
uart_ble = ble_uart.init('m5mw_01')
stval = 1
st_mode = 'S'
prval = faces_encode.getValue()
snd_val = 0
d_scr.set_bg_color(0x666666)
faces_encode.setLed(0, 0xff9900)
uart_ble.write((str(st_mode) + str(str(snd_val))))
direction.set_text(str((str(st_mode) + str(str(snd_val)))))
# Main loop: on every encoder change either adjust the step size (T mode)
# or accumulate delta*step and send '<mode><value>' over BLE.
while True:
  if (faces_encode.getValue()) != prval:
    if st_mode == 'T':
      stval = stval + ((faces_encode.getValue()) - prval)
      v_step.set_text(str(stval))
    else:
      snd_val = snd_val + ((faces_encode.getValue()) - prval) * stval
      uart_ble.write((str(st_mode) + str(str(snd_val))))
      direction.set_text(str((str(st_mode) + str(str(snd_val)))))
    prval = faces_encode.getValue()
  wait_ms(2)
3255268 | # Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``sim`` contains ``qubovert``'s simulation and annealing functionality.
See ``__all__`` for a list of uses.
"""
# import order here is important
from ._anneal_temperature_range import *
from ._anneal_results import *
from ._anneal import *
from ._anneal_temperature_range import __all__ as __all_tr__
from ._anneal_results import __all__ as __all_results__
from ._anneal import __all__ as __all_anneal__
# Public API: concatenation of the submodules' export lists, in import order.
__all__ = __all_tr__ + __all_results__ + __all_anneal__
# drop the temporary per-submodule lists from the package namespace
del __all_tr__, __all_results__, __all_anneal__
name = "sim"
3288151 | __author__ = '<NAME> <<EMAIL>>'
import unittest
from bitcodin import Subscription
from bitcodin import list_events
from bitcodin import create_subscription
from bitcodin import delete_subscription
from bitcodin.test.bitcodin_test_case import BitcodinTestCase
class DeleteSubscriptionTestCase(BitcodinTestCase):
    """Integration test: a subscription created for the 'encoding.finished'
    event can be deleted through the bitcodin API."""
    def setUp(self):
        super(DeleteSubscriptionTestCase, self).setUp()
        # pick the 'encoding.finished' event; raises StopIteration if absent
        event = next((event for event in list_events() if event.name == 'encoding.finished'))
        subscription = Subscription(event_id=event.id, url='http://www.example.com/')
        self.created_subscription = create_subscription(subscription)
    def runTest(self):
        # the assertion is implicit: delete_subscription raises on failure
        delete_subscription(self.created_subscription.id)
    def tearDown(self):
        super(DeleteSubscriptionTestCase, self).tearDown()
# allow running this test module directly with the unittest runner
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1654072 | """This module provides decorator/context manager solutions that prevent wrapped operations from finishing
before a given number of seconds has elapsed.
Useful if you're fairly confident that your function should finish in a certain amount of time,
but you want to make the return time constant (e.g. to prevent timing attacks).
For example, if we know that our password reset function consistently takes about 1 second to execute
if the given email address is valid but if the email address is invalid, it usually finishes much faster,
We can ensure that it always takes 2 seconds to execute whenever it's called.
This is less useful if your function's normal execution time is subject to a lot of jitter.
"""
import time
from timerutil.compat import (
ContextDecorator,
get_time
)
# Public API of the timerutil module
__all__ = [
    'ObservableWaiter',
    'StopWatch',
    'Waiter'
]
class Waiter(ContextDecorator):
    """Context manager/decorator which prevents an operation
    from finishing before a given number of seconds has elapsed.
    Usage as a decorator:
    .. code-block:: python
        @Waiter(10)
        def take_ten():
            print('Starting to wait')
        take_ten()
        # Ten seconds later...
        print('Done waiting!')
    Usage as a context manager:
    .. code-block:: python
        with Waiter(10):
            print("Starting to wait")
            # Ten seconds later...
        print("Done waiting!")
    """
    def __init__(self, minimum_time):
        """Initializes a Waiter
        :param minimum_time: The number of seconds that must elapse before the Waiter exits
        :type minimum_time: int, float
        """
        # Fixed: was super(ContextDecorator, self).__init__(), which skips
        # ContextDecorator in the MRO; the subclass must be the first argument
        # (matching ObservableWaiter below).
        super(Waiter, self).__init__()
        self.minimum_time = minimum_time
        self._start_time = None
    def __enter__(self):
        """Begins a countdown for the configured duration
        :return: This :class:`~Waiter` instance
        :rtype: Waiter
        """
        self._start_time = get_time()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Blocks until the configured duration has elapsed
        """
        remaining = self.minimum_time - (get_time() - self._start_time)
        # Only sleep when time actually remains, instead of relying on
        # time.sleep() raising ValueError for negative durations.
        if remaining > 0:
            try:
                time.sleep(remaining)
            except IOError:
                # Interrupted sleep (EINTR) on older Python versions:
                # best-effort, matching the previous behaviour.
                pass
class ObservableWaiter(Waiter):
    """A :class:`~Waiter` that additionally records timing statistics.

    Behaves exactly like its parent, but after every wrapped operation the
    following attributes are refreshed for inspection:

    :ivar last_runtime: Duration of the wrapped operation itself, in seconds.
    :vartype last_runtime: float
    :ivar last_elapsed: Total time the decorator/context manager was active,
        in seconds.  Always greater than or (rarely) equal to
        :attr:`~last_runtime`, since it includes the enforced wait.
    :vartype last_elapsed: float

    Usage as a decorator:

    .. code-block:: python

        ten_second_waiter = ObservableWaiter(10)

        @ten_second_waiter
        def take_ten():
            print('Starting to wait')

        take_ten()
        print('Call finished after', ten_second_waiter.last_runtime, 'seconds')
        print('Total time with waiter:', ten_second_waiter.last_elapsed, 'seconds')

    Usage as a context manager:

    .. code-block:: python

        with ObservableWaiter(10) as ten_second_waiter:
            print('Starting to wait')
        print('Done waiting after', ten_second_waiter.last_elapsed, 'seconds')
    """

    def __init__(self, minimum_time):
        """Initializes an ObservableWaiter with the given minimum duration."""
        super(ObservableWaiter, self).__init__(minimum_time)
        # No statistics exist until the first wrapped run completes.
        self.last_runtime = None
        self.last_elapsed = None

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Records timing statistics around the parent's enforced wait."""
        started = self._start_time
        # Approximate runtime of the wrapped operation, before any padding sleep.
        self.last_runtime = get_time() - started
        # Let the parent sleep out whatever remains of the minimum duration.
        super(ObservableWaiter, self).__exit__(exc_type, exc_val, exc_tb)
        # Total wall-clock time since ``__enter__`` began.
        self.last_elapsed = get_time() - started
class StopWatch(ObservableWaiter):
    """Context manager/decorator that only *observes* execution time.

    A stopwatch: it records how long wrapped operations take without
    enforcing any minimum duration.  Useful where :mod:`timeit` is
    impractical or overcomplicated.

    .. note:: Provided mostly for convenience (DRYness and readability);
        a correctly-configured :class:`ObservableWaiter` achieves the same
        behavior.  The statistics interface is identical to
        :class:`ObservableWaiter`'s -- the only effective difference is the
        zero minimum execution time.

    As a decorator:

    .. code-block:: python

        timer = StopWatch()

        @timer
        def watch_this():
            ...

        watch_this()
        logging.log(logging.INFO, 'Watched for %r seconds', timer.last_runtime)

    As a context manager:

    .. code-block:: python

        with StopWatch() as timer:
            ...
        logging.log(logging.INFO, 'Watched for %r seconds', timer.last_runtime)
    """

    def __init__(self):
        """Initializes a StopWatch for observing the execution time of wrapped operations"""
        super(StopWatch, self).__init__(0)

    def __setattr__(self, name, value):
        """Guards :attr:`minimum_time` so it can never become nonzero.

        :raises AttributeError: If :attr:`minimum_time` is being assigned
            any value other than zero
        """
        touches_minimum = (name == 'minimum_time')
        if touches_minimum and value != 0:
            raise AttributeError('minimum_time attribute is read-only')
        super(StopWatch, self).__setattr__(name, value)
| StarcoderdataPython |
4828021 | <reponame>eyalzek/gcpdiag<filename>gcpdiag/config.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Globals that will be potentially user-configurable in future."""
import os
import sys
from typing import Any, Dict
import appdirs
import yaml
# gcpdiag version (not configurable, but useful to have here)
VERSION = '0.53-test'
"""
Configuration properties are divided into 3 main categories:
- static (class properties) which values cannot be changed or provided
- options which values can be provided as command arguments
- options which values can be provided as yaml configuration
In addition yaml configuration can contains global configuration which will
be applied to all inspected projects or we can provide strict configuration
dedicated to particular project:
```
---
logging_fetch_max_time_seconds: 300
verbose: 3
within_days: 5
projects:
myproject:
billing_project: sample
include:
- '*BP*'
exclude:
- '*SEC*'
- '*ERR*'
include_extended: True
```
Yaml configuration defined per project takes precedence over global
configuration. Global configuration takes precedence over configuration
provided as a command arguments.
"""
#
# Static properties
#
# Default number of retries for API Calls.
API_RETRIES = 10
# Cache directory for diskcache.
CACHE_DIR = appdirs.user_cache_dir('gcpdiag')
# Number of seconds to wait for the gcpdiag.cache API cache lock to be freed.
CACHE_LOCK_TIMEOUT = 120
# How long to cache documents that rarely change (e.g. predefined IAM roles).
STATIC_DOCUMENTS_EXPIRY_SECONDS = 3600 * 24
# Prefetch worker threads
MAX_WORKERS = 10
# Private module state, populated by init(): normalized CLI arguments.
_args: Dict[str, Any] = {}
# Private module state, populated by init(): parsed YAML configuration file.
_config: Dict[str, Any] = {}
# Private module state, populated by init(): the project currently inspected.
_project_id: str = ''
# Built-in fallback values used by get() when a key appears neither in the
# YAML configuration nor in the CLI arguments.
_defaults: Dict[str, Any] = {
    'auth_adc': False,
    'auth_key': None,
    'auth_oauth': False,
    'billing_project': None,
    'show_skipped': False,
    'hide_ok': False,
    'include': None,
    'exclude': None,
    'include_extended': False,
    'verbose': 0,
    'within_days': 3,
    'hide_skipped': True,
    'show_ok': True,
    'logging_ratelimit_requests': 60,
    'logging_ratelimit_period_seconds': 60,
    'logging_page_size': 500,
    'logging_fetch_max_entries': 10000,
    'logging_fetch_max_time_seconds': 120,
}
#
# externally used methods
#
def init(args, project_id, is_cloud_shell=False):
    """Load configuration based on provided CLI args.

    Populates the private module state (``_args``, ``_config``,
    ``_project_id``).  If ``args`` carries a ``'config'`` path, the file is
    read and parsed as YAML into ``_config``; parse errors are reported on
    stderr and leave ``_config`` unchanged.

    Args:
      args (Dict): Configuration dictionary (may be None or empty).
      project_id (str): Current project id
      is_cloud_shell (bool, optional): Whether cloud shell is used. Defaults to False.
    """
    global _args
    global _config
    global _project_id
    _args = args if args else {}
    _args.update({'is_cloud_shell': is_cloud_shell})
    _project_id = project_id
    # BUG FIX: read from the normalized ``_args`` dict instead of ``args``.
    # The original called ``args.get(...)`` and raised AttributeError when
    # ``args`` was None, even though the fallback above explicitly allows it.
    file = _args.get('config', None)
    if file:
        # Read the file contents
        if os.path.exists(file):
            with open(file, encoding='utf-8') as f:
                content = f.read()
        else:
            content = None
        # Parse the content of the file as YAML
        if content:
            try:
                _config = yaml.safe_load(content)
            except yaml.YAMLError as err:
                print(f"ERROR: can't parse content of the file as YAML: {err}",
                      file=sys.stderr)
def get(key):
    """Resolve the configuration value for *key*.

    Precedence (highest wins): per-project YAML section, global YAML
    configuration, truthy CLI arguments, built-in defaults.

    Args:
      key (str): property key name

    Returns:
      Any: the resolved value, or None when the key is unknown
    """
    projects = _config.get('projects', {})
    if _project_id and _project_id in projects.keys():
        project_section = projects[_project_id]
        if key in project_section.keys():
            # Per-project configuration takes precedence over everything.
            return project_section[key]
    if key in _config:
        # Global YAML configuration.
        return _config[key]
    if _args.get(key):
        # CLI arguments count only when they carry a truthy value.
        return _args[key]
    # Fall back to the built-in defaults (None for unknown keys).
    return _defaults.get(key, None)
| StarcoderdataPython |
1754778 | import csv
from typing import List
from app.data_readers.trip_data import TripData
from app.data_readers.trip_data import Date
class FileReader:
    """Reads bike-trip records out of a CSV file and converts them to TripData."""

    def __init__(self, filename) -> None:
        self.filename = filename
        # Number of rows that failed to convert into TripData.
        self.unprocessed_count = 0

    def read(self) -> List[TripData]:
        """Parse the CSV file and return all successfully converted rows.

        Malformed rows are counted in ``unprocessed_count`` and reported,
        but do not abort the read.
        """
        trips = []
        with open(self.filename, 'r') as csv_file:
            for record in csv.DictReader(csv_file, delimiter=','):
                try:
                    trips.append(self._to_trip(record))
                except (ValueError, TypeError, KeyError):
                    self.unprocessed_count += 1
                    print("Ошибка чтения файлов")
        return trips

    @staticmethod
    def _to_trip(record) -> TripData:
        """Convert one CSV record (a dict) into a TripData instance."""
        return TripData(
            duration=int(record['Duration']),
            dates=Date(record['Start date'], record['End date']),
            start_st_number=int(record['Start station number']),
            start_station=record['Start station'],
            end_station_number=int(record['End station number']),
            end_station=record['End station'],
            bike_number=record['Bike number'],
            member_type=record['Member type'],
        )

    def get_unprocessed_data(self):
        """Return how many rows could not be parsed."""
        return self.unprocessed_count
#TODO: DELETE LINES 39-40
# def to_date(date_string):
# return datetime.datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S') | StarcoderdataPython |
67711 | # -*- coding: utf-8 -*-
import unittest
from openeo_udf.server.data_model.metadata_schema import MetadataModel
from openeo_udf.server.data_model.data_collection_schema import DataCollectionModel, ObjectCollectionModel, TimeStampsModel
from openeo_udf.server.data_model.model_example_creator import create_simple_feature_collection_model_example
__license__ = "Apache License, Version 2.0"
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Soeren Gebbert"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class DataCollectionModelWithSimpleFeaturesTest(unittest.TestCase):
    """Checks that a DataCollectionModel built around a simple feature collection serializes."""

    def test_data_cube_creation(self):
        """Assemble a full data collection from the example models and dump it as JSON."""
        metadata = MetadataModel(name="SimpleFeatureCollection",
                                 description="New collection of simple features",
                                 creator="Soeren",
                                 creation_time="2001-01-01T10:00:00",
                                 modification_time="2001-01-01T10:00:00",
                                 number_of_object_collections=1,
                                 number_of_geometries=3,
                                 number_of_variable_collections=1,
                                 number_of_time_stamps=1)
        feature_collection, variables, geometry = create_simple_feature_collection_model_example()
        object_collection = ObjectCollectionModel(data_cubes=[],
                                                  simple_feature_collections=[feature_collection])
        timestamps = TimeStampsModel(calendar="gregorian",
                                     intervals=[("2001-01-01T10:00:00", "2001-01-01T00:02:00")])
        collection = DataCollectionModel(metadata=metadata,
                                         object_collections=object_collection,
                                         geometry_collection=geometry,
                                         variables_collections=[variables],
                                         timestamps=timestamps)
        self.assertIsNotNone(collection.json())
        print(collection.json())
        print(collection.schema_json())
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
137770 | from .conv_head import ConvHead
from .latent_head import LatentHead
__all__ = [
'ConvHead',
'LatentHead',
] | StarcoderdataPython |
4823366 | <reponame>zkan/pysomtum-pythonic-code
# Educational file demonstrating Pythonic vs non-Pythonic idioms.
# BUG FIX: the original could not even be imported -- the ``if`` statements
# below had comment-only bodies (IndentationError) and ``name`` in section 2
# was never defined (NameError).  ``pass`` placeholders and a sample value
# make the snippets runnable while keeping the lesson intact.

# 1. Avoid comparing directly to `True`, `False`, or `None`
a = True
if a == False:  # anti-pattern: compare truthiness, not the literal
    pass  # do something
if a:  # good
    pass  # do something
if a is None:  # identity check is the right way to test for None
    pass  # do something
# 2. Avoid repeating variable name in compound if statement
name = 'Kan'  # sample value so the snippet below is runnable
if name == 'Kan' or name == 'Man' or name == 'Natty':  # anti-pattern
    pass  # do something
# # if name in ['Kan', 'Man', 'Natty']:
# #     # do something
# 3. Use `in` to iterate over iterable
# names = ['Kan', 'Man', 'Natty']
# for i in range(len(names)):
#     print(names[i])
# index = 0
# for name in names:
#     print(name)
#     index = index + 1
# 4. Use default parameter of `dict.get`
# persons = {
#     'name': 'Kan',
#     'company': 'Pronto'
# }
# if 'age' in persons:
#     print(persons['age'])
# else:
#     persons['age'] = 25
# persons.get('age', 25)
# 5. Use `enumerate` function in loops
# for idx, name in enumerate(names):
#     print(idx, name)
# 6. Use `_` for data that should be ignored
# data = (1, 2, 3, 4, 5)
# first = data[0]
# second = data[1]
# third = data[2]
# first, *second, third = data
# print(first, second, third)
# 7. Use (for) `else` after iterator is exhausted!
# checked = True
# spam = [True, True, True, False]
# for s in spam:
#     if not spam:
#         checked = False
#         break
# if checked:
#     print('all are spam')
# for s in spam:
#     if not spam:
#         checked = False
#         break
# else:
#     print('all are spam')
# 8. List comprehension to create a transformed list
# numbers = [1, 2, 3, 4, 5]
# results = []
# for n in numbers:
#     results.append(n * n)
# # -----------
# results = [n*n for n in numbers if n % 2 == 0]
# 9. Use context manager to ensure resources are managed
# f = open('filename', 'r')
# for row in f.readlines():
#     print(row)
# f.close()
# with open('filename', 'r') as f:
#     for row in f.readlines():
#         print(row)
# 10. Use generator to lazily load infinite sequences
def fibo(number):
    """Print the first *number* Fibonacci values, one per line."""
    prev, curr = 0, 1
    for _ in range(number):
        print(prev)
        prev, curr = curr, prev + curr
# fibo(10)
def fibo(number):
    """Lazily yield the first *number* Fibonacci values."""
    prev, curr = 0, 1
    for _ in range(number):
        yield prev
        prev, curr = curr, prev + curr

gen = fibo(10)
print(list(gen))
# print(next(gen))
# print(next(gen))
# print(next(gen))
# print(next(gen))
# def fibo(number):
# current_value, next_value = 0, 1
# for _ in range(number):
# print(current_value)
# current_value, next_value = next_value, current_value + next_value
# fibo(10)
# def fibo():
# current_value, next_value = 0, 1
# while True:
# yield current_value
# current_value, next_value = next_value, current_value + next_value
# gen = fibo()
# for _ in range(10):
# print(next(gen))
| StarcoderdataPython |
3344807 | import sys
import numpy as np
import torch
from layers.encoding import *
from layers.attention import *
import torch.nn as nn
class MMBiDAF(nn.Module):
    """
    The combination of the Bidirectional Attention Flow model and the Multimodal Attention Layer model.
    Follows a high-level structure inspired from the BiDAF implementation by <NAME>.
    - Embedding layer : Embed the text, audio and the video into suitable embeddings using Glove, MFCC and VGG respectively.
    - Encoder layer : Encode the embedded sequence.
    - Attention Flow layer : Apply the bidirectional attention mechanism for the multimodal data.
    - Modality aware encoding : Encode the modality aware sequence
    - Multimodal Attention : Apply the attention mechanism for the separate modality of data.
    - Output layer : Simple Softmax layer to generate the probability distribution over the textual data for extractive summary.
    Args:
        hidden_size (int) : Number of features in the hidden state at each layer.
        text_embedding_size (int) : Dimensionality of the pre-trained text embeddings (e.g. GloVe).
        audio_embedding_size (int) : Dimensionality of the audio features (e.g. MFCC).
        image_embedding_size (int) : Dimensionality of the image features (ResNet output).
        device : Torch device that intermediate decoder tensors are moved to.
        drop_prob (float) : Dropout probability.
        max_transcript_length (int) : Maximum number of transcript sentences (decoder mask width).
    """
    def __init__(self, hidden_size, text_embedding_size, audio_embedding_size, image_embedding_size, device, drop_prob=0., max_transcript_length=405):
        # Builds one highway-embedding + RNN-encoder stack per modality,
        # two BiDAF attention layers (text-audio, text-image), their
        # modality-aware encoders, and the multimodal attention decoder.
        super(MMBiDAF, self).__init__()
        self.device = device
        self.max_transcript_length = max_transcript_length
        self.emb = Embedding(embedding_size=text_embedding_size,
                             hidden_size=hidden_size,
                             drop_prob=drop_prob)
        self.a_emb = Embedding(embedding_size=audio_embedding_size,      # Since audio embedding size is not 300, we need another highway encoder layer
                               hidden_size=hidden_size,                  # and we cannot increase the hidden size beyond 100
                               drop_prob=drop_prob)
        self.i_emb = Embedding(embedding_size=image_embedding_size,      # Since image embedding size is not 300, we need another highway encoder layer
                               hidden_size=hidden_size,                  # and we cannot increase the hidden size beyond 100
                               drop_prob=drop_prob)
        self.text_enc = RNNEncoder(input_size=hidden_size,
                                   hidden_size=hidden_size,
                                   num_layers=1,
                                   drop_prob=drop_prob)
        self.audio_enc = RNNEncoder(input_size=hidden_size,
                                    hidden_size=hidden_size,
                                    num_layers=1,
                                    drop_prob=drop_prob)
        self.image_enc = RNNEncoder(input_size=hidden_size,
                                    hidden_size=hidden_size,
                                    num_layers=1,
                                    drop_prob=drop_prob)
        self.image_keyframes_emb = ImageEmbedding()
        self.bidaf_att_audio = BiDAFAttention(2*hidden_size,
                                              drop_prob=drop_prob)
        self.bidaf_att_image = BiDAFAttention(2*hidden_size,
                                              drop_prob=drop_prob)
        self.mod_t_a = RNNEncoder(input_size=8*hidden_size,
                                  hidden_size=hidden_size,
                                  num_layers=2,                 # changed the number of layers for decoder attention
                                  drop_prob=drop_prob)
        self.mod_t_i = RNNEncoder(input_size=8*hidden_size,
                                  hidden_size=hidden_size,
                                  num_layers=2,
                                  drop_prob=drop_prob)
        self.multimodal_att_decoder = MultimodalAttentionDecoder(text_embedding_size,
                                                                 hidden_size,
                                                                 max_transcript_length,
                                                                 num_layers=1)
    def get_mask(self, X, X_len):
        """Build a boolean padding mask for a batch of padded sequences.
        Entry [b, t] is True while position t lies inside the true
        (unpadded) length of sequence b, and False for padding positions.
        :param X: padded batch of shape (batch, max_len, ...)
        :param X_len: per-sequence true lengths, one entry per batch item
        :returns: boolean tensor of shape (batch, max_len) (on CPU; callers
            move it to the right device)
        """
        X_len = torch.LongTensor(X_len)
        maxlen = X.size(1)
        # Broadcast position indices [0..maxlen) against each sequence length.
        idx = torch.arange(maxlen).unsqueeze(0).expand(torch.Size(list(X.size())[:2]))
        len_expanded = X_len.unsqueeze(1).expand(torch.Size(list(X.size())[:2]))
        mask = idx < len_expanded
        return mask
    def forward(self, embedded_text, original_text_lengths, embedded_audio, original_audio_lengths, transformed_images, original_image_lengths, batch_target_indices, original_target_len, max_dec_len):
        """Run one full forward pass over a batch.
        Encodes each modality, applies text-audio and text-image BiDAF
        attention, then decodes, step by step, a distribution over the
        transcript sentences.  In training mode the decoder is
        teacher-forced with ``batch_target_indices``; in eval mode it feeds
        back its own greedy predictions for ``max_dec_len`` steps.
        Returns ``(out_distributions, loss)`` where ``out_distributions``
        has shape (batch_size, timesteps, max_transcript_length) and
        ``loss`` is the average NLL plus a weighted coverage penalty.
        NOTE(review): ``original_target_len`` is accepted but never used in
        this method -- confirm whether it can be dropped.
        """
        # --- Per-modality embedding and recurrent encoding ---
        text_emb = self.emb(embedded_text)          # (batch_size, num_sentences, hidden_size)
        # print("Highway Embedded text")
        text_encoded, _ = self.text_enc(text_emb, original_text_lengths)        # (batch_size, num_sentences, 2 * hidden_size)
        # print("Text encoding")
        audio_emb = self.a_emb(embedded_audio)      # (batch_size, num_audio_envelopes, hidden_size)
        # print("Highway Embedded Audio")
        audio_encoded, _ = self.audio_enc(audio_emb, original_audio_lengths)        # (batch_size, num_audio_envelopes, 2 * hidden_size)
        # print("Audio encoding")
        original_images_size = transformed_images.size()        # (batch_size, num_keyframes, num_channels, transformed_image_size, transformed_image_size)
        # Combine images across videos in a batch into a single dimension to be embedded by ResNet
        transformed_images = torch.reshape(transformed_images, (-1, transformed_images.size(2), transformed_images.size(3), transformed_images.size(4)))        # (batch_size * num_keyframes, num_channels, transformed_image_size, transformed_image_size)
        image_emb = self.image_keyframes_emb(transformed_images)        # (batch_size * num_keyframes, encoded_image_size=1000)
        # print("Resnet Image")
        image_emb = torch.reshape(image_emb, (original_images_size[0], original_images_size[1], -1))        # (batch_size, num_keyframes, 300)
        image_emb = self.i_emb(image_emb)       # (batch_size, num_keyframes, hidden_size)
        # print("Highway Image")
        image_encoded, _ = self.image_enc(image_emb, original_image_lengths)        # (batch_size, num_keyframes, 2 * hidden_size)
        # print("Image Encoding")
        # --- Padding masks for attention (one per modality) ---
        text_mask = self.get_mask(embedded_text, original_text_lengths)
        audio_mask = self.get_mask(embedded_audio, original_audio_lengths)
        image_mask = self.get_mask(image_emb, original_image_lengths)
        # Generate mask with size = max_transcript_length for the decoder
        text_mask_pad = torch.zeros(text_mask.size(0), self.max_transcript_length - text_mask.size(1))
        text_mask_pad = text_mask_pad.type(text_mask.type())
        decoder_mask = torch.cat((text_mask, text_mask_pad), dim=1)
        # Loading the tensors to device
        text_mask = text_mask.to(self.device)
        audio_mask = audio_mask.to(self.device)
        image_mask = image_mask.to(self.device)
        decoder_mask = decoder_mask.to(self.device)
        # --- Cross-modal BiDAF attention + modality-aware re-encoding ---
        text_audio_att = self.bidaf_att_audio(text_encoded, audio_encoded, text_mask, audio_mask)       # (batch_size, num_sentences, 8 * hidden_size)
        text_image_att = self.bidaf_att_image(text_encoded, image_encoded, text_mask, image_mask)       # (batch_size, num_sentences, 8 * hidden_size)
        mod_text_audio, text_audio_hidden = self.mod_t_a(text_audio_att, original_text_lengths)         # (batch_size, num_sentences, 2 * hidden_size
        mod_text_image, text_img_hidden = self.mod_t_i(text_image_att, original_text_lengths)           # (batch_size, num_sentences, 2 * hidden_size)
        # if hidden_gru is None:
        #     hidden_gru = self.multimodal_att_decoder.initHidden()
        #     hidden_gru, final_out, sentence_dist = self.multimodal_att_decoder(mod_text_audio, mod_text_image, hidden_gru, text_mask)       # (batch_size, num_sentences, )
        # else:
        #     hidden_gru, final_out, sentence_dist = self.multimodal_att_decoder(mod_text_audio, mod_text_image, hidden_gru, text_mask)
        # --- Decoder initial state: sum of both modality-aware encoder states ---
        decoder_hidden = (text_audio_hidden.sum(1) + text_img_hidden.sum(1)).unsqueeze(1)       # (batch_size, num_layers*num_dir, hidden_size)
        # decoder_hidden = decoder_hidden.transpose(0,1)        # To get the decoder input hidden state in required form
        decoder_cell_state = torch.zeros(1, text_emb.size(0), decoder_hidden.size(-1))      # (num_layer*num_dir, batch, hidden_size)
        decoder_input = torch.zeros(text_emb.size(0), 1, embedded_text.size(-1))        # (batch, num_dir*num_layers, embedding_size)
        coverage_vec = torch.zeros(text_emb.size(0), text_emb.size(1), 1)       # (batch_size, max_seq_len, 1)
        # Loading the tensors to the GPU
        decoder_hidden = decoder_hidden.to(self.device)
        decoder_cell_state = decoder_cell_state.to(self.device)
        decoder_input = decoder_input.to(self.device)
        coverage_vec = coverage_vec.to(self.device)
        # eps guards log(0); cov_loss_wt weights the coverage penalty.
        eps = 1e-12
        loss = 0
        cov_loss_wt = 1.0
        out_distributions = []
        if self.training:       # Teacher forcing
            # Each step consumes the *gold* previous sentence embedding and
            # accumulates NLL of the gold index plus a coverage penalty.
            for idx in range(batch_target_indices.size(1)):
                out_distribution, decoder_hidden, decoder_cell_state, att_cov_dist, coverage_vec = self.multimodal_att_decoder(decoder_input, decoder_hidden, decoder_cell_state, mod_text_audio, mod_text_image, coverage_vec, decoder_mask)
                decoder_input = list()
                for batch_idx in range(batch_target_indices.size(0)):
                    # Loss calculation
                    prob = out_distribution[batch_idx, int(batch_target_indices[batch_idx, idx])]
                    # print("Prob = {}".format(prob))
                    loss = loss + (-1 * torch.log(prob + eps))
                    # print("Loss = {}".format(loss))
                    decoder_input.append(embedded_text[batch_idx, int(batch_target_indices[batch_idx, idx])].unsqueeze(0))      # (1, embedding_size)
                decoder_input = torch.stack(decoder_input)      # (batch_size, 1, embedding_size)
                out_distributions.append(out_distribution)      # (max_timesteps, batch_size, total_max_len)
                coverage_loss = torch.sum(torch.min(att_cov_dist, coverage_vec))        # 1D tensor
                loss = loss + cov_loss_wt * coverage_loss       # adding the coverage loss to the model loss
            loss = loss / batch_target_indices.size(1)      # average loss for all the timesteps
        else:       # Evaluation time of the decoder
            # Greedy decoding: feed back the argmax sentence embedding while
            # still scoring NLL against the gold indices.
            for idx in range(max_dec_len):
                out_distribution, decoder_hidden, decoder_cell_state, att_cov_dist, coverage_vec = self.multimodal_att_decoder(decoder_input, decoder_hidden, decoder_cell_state, mod_text_audio, mod_text_image, coverage_vec, decoder_mask)
                _, max_prob_idx = torch.max(out_distribution, 1)
                decoder_input = list()
                for batch_idx in range(text_emb.size(0)):
                    # Loss calculation
                    prob = out_distribution[batch_idx, int(batch_target_indices[batch_idx, idx])]
                    # print("Prob = {}".format(prob))
                    loss = loss + (-1 * torch.log(prob + eps))
                    # print("Loss = {}".format(loss))
                    decoder_input.append(embedded_text[batch_idx, int(max_prob_idx[batch_idx])].unsqueeze(0))       # (1, embedding_size)
                decoder_input = torch.stack(decoder_input)      # (batch_size, 1, embedding_size)
                out_distributions.append(out_distribution)      # (max_timesteps, batch_size, total_max_len)
                coverage_loss = torch.sum(torch.min(att_cov_dist, coverage_vec))        # 1D tensor
                loss = loss + cov_loss_wt * coverage_loss       # adding the coverage loss to the model loss
            loss = loss / max_dec_len       # average loss for all the timesteps
        # print(out_distributions.size())
        # sys.exit()        # Debugging purpose
        # print(len(out_distributions))
        # print(out_distributions[0].size())
        out_distributions = torch.stack(out_distributions).transpose(0,1)       # (batch_size, max_timesteps, toal_max_len)
        return out_distributions, loss
| StarcoderdataPython |
3242454 | <gh_stars>1-10
# Simple Generator Function
def simpleGenerator():
    """Yield the integers 1, 2 and 3, one at a time."""
    for value in (1, 2, 3):
        yield value
# Drive the generator with the builtin ``next()``.  Calling the ``__next__``
# dunder directly (as the original did) is non-idiomatic, and the trailing
# semicolons were redundant.
x = simpleGenerator()
print(next(x))
print(next(x))
print(next(x))
| StarcoderdataPython |
4817615 | <filename>api/app/api/api_v1/endpoints/sources.py<gh_stars>10-100
from typing import Any, Dict, List
from asyncpg.exceptions import UniqueViolationError
from fastapi import APIRouter, HTTPException
from orm.exceptions import NoMatch
from starlette.status import (
HTTP_200_OK,
HTTP_201_CREATED,
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
)
from app.models import Source
from app.schemas import SourceCreate, SourceDB, SourceUpdate
router = APIRouter()
@router.get("/", response_model=List[SourceDB], status_code=HTTP_200_OK)
async def get_sources():
    """Return every known source; respond 404 when none exist."""
    sources = await Source.objects.all()
    if sources:
        return sources
    raise HTTPException(HTTP_404_NOT_FOUND, "No sources found")
@router.post("/", response_model=SourceDB, status_code=HTTP_201_CREATED)
async def add_source(payload: SourceCreate):
    """Create a new source from *payload*; duplicates are rejected with 400.

    Only the host part of the submitted URL is persisted.
    """
    try:
        created = await Source.objects.create(name=payload.name, url=payload.url.host)
    except UniqueViolationError as err:
        raise HTTPException(HTTP_400_BAD_REQUEST, "Source exists") from err
    return created
@router.get("/{source_id}/", response_model=SourceDB, status_code=HTTP_200_OK)
async def get_source(source_id: int):
    """Fetch a single source by id; respond 404 when it does not exist."""
    try:
        source = await Source.objects.get(id=source_id)
    except NoMatch as err:
        raise HTTPException(HTTP_404_NOT_FOUND, "Source not found") from err
    return source
@router.put("/{source_id}/", response_model=SourceDB, status_code=HTTP_200_OK)
async def update_source(source_id: int, payload: SourceUpdate):
    """Apply the non-null fields of *payload* to an existing source.

    Responds 404 (via get_source) when the source does not exist; returns
    the freshly re-fetched record.
    """
    source = await get_source(source_id)
    # Only fields explicitly supplied (non-None) are written.
    changed: Dict[str, Any] = {
        field: value for field, value in payload.dict().items() if value is not None
    }
    await source.update(**changed)
    return await get_source(source_id)
@router.delete("/{source_id}/", response_model=SourceDB, status_code=HTTP_200_OK)
async def remove_source(source_id: int):
    """Delete a source by id and echo the removed record (404 if absent)."""
    doomed = await get_source(source_id)
    await doomed.delete()
    return doomed
| StarcoderdataPython |
3322624 | <filename>azurefestorage.py
import os, uuid
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
import json
##python blob storage test
# Module-level side effect: the Azure connection string is read from
# credentials.json at import time (key: azure_storage.connectionstring).
# Importing this module therefore fails if the file is missing.
with open('credentials.json', 'r') as f:
    creds = json.load(f)
connect_str = creds["azure_storage"]["connectionstring"]
# print (connect_str)
def store_embedding(embedding, filename):
    """Upload *embedding* to the 'faceembeddings' container as blob *filename*.

    :param embedding: bytes-like object or stream accepted by upload_blob
    :param filename: blob name to store the data under
    NOTE(review): upload_blob raises if the blob already exists -- confirm
    whether overwrite semantics are wanted here.
    """
    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
    # The original also built a container client here that was never used;
    # only the blob client is needed for the upload.
    blob_client = blob_service_client.get_blob_client(
        container="faceembeddings", blob=filename)
    blob_client.upload_blob(embedding)
def get_embedding(filename):
    """Download blob *filename* from the 'faceembeddings' container.

    :param filename: blob name to fetch
    :return: the raw blob content as bytes
    """
    blob = BlobClient.from_connection_string(
        conn_str=connect_str,
        container_name="faceembeddings",
        blob_name=filename,
    )
    downloaded = blob.download_blob()
    return downloaded.content_as_bytes()
# blob_service_client = BlobServiceClient.from_connection_string(connect_str)
# container_name = "faceembeddings"
# container_client = blob_service_client.get_container_client("faceembeddings")
# blob_client = blob_service_client.get_blob_client(container=container_name, blob="test.txt")
# with open("test.txt", "rb") as data:
# blob_client.upload_blob(data)
# print("\nListing blobs...")
# # List the blobs in the container
# blob_list = container_client.list_blobs()
# for blob in blob_list:
# print("\t" + blob.name)
# bn = blob.name
# # with open("./t2.txt", "wb") as my_blob:
# # blob_data = blob.download_blob()
# # my_blob.writelines(blob_data.content_as_bytes())
# blob = BlobClient.from_connection_string(conn_str=connect_str, container_name="faceembeddings", blob_name=bn)
# # blob_data = blob.download_blob()
# # bd = str(blob_data.content_as_bytes().decode())
# # print(bd)
# with open("./BlockDestination.txt", "w") as my_blob:
# blob_data = blob.download_blob()
# # type(blob_data)
# my_blob.writelines(str(blob_data.content_as_bytes().decode()))
| StarcoderdataPython |
1608108 | from graphene_django import DjangoObjectType
from pnp_graphql.constants import MODEL_TYPE_ATTR
from pnp_graphql.utils.class_factory import class_factory
from pnp_graphql.utils.managers import get_enabled_app_models
class GraphQlTypeGenerator(object):
    """Builds graphene ``DjangoObjectType`` classes for the enabled app models."""

    @classmethod
    def get_models_for_typing(cls, *args, **kwargs):
        """Return the Django models that should receive a GraphQL type.

        :param args: accepted but currently unused
        :param kwargs: accepted but currently unused
        :return: list of Django model classes
        """
        return get_enabled_app_models()

    @classmethod
    def generate_query_types(cls, *args, **kwargs):
        """Create one GraphQL type per model and attach it to the model.

        Nothing is returned; each generated type class is stored on its
        model under the ``MODEL_TYPE_ATTR`` attribute so it can be looked
        up later straight from the model.
        """
        for model in cls.get_models_for_typing(*args, **kwargs):
            meta_class = class_factory(__class_name='Meta', model=model)
            type_class = class_factory(
                __class_name=f'{model.__name__}Type',
                base_classes=(DjangoObjectType,),
                Meta=meta_class,
            )
            # Setting attribute on the model so callers can access the type from it.
            setattr(model, MODEL_TYPE_ATTR, type_class)
from django.contrib import admin
from .models import post
# Expose the ``post`` model in the Django admin with the default ModelAdmin.
admin.site.register(post)
| StarcoderdataPython |
153426 | # Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
# This file is part of CBI Toolbox.
#
# CBI Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the 3-Clause BSD License.
#
# CBI Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# 3-Clause BSD License for more details.
#
# You should have received a copy of the 3-Clause BSD License along
# with CBI Toolbox. If not, see https://opensource.org/licenses/BSD-3-Clause.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import numpy as np
import json
from cbi_toolbox.reconstruct import psnr
# Script: compute PSNR (with 'mse' normalization) of several reconstruction
# variants against a reference phantom and dump the results to noise.json.
# Paths are rooted at the OVC_PATH environment variable (KeyError if unset).
nas = (30, 50, 80)
dnas = np.arange(10, 101, 5)
norm = 'mse'
path = os.environ['OVC_PATH']
npath = os.path.join(path, 'noise')
gpath = os.path.join(path, 'graph')
# Ground-truth phantom and the iradon baseline reconstruction.
ref = np.load(os.path.join(path, 'arrays', 'phantom.npy'))
radon = np.load(os.path.join(npath, 'iradon.npy'))
# Results keyed by method: fss/fps map na -> scalar, dc/fdc map na -> list over dna.
results = {
    'fss': {},
    'fps': {},
    'dc': {},
    'fdc': {},
}
fss_snr = results['fss']
fps_snr = results['fps']
dc_snr = results['dc']
fdc_snr = results['fdc']
results['radon'] = psnr(ref, radon, norm)
# Free each large array as soon as it has been scored to bound peak memory.
del radon
for na in nas:
    fss = np.load(os.path.join(npath, 'fssopt_{:03d}.npy'.format(na)))
    fss_snr[na] = psnr(ref, fss, norm)
    del fss
    fps = np.load(os.path.join(npath, 'fpsopt_{:03d}.npy'.format(na)))
    fps_snr[na] = psnr(ref, fps, norm)
    del fps
    dc_snr[na] = []
    fdc_snr[na] = []
    # Sweep the secondary parameter for the dc / fdc ('f' suffix) variants.
    for dna in dnas:
        dc = np.load(os.path.join(npath, '{:03d}_{:03d}.npy'.format(na, dna)))
        dc_snr[na].append(psnr(ref, dc, norm))
        fdc = np.load(os.path.join(
            npath, '{:03d}_{:03d}f.npy'.format(na, dna)))
        fdc_snr[na].append(psnr(ref, fdc, norm))
with open(os.path.join(gpath, 'noise.json'), 'w') as fp:
    json.dump(results, fp)
| StarcoderdataPython |
77982 | # Copyright 2021 Foundries.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aiohttp
import asyncio
import contextlib
import json
import logging
import signal
import zmq
import zmq.asyncio
from aiohttp import web
from asgiref.sync import sync_to_async
from conductor.core.models import PDUAgent
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from zmq.utils.strtypes import u
async def zmq_message_forward(app):
    """Pull JSON messages from the internal ZMQ socket and forward each one
    to the websocket of the agent named in its 'agent' field.
    Messages for agents that are not currently connected (no entry in
    app["agents"]) are silently dropped.
    NOTE(review): the first receive loop below can only be left through
    task cancellation (suppressed CancelledError); the signal-handler setup
    and the second, timeout-bounded loop therefore run only after this task
    has been cancelled once -- confirm this two-phase shutdown is intended.
    """
    logger = app["logger"]
    context = zmq.asyncio.Context()
    logger.info("Create pull socket at %r", settings.INTERNAL_ZMQ_SOCKET)
    pull = context.socket(zmq.PULL)
    pull.bind(settings.INTERNAL_ZMQ_SOCKET)
    async def forward_message(msg):
        # msg is a multipart frame list; frame 2 carries the JSON payload.
        data = [s.decode("utf-8") for s in msg]
        app["logger"].debug("Forwarding: %s", data[0])
        message = json.loads(data[2])
        agent_name = message["agent"]
        agent_ws = app["agents"].get(agent_name)
        if agent_ws is not None:
            await agent_ws.send_json(message)
            app["logger"].debug("Message: %s sent", data[0])
            app["logger"].debug("%s", message)
    with contextlib.suppress(asyncio.CancelledError):
        logger.info("waiting for events")
        while True:
            try:
                msg = await pull.recv_multipart()
                await forward_message(msg)
            except zmq.error.ZMQError as exc:
                # On socket errors, unbind the current endpoint and keep looping.
                logger.error("Received a ZMQ error: %s", exc)
                endpoint = u(pull.getsockopt(zmq.LAST_ENDPOINT))
                pull.unbind(endpoint)
    def signal_handler(*_):
        logger.debug("Exiting in a moment...")
    loop = asyncio.get_event_loop()
    loop.add_signal_handler(signal.SIGINT, signal_handler)
    loop.add_signal_handler(signal.SIGTERM, signal_handler)
    # Drain phase: keep forwarding until no message arrives within the timeout.
    while True:
        try:
            msg = await asyncio.wait_for(pull.recv_multipart(), settings.INTERNAL_ZMQ_TIMEOUT)
            await forward_message(msg)
        except zmq.error.ZMQError as exc:
            logger.error("Received a ZMQ error: %s", exc)
        except asyncio.TimeoutError:
            logger.info("Timing out")
            break
    pull.close(linger=1)
    context.term()
async def websocket_handler(request):
    """Handle an incoming agent websocket connection.

    Authenticates the agent via the ``Authorization: <scheme>: <token>``
    header, registers its websocket in ``request.app["agents"]``, marks the
    PDUAgent online, delivers any pending message stored on the agent, and
    keeps the connection open until the client disconnects.

    Returns the (possibly already-closed) WebSocketResponse.
    """
    logger = request.app["logger"]
    logger.info(f"connection from {request.remote}")
    ws = web.WebSocketResponse(autoping=True, heartbeat=59)
    logger.info("Prepare ws")
    await ws.prepare(request)
    logger.info("After prepare")
    # check if client authenticates properly
    auth = request.headers.get("Authorization")
    if auth:
        logger.debug("Received request with Authorization")
        # BUG FIX: guard the split so a malformed header (no ":") does not
        # raise an uncaught IndexError; an empty token simply fails lookup.
        parts = auth.split(":")
        token = parts[1].strip() if len(parts) > 1 else ""
        # BUG FIX: initialize agent so the finally block below cannot hit a
        # NameError when authentication fails (PDUAgent.DoesNotExist).
        agent = None
        try:
            agent = await sync_to_async(PDUAgent.objects.get, thread_sensitive=True)(token=token)
            logger.info(f"Agent {agent.name} connected")
            if agent.name in request.app["agents"]:
                # A connection for this agent already exists; inform the new
                # client, then take over the slot below.
                await ws.send_json({"error": "already logged in"})
            request.app["agents"][agent.name] = ws
            agent.state = PDUAgent.STATE_ONLINE
            await sync_to_async(agent.save, thread_sensitive=True)()
            if agent.message:
                # Deliver the message queued while the agent was offline.
                logger.debug(f"Sending msg {agent.message} to {agent.name}")
                await ws.send_json({"msg": agent.message})
                agent.message = None
                await sync_to_async(agent.save, thread_sensitive=True)()
            try:
                async for msg in ws:
                    logger.debug(f"Received websocket message from {agent.name}")
                    if msg.type == aiohttp.WSMsgType.ERROR:
                        logger.exception(ws.exception())
                    if msg.type == aiohttp.WSMsgType.CLOSE:
                        request.app["agents"].pop(agent.name)
                        logger.info(f"Removed {agent.name}")
            except asyncio.exceptions.CancelledError:
                if agent.name in request.app["agents"]:
                    request.app["agents"].pop(agent.name)
                    logger.info(f"Removed {agent.name} on exception")
        except PDUAgent.DoesNotExist:
            # ignore unauthorized request
            pass
        finally:
            # During shutdown on_shutdown() closes sockets and updates the
            # agent records itself, so skip the cleanup here.
            if not request.app["in_shutdown"]:
                await ws.close()
                if agent is not None:
                    logger.info(f"connection closed from {request.remote}({agent.name})")
                    if agent.name in request.app["agents"]:
                        request.app["agents"].pop(agent.name)
                    agent.state = PDUAgent.STATE_OFFLINE
                    await sync_to_async(agent.save, thread_sensitive=True)()
    return ws
async def on_shutdown(app):
    """aiohttp shutdown hook: close every agent websocket and mark the
    corresponding PDUAgent records offline in the database."""
    logger = app["logger"]
    # Tell websocket_handler's cleanup code to stand down; this hook owns
    # the shutdown cleanup.
    app["in_shutdown"] = True
    for name, ws in app["agents"].items():
        logger.debug(name)
        await ws.close(code=aiohttp.WSCloseCode.GOING_AWAY, message="Server shutdown")
        try:
            agent = await sync_to_async(PDUAgent.objects.get, thread_sensitive=True)(name=name)
            agent.state = PDUAgent.STATE_OFFLINE
            await sync_to_async(agent.save, thread_sensitive=True)()
            logger.debug("Turning %s offline" % name)
        except PDUAgent.DoesNotExist:
            # The DB record vanished while the socket was open; nothing to do.
            pass
async def on_startup(app):
    """aiohttp startup hook: launch the ZMQ-to-websocket forwarding task
    and keep a reference to it on the app so it is not garbage collected."""
    app["zmq"] = asyncio.create_task(zmq_message_forward(app))
class Command(BaseCommand):
    """Django management command that runs the agent websocket server."""

    help = "Runs websocket server for agents"

    def add_arguments(self, parser):
        """Register the extra CLI options for this command."""
        super().add_arguments(parser)
        parser.add_argument("--host", default="*", help="Hostname")
        parser.add_argument("--port", default=8001, type=int, help="Port")
        parser.add_argument("--logfile", default="-", help="Path to logfile")

    def handle(self, *args, **options):
        """Configure logging, assemble the aiohttp application and run it.

        Blocks until the server is stopped.
        """
        self.logger = logging.getLogger("pduserver")
        # Default to DEBUG; Django's --verbosity lowers it (3 keeps DEBUG).
        self.logger.setLevel(logging.DEBUG)
        if options["verbosity"] == 0:
            self.logger.setLevel(logging.ERROR)
        elif options["verbosity"] == 1:
            self.logger.setLevel(logging.WARN)
        elif options["verbosity"] == 2:
            self.logger.setLevel(logging.INFO)
        if options["logfile"] != "-":
            # BUG FIX: "logging.handlers" is a submodule that a plain
            # "import logging" does not load; import it explicitly so
            # WatchedFileHandler does not raise AttributeError.
            import logging.handlers
            handler = logging.handlers.WatchedFileHandler(options["logfile"])
            self.logger.addHandler(handler)
        self.logger.info("Starting pduserver")
        self.logger.debug("Debug enabled")
        # Create the aiohttp application
        app = web.Application()
        # Shared state used by the handlers.
        app["logger"] = self.logger
        app["agents"] = {}
        app["in_shutdown"] = False
        # Routes
        app.add_routes([web.get("/ws/", websocket_handler)])
        # Lifecycle signals
        app.on_shutdown.append(on_shutdown)
        app.on_startup.append(on_startup)
        # Run the application (blocking).
        self.logger.info(
            "Listening on http://%s:%d", options["host"], options["port"]
        )
        web.run_app(app, host=options["host"], port=options["port"], print=False)
| StarcoderdataPython |
1764592 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""An example of customizing PPO to leverage a centralized critic with an imitation loss"""
import argparse
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import try_import_tf
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from ray.rllib.agents.ppo.ppo_policy import LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.evaluation.postprocessing import Postprocessing
import tensorflow as tf
from flow.agents.custom_ppo import AttributeMixin, CustomPPOTrainer
from flow.agents.ImitationPPO import update_kl, ImitationLearningRateSchedule, imitation_loss, loss_stats
from flow.agents.centralized_PPO import CentralizedValueMixin, \
centralized_critic_postprocessing, loss_with_central_critic
# Command-line options used when this module is run as a script.
parser = argparse.ArgumentParser()
parser.add_argument("--stop", type=int, default=100000)
def new_ppo_surrogate_loss(policy, model, dist_class, train_batch):
    """Combined PPO + imitation loss.

    Computes the centralized-critic PPO surrogate loss and an imitation
    loss, then returns their weighted sum.  The imitation loss is also
    stashed on the policy so the stats functions can report it.
    `policy_weight` / `imitation_weight` are presumably attributes set by
    ImitationLearningRateSchedule — TODO confirm.
    """
    policy.imitation_loss = imitation_loss(policy, model, dist_class, train_batch)
    loss = loss_with_central_critic(policy, model, dist_class, train_batch)
    return policy.policy_weight * loss + policy.imitation_weight * policy.imitation_loss
def setup_mixins(policy, obs_space, action_space, config):
    """Initialize all mixins before the loss graph is built.

    Runs as the policy's `before_loss_init` hook, so every attribute the
    loss reads (kl_coeff, entropy coeff, learning rates, imitation weight)
    must be created here first.
    """
    AttributeMixin.__init__(policy, config)
    KLCoeffMixin.__init__(policy, config)
    EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
                                  config["entropy_coeff_schedule"])
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
    ImitationLearningRateSchedule.__init__(policy, config["model"]["custom_options"]["num_imitation_iters"],
                                           config["model"]["custom_options"]["imitation_weight"], config)
    # hack: put in a noop VF so some of the inherited PPO code runs
    policy.value_function = tf.zeros(
        tf.shape(policy.get_placeholder(SampleBatch.CUR_OBS))[0])
def grad_stats(policy, train_batch, grads):
    """Gradient statistics reported to the trainer: global gradient norm
    and explained variance of the centralized value function."""
    return {
        "grad_gnorm": tf.global_norm(grads),
        "vf_explained_var": explained_variance(
            train_batch[Postprocessing.VALUE_TARGETS],
            policy.central_value_function),
    }
# PPO policy variant with a centralized critic plus an imitation loss term.
ImitationCentralizedPolicy = PPOTFPolicy.with_updates(
    name="ImitationCentralizedPolicy",
    before_loss_init=setup_mixins,
    postprocess_fn=centralized_critic_postprocessing,
    stats_fn=loss_stats,
    grad_stats_fn=grad_stats,
    loss_fn=new_ppo_surrogate_loss,
    mixins=[
        LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin,
        CentralizedValueMixin, ImitationLearningRateSchedule
    ])

# Trainer wrapping the policy above; refreshes the KL coefficient after
# every optimizer step.
ImitationCentralizedTrainer = CustomPPOTrainer.with_updates(name="ImitationCentralizedPPOTrainer",
                                                            default_policy=ImitationCentralizedPolicy,
                                                            after_optimizer_step=update_kl)
1674215 | from odoo import models, fields, api
from odoo import exceptions
import logging
_logger = logging.getLogger(__name__)
class TodoWizard(models.TransientModel):
    """Wizard for mass-updating To-do tasks (deadline and/or responsible)."""
    _name = 'todo.wizard'
    _description = 'To-do Mass Assignment'

    task_ids = fields.Many2many('todo.task', string='Tasks')
    new_deadline = fields.Date('Set Deadline')
    new_user_id = fields.Many2one('res.users', string='Set Responsible')

    @api.multi
    def do_mass_update(self):
        """Write the chosen deadline and/or responsible on all selected
        tasks.  Raises ValidationError when neither field was filled in."""
        self.ensure_one()
        if not self.new_deadline and not self.new_user_id:
            raise exceptions.ValidationError('No data to update!')
        _logger.debug('Mass update on Todo Tasks %s' % self.task_ids)
        # Values to Write
        vals = {}
        if self.new_deadline:
            vals['date_deadline'] = self.new_deadline
        if self.new_user_id:
            # NOTE(review): write() on a many2one normally expects a
            # database id — confirm whether self.new_user_id.id is needed.
            vals['user_id'] = self.new_user_id
        # Mass write values on all selected tasks
        if vals:
            self.task_ids.write(vals)
        return True

    @api.multi
    def do_count_tasks(self):
        """Display the number of open tasks (Warning is used as a popup)."""
        Task = self.env['todo.task']
        count = Task.search_count([('is_done', '=', False)])
        raise exceptions.Warning('Counted %d to-do tasks.' % count)

    @api.multi
    def _reopen_form(self):
        """Return the window action that reopens this same wizard record."""
        self.ensure_one()
        action = {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'res_id': self.id,
            'view_type': 'form',
            'view_mode': 'form',
            'target': 'new',
        }
        return action

    @api.multi
    def do_populate_tasks(self):
        """Fill task_ids with every task that is not done yet and reopen
        the wizard form."""
        # BUG FIX: removed a leftover "import pudb; pudb.set_trace()"
        # debugger breakpoint that froze the server process on every call
        # and silently required the third-party pudb package.
        self.ensure_one()
        Task = self.env['todo.task']
        open_tasks = Task.search([('is_done', '=', False)])
        self.task_ids = open_tasks
        # reopen wizard form on same wizard record
        return self._reopen_form()
| StarcoderdataPython |
17690 | """
Driver class for Hagisonic Stargazer, with no ROS dependencies.
"""
from serial import Serial
from collections import deque
import re
import yaml
import time
import logging
import rospy
import numpy as np
from threading import Thread, Event
from tf import transformations
# Framing characters of the StarGazer RS-232 message protocol.
# STX: char that represents the start of a properly formed message
STX = '~'
# ETX: char that represents the end of a properly formed message
ETX = '`'
# DELIM: char that splits data fields inside a message
DELIM = '|'
# CMD: char that indicates a command sent to the device
CMD = '#'
# RESPONSE: char that indicates the device's acknowledgement of a command
RESPONSE = '!'
# RESULT: char that indicates that the message contains result (pose) data
RESULT = '^'
# NOTIFY: char that indicates a notification message of some kind
NOTIFY = '*'
class StarGazer(object):
    """Driver for a Hagisonic StarGazer marker-based localization sensor."""

    def __init__(self, device, marker_map, callback_global=None, callback_local=None, callback_raw=None, callback_raw_reponse=None):
        """
        Connect to a Hagisonic StarGazer device and receive poses.
        device: The device location for the serial connection.
        marker_map: dictionary of marker transforms, formatted:
                    {marker_id: (4,4) matrix}
        callback_global: will be called whenever a new pose is received from the
                         Stargazer, will be called with (n,4,4) matrix of poses
                         of the location of the Stargazer in the global frame.
                         These are computed from marker_map.
        callback_local: will be called whenever a new poses is received from the
                        Stargazer, with a dict: {marker_id: [xyz, angle]}
        callback_raw: called with a list of raw [id, x, y, z, yaw] readings.
        callback_raw_reponse: called with every raw command-response string.
        """
        self.device = device
        self.marker_map = marker_map
        self.connection = None
        # chunk_size: how many characters to read from the serial bus in
        # between checking the buffer for the STX/ETX characters
        self._chunk_size = 80
        self._callback_global = callback_global
        self._callback_local = callback_local
        self._callback_raw = callback_raw
        self._callback_raw_reponse = callback_raw_reponse
        self._stopped = Event()
        self._thread = None

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, type, value, traceback):
        if self.is_connected:
            self.disconnect()

    @property
    def is_connected(self):
        """
        Returns whether the driver is currently connected to a serial port.
        """
        return self.connection is not None

    def connect(self):
        """
        Connect to the StarGazer over the specified RS-232 port.
        """
        if self.is_connected:
            self.disconnect()
        self.connection = Serial(port=self.device, baudrate=115200, timeout=1.0)
        if self.connection is None:
            return False
        return True

    def disconnect(self):
        """
        Disconnects from the StarGazer and closes the RS-232 port.
        """
        if self.is_connected:
            self.connection.close()
            self.connection = None
        if self.connection is None:
            return True
        return False

    @property
    def is_streaming(self):
        """
        Returns whether the driver is currently streaming pose data.
        """
        return self._thread is not None

    def start_streaming(self):
        """
        Begin streaming pose data from the StarGazer.
        """
        assert self.is_connected and not self.is_streaming
        success = self._send_command('CalcStart')
        if success:
            # BUG FIX: Thread.start() returns None, so the original
            # "self._thread = Thread(...).start()" always left _thread as
            # None, which broke is_streaming and stop_streaming's join().
            self._thread = Thread(target=self._read, args=())
            self._thread.start()
        return success

    def stop_streaming(self):
        """
        Stop streaming pose data from the StarGazer.
        """
        assert self.is_connected
        if self.is_streaming:
            self._stopped.set()
            self._thread.join()
            # Reset state so streaming can be started again later.
            self._thread = None
            self._stopped.clear()
        success = self._send_command('CalcStop')
        return success

    def reset_parameters(self):
        """
        Reset the StarGazer's parameters to factory defaults.
        """
        assert self.is_connected and not self.is_streaming
        success = self._send_command('Reset')
        return success

    def set_parameter(self, name, value):
        """
        Set a StarGazer configuration parameter.
        This function can only be called while the StarGazer is
        connected, but not streaming.
        Arguments
        ---------
        name:  string name of the parameter to set
        value: string value of the parameter to set
        Example
        -------
        set_parameter('MarkType', 'HLD1L')
        """
        assert self.is_connected and not self.is_streaming
        success = self._send_command(name, value)
        return success

    def get_parameter(self, name):
        # Not implemented in this driver version.
        pass

    def _send_command(self, *args):
        """
        Send a command to the StarGazer.
        Arguments
        ---------
        command: string, or list. If string of single command, send just that.
                 if list, reformat to add delimiter character
        Example
        -------
        _send_command('CalcStop')
        _send_command('MarkType', 'HLD1L')
        """
        success = True
        delimited = DELIM.join(str(i) for i in args)
        # 'SetEnd' is always sent bare, without parameters.
        if 'SetEnd' in delimited:
            delimited = 'SetEnd'
        command_str = STX + CMD + delimited + ETX
        rospy.loginfo('Sending command to StarGazer: %s', command_str)
        # The StarGazer requires a 50 ms delay between each byte.
        for ch in command_str:
            self.connection.write(ch)
            time.sleep(0.05)
        response_expected = STX + RESPONSE + delimited + ETX
        success = self._read_response(response_expected)
        # Parameter writes are followed by a ParameterUpdate notification.
        if success and ('SetEnd' in response_expected):
            response_expected = STX + RESPONSE + 'ParameterUpdate' + ETX
            time.sleep(1.0)
            success = self._read_response(response_expected)
            if(success):
                rospy.loginfo('Parameters update successful')
        return success

    def _read_response(self, response_expected):
        """
        Read from the serial port until ``response_expected`` is matched.
        Returns True on a match, False on serial errors or an invalid
        response.
        """
        success = True
        try:
            response_actual = self.connection.read(len(response_expected))
        except Exception as e:
            rospy.logwarn(str(e))
            # BUG FIX: this was "sucess = False" (typo), which left the
            # real "success" flag True and reported serial errors as OK.
            success = False
            return success
        # Scan for more incoming characters until we get a read timeout.
        # (This is useful if there is still some incoming data from previous
        # commands in intermediate serial buffers.)
        while response_actual[-len(response_expected):] != response_expected:
            c = None
            try:
                c = self.connection.read()
            except Exception as e:
                rospy.logwarn(str(e))
                return success
            if c:
                # Add new characters to the response string.
                response_actual += c
            else:
                rospy.logwarn('Received invalid response {%s} expected "{%s}' % \
                              (response_actual, response_expected))
                success = False
                break
        # BUG FIX: replaced a stray Python-2 "print response_actual" debug
        # statement with a log call consistent with the rest of the driver.
        rospy.logdebug('StarGazer response: %s', response_actual)
        if self._callback_raw_reponse:
            self._callback_raw_reponse(response_actual)
        return success

    def _read(self):
        """
        Read from the serial connection to the StarGazer, process buffer,
        then execute callbacks.  Runs in a background thread until
        self._stopped is set or the connection goes away.
        """
        # Compute a regular expression that returns the last valid
        # message in a StarGazer stream.
        msg_pattern = ('.*' + STX + '(?P<type>.)(?P<payload>.+)' + ETX +
                       '(?P<remainder>.*)$')
        msg_matcher = re.compile(msg_pattern)
        # Compute a regular expression that converts a StarGazer message
        # into a list of tuples containing parsed groups.
        delimiter = '\\' + DELIM
        number = '[\d\+\-\.]'
        tag_pattern = (r'(?P<id>\d+)' + delimiter +
                       r'(?P<yaw>' + number + '+)' + delimiter +
                       r'(?P<x>' + number + '+)' + delimiter +
                       r'(?P<y>' + number + '+)' + delimiter +
                       r'(?P<z>' + number + '+)')
        tag_matcher = re.compile(tag_pattern)

        def process_buffer(message_buffer):
            """
            Looks at current message_buffer string for STX and ETX chars.
            Proper behavior is to process string found between STX/ETX for poses
            and remove everything in the buffer up the last observed ETX.
            Valid readings:
                ~^148|-175.91|+98.74|+7.10|182.39`
                ~^248|-176.67|+98.38|+8.39|181.91|370|-178.41|-37.05|+8.97|179.51`
            No valid readings:
                ~*DeadZone`
            """
            # Look for a matching message, return the buffer if none are found.
            message = msg_matcher.match(message_buffer)
            if not message:
                return message_buffer
            if message.group('type') == RESULT:
                markers = tag_matcher.finditer(message.group('payload'))
                local_poses = {}
                raw_poses = []
                for marker in markers:
                    # Parse pose information for this marker.
                    _id = marker.group('id')
                    yaw = -np.radians(float(marker.group('yaw')))
                    x = 0.01 * float(marker.group('x'))
                    y = 0.01 * float(marker.group('y'))
                    # Note: this axis is negated.
                    z = 0.0  # -0.01 * float(marker.group('z'))
                    raw_pose = [_id, x, y, 0, -yaw]
                    raw_poses.append(raw_pose)
                    # Convert the pose to a transform and store it by ID.
                    marker_to_stargazer = fourdof_to_matrix((x, y, z), yaw)
                    local_poses[_id] = np.linalg.inv(marker_to_stargazer)
                if self._callback_raw:
                    self._callback_raw(raw_poses)
                if self._callback_local:
                    self._callback_local(local_poses)
                if self._callback_global:
                    global_poses, unknown_ids = local_to_global(self.marker_map,
                                                                local_poses)
                    self._callback_global(global_poses, unknown_ids)
            elif message.group('type') == NOTIFY:
                # TODO: Report deadzone messages in here!
                pass
            else:
                pass
            # Return the rest of the message buffer.
            return message.group('remainder')

        rospy.loginfo('Entering read loop.')
        message_buffer = ''
        while not self._stopped.is_set() and self.connection:
            try:
                message_buffer += self.connection.read(self._chunk_size)
                message_buffer = process_buffer(message_buffer)
            except Exception as e:
                rospy.logwarn('Error processing current buffer: %s (content: "%s")',
                              str(e), message_buffer
                              )
                # BUG FIX: removed a leftover "break  # For debugging
                # purposes." which killed the read thread on the first
                # malformed chunk; now the buffer is reset and streaming
                # continues.
                message_buffer = ''
        rospy.loginfo('Exited read loop.')

    def close(self):
        """Stop calculation, terminate the read loop and close the port."""
        self._stopped.set()
        self._send_command('CalcStop')
        self.connection.close()
def local_to_global(marker_map, local_poses):
    """
    Transform local marker coordinates to map coordinates.

    marker_map:  {marker_id: (4,4) marker-to-map transform}
    local_poses: {marker_id: (4,4) stargazer-to-marker transform inverse,
                  as produced by the read loop}
    Returns (global_poses, unknown_ids): poses of the StarGazer in the map
    frame for known markers, and the set of marker ids missing from
    marker_map.
    """
    global_poses = dict()
    unknown_ids = set()
    # PORTABILITY FIX: dict.iteritems() is Python-2 only; items() behaves
    # identically here and also works on Python 3.
    for _id, pose in local_poses.items():
        if _id in marker_map:
            marker_to_map = marker_map[_id]
            local_to_marker = np.linalg.inv(pose)
            local_to_map = np.dot(marker_to_map, local_to_marker)
            global_poses[_id] = local_to_map
        else:
            unknown_ids.add(_id)
    return global_poses, unknown_ids
def fourdof_to_matrix(translation, yaw):
    """
    Convert from a Cartesian translation and yaw to a homogeneous transform.

    translation: (x, y, z) translation in meters.
    yaw: rotation about the +Z axis in radians.
    Returns a (4,4) homogeneous transformation matrix.
    """
    # Rotation about Z, then the translation written into the last column.
    T = transformations.rotation_matrix(yaw, [0,0,1])
    T[0:3,3] = translation
    return T
def _callback_dummy(data):
    # No-op placeholder callback.
    return

def _callback_print(data):
    # Debug callback that echoes the payload to stdout.
    print(data)
| StarcoderdataPython |
1731337 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by <NAME>.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
import os, glob
# Internal modules #
import autopaths
# Constants #
if os.name == "posix": sep = "/"
if os.name == "nt": sep = "\\"
################################################################################
class BasePath(str):
    """
    This object contains methods that are common to both FilePath objects
    and DirectoryPath objects.
    """

    def __repr__(self):
        return '<%s object "%s">' % (self.__class__.__name__, self.path)

    @classmethod
    def clean_path(cls, path):
        """Given a path, return a cleaned up version for initialization."""
        # Conserve 'None' object style #
        if path is None: return None
        # Don't nest BasePaths object or the like #
        if hasattr(path, 'path'): path = path.path
        # Expand the tilda #
        if "~" in path: path = os.path.expanduser(path)
        # We will store the path with the OS specific separator #
        # We will never mix both kinds of separators #
        if os.name == "posix": path = path.replace("\\", sep)
        if os.name == "nt":    path = path.replace("/", sep)
        # Expand star #
        if "*" in path:
            matches = glob.glob(path)
            if len(matches) < 1:
                raise Exception("Found exactly no paths matching '%s'" % path)
            if len(matches) > 1:
                raise Exception("Found several paths matching '%s'" % path)
            path = matches[0]
        # Our standard is to end with a slash for directories #
        if cls is autopaths.dir_path.DirectoryPath:
            if not path.endswith(sep):
                path += sep
        # Return the result #
        return path

    def __new__(cls, path, *args, **kwargs):
        """A Path object is in fact a string."""
        return str.__new__(cls, cls.clean_path(path))

    def __init__(self, path):
        self.path = self.clean_path(path)

    def __add__(self, other):
        # Normalize the separators of the appended fragment, then decide
        # whether the result is a directory (trailing sep) or a file.
        if os.name == "posix": other = other.replace("\\", sep)
        if os.name == "nt":    other = other.replace("/", sep)
        if other.endswith(sep):
            return autopaths.dir_path.DirectoryPath(self.path + other)
        else:
            return autopaths.file_path.FilePath(self.path + other)

    # ------------------------------ Properties ----------------------------- #
    @property
    def short_prefix(self):
        """Just the filename without any extension or periods."""
        return self.name.split('.')[0]

    @property
    def extension(self):
        """The extension with the leading period."""
        return os.path.splitext(self.path)[1]

    @property
    def escaped(self):
        """
        The path with special characters escaped.
        For instance a backslash becomes a double backslash.
        """
        return self.path.replace("\\", "\\\\")

    @property
    def absolute_path(self):
        """The absolute path starting with a `/`."""
        return self.__class__(os.path.abspath(self.path))

    @property
    def physical_path(self):
        """The physical path like in `pwd -P` (symlinks resolved)."""
        return self.__class__(os.path.realpath(self.path))

    @property
    def with_tilda(self):
        """
        The absolute path starting with a '~' if it's in the home.
        Returns a string, not an autopaths object, since autopaths can't
        be encoded with a tilda.
        """
        # Get variables #
        home = os.path.expanduser('~')
        path = self.absolute_path
        # Check we are in the home #
        if not path.startswith(home): return path
        # Replace #
        return path.replace(home, '~', 1)

    @property
    def relative_path(self):
        """The relative path when compared with current directory."""
        return self.__class__(os.path.relpath(self.physical_path))

    def rel_path_from(self, path):
        """The relative path when compared to the given path."""
        return self.__class__(os.path.relpath(self.path, path))

    @property
    def unix_style(self):
        """The path with forward slashes and no disk drive."""
        if self.path[1] == ':': path = self.path[2:]
        else:                   path = self.path
        return path.replace("\\", "/")

    @property
    def wsl_style(self):
        """
        The path with forward slashes and a windows subsystem
        for linux style leading disk drive.
        """
        return "/mnt/c" + self.unix_style

    @property
    def win_style(self):
        """The path with backward slashes."""
        return self.path.replace("/", "\\")

    @property
    def exists(self):
        """
        Does it exist in the file system?
        Returns True even for broken symbolic links.
        """
        return os.path.lexists(self.path)

    @property
    def is_symlink(self):
        """Is this file a symbolic link to an other file?"""
        if os.name == "posix": return os.path.islink(self.path)
        if os.name == "nt":
            import win32api
            import win32con
            num = win32con.FILE_ATTRIBUTE_REPARSE_POINT
            return bool(win32api.GetFileAttributes(self.path) & num)

    @property
    def permissions(self):
        """Convenience object for dealing with permissions."""
        return autopaths.file_permissions.FilePermissions(self.path)

    @property
    def mdate(self):
        """Return the modification date as a unix time."""
        return os.path.getmtime(self.path)

    @property
    def mdate_iso(self):
        """Return the modification date as an ISO-8601 string."""
        # BUG FIX: fromtimestamp lives on datetime.datetime, not on the
        # datetime module; the original "datetime.fromtimestamp" always
        # raised AttributeError.
        import datetime
        return datetime.datetime.fromtimestamp(self.mdate).isoformat()

    @property
    def cdate(self):
        """Return the creation date."""
        return os.path.getctime(self.path)

    @property
    def cdate_iso(self):
        """Return the creation date as an ISO-8601 string."""
        # BUG FIX: same datetime.datetime.fromtimestamp fix as mdate_iso.
        import datetime
        return datetime.datetime.fromtimestamp(self.cdate).isoformat()

    #------------------------------ Soft links -------------------------------#
    def link_from(self, path, safe=False, absolute=False):
        """
        Make a link here pointing to another file/directory somewhere else.
        The destination is hence *self* and the source is *path*.
        """
        # Get source and destination #
        from autopaths import Path
        source      = Path(path)
        destination = self
        # Call method #
        self._symlink(source, destination, safe, absolute)

    def link_to(self, path, safe=False, absolute=False):
        """
        Create a link somewhere else pointing to this file.
        The destination is hence *path* and the source is *self*.
        """
        # Get source and destination #
        from autopaths import Path
        source      = self
        destination = Path(path)
        # Call method #
        self._symlink(source, destination, safe, absolute)

    def _symlink(self, source, destination, safe, absolute):
        # If source is a file and the destination is a dir, put it inside #
        if os.path.isdir(destination) and not os.path.isdir(source):
            destination = destination + source.filename
        # Do we want absolute paths #
        if absolute: source = source.absolute_path
        # Strip trailing separators #
        source      = source.rstrip(sep)
        destination = destination.rstrip(sep)
        # Windows doesn't have os.symlink #
        if os.name == "posix":
            self.symlinks_on_linux(source, destination, safe)
        if os.name == "nt":
            self.symlinks_on_windows(source, destination, safe)

    @staticmethod
    def symlinks_on_linux(source, destination, safe):
        # Do it unsafely #
        if not safe:
            if os.path.exists(destination) or os.path.islink(destination):
                os.remove(destination)
            os.symlink(source, destination)
        # Do it safely (swallow errors from races / missing targets) #
        if safe:
            try: os.remove(destination)
            except OSError: pass
            try: os.symlink(source, destination)
            except OSError: pass

    @staticmethod
    def symlinks_on_windows(source, destination, safe):
        """Yes, source and destination need to be in the reverse order."""
        import win32file
        if os.path.isdir(source):
            return win32file.CreateSymbolicLink(destination, source, 1)
        else:
            return win32file.CreateSymbolicLink(destination, source, 0)

    #------------------------------ Hard links -------------------------------#
    def hard_link_win_to(self, path):
        """
        Create a hard link at *path* pointing at this file (Windows or
        POSIX; os.link works on both).
        """
        # Get source and destination #
        from autopaths import Path
        source      = self
        destination = Path(path)
        # Call method #
        os.link(source, destination)
| StarcoderdataPython |
3266488 | <gh_stars>0
from pathlib import Path
from appdirs import user_data_dir
# Per-platform application data directory for ml4a, resolved by appdirs
# (e.g. ~/.local/share/ml4a on Linux).
user_data_directory = Path(user_data_dir(appname='ml4a', appauthor='golmschenk'))
| StarcoderdataPython |
4835084 | # Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
# This software is distributed under the terms and conditions of the 'Apache-2.0'
# license which can be found in the file 'LICENSE' in this package distribution
# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
import logging
from cliff.lister import Lister
from cliff.command import Command
class SubjectScopesList(Lister):
    """List all subject scopes."""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        # Positional: the subject category; optional: the intra-extension
        # (defaults to the one configured on the application).
        parser = super(SubjectScopesList, self).get_parser(prog_name)
        parser.add_argument(
            'subject_category_id',
            metavar='<subject-category-uuid>',
            help='Subject category UUID',
        )
        parser.add_argument(
            '--intraextension',
            metavar='<intraextension-uuid>',
            help='IntraExtension UUID',
        )
        return parser

    def take_action(self, parsed_args):
        # GET the scopes of the given category and format them as a table.
        if not parsed_args.intraextension:
            parsed_args.intraextension = self.app.intraextension
        data = self.app.get_url(self.app.url_prefix+"/intra_extensions/{}/subject_scopes/{}".format(
            parsed_args.intraextension,
            parsed_args.subject_category_id),
            authtoken=True)
        return (
            ("id", "name", "description"),
            ((_id, data[_id]["name"], data[_id]["description"]) for _id in data)
        )
class SubjectScopesAdd(Command):
    """Add a new subject scope."""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        # Positionals: category and new scope name; optional description
        # and intra-extension override.
        parser = super(SubjectScopesAdd, self).get_parser(prog_name)
        parser.add_argument(
            'subject_category_id',
            metavar='<subject-category-uuid>',
            help='Subject category UUID',
        )
        parser.add_argument(
            'subject_scope_name',
            metavar='<subject-scope-str>',
            help='Subject scope Name',
        )
        parser.add_argument(
            '--description',
            metavar='<description-str>',
            help='Description',
        )
        parser.add_argument(
            '--intraextension',
            metavar='<intraextension-uuid>',
            help='IntraExtension UUID',
        )
        return parser

    def take_action(self, parsed_args):
        # POST the new scope; the server replies with the resulting scopes.
        if not parsed_args.intraextension:
            parsed_args.intraextension = self.app.intraextension
        data = self.app.get_url(self.app.url_prefix+"/intra_extensions/{}/subject_scopes/{}".format(
            parsed_args.intraextension, parsed_args.subject_category_id),
            post_data={
                "subject_scope_name": parsed_args.subject_scope_name,
                "subject_scope_description": parsed_args.description,
            },
            authtoken=True)
        return (
            ("id", "name", "description"),
            ((_id, data[_id]["name"], data[_id]["description"]) for _id in data)
        )
class SubjectScopesDelete(Command):
    """Delete a subject scope."""
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        # Positionals: category and scope UUIDs; optional intra-extension.
        parser = super(SubjectScopesDelete, self).get_parser(prog_name)
        parser.add_argument(
            'subject_category_id',
            metavar='<subject-category-uuid>',
            help='Subject category UUID',
        )
        parser.add_argument(
            'subject_scope_id',
            metavar='<subject-scope-uuid>',
            help='Subject scope UUID',
        )
        parser.add_argument(
            '--intraextension',
            metavar='<intraextension-uuid>',
            help='IntraExtension UUID',
        )
        return parser

    def take_action(self, parsed_args):
        # Issue the DELETE request; no table output is produced.
        if not parsed_args.intraextension:
            parsed_args.intraextension = self.app.intraextension
        self.app.get_url(self.app.url_prefix+"/intra_extensions/{}/subject_scopes/{}/{}".format(
            parsed_args.intraextension,
            parsed_args.subject_category_id,
            parsed_args.subject_scope_id
            ),
            method="DELETE",
            authtoken=True
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.