hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d15aa90cf8b146f10fcd1838f8fa2f846261f835 | 1,912 | py | Python | nicos_demo/vrefsans/setups/nok/b2.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | null | null | null | nicos_demo/vrefsans/setups/nok/b2.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | null | null | null | nicos_demo/vrefsans/setups/nok/b2.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | null | null | null | description = 'at samplecamper [slit k1]'
group = 'lowlevel'
devices = dict(
b2 = device('nicos_mlz.refsans.devices.slits.DoubleSlit',
description = 'b2 at sample pos',
fmtstr = 'opening: %.3f mm, zpos: %.3f mm',
unit = '',
slit_r = 'b2r',
slit_s = 'b2s',
),
b2r = device('nicos_mlz.refsans.devices.slits.SingleSlit',
# length: 13.0 mm
        description = 'b2 slit, reactor side; 220 full access, 74 for upper screws',
motor = 'b2_r',
nok_start = 11049.50,
nok_end = 11064.50,
nok_gap = 1.0,
masks = {
'slit': 0.0,
'point': -4.067,
'gisans': -218.645,
},
visibility = (),
unit = 'mm',
),
b2s = device('nicos_mlz.refsans.devices.slits.SingleSlit',
# length: 13.0 mm
description = 'b2 slit, sample side; -291 full access, -182 low row',
motor = 'b2_s',
nok_start = 11049.50,
nok_end = 11064.50,
nok_gap = 1.0,
masks = {
'slit': 0.0,
'point': -0.233,
'gisans': 206.4,
},
unit = 'mm',
visibility = (),
),
b2_r = device('nicos.devices.generic.Axis',
description = 'b2, reactorside',
motor = device('nicos.devices.generic.VirtualMotor',
abslimits = (-1294, 1222),
speed = 3.,
unit = 'mm',
),
backlash = 0,
precision = 0.02,
visibility = (),
),
b2_s = device('nicos.devices.generic.Axis',
description = 'b2, sampleside',
motor = device('nicos.devices.generic.VirtualMotor',
abslimits = (-2960, 2130),
speed = 3.,
unit = 'mm',
),
backlash = 0,
precision = 0.02,
visibility = (),
),
)
alias_config = {
'last_aperture': {'b2': 100},
}
| 27.314286 | 84 | 0.486925 | 203 | 1,912 | 4.502463 | 0.413793 | 0.084245 | 0.078775 | 0.109409 | 0.602845 | 0.602845 | 0.56674 | 0.363239 | 0.363239 | 0.363239 | 0 | 0.097279 | 0.365586 | 1,912 | 69 | 85 | 27.710145 | 0.656224 | 0.016213 | 0 | 0.515625 | 0 | 0 | 0.283813 | 0.13099 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d15dd7f172b63fa664d2c06fe65f7e7984030b80 | 2,909 | py | Python | pentagon.py | tobi08151405/Platonic-bodys | 077976d1943a0b834ad9e81b8e6ff9376ef76449 | ["MIT"] | null | null | null | pentagon.py | tobi08151405/Platonic-bodys | 077976d1943a0b834ad9e81b8e6ff9376ef76449 | ["MIT"] | null | null | null | pentagon.py | tobi08151405/Platonic-bodys | 077976d1943a0b834ad9e81b8e6ff9376ef76449 | ["MIT"] | null | null | null | __author__ = 'tobias'
import math as m
import turtle as t
import rotation_matrix_3D as rm3D
def pentagon():
print("\n\n****************************************************************************\n\n")
#requesting the variables
    radius = float(input("prism radius [mm] : "))
height = float(input("height h [mm] : "))
Volume = float(input("volume [mm^3] : "))
alpha_grad = float(input("angle alpha [°] : "))
beta_grad = float(input("angle beta [°] : "))
gamma_grad = float(input("angle gamma [°] : "))
print("\n\n****************************************************************************\n\n")
#calculation
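    # For a regular pentagon with circumradius r: side s = 2*r*sin(pi/5),
    # apothem a = sqrt(r**2 - (s/2)**2), area A = 5 * (s/2) * a; the long
    # expressions below inline exactly this, so the prism volume is V = A * height.
    # (Entering 0 for volume or height marks that quantity as the unknown to solve for.)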
if Volume == 0:
W = ((((m.sin((m.pi * 2) / 5) * radius) ** 2 + (radius - (m.cos((m.pi * 2) / 5) * radius)) ** 2) ** 0.5) / 2 * ((radius ** 2 - ((((m.sin((m.pi * 2) / 5) * radius) ** 2 + (radius - (m.cos((m.pi * 2) / 5) * radius)) ** 2) ** 0.5) / 2) ** 2) ** 0.5) * 5) * height
print("Ergebnis: volume = %.5f" % W, "[mm^3]")
print("\n\n****************************************************************************\n\n")
elif height == 0:
a = Volume / ((((m.sin((m.pi * 2) / 5) * radius) ** 2 + (radius - (m.cos((m.pi * 2) / 5) * radius)) ** 2) ** 0.5) / 2 * ((radius ** 2 - ((((m.sin((m.pi * 2) / 5) * radius) ** 2 + (radius - (m.cos((m.pi * 2) / 5) * radius)) ** 2) ** 0.5) / 2) ** 2) ** 0.5) * 5)
print("Ergebnis: height h = %.%f" % a, "[mm]")
height = a
    #transform from degrees to radians
alpha = m.radians(alpha_grad)
beta = m.radians(beta_grad)
gamma = m.radians(gamma_grad)
    #define the points
    Ap, Bp, Cp, Dp, Ep, Fp, Gp, Hp, Ip, Jp = [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]
    points_to_calculate = [Ap, Bp, Cp, Dp, Ep, Fp, Gp, Hp, Ip, Jp]
    corner_points = []
    k = 0
    for point in points_to_calculate:
        if k > 4:
            z = height
        else:
            z = 0
        corner_points.append([(radius * m.sin(m.pi * 2 / 5 * k)), (radius * m.cos(m.pi * 2 / 5 * k)), z])
        k = k + 1
    #rotate the points
    corner_points3 = rm3D.rotation_matrix_3D(alpha, beta, gamma, corner_points)
    #draw the prism
    window = t.Screen()
    t.ht()
    t.speed(2)
    t.up()
    t.goto(corner_points3[0][0:2])
    t.pd()
    for pointk1 in corner_points3[1:5]:
        t.goto(pointk1[0:2])
    t.goto(corner_points3[0][0:2])
    for pointk2 in corner_points3[5:10]:
        t.goto(pointk2[0:2])
    t.goto(corner_points3[5][0:2])
    t.pu()
    t.goto(corner_points3[1][0:2])
    t.pd()
    t.goto(corner_points3[6][0:2])
    t.pu()
    t.goto(corner_points3[2][0:2])
    t.pd()
    t.goto(corner_points3[7][0:2])
    t.pu()
    t.goto(corner_points3[3][0:2])
    t.pd()
    t.goto(corner_points3[8][0:2])
    t.pu()
    t.goto(corner_points3[4][0:2])
    t.pd()
    t.goto(corner_points3[9][0:2])
window.exitonclick() | 40.402778 | 268 | 0.475421 | 457 | 2,909 | 2.960613 | 0.214442 | 0.045824 | 0.062084 | 0.079823 | 0.411678 | 0.390244 | 0.367332 | 0.325203 | 0.189209 | 0.189209 | 0 | 0.069369 | 0.236851 | 2,909 | 72 | 269 | 40.402778 | 0.538739 | 0.039532 | 0 | 0.215385 | 0 | 0 | 0.151309 | 0.090355 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015385 | false | 0 | 0.046154 | 0 | 0.061538 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d1608f7400945f8fca0e8f7dfc05612ae0e8b662 | 11,102 | py | Python | coordinator.py | vascoalramos/distributed-map-reduce | 315711489b75929c7f7c80527265a2c7f3d20af5 | ["MIT"] | null | null | null | coordinator.py | vascoalramos/distributed-map-reduce | 315711489b75929c7f7c80527265a2c7f3d20af5 | ["MIT"] | null | null | null | coordinator.py | vascoalramos/distributed-map-reduce | 315711489b75929c7f7c80527265a2c7f3d20af5 | ["MIT"] | null | null | null | # coding: utf-8
from socket import socket, AF_INET, SOCK_STREAM
from backup import Backup
import csv
import logging
import argparse
import json
import asyncio
import sys
import queue
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
logger = logging.getLogger('Coordinator')
# Asyncio class (we used callback asyncio)
class EchoProtocol(asyncio.BaseProtocol):
def __init__(self, coordinator):
self.coordinator = coordinator
def connection_made(self, transport):
self.transport = transport
self.socket = transport.get_extra_info('socket')
self.addr = transport.get_extra_info('peername')
logger.info('New connection established: %s', self.addr)
def data_received(self, data):
self.coordinator.receive(self.socket, data)
def eof_received(self):
self.coordinator.redistributeWork(self.socket)
# Coordinator class
class Coordinator:
def __init__(self, datastore, datastoreIndex=0, maps=[], msgBuffer=""):
self.datastore = datastore # Array with all of the blobs
if len(self.datastore) == 1:
self.singleBlob = True # Control variable used to force a reduce if we only have 1 blob
else:
self.singleBlob = False
# Index that points to what blob we're treating
self.datastoreIndex = datastoreIndex
self.lostWork = queue.Queue() # Queue with work lost when a worker crashes
self.sentWork = {} # Dictionary with all work messages sent and to whom
self.workers = {} # Dictionary with worker's id-connection
self.tasksCount = 0
self.msgBuffer = msgBuffer # Incoming message buffer
self.maps = maps # Maps we're treating
logger.debug("Map : %s", self.maps)
logger.debug(self.datastoreIndex)
# Backup:
# Backup connection (Backup-Coordinator address)
self.backupConn = None
self.backupAddr = () # Backup address (Backup socket address)
# Receive function. Used to receive and treat incoming data
def receive(self, connection, data):
dataReceived = data.decode('UTF-8')
if '\x04' not in dataReceived: # If we haven't received a message with the break char
self.msgBuffer += dataReceived # Keep appending it to our msgBuffer
else:
splitBuf = dataReceived.split('\x04')
auxBuf = self.msgBuffer + splitBuf[0]
self.msgBuffer = splitBuf[1]
self.handle(connection, auxBuf)
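    # Example of the framing above (illustrative values): if the stream delivers
    # '{"task": "reg' and then 'ister", "id": 1}\x04{"task', the first chunk is
    # buffered, the second completes one message at the '\x04' delimiter, and the
    # leftover '{"task' stays in msgBuffer for the next data_received call.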
# Send function. Used to send messages
def send(self, connection, data):
connection.sendall(data.encode(
'UTF-8') + ('\x04').encode('UTF-8'))
self.sentWork[connection] = data
################ SYNC FUNCS ############################################
# Sends current maps and index values to backup
def syncData(self):
msg = {"task": "update", "value": self.maps,
"index": self.datastoreIndex}
updateMsg = json.dumps(msg)
try:
self.backupConn.sendall(updateMsg.encode(
'UTF-8') + ('\x04').encode('UTF-8'))
except OSError:
pass
#######################################################################
# Function used to register work that had been sent to a worker that died
def redistributeWork(self, connection):
if (connection != self.backupConn) and (connection in list(self.workers.values())):
for workerID, workerConn in self.workers.items():
if workerConn == connection:
del self.workers[workerID]
self.lostWork.put(self.sentWork[connection])
break
# Function used to register a new worker
def regWorker(self, connection, workerID):
self.workers[workerID] = connection # Add it to our worker dict
logger.info('Worker registered with id %s', workerID)
if self.backupConn is not None: # If we have a backup, give the worker a viable address to connect to it
workMsg = json.dumps(
{"task": "reg_backup", "value": self.backupAddr})
self.send(connection, workMsg)
self.giveWork(connection)
# Function used to write a csv with the resulting final map
def writeToCSV(self):
with args.out as f:
csv_writer = csv.writer(
f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for w, c in self.maps[0]:
csv_writer.writerow([w, c])
# Function used to give work to a worker
def giveWork(self, connection):
if (len(self.maps) < 2): # If we only have 1 map in our maps
# And still have blobs in the datastore, get a new one and send it to be mapped (either from the lost works or the datastore)
if (self.datastoreIndex < len(self.datastore)):
if (self.lostWork.qsize() == 0):
workMsg = json.dumps(
{"task": "map_request", "blob": self.datastore[self.datastoreIndex]})
self.send(connection, workMsg)
self.datastoreIndex += 1
if self.backupConn is not None:
self.syncData() # Update backup data
else:
workMsg = self.lostWork.get()
self.send(connection, workMsg)
if self.backupConn is not None:
self.syncData() # Update backup data
else:
if self.backupConn is not None:
self.syncData() # Update backup data
self.tasksCount += 1
logger.debug(str(self.tasksCount) + " | " + str(len(self.workers)))
if (self.tasksCount == len(self.workers)):
if self.singleBlob == True: # In case we only have 1 blob we have to force the reduce!
self.singleBlob = False
self.tasksCount -= 1
self.maps.append([])
workMsg = json.dumps(
{"task": "reduce_request", "value": [self.maps[0], self.maps[1]]})
self.send(connection, workMsg)
del self.maps[1]
del self.maps[0]
else:
logger.info('Map complete: %s', self.maps)
if self.backupConn is not None:
self.syncData() # Update backup data
self.writeToCSV() # Store final histogram into a CSV file
sys.exit()
else: # Otherwise, reduce the 2 maps we've got OR send work that had been lost
if (self.lostWork.qsize() == 0):
workMsg = json.dumps(
{"task": "reduce_request", "value": [self.maps[0], self.maps[1]]})
self.send(connection, workMsg)
if self.backupConn is not None:
self.syncData() # Update backup data
del self.maps[1]
del self.maps[0]
else:
workMsg = self.lostWork.get()
self.send(connection, workMsg)
if self.backupConn is not None:
self.syncData() # Update backup data
# Function used to handle incoming requests
def handle(self, connection, data):
msg = json.loads(data)
logger.info('Handling task %s', msg["task"])
if msg['task'] == 'register': # Register new worker
self.regWorker(connection, msg['id'])
elif msg['task'] == 'reg_backup': # Register new backup
self.backupConn = connection
self.backupAddr = msg['addr']
workMsg = json.dumps(
{"task": "reg_backup", "value": self.backupAddr})
for workerID, workerConn in self.workers.items(): # Register new backup at all workers
self.send(workerConn, workMsg)
logger.debug("Registered backup!")
elif msg['task'] == 'map_reply' or msg['task'] == 'reduce_reply': # Receive a work reply
self.maps.append(msg["value"])
self.giveWork(connection)
# Main process initializing function
async def main(args):
datastore = []
# Build blobs
with args.file as f:
while True:
blob = f.read(args.blob_size)
if not blob:
break
# This loop is used to not break word in half
while not str.isspace(blob[-1]):
ch = f.read(1)
if not ch:
break
blob += ch
logger.debug('Blob: %s\n\n', blob)
datastore.append(blob)
#############################################
# Create Coordinator
coordinator = Coordinator(datastore)
failCounter = 0
    try: # It's a coordinator!
loop = asyncio.get_event_loop()
server = await loop.create_server(lambda: EchoProtocol(coordinator), "127.0.0.1", args.port)
logger.info("Coordinator created!")
await server.serve_forever()
    except: # It's a backup!
port = args.port + 1 # Backup port, used to communicate with workers
while True:
try:
backup_coord = Backup(
"127.0.0.1", args.port, datastore, "127.0.0.1", port)
failCounter = 0
break
except:
failCounter += 1
port += 1
if failCounter >= 10:
break
pass
logger.info("Backup created!")
backup_coord.start_backup()
# When the coordinator dies, backup becomes the new coordinator by launching a server
coordinator = Coordinator(
datastore, backup_coord.indexDatastore, backup_coord.maps)
loop = asyncio.get_event_loop()
server = await loop.create_server(lambda: EchoProtocol(coordinator), "127.0.0.1", args.port)
logger.info("Coordinator created!")
await server.serve_forever()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MapReduce Coordinator')
parser.add_argument('-p', dest='port', type=int,
help='coordinator port', default=8765)
parser.add_argument('-f', dest='file',
type=argparse.FileType('r'), help='file path')
parser.add_argument('-o', dest='out', type=argparse.FileType('w',
encoding='UTF-8'), help='output file path', default='output.csv')
parser.add_argument('-b', dest='blob_size', type=int,
help='blob size', default=1024)
args = parser.parse_args()
loop = asyncio.get_event_loop()
loop.run_until_complete(main(args))
loop.close() | 36.045455 | 137 | 0.555756 | 1,229 | 11,102 | 4.971522 | 0.227828 | 0.020949 | 0.016039 | 0.020622 | 0.232242 | 0.205074 | 0.198691 | 0.178396 | 0.178396 | 0.144354 | 0 | 0.011148 | 0.3294 | 11,102 | 308 | 138 | 36.045455 | 0.809537 | 0.168888 | 0 | 0.365385 | 0 | 0 | 0.07465 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.009615 | 0.043269 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d160afe29ca98b897f73981e72be75b8dff2b9ff | 2,554 | py | Python | batch_merge.py | rmukh/abanalysis | 5f75aa7562a47ea8304be8fa1ccf6720f6e5e77d | ["MIT"] | null | null | null | batch_merge.py | rmukh/abanalysis | 5f75aa7562a47ea8304be8fa1ccf6720f6e5e77d | ["MIT"] | null | null | null | batch_merge.py | rmukh/abanalysis | 5f75aa7562a47ea8304be8fa1ccf6720f6e5e77d | ["MIT"] | null | null | null | #!/usr/bin/python
# filename: batch_merge.py
###########################################################################
#
# Copyright (c) 2013 Bryan Briney. All rights reserved.
# Copyright (c) 2021 Rinat Mukhometzianov.
# @version: 1.0.0
# @author: Bryan Briney, Rinat Mukhometzianov
# @props: IgBLAST team (http://www.ncbi.nlm.nih.gov/igblast/igblast.cgi)
# @license: MIT (http://opensource.org/licenses/MIT)
#
###########################################################################
import os
import glob
import shutil
import argparse
import pandaseq
parser = argparse.ArgumentParser("Batch merging of paired-end reads with PANDAseq")
parser.add_argument('-i', '--in', dest='input', required=True, help="The input directory, containing paired FASTQ files"
" (uncompressed or gzip compressed). Required.")
parser.add_argument('-o', '--out', dest='output', required=True,
help="The output directory, will contain merged FASTA files. Required.")
parser.add_argument('-n', '--nextseq', dest='nextseq', default=False, action='store_true',
help="Use flag if run was performed on a NextSeq sequencer.")
args = parser.parse_args()
def make_direc(d):
if not os.path.exists(d):
os.mkdir(d)
def remove_direc(d):
shutil.rmtree(d)
def list_files(d):
return sorted([f for f in glob.glob(d + '/*') if os.path.isfile(f)])
def bin_files(files):
file_bins = {}
for f in files:
f_pre = '_'.join(os.path.basename(f).split('_')[:-1])
if f_pre in file_bins:
file_bins[f_pre].append(f)
else:
file_bins[f_pre] = [f, ]
return file_bins
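# For example (hypothetical filenames), bin_files(['sampleA_R1.fastq',
# 'sampleA_R2.fastq', 'sampleB_R1.fastq']) returns
# {'sampleA': ['sampleA_R1.fastq', 'sampleA_R2.fastq'], 'sampleB': ['sampleB_R1.fastq']},
# since the bin key is everything before the last underscore.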
def concat(d):
files = list_files(d)
file_bins = bin_files(files)
for file_bin in file_bins:
outfile = os.path.join(args.output, '{}.fasta'.format(file_bin))
with open(outfile, 'w') as o:
for f in file_bins[file_bin]:
with open(f) as i:
for line in i:
o.write(line)
def main():
make_direc(args.output)
if args.nextseq:
temp = os.path.join(args.output, 'temp')
make_direc(temp)
o = temp
else:
o = args.output
pandaseq.run(args.input, o, args.nextseq)
if args.nextseq:
print('')
print('Concatenating NextSeq lane files for each sample...')
concat(o)
remove_direc(o)
print('Done.')
print('')
if __name__ == '__main__':
main()
| 28.696629 | 120 | 0.564996 | 324 | 2,554 | 4.330247 | 0.41358 | 0.045617 | 0.036351 | 0.027085 | 0.02851 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006266 | 0.250196 | 2,554 | 88 | 121 | 29.022727 | 0.726371 | 0.124902 | 0 | 0.105263 | 0 | 0 | 0.189007 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.087719 | 0.017544 | 0.22807 | 0.070175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d160e05e12200ca4b5c97430d3fdfce6f5aa5dd3 | 8,350 | py | Python | globus_automate_client/action_client.py | seren/globus-automate-client | e12314911bdf4ef62d2ca533d9de4dd3b3d1ad2c | ["Apache-2.0"] | null | null | null | globus_automate_client/action_client.py | seren/globus-automate-client | e12314911bdf4ef62d2ca533d9de4dd3b3d1ad2c | ["Apache-2.0"] | null | null | null | globus_automate_client/action_client.py | seren/globus-automate-client | e12314911bdf4ef62d2ca533d9de4dd3b3d1ad2c | ["Apache-2.0"] | null | null | null | import uuid
from typing import Any, Dict, Iterable, Mapping, Optional, Type, TypeVar, Union
from globus_sdk import (
AccessTokenAuthorizer,
ClientCredentialsAuthorizer,
GlobusHTTPResponse,
RefreshTokenAuthorizer,
)
from globus_sdk.base import BaseClient
from .helpers import merge_lists
_ActionClient = TypeVar("_ActionClient", bound="ActionClient")
class ActionClient(BaseClient):
allowed_authorizer_types = (
AccessTokenAuthorizer,
RefreshTokenAuthorizer,
ClientCredentialsAuthorizer,
)
AllowedAuthorizersType = Union[
AccessTokenAuthorizer, RefreshTokenAuthorizer, ClientCredentialsAuthorizer
]
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
@property
def action_scope(self) -> str:
"""
This property can be used to determine an ``ActionClient``'s
``action_scope``. Internally, this property will introspect the Action
Provider at the URL for which the ``ActionClient`` was created. If the
``Action Provider`` is not public, a valid ``Globus Authorizer`` will
have to have been provided on initialization to the ``ActionClient``.
Otherwise, this call will fail.
"""
if not hasattr(self, "_action_scope"):
resp = self.introspect()
if resp.data is None:
self._action_scope = ""
else:
self._action_scope = resp.data.get("globus_auth_scope", "")
return self._action_scope
def introspect(self, **kwargs) -> GlobusHTTPResponse:
"""
Introspect the details of an Action Provider to discover information
such as its expected ``action_scope``, its ``input_schema``, and who to
contact when there's trouble.
"""
return self.get("")
def run(
self,
body: Mapping[str, Any],
request_id: Optional[str] = None,
manage_by: Optional[Iterable[str]] = None,
monitor_by: Optional[Iterable[str]] = None,
label: Optional[str] = None,
force_path: Optional[str] = None,
**kwargs
) -> GlobusHTTPResponse:
"""
Invoke the Action Provider to execute an Action with the given
parameters.
:param body: The Action Provider specific input required to execute an
Action payload
:param request_id: An optional identifier that serves to de-duplicate
requests to the Action Provider
:param manage_by: A series of Globus identities which may alter
this Action's execution. The principal value is the user's or
group's UUID prefixed with either 'urn:globus:groups:id:' or
'urn:globus:auth:identity:'
:param monitor_by: A series of Globus identities which may
view the state of this Action. The principal value is the user's or
group's UUID prefixed with either 'urn:globus:groups:id:' or
'urn:globus:auth:identity:'
:param force_path: A URL to use for running this action, ignoring any
previous configuration
:param label: Set a label for the Action that is run.
:param run_monitors: May be used as an alias for ``monitor_by``
:param run_managers: May be used as an alias for ``manage_by``
"""
if request_id is None:
request_id = str(uuid.uuid4())
path = self.qjoin_path("run")
if force_path:
path = force_path
body = {
"request_id": str(request_id),
"body": body,
"monitor_by": merge_lists(monitor_by, kwargs, "run_monitors"),
"manage_by": merge_lists(manage_by, kwargs, "run_managers"),
"label": label,
}
        # Remove None items from the request body
body = {k: v for k, v in body.items() if v is not None}
return self.post(path, body)
def status(self, action_id: str) -> GlobusHTTPResponse:
"""
Query the Action Provider for the status of executed Action
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
path = self.qjoin_path(action_id, "status")
return self.get(path)
def resume(self, action_id: str) -> GlobusHTTPResponse:
"""
Resume an INACTIVE action. Corrective action must have been taken prior to invoking
this method, including the possibility of consenting to additional permissions
and using tokens issued by those consents when creating this client. These
consents would commonly be required when an Action is INACTIVE and shows the code
ConsentRequired.
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
path = self.qjoin_path(action_id, "resume")
return self.post(path)
def cancel(self, action_id: str) -> GlobusHTTPResponse:
"""
Cancel a currently executing Action on an Action Provider
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
path = self.qjoin_path(action_id, "cancel")
return self.post(path)
def release(self, action_id: str) -> GlobusHTTPResponse:
"""
Remove the history of an Action's execution from an Action Provider
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
"""
path = self.qjoin_path(action_id, "release")
return self.post(path)
def log(
self,
action_id: str,
limit: int = 10,
reverse_order: bool = False,
marker: Optional[str] = None,
per_page: Optional[int] = None,
) -> GlobusHTTPResponse:
"""
Retrieve an Action's execution log history. Not all ``Action Providers``
support this operation.
:param action_id: An identifier that uniquely identifies an Action
executed on this Action Provider.
:param limit: A integer specifying how many log records to return
:param reverse_order: Display the Action states in reverse-
chronological order
:param marker: A pagination_token indicating the page of results to
return and how many entries to return. Not all ActionProviders will
support this parameter.
:param per_page: The number of results to return per page. If
supplied a pagination_token, this parameter has no effect. Not all
ActionProviders will support this parameter.
"""
params: Dict[str, Union[int, str]] = {
"reverse_order": reverse_order,
"limit": limit,
}
if marker is not None:
params["pagination_token"] = marker
if per_page is not None and marker is None:
params["per_page"] = per_page
path = self.qjoin_path(action_id, "log")
return self.get(path, params=params)
@classmethod
def new_client(
cls: Type[_ActionClient],
action_url: str,
authorizer: AllowedAuthorizersType,
http_timeout: int = 10,
) -> _ActionClient:
"""
Classmethod to simplify creating an ActionClient. Use this method when
        attempting to create an ActionClient with pre-existing credentials or
authorizers.
:param action_url: The url at which the target Action Provider is
located.
:param authorizer: The authorizer to use for validating requests to the
Action Provider.
:param http_timeout: The amount of time to wait for connections to
the Action Provider to be made.
**Examples**
>>> authorizer = ...
>>> action_url = "https://actions.globus.org/hello_world"
>>> ac = ActionClient.new_client(action_url, authorizer)
>>> print(ac.run({"echo_string": "Hello from SDK"}))
"""
return cls(
"action_client",
app_name="Globus Automate SDK - ActionClient",
base_url=action_url,
authorizer=authorizer,
http_timeout=http_timeout,
)
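# A minimal polling sketch (assumes a valid authorizer and a reachable Action
# Provider; "action_id" in the run response follows the Globus Action Provider
# Interface):
#
#     ac = ActionClient.new_client(action_url, authorizer)
#     resp = ac.run({"echo_string": "hello"})
#     print(ac.status(resp.data["action_id"]))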
| 38.302752 | 91 | 0.628862 | 983 | 8,350 | 5.23296 | 0.267548 | 0.046268 | 0.026439 | 0.019829 | 0.250194 | 0.202566 | 0.185264 | 0.159603 | 0.145995 | 0.145995 | 0 | 0.000849 | 0.29497 | 8,350 | 217 | 92 | 38.479263 | 0.87294 | 0.465389 | 0 | 0.128713 | 0 | 0 | 0.063049 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09901 | false | 0 | 0.049505 | 0 | 0.267327 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d16327d6f6de6412d5ee3afdf6424723bafa7c97 | 8,995 | py | Python | number_plate_redaction.py | Hashim-Ali-Zaidi/https-github.com-parkpow-deep-license-plate-recognition | 01ebc5090923c0cc959f4d8ce55e5cac06b2117f | ["MIT"] | 21 | 2022-01-05T03:05:35.000Z | 2022-03-31T19:28:27.000Z | number_plate_redaction.py | Hashim-Ali-Zaidi/https-github.com-parkpow-deep-license-plate-recognition | 01ebc5090923c0cc959f4d8ce55e5cac06b2117f | ["MIT"] | 10 | 2022-01-18T14:34:31.000Z | 2022-03-14T07:43:38.000Z | number_plate_redaction.py | Hashim-Ali-Zaidi/https-github.com-parkpow-deep-license-plate-recognition | 01ebc5090923c0cc959f4d8ce55e5cac06b2117f | ["MIT"] | 2 | 2022-01-24T22:45:03.000Z | 2022-01-27T19:13:23.000Z | import io
import json
import math
import re
from itertools import combinations
from pathlib import Path
from PIL import Image, ImageFilter, ImageDraw, ImageFont
from plate_recognition import parse_arguments, recognition_api
def draw_bb(im, data, new_size=(1920, 1050), text_func=None):
draw = ImageDraw.Draw(im)
font_path = Path('assets/DejaVuSansMono.ttf')
if font_path.exists():
font = ImageFont.truetype(str(font_path), 10)
else:
font = ImageFont.load_default()
rect_color = (0, 255, 0)
for result in data:
b = result['box']
coord = [(b['xmin'], b['ymin']), (b['xmax'], b['ymax'])]
draw.rectangle(coord, outline=rect_color)
draw.rectangle(((coord[0][0] - 1, coord[0][1] - 1),
(coord[1][0] - 1, coord[1][1] - 1)),
outline=rect_color)
draw.rectangle(((coord[0][0] - 2, coord[0][1] - 2),
(coord[1][0] - 2, coord[1][1] - 2)),
outline=rect_color)
if text_func:
text = text_func(result)
text_width, text_height = font.getsize(text)
margin = math.ceil(0.05 * text_height)
draw.rectangle(
[(b['xmin'] - margin, b['ymin'] - text_height - 2 * margin),
(b['xmin'] + text_width + 2 * margin, b['ymin'])],
fill='white')
draw.text((b['xmin'] + margin, b['ymin'] - text_height - margin),
text,
fill='black',
font=font)
if new_size:
im = im.resize(new_size)
return im
def blur(im, blur_amount, api_res, ignore_no_bb=False, ignore_list=None):
for res in api_res.get('results', []):
if ignore_no_bb and res['vehicle']['score'] == 0.0:
continue
if ignore_list:
skip_blur = False
for ignore_regex in ignore_list:
if re.search(ignore_regex, res['plate']):
skip_blur = True
break
if skip_blur:
continue
b = res['box']
width, height = b['xmax'] - b['xmin'], b['ymax'] - b['ymin']
crop_box = (b['xmin'], b['ymin'], b['xmax'], b['ymax'])
ic = im.crop(crop_box)
# Increase amount of blur with size of bounding box
blur_image = ic.filter(
ImageFilter.GaussianBlur(radius=math.sqrt(width * height) * .3 *
blur_amount / 10))
im.paste(blur_image, crop_box)
return im
def bb_iou(a, b):
# determine the (x, y)-coordinates of the intersection rectangle
x_a = max(a["xmin"], b["xmin"])
y_a = max(a["ymin"], b["ymin"])
x_b = min(a["xmax"], b["xmax"])
y_b = min(a["ymax"], b["ymax"])
# compute the area of both the prediction and ground-truth
# rectangles
area_a = (a["xmax"] - a["xmin"]) * (a["ymax"] - a["ymin"])
area_b = (b["xmax"] - b["xmin"]) * (b["ymax"] - b["ymin"])
# compute the area of intersection rectangle
area_inter = max(0, x_b - x_a) * max(0, y_b - y_a)
return area_inter / float(max(area_a + area_b - area_inter, 1))
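# Worked example: a = {"xmin": 0, "ymin": 0, "xmax": 2, "ymax": 2} and
# b = {"xmin": 1, "ymin": 1, "xmax": 3, "ymax": 3} intersect on a 1x1 square,
# so bb_iou(a, b) = 1 / (4 + 4 - 1) ≈ 0.143.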
def clean_objs(objects, threshold=.1):
# Only keep the ones with best score or no overlap
for o1, o2 in combinations(objects, 2):
if 'remove' in o1 or 'remove' in o2 or bb_iou(o1['box'],
o2['box']) <= threshold:
continue
if o1['score'] > o2['score']:
o2['remove'] = True
else:
o1['remove'] = True
return [x for x in objects if 'remove' not in x]
def merge_results(images):
result = dict(results=[])
for data in images:
for item in data['prediction']['results']:
result['results'].append(item)
for b in [item['box'], item['vehicle'].get("box", {})]:
b['ymin'] += data['y']
b['xmin'] += data['x']
b['ymax'] += data['y']
b['xmax'] += data['x']
result['results'] = clean_objs(result['results'])
return result
def inside(a, b):
return (a["xmin"] > b["xmin"] and a["ymin"] > b["ymin"] and
a["xmax"] < b["xmax"] and a["ymax"] < b["ymax"])
def post_processing(results):
new_list = []
for item in results['results']:
if item['score'] < .2 and any([
inside(x['box'], item['box'])
for x in results['results']
if x != item
]):
continue
new_list.append(item)
results['results'] = new_list
return results
def process_image(path, args, i):
config = dict(threshold_d=args.detection_threshold,
threshold_o=args.ocr_threshold,
mode='redaction')
# Predictions
source_im = Image.open(path)
if source_im.mode != 'RGB':
source_im = source_im.convert('RGB')
images = [((0, 0), source_im)] # Entire image
# Top left and top right crops
if args.split_image:
y = 0
win_size = .55
width, height = source_im.width * win_size, source_im.height * win_size
for x in [0, int((1 - win_size) * source_im.width)]:
images.append(((x, y), source_im.crop(
(x, y, x + width, y + height))))
# Inference
results = []
for (x, y), im in images:
im_bytes = io.BytesIO()
im.save(im_bytes, 'JPEG', quality=95)
im_bytes.seek(0)
im_results = recognition_api(im_bytes,
args.regions,
args.api_key,
args.sdk_url,
config=config)
results.append(dict(prediction=im_results, x=x, y=y))
results = post_processing(merge_results(results))
results['filename'] = Path(path).name
# Set bounding box padding
for item in results['results']:
# Decrease padding size for large bounding boxes
b = item['box']
width, height = b['xmax'] - b['xmin'], b['ymax'] - b['ymin']
padding_x = int(
max(0, width * (.3 * math.exp(-10 * width / source_im.width))))
padding_y = int(
max(0, height * (.3 * math.exp(-10 * height / source_im.height))))
b['xmin'] = b['xmin'] - padding_x
b['ymin'] = b['ymin'] - padding_y
b['xmax'] = b['xmax'] + padding_x
b['ymax'] = b['ymax'] + padding_y
if args.show_boxes or args.save_blurred:
im = blur(source_im,
5,
results,
ignore_no_bb=args.ignore_no_bb,
ignore_list=args.ignore_regexp)
if args.show_boxes:
im.show()
if args.save_blurred:
filename = Path(path)
im.save(filename.parent / ('%s_blurred%s' %
(filename.stem, filename.suffix)))
if 0:
draw_bb(source_im, results['results']).show()
return results
def custom_args(parser):
parser.epilog += 'To analyse the image for redaction: python number_plate_redaction.py --api-key MY_API_KEY --split-image /tmp/car.jpg'
parser.add_argument(
'--split-image',
action='store_true',
help=
'Do extra lookups on parts of the image. Useful on high resolution images.'
)
parser.add_argument('--show-boxes',
action='store_true',
help='Display the resulting blurred image.')
parser.add_argument(
'--save-blurred',
action='store_true',
help='Blur license plates and save image in filename_blurred.jpg.')
parser.add_argument(
'--ignore-regexp',
action='append',
help='Plate regex to ignore during blur. Usually invalid plate numbers.'
)
parser.add_argument(
'--ignore-no-bb',
action='store_true',
help='Ignore detections without a vehicle bounding box during blur.')
parser.add_argument(
'--detection-threshold',
type=float,
default=.2,
help='Keep all detections above this threshold. Between 0 and 1.')
parser.add_argument(
'--ocr-threshold',
type=float,
default=.5,
help=
'Keep all plates if the characters reading score is above this threshold. Between 0 and 1.'
)
def main():
args = parse_arguments(custom_args)
result = []
for i, path in enumerate(args.files):
if Path(path).is_file():
result.append(process_image(path, args, i))
if 0:
for im_result in result:
for i, x in enumerate(im_result['results']):
im_result['results'][i] = dict(dscore=x['dscore'],
score=x['score'],
box=x['box'])
print(json.dumps(result, indent=2))
if __name__ == '__main__':
main()
| 34.596154 | 140 | 0.531184 | 1,139 | 8,995 | 4.060579 | 0.217735 | 0.014054 | 0.02573 | 0.016432 | 0.086486 | 0.067459 | 0.067459 | 0.043243 | 0.014703 | 0.014703 | 0 | 0.015431 | 0.329961 | 8,995 | 259 | 141 | 34.72973 | 0.75195 | 0.045359 | 0 | 0.150235 | 0 | 0.004695 | 0.143457 | 0.01073 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046948 | false | 0 | 0.037559 | 0.004695 | 0.122066 | 0.004695 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d163f1d2396b5d8d3b6886c2d5299e1f66284e91 | 7,229 | py | Python | events/admin.py | roberzguerra/rover | 14b6a7a47e75d6b6f8ca44fc0eb1cca500e0eecb | ["BSD-3-Clause"] | 2 | 2015-12-02T17:26:12.000Z | 2015-12-03T00:43:14.000Z | events/admin.py | roberzguerra/rover | 14b6a7a47e75d6b6f8ca44fc0eb1cca500e0eecb | ["BSD-3-Clause"] | 1 | 2015-12-02T17:26:43.000Z | 2016-03-15T00:01:20.000Z | events/admin.py | roberzguerra/rover | 14b6a7a47e75d6b6f8ca44fc0eb1cca500e0eecb | ["BSD-3-Clause"] | null | null | null | # -*- coding:utf-8 -*-
from copy import deepcopy
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from mezzanine.core.admin import DisplayableAdmin, TabularDynamicInlineAdmin
from ajax_select.admin import AjaxSelectAdminTabularInline
from ajax_select.fields import autoselect_fields_check_can_add
from events.forms import EventForm, \
BlockForm, \
EventBlockInlineForm, \
ProgramationForm, \
EventProgramationInlineForm
from events.models import Event, Block, EventBlock, Programation, EventProgramation
from ajax_select import register, LookupChannel
@register('event_blocks')
class EventBlockLookup(LookupChannel):
"""
    Lookup for EventBlock
"""
model = Block
def get_query(self, q, request):
return self.model.objects.filter(title__icontains=q)
def format_item_display(self, item):
return u"<span class='tag'>%s</span>" % item.title
@register('event_programations')
class EventProgramationLookup(LookupChannel):
"""
    Lookup for EventProgramation
"""
model = Programation
def get_query(self, q, request):
return self.model.objects.filter(Q(title__icontains=q) | Q(date_time__icontains=q))
def format_item_display(self, item):
return u"<span class='tag'>%s</span>" % (item)
programation_fieldsets = deepcopy(DisplayableAdmin.fieldsets)
programation_fieldsets[0][1]["fields"].extend(['date_time','image','content'])
programation_list_display = ["title", "status", "description", "events_using"] # "admin_link"]
class ProgramationAdmin(DisplayableAdmin):
"""
    Admin for Programations
"""
form = ProgramationForm
fieldsets = programation_fieldsets
list_display = programation_list_display
# def link_event_change(self, obj):
# html = u' - '
# if obj.event:
# html = u'<a href="%s">%s</a>' % (obj.event.get_admin_url(), obj.event)
# return html
# link_event_change.allow_tags = True
# link_event_change.short_description = _(u"Evento")
def get_form(self, request, obj=None, **kwargs):
form = super(ProgramationAdmin, self).get_form(request, obj, **kwargs)
autoselect_fields_check_can_add(form, self.model, request.user)
return form
def events_using(self, obj):
"""
        Builds links to the events that use this programation
:param obj:
:return:
"""
html = ''
event_programations = obj.eventprogramation_set.all()
event_ids = []
for event_programation in event_programations:
event_ids.append(event_programation.event.id)
if event_ids:
events = Event.objects.filter(id__in=event_ids)
for event in events:
html += "<a href=\"%s\">%s</a>, " % (event.get_admin_url(), event)
return mark_safe(html)
events_using.allow_tags = True
    events_using.short_description = _(u"Used in events")
block_fieldsets = deepcopy(DisplayableAdmin.fieldsets)
block_fieldsets[0][1]["fields"].extend(['content'])
block_list_display = ["title", "status", "description", "events_using"] # "admin_link"]
class BlockAdmin(DisplayableAdmin):
"""
    Admin for Blocks
"""
form = BlockForm
fieldsets = block_fieldsets
list_display = block_list_display
# def link_event_change(self, obj):
# html = u' - '
# if obj.event:
# html = u'<a href="%s">%s</a>' % (obj.event.get_admin_url(), obj.event)
# return html
# link_event_change.allow_tags = True
# link_event_change.short_description = _(u"Evento")
def get_form(self, request, obj=None, **kwargs):
form = super(BlockAdmin, self).get_form(request, obj, **kwargs)
autoselect_fields_check_can_add(form, self.model, request.user)
return form
def events_using(self, obj):
"""
        Builds links to the events that use this block
:param obj:
:return:
"""
html = ''
event_blocks = obj.eventblock_set.all()
event_ids = []
for event_block in event_blocks:
event_ids.append(event_block.event.id)
if event_ids:
events = Event.objects.filter(id__in=event_ids)
for event in events:
html += "<a href=\"%s\">%s</a>, " % (event.get_admin_url(), event)
return mark_safe(html)
events_using.allow_tags = True
    events_using.short_description = _(u"Used in events")
class EventBlockInline(TabularDynamicInlineAdmin, AjaxSelectAdminTabularInline):
model = EventBlock
form = EventBlockInlineForm
ordering = ('_order',)
fieldsets = (
(None, {
#"fields": ["name", "status", "image", "date_time", "content"],
"fields": ["block", "type", "link_menu", "status", "_order"],
}),
)
    # Admin customization test
template = "admin/includes/event_dynamic_inline_tabular.html"
def get_form(self, request, obj=None, **kwargs):
form = super(EventBlockInline, self).get_form(request, obj, **kwargs)
autoselect_fields_check_can_add(form, self.model, request.user)
return form
class EventProgramationInline(TabularDynamicInlineAdmin, AjaxSelectAdminTabularInline):
model = EventProgramation
form = EventProgramationInlineForm
ordering = ('_order',)
fieldsets = (
(None, {
#"fields": ["name", "status", "image", "date_time", "content"],
"fields": ["programation", "status", "_order"],
}),
)
    # Admin customization test
template = "admin/includes/event_dynamic_inline_tabular.html"
def get_form(self, request, obj=None, **kwargs):
        form = super(EventProgramationInline, self).get_form(request, obj, **kwargs)
autoselect_fields_check_can_add(form, self.model, request.user)
return form
event_fieldsets = deepcopy(DisplayableAdmin.fieldsets)
event_fieldsets[0][1]["fields"].extend([
'event_title_menu','event_description_short', 'event_logo', 'event_title', 'event_image_background',
'event_social_image', 'code',
])
event_list_display = ["title", "status", "preview_link"]
class EventAdmin(DisplayableAdmin):
"""
    Admin for Events
"""
form = EventForm
fieldsets = event_fieldsets
list_display = event_list_display
#filter_horizontal = ("categories",)
inlines = [
EventBlockInline,
EventProgramationInline,
]
def get_form(self, request, obj=None, **kwargs):
form = super(EventAdmin, self).get_form(request, obj, **kwargs)
autoselect_fields_check_can_add(form, self.model, request.user)
return form
def preview_link(self, obj):
        return u'<a target="_blank" href="%s">%s</a>' % (reverse('events:event-preview', args=(obj.slug,)), _(u"Preview"))
preview_link.allow_tags = True
    preview_link.short_description = _(u"Preview")
admin.site.register(Event, EventAdmin)
admin.site.register(Block, BlockAdmin)
admin.site.register(Programation, ProgramationAdmin) | 33.467593 | 129 | 0.670632 | 823 | 7,229 | 5.673147 | 0.190765 | 0.014993 | 0.026987 | 0.030842 | 0.529235 | 0.464339 | 0.445063 | 0.445063 | 0.445063 | 0.445063 | 0 | 0.001223 | 0.208466 | 7,229 | 216 | 130 | 33.467593 | 0.81475 | 0.142343 | 0 | 0.386364 | 0 | 0 | 0.110023 | 0.030368 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0.030303 | 0.492424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d1640c3248a566d8c4b0b2e5d95f52de1ad31f75 | 4,904 | py | Python | api/calculate_frequencies.py | GTJuniorDesign0100-2020/anti-malarial-MCMC-bayesian-algorithm | 8ab95c9b65275096dd86268fbb99bb37b6806e05 | ["MIT"] | 1 | 2020-10-28T18:19:21.000Z | 2020-10-28T18:19:21.000Z | api/calculate_frequencies.py | GTJuniorDesign0100-2020/anti-malarial-MCMC-bayesian-algorithm | 8ab95c9b65275096dd86268fbb99bb37b6806e05 | ["MIT"] | 45 | 2020-09-03T22:17:36.000Z | 2020-12-06T02:51:46.000Z | api/calculate_frequencies.py | GTJuniorDesign0100-2020/anti-malarial-MCMC-bayesian-algorithm | 8ab95c9b65275096dd86268fbb99bb37b6806e05 | ["MIT"] | 1 | 2020-12-07T16:47:54.000Z | 2020-12-07T16:47:54.000Z | import pandas as pd
import numpy as np
import math
import statistics
import api.recrudescence_utils as recrudescence_utils
def calculate_frequencies3(genotypedata, alleles_definitions):
'''
Calculate frequencies of alleles
    Using the input data table and alleles_definitions,
    which contain the lower and upper break values of the allele data,
    this method calculates the frequency and the variability of the raw allele
    data within each pair of lower and upper break values.
    Returns a Frequencies object that contains the following attributes:
    - lengths
        type: numpy array
        description: the length of each list of frequencies
    - matrix
        type: numpy matrix
        description: the matrix (number of locinames by number of alleles) that contains the frequency values
    - variability
        type: numpy array
        description: mean SD of within-allele length
:param genotypedata
type: pandas dataframe
description: genetic data, where first column (name 'Sample ID') has the id of the sample,
and rest of columns have the format nameoflocus_X, where X is the xth allele detected
:param alleles_definitions
type: list that contains dataframe
description: list of length number of loci
each entry is a number of alleles by 2 matrix (1st column = lower bound, 2nd column = upper bound)
'''
locinames = recrudescence_utils.get_locinames(genotypedata)
nloci = len(locinames)
frequencies = []
variability = []
n = 0
for j in range(nloci):
        # retrieve raw alleles (each index holds all raw allele data sharing the same locus name)
        # e.g. all data with the X313 lociname prefix in index 0
loci_name_prefix, last_index = locinames.get(j)
raw_alleles, n = recrudescence_utils.get_RawAlleles(genotypedata, n, last_index)
# lower = list of lower bound values
# high = list of upper bound values
low = alleles_definitions[j]["0"]
high = alleles_definitions[j]["1"]
# length of the lower bound and upper bound list
nrows = len(alleles_definitions[j])
sum_list, meanSD = _get_sumList(nrows, raw_alleles, low, high)
frequencies.append(sum_list)
variability.append(meanSD)
frequencies[j] = frequencies[j] / len(raw_alleles)
# switch freq_length and variability list to numpy array
freq_length = np.asarray([len(frequencies[j]) for j in range(len(frequencies))])
variability = np.asarray(variability)
ncol = max(freq_length)
# create matrix with frequency values from frequencies list
freqmatrix = _create_frequencyMatrix(nloci, ncol, frequencies)
# final result
ret = _pack_result(freq_length, freqmatrix, variability)
return ret
def _get_sumList(nrows: int, raw_alleles: list, low: pd.core.series.Series, high: pd.core.series.Series):
'''
Returns a numpy array of the number of allele values that is between lower and upper bound values
Also returns a mean of standard deviation of that numpy array.
:param nrows: The number of rows of the alleles_definition
:param raw_alleles: The allele values retrieved from the input file
:param low: The list of the lower bound values from the alleles_definition
:param high: The list of the upper bound values from the alleles_definition
'''
sum_list = [] # needed for storing frequency values
sd_list = [] # standard deviation
for i in range(nrows):
tf_table = []
result_sum = 0
for allele in raw_alleles:
eval = allele > low[i] and allele <= high[i]
tf_table.append(eval)
if eval:
result_sum += 1
sum_list.append(result_sum)
true_items = []
for eval_i in range(len(tf_table)):
if tf_table[eval_i]:
true_items.append(raw_alleles[eval_i])
if len(true_items) > 1:
sd_list.append(statistics.stdev(true_items))
sum_list = np.array(sum_list)
# mean of standard deviation
meanSD = 0
if (len(sd_list) > 0):
meanSD = np.mean(sd_list)
return sum_list, meanSD
def _create_frequencyMatrix(nloci: int, ncol: np.int64, frequencies: list):
'''
Turn 1D frequencies list into 2D numpy matrix
:param nloci: The number of rows
:param ncol: The number of columns
:param frequencies: The 1D list that contains the frequency values
'''
# initialize frequency matrix with zeros
freqmatrix = np.zeros([nloci, ncol])
# fill out each box in the frequency matrix with frequency values from frequencies list
for j in range(nloci):
for i in range(len(frequencies[j])):
freqmatrix[j][i] = frequencies[j][i]
return freqmatrix
def _pack_result(freq_lengths: np.ndarray, freqmatrix: np.ndarray, variability: np.ndarray):
'''
Returns a Frequency object that contains all frequency-related matrices and arrays
'''
frequency_result = Frequencies(freq_lengths, freqmatrix, variability)
return frequency_result
class Frequencies:
'''
Holds the related results of calculated frequencies
'''
def __init__(self, freq_lengths: np.ndarray, freqmatrix: np.ndarray, variability: np.ndarray):
self.lengths = freq_lengths
self.matrix = freqmatrix
self.variability = variability
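# Usage sketch (hypothetical shapes, assuming 3 loci with at most 5 defined alleles):
# freqs = calculate_frequencies3(genotypedata, alleles_definitions) yields
# freqs.lengths of shape (3,), freqs.matrix of shape (3, 5) holding per-bin
# fractions, and freqs.variability of shape (3,) with the mean within-bin SDs.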
| 32.263158 | 105 | 0.753263 | 720 | 4,904 | 5.020833 | 0.231944 | 0.024896 | 0.012172 | 0.009129 | 0.115076 | 0.076349 | 0.056985 | 0.032642 | 0.032642 | 0.032642 | 0 | 0.005908 | 0.171697 | 4,904 | 151 | 106 | 32.476821 | 0.884047 | 0.523654 | 0 | 0.03125 | 0 | 0 | 0.000831 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.078125 | 0 | 0.234375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d165298f9d6c2dd4e54aa2831a62b33e2abb9ed6 | 305 | py | Python | linked_lists/linked_list_middle.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | ["Apache-2.0"] | null | null | null | linked_lists/linked_list_middle.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | ["Apache-2.0"] | null | null | null | linked_lists/linked_list_middle.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | ["Apache-2.0"] | null | null | null | #Reference:
#LinkedListNode {
# int val
# LinkedListNode next
#}
def find_middle_node(head):
if not head:
return None
node = head
mid = node
i = 0
while node.next:
if not i % 2:
mid = mid.next
node = node.next
i += 1
return mid
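# Usage sketch (assumes the LinkedListNode shape in the reference above):
# for 1 -> 2 -> 3 -> 4 -> 5, find_middle_node returns the node holding 3;
# for the even-length list 1 -> 2 -> 3 -> 4 it returns the second middle, 3.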
| 14.52381 | 27 | 0.521311 | 39 | 305 | 4.025641 | 0.512821 | 0.101911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016304 | 0.396721 | 305 | 20 | 28 | 15.25 | 0.836957 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d1657091800903e794906206a91e8aff42673361 | 178 | py | Python | beginner_contest/172/D.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | ["MIT"] | null | null | null | beginner_contest/172/D.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | ["MIT"] | null | null | null | beginner_contest/172/D.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | ["MIT"] | null | null | null | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n = int(input())
ans = 0
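# The loop below computes sum(i * j * (j + 1) // 2 for i in 1..n, with j = n // i):
# the multiples of i up to n are i, 2i, ..., ji and contribute i * (1 + ... + j),
# so the total equals sum_{K=1..n} K * d(K), where d(K) is K's divisor count.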
for i in range(1, n+1):
j = n // i
ans += (j * (j+1) * i) // 2
print(ans)
| 14.833333 | 31 | 0.55618 | 33 | 178 | 3 | 0.606061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059701 | 0.247191 | 178 | 11 | 32 | 16.181818 | 0.679104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d165bc6f340cc6a3b2581ecbbfc90108635787e6 | 2,056 | py | Python | demo/writing_xml.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | ["Apache-2.0"] | 6 | 2015-01-30T03:50:36.000Z | 2022-03-20T16:09:58.000Z | demo/writing_xml.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | ["Apache-2.0"] | 2 | 2015-02-04T17:18:47.000Z | 2019-09-27T23:39:52.000Z | demo/writing_xml.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | ["Apache-2.0"] | 6 | 2015-02-04T16:16:18.000Z | 2019-10-30T20:07:48.000Z | #Import the basic writer class for users
from amara import writer
w = writer(indent=u"yes") #Operates in streaming mode
w.start_document()
w.start_element(u'xsa')
w.start_element(u'vendor')
#Element with simple text (#PCDATA) content
w.simple_element(u'name', content=u'Centigrade systems')
#Note writer.text(content) still works
w.simple_element(u'email', content=u"info@centigrade.bogus")
w.end_element(u'vendor')
#Element with an attribute
w.start_element(u'product', attributes={u'id': u"100\u00B0"})
#Note w.attribute(name, value, namespace=None) still works
w.simple_element(u'name', content=u"100\u00B0 Server")
#XML fragment
#w.xml_fragment('<version>1.0</version><last-release>20030401</last-release>')
#Empty element
w.simple_element(u'changes')
w.end_element(u'product')
w.end_element(u'xsa')
w.end_document()
print
#Now an HTML example
w = writer(method=u"html") #indent=u"yes" is default in this mode
w.start_document()
w.start_element(u'html')
w.start_element(u'head')
w.simple_element(u'title', content=u'Hello')
w.end_element(u'head')
w.start_element(u'body')
#w.start_element(u'body', attributes={u'id': u"100\u00B0"})
w.simple_element(u'p', content=u"World")
#XML fragment
#w.xml_fragment('<version>1.0</version><last-release>20030401</last-release>')
#Empty element
w.simple_element(u'br')
w.end_element(u'html')
w.end_document()
print
from amara.writers.struct import *
w = structwriter(indent=u"yes").feed(
    ROOT(
        E(u'doc',
            E(u'a', u'hello'),
            #E('float-content', 3.14),
            E((None, u'b'), u'this is unicode: \u221e'),
            #E(u'list-content', [E('child', 'a'), RAW('<raw-node message="hello"/>'), E('child', 'b')]),
            E(u'c', {u'parrot': u'dead', u'spam': u'eggs'}),
            E((None, u'c'), {u'parrot': u'dead', (None, u'spam'): u'eggs'}, u'again'),
            E(u'gen-content', (E('node', x) for x in range(6))),
            E(u'monty', E('spam', 'eggs')),
            E(u'empty'),
            E(u'func', lambda: u'this is a func'),
            #E(u'raw-xml-content', RAW('<a>b</a>', '<c>d</c>')) #The multiple raw text bits are just concatenated
        )
    ))
print
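
#Added note (not part of the original demo): the first streaming-writer block
#above should emit roughly the following; exact whitespace and attribute
#escaping depend on the amara writer defaults:
#
# <?xml version="1.0" encoding="utf-8"?>
# <xsa>
#   <vendor>
#     <name>Centigrade systems</name>
#     <email>info@centigrade.bogus</email>
#   </vendor>
#   <product id="100°">
#     <name>100° Server</name>
#     <changes/>
#   </product>
# </xsa>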
| 31.630769 | 105 | 0.683366 | 361 | 2,056 | 3.822715 | 0.307479 | 0.110145 | 0.065942 | 0.071014 | 0.364493 | 0.302174 | 0.224638 | 0.185507 | 0.13913 | 0.13913 | 0 | 0.024523 | 0.10749 | 2,056 | 64 | 106 | 32.125 | 0.72752 | 0.368677 | 0 | 0.166667 | 0 | 0 | 0.229444 | 0.016445 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d16615251ad54e500022f66f41091dcdb069f334 | 4,295 | py | Python | opencood/utils/eval_utils.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | opencood/utils/eval_utils.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | opencood/utils/eval_utils.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Author: Runsheng Xu <rxx3386@ucla.edu>
# License: TDG-Attribution-NonCommercial-NoDistrib
import os
import numpy as np
import torch
from opencood.utils import common_utils
from opencood.hypes_yaml import yaml_utils
def voc_ap(rec, prec):
    """
    VOC 2010 Average Precision.
    """
    rec.insert(0, 0.0)
    rec.append(1.0)
    mrec = rec[:]

    prec.insert(0, 0.0)
    prec.append(0.0)
    mpre = prec[:]

    for i in range(len(mpre) - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])

    i_list = []
    for i in range(1, len(mrec)):
        if mrec[i] != mrec[i - 1]:
            i_list.append(i)

    ap = 0.0
    for i in i_list:
        ap += ((mrec[i] - mrec[i - 1]) * mpre[i])
    return ap, mrec, mpre
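

# --- Added illustration (not part of the original file): a tiny hand-checkable
# --- run of voc_ap. With a single detection at recall 1.0 / precision 1.0, the
# --- interpolated precision envelope is 1.0 across the recall range, so AP = 1.0:
# >>> ap, mrec, mpre = voc_ap([1.0], [1.0])
# >>> abs(ap - 1.0) < 1e-9
# True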
def caluclate_tp_fp(det_boxes, det_score, gt_boxes, result_stat, iou_thresh):
    """
    Calculate the true positive and false positive numbers of the current
    frames.

    Parameters
    ----------
    det_boxes : torch.Tensor
        The detection bounding box, shape (N, 8, 3) or (N, 4, 2).
    det_score : torch.Tensor
        The confidence score for each predicted bounding box.
    gt_boxes : torch.Tensor
        The groundtruth bounding box.
    result_stat : dict
        A dictionary containing the fp, tp and gt numbers.
    iou_thresh : float
        The iou thresh.
    """
    # fp, tp and gt in the current frame
    fp = []
    tp = []
    gt = gt_boxes.shape[0]
    if det_boxes is not None:
        # convert bounding boxes to numpy array
        det_boxes = common_utils.torch_tensor_to_numpy(det_boxes)
        det_score = common_utils.torch_tensor_to_numpy(det_score)
        gt_boxes = common_utils.torch_tensor_to_numpy(gt_boxes)

        # sort the prediction bounding boxes by score
        score_order_descend = np.argsort(-det_score)
        det_polygon_list = list(common_utils.convert_format(det_boxes))
        gt_polygon_list = list(common_utils.convert_format(gt_boxes))

        # match prediction and gt bounding box
        for i in range(score_order_descend.shape[0]):
            det_polygon = det_polygon_list[score_order_descend[i]]
            ious = common_utils.compute_iou(det_polygon, gt_polygon_list)
            if len(gt_polygon_list) == 0 or np.max(ious) < iou_thresh:
                fp.append(1)
                tp.append(0)
                continue

            fp.append(0)
            tp.append(1)

            gt_index = np.argmax(ious)
            gt_polygon_list.pop(gt_index)

    result_stat[iou_thresh]['fp'] += fp
    result_stat[iou_thresh]['tp'] += tp
    result_stat[iou_thresh]['gt'] += gt
def calculate_ap(result_stat, iou):
    """
    Calculate the average precision and recall, and save them into a txt.

    Parameters
    ----------
    result_stat : dict
        A dictionary containing the fp, tp and gt numbers.
    iou : float
    """
    iou_5 = result_stat[iou]

    fp = iou_5['fp']
    tp = iou_5['tp']
    assert len(fp) == len(tp)

    gt_total = iou_5['gt']

    cumsum = 0
    for idx, val in enumerate(fp):
        fp[idx] += cumsum
        cumsum += val

    cumsum = 0
    for idx, val in enumerate(tp):
        tp[idx] += cumsum
        cumsum += val

    rec = tp[:]
    for idx, val in enumerate(tp):
        rec[idx] = float(tp[idx]) / gt_total

    prec = tp[:]
    for idx, val in enumerate(tp):
        prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])

    ap, mrec, mprec = voc_ap(rec[:], prec[:])
    return ap, mrec, mprec
def eval_final_results(result_stat, save_path):
    dump_dict = {}

    ap_30, mrec_30, mpre_30 = calculate_ap(result_stat, 0.30)
    ap_50, mrec_50, mpre_50 = calculate_ap(result_stat, 0.50)
    ap_70, mrec_70, mpre_70 = calculate_ap(result_stat, 0.70)

    dump_dict.update({'ap30': ap_30,
                      'ap_50': ap_50,
                      'ap_70': ap_70,
                      'mpre_50': mpre_50,
                      'mrec_50': mrec_50,
                      'mpre_70': mpre_70,
                      'mrec_70': mrec_70,
                      })
    yaml_utils.save_yaml(dump_dict, os.path.join(save_path, 'eval.yaml'))

    print('The Average Precision at IOU 0.3 is %.2f, '
          'The Average Precision at IOU 0.5 is %.2f, '
          'The Average Precision at IOU 0.7 is %.2f' % (ap_30, ap_50, ap_70))
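

# --- Added usage sketch (not part of the original module): how the functions
# --- above fit together; the frame iterable and save path are placeholders.
# result_stat = {iou: {'tp': [], 'fp': [], 'gt': 0} for iou in (0.30, 0.50, 0.70)}
# for det_boxes, det_score, gt_boxes in frames:        # hypothetical iterable
#     for iou in (0.30, 0.50, 0.70):
#         caluclate_tp_fp(det_boxes, det_score, gt_boxes, result_stat, iou)
# eval_final_results(result_stat, save_path='./logs')  # writes ./logs/eval.yaml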
| 27.532051 | 77 | 0.589523 | 628 | 4,295 | 3.826433 | 0.224522 | 0.049938 | 0.032459 | 0.031627 | 0.240533 | 0.195589 | 0.184353 | 0.066583 | 0.042447 | 0.042447 | 0 | 0.037904 | 0.293597 | 4,295 | 155 | 78 | 27.709677 | 0.75412 | 0.208382 | 0 | 0.081395 | 0 | 0 | 0.057082 | 0 | 0 | 0 | 0 | 0 | 0.011628 | 1 | 0.046512 | false | 0 | 0.05814 | 0 | 0.127907 | 0.011628 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d166cf6c9589c4871225d45b81518e6b37bcc726 | 1,568 | py | Python | caj2pdf.py | ElonH/caj2pdf_gui | 48fdab29144f77bd2360dce7457a252f859c13e4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 127 | 2018-11-08T08:19:39.000Z | 2022-03-12T15:19:26.000Z | caj2pdf.py | kennylx/caj2pdf_gui | 48fdab29144f77bd2360dce7457a252f859c13e4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | caj2pdf.py | kennylx/caj2pdf_gui | 48fdab29144f77bd2360dce7457a252f859c13e4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 23 | 2019-04-01T02:54:31.000Z | 2022-02-26T06:06:07.000Z | #!/usr/bin/env python3
import os
import argparse
from cajparser import CAJParser
from utils import add_outlines
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help="commands", dest="command")
show_parser = subparsers.add_parser("show", help="Show the information of the CAJ file.")
show_parser.add_argument("input", help="Path to the CAJ file.")
convert_parser = subparsers.add_parser("convert", help="Convert the CAJ file to PDF file.")
convert_parser.add_argument("input", help="Path to the CAJ file.")
convert_parser.add_argument("-o", "--output", help="Output path to the PDF file.", required=True)
outlines_parser = subparsers.add_parser("outlines", help="Extract outlines from the CAJ file and add it to PDF file.")
outlines_parser.add_argument("input", help="Path to the CAJ file.")
outlines_parser.add_argument("-o", "--output", help="Path to the PDF file.", required=True)
args = parser.parse_args()
if args.command == "show":
caj = CAJParser(args.input)
print("File: {0}\nType: {1}\nPage count: {2}\nOutlines count: {3}\n".format(
args.input,
caj.format,
caj.page_num,
caj.toc_num
))
if args.command == "convert":
caj = CAJParser(args.input)
caj.convert(args.output)
if args.command == "outlines":
caj = CAJParser(args.input)
toc = caj.get_toc()
add_outlines(toc, args.output, "tmp.pdf")
os.replace("tmp.pdf", args.output)
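
# --- Added usage note (not part of the original script); file names below are
# --- placeholders:
#   python caj2pdf.py show paper.caj
#   python caj2pdf.py convert paper.caj -o paper.pdf
#   python caj2pdf.py outlines paper.caj -o paper.pdf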
| 36.465116 | 122 | 0.657526 | 212 | 1,568 | 4.716981 | 0.283019 | 0.054 | 0.06 | 0.052 | 0.289 | 0.258 | 0.208 | 0.152 | 0.152 | 0.152 | 0 | 0.004023 | 0.20727 | 1,568 | 42 | 123 | 37.333333 | 0.800483 | 0.013393 | 0 | 0.09375 | 0 | 0.03125 | 0.265201 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d169e15fc76abd672102835ffb60d42da63ce07b | 3,764 | py | Python | razor/pool.py | lahiri-phdworks/OCCAM | 21919b0819606b8f76a391965151fba6df86cee7 | [
"BSD-3-Clause"
] | 1 | 2021-04-27T01:33:01.000Z | 2021-04-27T01:33:01.000Z | razor/pool.py | lahiri-phdworks/OCCAM | 21919b0819606b8f76a391965151fba6df86cee7 | [
"BSD-3-Clause"
] | 1 | 2020-07-22T21:59:54.000Z | 2020-07-22T21:59:54.000Z | razor/pool.py | lahiri-phdworks/OCCAM | 21919b0819606b8f76a391965151fba6df86cee7 | [
"BSD-3-Clause"
] | 1 | 2020-11-25T12:24:36.000Z | 2020-11-25T12:24:36.000Z | """
OCCAM
Copyright (c) 2011-2017, SRI International
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of SRI International nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Thread pool for processing modules in parallel.
"""
from Queue import Queue
import threading
import traceback
import sys
class Worker(threading.Thread):
    """ A daemon worker thread.
    """

    def __init__(self, q):
        """ Initializes a thread in the queue q.
        """
        threading.Thread.__init__(self)
        self.daemon = True
        self.queue = q

    def run(self):
        """ The thread main.
        """
        while True:
            f = self.queue.get(True)
            f()


class ThreadPool(object):
    """ A pool of daemon worker threads.
    """

    def __init__(self, count=3):
        """ Initializes a pool queue.
        """
        self.queue = Queue()
        self.workers = None
        self.count = count

    def _start(self):
        if self.workers is None:
            # spawn `self.count` workers; the original built a single Worker
            # and left `count` unused, which looks like an oversight
            self.workers = [Worker(self.queue) for _ in range(self.count)]
            for w in self.workers:
                w.start()

    def map(self, f, args):
        self._start()
        result = [None for i in range(0, len(args))]
        sem = threading.Semaphore(0)

        def func(i):
            def rf():
                try:
                    result[i] = f(args[i])
                except Exception:
                    separator = '-' * 60
                    print("Exception in worker for {0}:".format(f.func_doc))
                    print(separator)
                    traceback.print_exc(file=sys.stderr)
                    print(separator)
                    sys.exit(1)  # iam: was _exit; but are we really that low level?
                finally:
                    sem.release()
            return rf

        for i in range(0, len(args)):
            self.queue.put(func(i))

        for _ in args:
            sem.acquire(True)

        return result

    def shutdown(self):
        pass


POOL = ThreadPool(3)


def getDefaultPool():
    return POOL


def InParallel(f, args, pool=None):
    import datetime
    dt = datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')
    sys.stderr.write("[%s] Starting %s...\n" % (dt, f.func_doc))
    if pool is None:
        pool = getDefaultPool()
    result = pool.map(f, args)
    sys.stderr.write("done\n")
    return result


def shutdownDefaultPool():
    POOL.shutdown()
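

# --- Added usage sketch (not part of the original module). InParallel logs the
# --- function's docstring, so the worker should carry one; `square` here is a
# --- hypothetical example.
# def square(x):
#     """square"""
#     return x * x
#
# results = InParallel(square, [1, 2, 3])   # -> [1, 4, 9]
# shutdownDefaultPool()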
| 31.366667 | 83 | 0.632837 | 486 | 3,764 | 4.86214 | 0.434156 | 0.019044 | 0.014388 | 0.019467 | 0.093948 | 0.073635 | 0.073635 | 0.057554 | 0.057554 | 0.057554 | 0 | 0.00632 | 0.285335 | 3,764 | 119 | 84 | 31.630252 | 0.872119 | 0.472635 | 0 | 0.064516 | 0 | 0 | 0.037981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.177419 | false | 0.016129 | 0.080645 | 0.016129 | 0.354839 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d16a87fa6f7d9f74b352a27443e5a31a5a09e5e7 | 6,729 | py | Python | cplusplus/level1_single_api/4_op_dev/1_custom_op/tbe/impl/conv2d_tik.py | coldenheart/123 | 798768bba7dfaef051a46d8e1df48bc671de5213 | [
"Apache-2.0"
] | 25 | 2020-11-20T09:01:35.000Z | 2022-03-29T10:35:38.000Z | cplusplus/level1_single_api/4_op_dev/1_custom_op/tbe/impl/conv2d_tik.py | coldenheart/123 | 798768bba7dfaef051a46d8e1df48bc671de5213 | [
"Apache-2.0"
] | 5 | 2021-02-28T20:49:37.000Z | 2022-03-04T21:50:27.000Z | cplusplus/level1_single_api/4_op_dev/1_custom_op/tbe/impl/conv2d_tik.py | coldenheart/123 | 798768bba7dfaef051a46d8e1df48bc671de5213 | [
"Apache-2.0"
] | 16 | 2020-12-06T07:26:13.000Z | 2022-03-01T07:51:55.000Z | """
Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use this file
except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
conv2d_tik
"""
from __future__ import absolute_import
import numpy as np
from tbe import tik
DTYPE_SIZE = {
    'bool': 1,
    'uint8': 1,
    'int8': 1,
    'uint16': 2,
    'int16': 2,
    'int24': 3,
    'uint32': 4,
    'int32': 4,
    'float16': 2,
    'float32': 4,
    'int48': 6,
    'int64': 8,
    'uint64': 8,
    'float64': 8
}


def conv2d_tik_compute(params):
    """
    conv2d tik compute
    @param params: conv2d data
    @return: tik instance
    """
    tik_instance = tik.Tik()

    # get shape of feature map and weight
    n, c1, h, w, c0 = params["fm_shape"]
    c1, kh, kw, cout, c0 = params["weight_shape"]

    # get value of stride, dilation, pad
    stride_h, stride_w = params["stride_list"]
    dilation_h, dilation_w = params["dilation_list"]
    pad_top, pad_bot, pad_left, pad_right = params["pad_list"]

    # calculate height and width
    kh_dilation = (kh - 1) * dilation_h + 1
    kw_dilation = (kw - 1) * dilation_w + 1
    ho = int(np.ceil((h + pad_top + pad_bot - kh_dilation + 1) / stride_h))
    wo = int(np.ceil((w + pad_right + pad_left - kw_dilation + 1) / stride_w))
    round_howo = ((ho * wo + 16 - 1) // 16) * 16

    fm_gm = tik_instance.Tensor(params['fm_dtype'], (n, c1, h, w, c0),
                                name='fm_gm', scope=tik.scope_gm)
    weight_gm = tik_instance.Tensor(params['weight_type'],
                                    (c1, kh, kw, cout, c0), name='weight_gm',
                                    scope=tik.scope_gm)
    dst_gm = tik_instance.Tensor(params['dst_gm_type'],
                                 [n, cout // 16, ho, wo, 16],
                                 name='dst_gm', scope=tik.scope_gm)

    core_num = params['core_num']
    pre_core_cout = cout // core_num
    cout_iter_num = pre_core_cout // params["cout_split_factor"]
    Cin_blocks = c1

    with tik_instance.for_range(0, core_num, block_num=core_num) as cout_o:
        with tik_instance.for_range(0, cout_iter_num, thread_num=1) as cout_i:
            weight_L1 = tik_instance.Tensor(
                params['weight_type'], (Cin_blocks, kh, kw,
                                        params["cout_split_factor"], c0),
                name='weight_l1', scope=tik.scope_cbuf)
            tik_instance.data_move(
                weight_L1,
                weight_gm.flatten()[cout_o * pre_core_cout * c0 +
                                    params["cout_split_factor"] * cout_i * c0],
                0, Cin_blocks * kh * kw,
                params["cout_split_factor"],
                (cout - params["cout_split_factor"]), 0)

            with tik_instance.for_range(0, n, thread_num=2) as n_index:
                feature_map_l1 = tik_instance.Tensor(params['fm_dtype'],
                                                     (c1, h, w, c0),
                                                     name='feature_map_l1',
                                                     scope=tik.scope_cbuf)
                tik_instance.data_move(feature_map_l1,
                                       fm_gm[n_index, :, :, :, :],
                                       0, 1, c1 * h * w, 0, 0)

                dst_l0c = tik_instance.Tensor(
                    params['dst_l0c_type'], [params["cout_split_factor"] // 16,
                                             round_howo, 16],
                    name='dst_l0c', scope=tik.scope_cbuf_out)
                tik_instance.conv2d(dst_l0c, feature_map_l1,
                                    weight_L1, (c1, h, w, c0),
                                    (Cin_blocks, kh, kw,
                                     params["cout_split_factor"], c0),
                                    params['stride_list'],
                                    [pad_left, pad_right, pad_top, pad_bot],
                                    params['dilation_list'],
                                    params['pad_value'])
                tik_instance.fixpipe(
                    dst_gm[n_index, (cout_o * pre_core_cout + params["cout_split_factor"] * cout_i) //
                           (32 // DTYPE_SIZE[params['dst_gm_type']]), 0, 0, 0],
                    dst_l0c, params["cout_split_factor"] // 16,
                    ho * wo * 16 * DTYPE_SIZE[params['dst_l0c_type']] // 32, 0, 0,
                    extend_params={"bias": None,
                                   "quantize_params": params["quantize_params"]})

    tik_instance.BuildCCE(kernel_name=params["kernel_name"],
                          inputs=[fm_gm, weight_gm], outputs=[dst_gm], config={'l2_mode': 1})
    return tik_instance


def conv2d_tik(inputs, weights, outputs, strides, pads, dilations, kernel_name="conv2d_tik"):
    in_dtype = inputs.get("dtype")
    w_dtype = weights.get("dtype")
    res_dtype = outputs.get("dtype")
    in_shape = inputs.get("shape")
    w_shape = weights.get("ori_shape")

    if len(strides) != 4:
        raise RuntimeError("strides shape should be 4d.")
    if len(dilations) != 4:
        raise RuntimeError("dilations shape should be 4d.")
    if len(pads) != 4:
        raise RuntimeError("pads shape should be 4d.")
    if in_dtype != "float16" or w_dtype != "float16" or res_dtype != "float16":
        raise RuntimeError("dtype shape should be float16.")
    if weights.get("ori_format") != "NCHW":
        raise RuntimeError("format should be NCHW.")

    loc_dtype = "float32"
    quantize_params = {"mode": "fp322fp16", "mode_param": None}
    stride_list = [strides[2], strides[3]]
    dilation_list = [dilations[2], dilations[3]]
    w5hd_shape = [w_shape[1] // 16, w_shape[2], w_shape[3], w_shape[0], 16]

    params = {
        "fm_shape": in_shape,
        "weight_shape": w5hd_shape,
        "fm_dtype": in_dtype,
        "weight_type": w_dtype,
        "dst_l0c_type": loc_dtype,
        "dst_gm_type": res_dtype,
        "quantize_params": quantize_params,
        "pad_list": pads,
        "pad_value": 0,
        "stride_list": stride_list,
        "dilation_list": dilation_list,
        "cout_split_factor": 64,
        "core_num": 2,
        "kernel_name": kernel_name}

    conv2d_tik_compute(params)
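
# --- Added usage sketch (not part of the original operator file): the shape of
# --- the dict arguments conv2d_tik expects, per the checks above. The concrete
# --- values are illustrative; real calls come from the Ascend op framework.
# inputs = {"dtype": "float16", "shape": (1, 4, 56, 56, 16)}   # NC1HWC0 layout
# weights = {"dtype": "float16", "ori_shape": (128, 64, 3, 3), "ori_format": "NCHW"}
# outputs = {"dtype": "float16"}
# conv2d_tik(inputs, weights, outputs,
#            strides=(1, 1, 1, 1), pads=(1, 1, 1, 1), dilations=(1, 1, 1, 1))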
| 40.293413 | 102 | 0.539605 | 836 | 6,729 | 4.077751 | 0.230861 | 0.054855 | 0.044001 | 0.055441 | 0.231446 | 0.151071 | 0.072162 | 0.053388 | 0.043414 | 0 | 0 | 0.039229 | 0.344628 | 6,729 | 166 | 103 | 40.536145 | 0.733787 | 0.104622 | 0 | 0.01626 | 0 | 0 | 0.150033 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01626 | false | 0 | 0.02439 | 0 | 0.04878 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d16ad1765d12c4ab3d586ee47e803dea6e2400a1 | 4,589 | py | Python | core/src/zeit/cms/clipboard/clipboard.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 5 | 2019-05-16T09:51:29.000Z | 2021-05-31T09:30:03.000Z | core/src/zeit/cms/clipboard/clipboard.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 107 | 2019-05-24T12:19:02.000Z | 2022-03-23T15:05:56.000Z | core/src/zeit/cms/clipboard/clipboard.py | rickdg/vivi | 16134ac954bf8425646d4ad47bdd1f372e089355 | [
"BSD-3-Clause"
] | 3 | 2020-08-14T11:01:17.000Z | 2022-01-08T17:32:19.000Z | import zope.annotation
import zope.component
import zope.interface
import zope.proxy
import zope.publisher.interfaces
import zope.security.interfaces
import zope.traversing.api
import zope.container.interfaces
import zope.container.contained
import zope.container.ordered
import z3c.traverser.interfaces
import zeit.cms.workingcopy.interfaces
import zeit.cms.clipboard.interfaces
@zope.component.adapter(zeit.cms.workingcopy.interfaces.IWorkingcopy)
@zope.interface.implementer(zeit.cms.clipboard.interfaces.IClipboard)
class Clipboard(zope.container.ordered.OrderedContainer):

    title = 'Clipboard'

    def addContent(self, reference_object, add_object,
                   name=None, insert=False):
        """Add unique_id to obj."""
        if not zeit.cms.clipboard.interfaces.IClipboardEntry.providedBy(
                reference_object):
            raise ValueError(
                "`reference_object` does not provide IClipboardEntry (%r)" %
                reference_object)
        if insert:
            container = reference_object
            position = 0
        else:
            container = reference_object.__parent__
            position = list(container.keys()).index(
                reference_object.__name__) + 1
        if not zope.container.interfaces.IOrderedContainer.providedBy(
                container):
            raise ValueError('`reference_object` must be a Clip to insert.')
        entry = zeit.cms.clipboard.interfaces.IClipboardEntry(add_object)
        entry = zope.proxy.removeAllProxies(entry)
        order = list(container.keys())
        chooser = zope.container.interfaces.INameChooser(container)
        name = chooser.chooseName(name, entry)
        container[name] = entry
        order.insert(position, name)
        container.updateOrder(order)

    def addClip(self, title):
        clip = zeit.cms.clipboard.entry.Clip(title)
        chooser = zope.container.interfaces.INameChooser(self)
        name = chooser.chooseName(title, clip)
        self[name] = clip
        return self[name]

    def moveObject(self, obj, new_container, insert=False):
        if not zeit.cms.clipboard.interfaces.IClipboardEntry.providedBy(obj):
            raise TypeError("obj must provide IClipboardEntry. Got %r." % obj)
        if obj == new_container:
            return
        if obj in zope.traversing.api.getParents(new_container):
            raise ValueError(
                "`obj` must not be an ancestor of `new_container`.")
        old_container = obj.__parent__
        old_name = obj.__name__
        del old_container[old_name]
        self.addContent(new_container, obj, old_name, insert)

    def __setitem__(self, key, value):
        if not zeit.cms.clipboard.interfaces.IClipboardEntry.providedBy(value):
            raise ValueError("Can only contain IClipboardEntry objects. "
                             "Got %r instead." % value)
        super(Clipboard, self).__setitem__(key, value)


clipboardFactory = zope.annotation.factory(Clipboard)


@zope.interface.implementer(zeit.cms.clipboard.interfaces.IClipboard)
@zope.component.adapter(zope.security.interfaces.IPrincipal)
def principalAdapter(principal):
    """Shortcut adapter from principal to clipboard."""
    workingcopy = zeit.cms.workingcopy.interfaces.IWorkingcopy(principal)
    return zeit.cms.clipboard.interfaces.IClipboard(workingcopy)


@zope.component.adapter(
    zeit.cms.workingcopy.interfaces.IWorkingcopy,
    zope.publisher.interfaces.IPublisherRequest)
@zope.interface.implementer(z3c.traverser.interfaces.IPluggableTraverser)
class WorkingcopyTraverser(object):
    """Traverses to clipboard through a workingcopy."""

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def publishTraverse(self, request, name):
        clipboard = zeit.cms.clipboard.interfaces.IClipboard(
            self.context, None)
        if clipboard is not None and clipboard.__name__ == name:
            return clipboard
        raise zope.publisher.interfaces.NotFound(self.context, name, request)


@zope.component.adapter(zeit.cms.clipboard.interfaces.IClipboard)
class ClipboardNameChooser(zope.container.contained.NameChooser):
    """A namechooser removing invalid characters."""

    def chooseName(self, name, object):
        name = name.replace('/', '')
        while name.startswith('+'):
            name = name.replace('+', '', 1)
        while name.startswith('@'):
            name = name.replace('@', '', 1)
        return super(ClipboardNameChooser, self).chooseName(name, object)
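

# --- Added note (not part of the original module): ClipboardNameChooser drops
# --- every '/' and strips leading '+' and '@' characters before delegating to
# --- the standard zope NameChooser, so e.g. '@@my/clip' is normalized to
# --- 'myclip' prior to the usual uniqueness handling.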
| 37.308943 | 79 | 0.687296 | 483 | 4,589 | 6.42029 | 0.248447 | 0.03386 | 0.056756 | 0.083844 | 0.253144 | 0.170268 | 0.155434 | 0.13286 | 0.041277 | 0 | 0 | 0.001662 | 0.213336 | 4,589 | 122 | 80 | 37.614754 | 0.857341 | 0.033994 | 0 | 0.042553 | 0 | 0 | 0.059384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.138298 | 0 | 0.319149 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d16c1e89b2ab8da6e931b84bf07afa7c43410c07 | 1,640 | py | Python | attendees/persons/serializers/attendingmeet_etc_serializer.py | xjlin0/attendees32 | 25913c75ea8d916dcb065a23f2fa68bea558f77c | [
"MIT"
] | null | null | null | attendees/persons/serializers/attendingmeet_etc_serializer.py | xjlin0/attendees32 | 25913c75ea8d916dcb065a23f2fa68bea558f77c | [
"MIT"
] | 5 | 2022-01-21T03:26:40.000Z | 2022-02-04T17:32:16.000Z | attendees/persons/serializers/attendingmeet_etc_serializer.py | xjlin0/attendees32 | 25913c75ea8d916dcb065a23f2fa68bea558f77c | [
"MIT"
] | null | null | null | from rest_framework import serializers
from attendees.persons.models import AttendingMeet
class AttendingMeetEtcSerializer(serializers.ModelSerializer):
    assembly = serializers.IntegerField(read_only=True)

    class Meta:
        model = AttendingMeet
        fields = "__all__"

    def create(self, validated_data):
        """
        Create or update `AttendingMeet` instance, given the validated data.
        """
        attendingmeet_id = self._kwargs["data"].get("id")
        obj, created = AttendingMeet.objects.update_or_create(
            id=attendingmeet_id,
            defaults=validated_data,
        )
        return obj

    def update(self, instance, validated_data):
        """
        Update and return an existing `AttendingMeet` instance, given the validated data.
        """
        if (
            True
        ):  # needs validation, e.g. whether the assembly matches the meet; better to validate on the UI first
            instance.meet = validated_data.get("meet", instance.meet)
            # instance.meet.assembly = validated_data.get('assembly', instance.meet.assembly)
            instance.meet.save()

        instance.attending = validated_data.get("attending", instance.attending)
        instance.start = validated_data.get("start", instance.start)
        instance.finish = validated_data.get("finish", instance.finish)
        instance.character = validated_data.get("character", instance.character)
        instance.category = validated_data.get("category", instance.category)
        instance.team = validated_data.get("team", instance.team)
        instance.save()
        return instance
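
# --- Added usage note (not part of the original serializer): because create()
# --- above calls update_or_create keyed on the incoming "id", a POST payload
# --- carrying an existing AttendingMeet id updates that row instead of
# --- inserting a duplicate, while payloads without an id create a new record.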
| 36.444444 | 105 | 0.664634 | 172 | 1,640 | 6.209302 | 0.372093 | 0.15824 | 0.11985 | 0.054307 | 0.078652 | 0.078652 | 0 | 0 | 0 | 0 | 0 | 0 | 0.241463 | 1,640 | 44 | 106 | 37.272727 | 0.858521 | 0.196951 | 0 | 0 | 0 | 0 | 0.045741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d16c5eccf5ae33abe7cfa19ea89759066337ca25 | 6,475 | py | Python | CDMouth/TextToSpeech.py | okcd00/CDAuto | d2c90062d537bad8c3749ae35542b5e50cb37db0 | [
"MIT"
] | 3 | 2020-03-05T10:09:57.000Z | 2021-12-11T09:32:00.000Z | CDMouth/TextToSpeech.py | okcd00/CDAlter | 43486591d2e5ae3d93ebfc21949894c5c8c8cd94 | [
"MIT"
] | null | null | null | CDMouth/TextToSpeech.py | okcd00/CDAlter | 43486591d2e5ae3d93ebfc21949894c5c8c8cd94 | [
"MIT"
] | null | null | null | # coding: utf-8
# ==========================================================================
# Copyright (C) 2018-2020 All rights reserved.
#
# filename : TextToSpeech.py
# author : chendian / okcd00@qq.com
# origin : junzew / HanTTS
# date : 2020-03-09
# desc : TTS class
# ==========================================================================
import os
import time
import wave
import json
import _thread
import pyaudio
import CDMouth.atc as atc
from pathlib import Path
from pypinyin import lazy_pinyin, TONE3
from pydub import AudioSegment
from pydub.silence import split_on_silence
class TextToSpeech:
    CHUNK = 1024
    punctuation = [
        ',', '。', '?', '!', '“', '”', ';', ':', '(', ")",
        ":", ";", ",", ".", "?", "!", "\"", "\'", "(", ")"]

    def __init__(self):
        self.delay = 0.450
        self.root_path = '../'
        self.source_dir = os.path.join(
            self.root_path, 'CDMouth', 'syllables/')
        pass

    def speak(self, text):
        syllables = lazy_pinyin(text, style=TONE3)
        print(syllables)
        delay = 0

        def pre_process(_syl):
            temp = []
            for _s in _syl:
                for p in TextToSpeech.punctuation:
                    _s = _s.replace(p, "")
                if _s.isdigit():
                    _s = atc.num2chinese(_s)
                    new_sounds = lazy_pinyin(_s, style=TONE3)
                    for e in new_sounds:
                        temp.append(e)
                else:
                    temp.append(_s)
            return temp

        syllables = pre_process(syllables)
        for syllable in syllables:
            path = self.source_dir + syllable + ".wav"
            _thread.start_new_thread(TextToSpeech._play_audio, (path, delay))
            delay += self.delay

    @staticmethod
    def mp3_to_wav(source_file_path, dest_path=None):
        if dest_path is None:
            dest_path = source_file_path.replace('.mp3', '.wav')
        sound = AudioSegment.from_mp3(source_file_path)
        sound.export(dest_path, format='wav')

    @staticmethod
    def split_wav(path, syllables=None, key='a', debug=False):
        # syllables in form of [['a1', 'a2', 'a3', 'a4', 'a'], ...]
        file = Path(path)
        if not file.is_file():
            raise Exception(path + " doesn't exist")
        if syllables is None:
            data = json.load(open('mapping.json'))
            syllables = data.get(key)
        sound_file = AudioSegment.from_wav(path)
        audio_chunks = split_on_silence(
            sound_file,
            min_silence_len=333,  # must be silent for at least 333ms
            silence_thresh=-32    # consider it silent if quieter than -32 dBFS
        )

        # from mapping.json in HanTTS
        for i, chunk in enumerate(audio_chunks):
            if debug:  # debug mode, ignore syllables list.
                out_file = "./pre/test{:03}".format(i) + '.wav'
            elif isinstance(syllables[0], list):  # nested list of 5 tones
                if i // 5 >= len(syllables):  # over-capacity chunks
                    syllable = 'oth{}'.format(i)
                    out_file = "./pre/" + syllable + '.wav'
                else:
                    syllable = syllables[i // 5]
                    print(syllable)
                    j = i % 5
                    if j != 4:  # 1st, 2nd, 3rd, 4th tone
                        out_file = "./pre/" + syllable + str(j + 1) + ".wav"
                    else:  # neutral tone
                        out_file = "./pre/" + syllable + ".wav"
            else:  # a list of single tones
                if i >= len(syllables):  # over-capacity chunks
                    syllable = 'oth{}'.format(i)
                    out_file = "./pre/" + syllable + '.wav'
                else:
                    syllable = syllables[i]
                    print(syllable)
                    out_file = "./pre/" + syllable + ".wav"
            print("exporting", out_file)
            chunk.export(out_file, format="wav")
        return audio_chunks

    def synthesize(self, text, src, dst):
        """
        Synthesize .wav from text
        src is the folder that contains all syllables .wav files
        dst is the destination folder to save the synthesized file
        """
        print("Synthesizing ...")
        delay = 0
        increment = self.delay * 1000  # milliseconds
        pause = 500  # pause for punctuation

        syllables = lazy_pinyin(text, style=TONE3)

        # initialize to be complete silence, each character takes up ~500ms
        result = AudioSegment.silent(duration=500 * len(text))
        for syllable in syllables:
            path = src + syllable + ".wav"
            sound_file = Path(path)
            # insert 500 ms silence for punctuation marks
            if syllable in TextToSpeech.punctuation:
                short_silence = AudioSegment.silent(duration=pause)
                result = result.overlay(short_silence, position=delay)
                delay += increment
                continue
            # skip sound file that doesn't exist
            if not sound_file.is_file():
                continue
            segment = AudioSegment.from_wav(path)
            result = result.overlay(segment, position=delay)
            delay += increment

        directory = dst
        if not os.path.exists(directory):
            os.makedirs(directory)
        result.export(directory + "generated.wav", format="wav")
        print("Exported.")

    @staticmethod
    def _play_audio(path, delay):
        try:
            time.sleep(delay)
            wf = wave.open(path, 'rb')
            p = pyaudio.PyAudio()
            stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)
            data = wf.readframes(TextToSpeech.CHUNK)
            while data:
                stream.write(data)
                data = wf.readframes(TextToSpeech.CHUNK)
            stream.stop_stream()
            stream.close()
            p.terminate()
            return
        except Exception as e:
            print(e)
            pass


if __name__ == '__main__':
    tts = TextToSpeech()
    while True:
        _t = input('输入中文:')
        if str(_t).lower().startswith('exit'):
            break
        tts.speak(_t)
| 35.576923 | 78 | 0.51166 | 674 | 6,475 | 4.768546 | 0.347181 | 0.017424 | 0.018668 | 0.028002 | 0.136279 | 0.084941 | 0.056627 | 0.056627 | 0.056627 | 0.056627 | 0 | 0.01924 | 0.357838 | 6,475 | 181 | 79 | 35.773481 | 0.753728 | 0.154595 | 0 | 0.20979 | 0 | 0 | 0.044379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048951 | false | 0.013986 | 0.076923 | 0 | 0.167832 | 0.048951 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d1701b96fd7a92f7df5c07d7aab9cf3f631d9646 | 635 | py | Python | deprecated/rcbu/utils/bytes.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 4 | 2015-02-10T14:28:12.000Z | 2016-12-26T22:52:07.000Z | deprecated/rcbu/utils/bytes.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 17 | 2015-01-22T21:58:36.000Z | 2018-01-25T19:47:43.000Z | deprecated/rcbu/utils/bytes.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 9 | 2015-01-26T19:25:45.000Z | 2018-11-01T20:14:12.000Z | """Utilities for handling byte quantities and strings."""
def dehumanize_bytes(human_bytes):
    """Convert a string in the format '2.23 GB' -> int(2.23 * 2**30)."""
    packed = human_bytes.split()
    amount, magnitude = packed[0], None
    if len(packed) == 2:
        magnitude = packed[1].upper()
    prefixes = ['YB', 'ZB', 'EB', 'PB', 'TB', 'GB', 'MB', 'KB']
    multipliers = [2**(i*10) for i in range(8, 0, -1)]
    magnitude_to_multiplier = {ma: mult for ma, mult in
                               zip(prefixes, multipliers)}
    multiplier = magnitude_to_multiplier.get(magnitude, 1)
    return int(float(amount) * multiplier)
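

# --- Added examples (not in the original module), using the binary multipliers
# --- the docstring above describes:
# >>> dehumanize_bytes('2.23 GB') == int(2.23 * 2**30)
# True
# >>> dehumanize_bytes('512')   # no magnitude -> multiplier defaults to 1
# 512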
| 39.6875 | 67 | 0.60315 | 86 | 635 | 4.372093 | 0.616279 | 0.053191 | 0.111702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041237 | 0.23622 | 635 | 15 | 68 | 42.333333 | 0.734021 | 0.171654 | 0 | 0 | 0 | 0 | 0.031068 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d17126b678901eaff2294ed14ac2e19ccefc75d6 | 3,002 | py | Python | ptreeopt/plotting.py | quaquel/ptreeopt | d4df26ecd877185b0a8c02c8ecbd3c73e54f6f52 | [
"MIT"
] | 26 | 2017-02-27T01:30:19.000Z | 2022-02-23T07:26:46.000Z | ptreeopt/plotting.py | quaquel/ptreeopt | d4df26ecd877185b0a8c02c8ecbd3c73e54f6f52 | [
"MIT"
] | 8 | 2018-06-28T15:52:49.000Z | 2021-09-27T15:49:50.000Z | ptreeopt/plotting.py | quaquel/ptreeopt | d4df26ecd877185b0a8c02c8ecbd3c73e54f6f52 | [
"MIT"
] | 5 | 2018-03-31T12:48:00.000Z | 2021-09-22T16:36:59.000Z | import numpy as np
import matplotlib.pyplot as plt
import os, subprocess
import pandas as pd
def graphviz_export(P, filename, colordict=None, animation=False, dpi=300):
    ''' Export policy tree P to filename (SVG or PNG)
    colordict optional. Keys must match actions. Example:
    colordict = {'Release_Demand': 'cornsilk',
                 'Hedge_90': 'indianred',
                 'Flood_Control': 'lightsteelblue'}
    Requires pygraphviz.'''
    import pygraphviz as pgv
    G = pgv.AGraph(directed=True)
    G.node_attr['shape'] = 'box'
    G.node_attr['style'] = 'filled'

    if animation:
        G.graph_attr['size'] = '2!,2!'  # use for animations only
        G.graph_attr['dpi'] = str(dpi)

    parent = P.root
    G.add_node(str(parent), fillcolor='white')
    S = []

    while parent.is_feature or len(S) > 0:
        if parent.is_feature:
            S.append(parent)
            child = parent.l
            label = 'T'
        else:
            parent = S.pop()
            child = parent.r
            label = 'F'

        if child.is_feature or not colordict:
            c = 'white'
        else:
            c = colordict[child.value]

        G.add_node(str(child), fillcolor=c)
        G.add_edge(str(parent), str(child), label=label)
        parent = child

    G.layout(prog='dot')
    G.draw(filename)


def animate_trees(snapshots, filename, colordict=None, max_nfe=None):
    os.makedirs('temp')

    for i, P in enumerate(snapshots['best_P']):
        nfe = snapshots['nfe'][i]
        if max_nfe and nfe > max_nfe:
            break
        nfestring = 'nfe-' + '%10d' % nfe + '.png'
        graphviz_export(P, 'temp/%s-%s' % (filename, nfestring), colordict, dpi=150)

    subprocess.call(['./ptreeopt/stitch-animations.sh', filename, ''])
    subprocess.call(['rm', '-r', 'temp'])


def ts_color(ts_actions, colordict=None):
    for pol in set(ts_actions):
        first = ts_actions.index[(ts_actions == pol) & (ts_actions.shift(1) != pol)]
        last = ts_actions.index[(ts_actions == pol) & (ts_actions.shift(-1) != pol)]
        for f, l in zip(first, last):
            plt.axvspan(f, l + pd.Timedelta('1 day'),
                        facecolor=colordict[pol], edgecolor='none', alpha=0.4)


def animate_objfxn(snapshots, filename, max_nfe=None):
    os.makedirs('temp')

    for i, P in enumerate(snapshots['best_P']):
        if max_nfe and snapshots['nfe'][i] > max_nfe:
            break
        plt.plot(snapshots['nfe'][:i + 1], snapshots['best_f']
                 [:i + 1], linewidth=2, color='steelblue')
        L = [max_nfe, snapshots['nfe'][-1]]
        plt.xlim([0, min(i for i in L if i is not None)])
        plt.ylim([0, np.max(snapshots['best_f'])])
        plt.ylabel('Objective Function')
        plt.xlabel('NFE')
        plt.tight_layout()
        nfestring = 'nfe-' + '%10d' % snapshots['nfe'][i] + '.png'
        plt.savefig('temp/%s-%s' % (filename, nfestring), dpi=150)
        plt.close()

    subprocess.call(['./ptreeopt/stitch-animations.sh', filename, '-layers optimize'])
    subprocess.call(['rm', '-r', 'temp'])
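

# --- Added usage sketch (not part of the original module): ts_color expects a
# --- pandas Series of action labels indexed by time; the dates and labels here
# --- are illustrative.
# import pandas as pd
# idx = pd.date_range('2000-01-01', periods=6, freq='D')
# actions = pd.Series(['Hedge_90'] * 3 + ['Release_Demand'] * 3, index=idx)
# ts_color(actions, colordict={'Hedge_90': 'indianred',
#                              'Release_Demand': 'cornsilk'})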
| 28.590476 | 84 | 0.596269 | 406 | 3,002 | 4.315271 | 0.362069 | 0.041096 | 0.02968 | 0.012557 | 0.216895 | 0.166667 | 0.166667 | 0.111872 | 0.111872 | 0.111872 | 0 | 0.012753 | 0.242505 | 3,002 | 104 | 85 | 28.865385 | 0.757696 | 0.087941 | 0 | 0.147059 | 0 | 0 | 0.099631 | 0.022878 | 0.029412 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.073529 | 0 | 0.132353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d171d9e66a91c717237db741b180e1ba5317644f | 3,541 | py | Python | will/webapp.py | rgorsuch/will | ebfc0d2953b4dcf5a4acc1118811cab5aec3bd17 | [
"MIT"
] | 1 | 2018-11-19T15:34:07.000Z | 2018-11-19T15:34:07.000Z | will/webapp.py | rgorsuch/will | ebfc0d2953b4dcf5a4acc1118811cab5aec3bd17 | [
"MIT"
] | null | null | null | will/webapp.py | rgorsuch/will | ebfc0d2953b4dcf5a4acc1118811cab5aec3bd17 | [
"MIT"
] | 1 | 2019-02-26T02:17:32.000Z | 2019-02-26T02:17:32.000Z | #!/usr/bin/env python
#
# export REDISCLOUD_URL=redis://localhost:6379/7
# FLASK_APP=manage.py flask run
# FLASK_DEBUG=1 python manage.py runserver
# gunicorn manage:app
from flask_script import Server, Manager
from flask import Flask
from flask import jsonify
from flask import send_from_directory
from flask import request
from flask import render_template
from pprint import pformat
import os
import time
import json
import urlparse
import redis
import logging
import gevent
import settings
keep_alive_url = "/keep-alive"
logger = logging.getLogger(__name__)
url = urlparse.urlparse(os.environ.get('REDISCLOUD_URL'))
r = redis.Redis(host=url.hostname, port=url.port, password=url.password)
pubsub = redis.Redis(host=url.hostname, port=url.port, password=url.password)
logger.info("type of pics is " + r.type("pics"))
if not r.exists("pics"):
# First ever startup -- load some default images
defaultpics = [
"https://s3.amazonaws.com/uploads.hipchat.com/398524/2959249/Fv0U8jU47Z1RWQt/20151211_235234.jpg",
"https://s3.amazonaws.com/uploads.hipchat.com/398524/2973098/mX8h75W4A17wm8a/upload.jpg",
"https://s3.amazonaws.com/uploads.hipchat.com/398524/2856310/qwTHxLZQA1uVrEw/upload.png",
"https://s3.amazonaws.com/uploads.hipchat.com/398524/2973989/YTonrDGuXGngIDM/upload.png"]
now = time.time()
for index, pic in enumerate(defaultpics):
r.zadd("pics", pic, now - index)
elif r.type("pics") == "list":
# Data migration from list to sorted-set
all = [p for p in r.lrange("pics", 0, -1)]
r.delete("pics")
for index, pic in enumerate(all):
r.zadd("pics", pic, index+1)
app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static'), static_url_path='/static')
app.config['SECRET_KEY'] = 'TOPsecret!'
def bootstrap_flask():
    from will.sockets import get_socketio_app
    logger.info("Starting flask server on port " + settings.HTTPSERVER_PORT)
    socketioapp = get_socketio_app()
    socketioapp.run(app, host="0.0.0.0", port=int(settings.HTTPSERVER_PORT))
    # # Just Flask
    # app.run(host="0.0.0.0", port=int(settings.HTTPSERVER_PORT))


@app.route('/')
def slideshow():
    return send_from_directory(os.path.join(os.path.dirname(__file__), 'static'), "slideshow.html")


@app.route('/pics', methods=['GET'])
def pics():
    logger.info("Handling GET /pics with " + request.method)
    logger.info("  Headers:" + pformat(request.headers))
    logger.info("  Form: " + pformat(request.form))
    logger.info("  Data: " + pformat(request.data))
    # Get photo URLs from Redis
    urls = r.zrange("pics", 0, -1)
    return jsonify(list(reversed(urls)))


@app.route('/pics', methods=['POST'])
def add_pic():
    logger.info("POSTed pic")
    image = request.form['image']
    if image:
        logger.info("Publishing new image: " + image)
        logger.info(pformat(request.form))
        r.zadd("pics", image, time.time())
        pubsub.publish("updates", json.dumps(image))
        return jsonify([image])
    else:
        return jsonify([])


@app.route('/pics', methods=['DELETE'])
def delete_pic():
    image = request.form['image']
    logger.info("Deleting pic: " + image)
    r.zrem("pics", image)
    return jsonify("deleted " + image)


@app.route('/reset')
def reset_pics():
    r.delete("pics")
    return "Pics have been reset"


@app.route('/keep-alive')
def keep_alive():
    return "I'm alive!"


@app.route("/ping")
def ping():
    return "PONG"


# `manager` was referenced below but never created; binding the imported
# flask_script Manager to the app is the assumed intent.
manager = Manager(app)

if __name__ == "__main__":
    manager.run()
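
# --- Added usage note (not part of the original app): exercising the routes
# --- above from the shell; host/port and the image URL are placeholders:
#   curl http://localhost:5000/pics
#   curl -X POST   -d 'image=https://example.com/pic.jpg' http://localhost:5000/pics
#   curl -X DELETE -d 'image=https://example.com/pic.jpg' http://localhost:5000/pics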
| 28.556452 | 113 | 0.684552 | 489 | 3,541 | 4.848671 | 0.335378 | 0.042176 | 0.031632 | 0.032054 | 0.21763 | 0.178828 | 0.178828 | 0.178828 | 0.115563 | 0.077604 | 0 | 0.034401 | 0.162666 | 3,541 | 123 | 114 | 28.788618 | 0.765261 | 0.096583 | 0 | 0.048193 | 0 | 0.048193 | 0.235719 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096386 | false | 0.024096 | 0.192771 | 0.036145 | 0.385542 | 0.012048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
66f37010bee3fc9dd18822a66b9dc83e5b71b409 | 1,429 | py | Python | pwstat/processor.py | yenarhee/pwstat | 67d465d2806ae35ec1a39de867549d3a3f363192 | [
"MIT"
] | null | null | null | pwstat/processor.py | yenarhee/pwstat | 67d465d2806ae35ec1a39de867549d3a3f363192 | [
"MIT"
] | null | null | null | pwstat/processor.py | yenarhee/pwstat | 67d465d2806ae35ec1a39de867549d3a3f363192 | [
"MIT"
] | null | null | null | # Processor class with static methods
# : gets headers, body, timestamp
# : returns header and body data
# e.g. POST form variables, cookies, keyword parameters
import re
from Cookie import SimpleCookie as cookie
class Processor(object):
    def __init__(self):
        return

    # main function
    @staticmethod
    def process(headers, body, timestamp):
        # process
        return Processor.process_headers(headers), Processor.process_body(body), timestamp

    # helper functions
    @staticmethod
    def process_headers(headers):
        # process header data and return useful info as a dictionary
        headers_dict = dict(headers)
        # parse cookie
        try:
            cookiestring = headers_dict['cookie']
            c = cookie()
            cookie_dict = {}
            c.load(cookiestring)
            for key in c:
                cookie_dict[key] = c[key].value
            headers_dict['cookie'] = cookie_dict
        except KeyError:
            # there is no cookie
            pass
        return headers_dict

    @staticmethod
    def process_body(body):
        # process body data and return POST form variables as a dictionary
        # note: the repeated group in the regex only retains its last match,
        # so bodies with three or more '&'-joined pairs lose the middle ones
        result = {}
        regex = r"^(\w*)=(\w*)(?:&(\w*)=(\S*))*"
        match = re.match(regex, body)
        if match:
            it = iter(match.groups())
            for key in it:
                result[key] = next(it)
        return result
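

# --- Added illustration (not part of the original class); key order may vary:
# >>> Processor.process_body('user=alice&token=xyz')
# {'user': 'alice', 'token': 'xyz'}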
| 28.019608 | 90 | 0.585724 | 160 | 1,429 | 5.1375 | 0.43125 | 0.053528 | 0.080292 | 0.07056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.325402 | 1,429 | 50 | 91 | 28.58 | 0.852697 | 0.244927 | 0 | 0.09375 | 0 | 0 | 0.038425 | 0.027179 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.03125 | 0.0625 | 0.0625 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
66f529e3e1847f546b2fe48ed349c1cb515916c6 | 1,424 | py | Python | shadowbot/sql/__init__.py | shadowmoose/Discord-Bot | 39db07233f23b43a8e15fdaf53de890ad2fe2866 | [
"MIT"
] | null | null | null | shadowbot/sql/__init__.py | shadowmoose/Discord-Bot | 39db07233f23b43a8e15fdaf53de890ad2fe2866 | [
"MIT"
] | null | null | null | shadowbot/sql/__init__.py | shadowmoose/Discord-Bot | 39db07233f23b43a8e15fdaf53de890ad2fe2866 | [
"MIT"
] | null | null | null | """
The SqlAlchemy static wrapper class.
The Sessions created are Thread-safe, but Thread-local in scope.
Its objects should not be shared across Processes or Threads.
"""
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
import os
Base = declarative_base()
_engine = None
_Session = None
def init(db_path=":memory:"):
"""
Initialize the DB, a required function to access the database.
Creates the DB file if it does not already exist.
:param db_path:
:return:
"""
global _engine, _Session
if _Session and _engine:
return
create_new = False
if db_path != ':memory:':
db_path = os.path.abspath(db_path)
create_new = not os.path.exists(db_path)
_engine = sqlalchemy.create_engine('sqlite:///%s' % db_path)
session_factory = sessionmaker(bind=_engine)
_Session = scoped_session(session_factory)
if create_new:
_create()
def _create():
from sql.message import UserMessageDB, BotMessageDB
Base.metadata.create_all(_engine)
print("\tCreated Database file.")
session().execute("PRAGMA journal_mode=WAL")
print("\t+Activated WAL Mode.")
def session():
"""
Create a Thread-local Session object, the entrypoint to the Database.
:return:
"""
if not _Session or not _engine:
raise Exception("SQL Session cannot be created if the DB is not initialized!")
# noinspection PyCallingNonCallable
return _Session()
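

# --- Added usage sketch (not part of the original module): typical lifecycle.
# --- The model import inside _create() assumes the sql.message module exists.
# import sql
# sql.init('bot.db')    # creates the SQLite file on first run
# s = sql.session()     # thread-local Session
# ...                   # query/add/commit with SQLAlchemy as usual
# s.close()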
| 24.135593 | 80 | 0.754213 | 197 | 1,424 | 5.279188 | 0.467005 | 0.040385 | 0.023077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.149579 | 1,424 | 58 | 81 | 24.551724 | 0.858794 | 0.291433 | 0 | 0 | 0 | 0 | 0.159672 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.166667 | 0 | 0.333333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
66fb7669cb49911cdfab9cbef121c812ac9aaf5f | 519 | py | Python | tools/libari/tests/test_libari.py | seemoo-lab/aristoteles | 092c746d2a211bb4edc1b60072487a94e8a97c99 | [
"MIT"
] | 21 | 2021-08-30T13:25:08.000Z | 2021-12-09T16:48:25.000Z | tools/libari/tests/test_libari.py | seemoo-lab/aristoteles | 092c746d2a211bb4edc1b60072487a94e8a97c99 | [
"MIT"
] | 5 | 2021-09-03T20:33:37.000Z | 2021-11-28T21:01:00.000Z | tools/libari/tests/test_libari.py | seemoo-lab/aristoteles | 092c746d2a211bb4edc1b60072487a94e8a97c99 | [
"MIT"
] | 3 | 2021-09-03T20:08:47.000Z | 2021-11-05T21:10:02.000Z | from libari.packet import Packet
from libari.tlv import TLV
import binascii
def test_default_header_only_all_zero():
    pkt = Packet()
    assert(pkt.getHexString() == 'dec07eab0000000000000000')


def test_default_header_only_all_zero_and_zero_tlv():
    pkt = Packet()
    pkt.addTLV(TLV(id=0, version=0))
    assert(pkt.getHexString() == 'dec07eab000008000000000000000000')
    reversePkt = Packet.fromBytes(binascii.unhexlify(pkt.getHexString()))
    assert(reversePkt.getHexString() == pkt.getHexString())
| 23.590909 | 73 | 0.747592 | 60 | 519 | 6.25 | 0.433333 | 0.16 | 0.074667 | 0.106667 | 0.165333 | 0.165333 | 0.165333 | 0 | 0 | 0 | 0 | 0.102679 | 0.136802 | 519 | 21 | 74 | 24.714286 | 0.734375 | 0 | 0 | 0.166667 | 0 | 0 | 0.1079 | 0.1079 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.166667 | false | 0 | 0.25 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
66fe695bec9a53f95c6444cdfa497c1f3778ce6b | 78,263 | py | Python | napalm_iosxr/iosxr.py | ktbyers/napalm-iosxr | dddd96118c20775163299fd0634e177598cc910f | [
"Apache-2.0"
] | 1 | 2021-07-15T18:13:32.000Z | 2021-07-15T18:13:32.000Z | napalm_iosxr/iosxr.py | ktbyers/napalm-iosxr | dddd96118c20775163299fd0634e177598cc910f | [
"Apache-2.0"
] | null | null | null | napalm_iosxr/iosxr.py | ktbyers/napalm-iosxr | dddd96118c20775163299fd0634e177598cc910f | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Spotify AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# python std lib
import re
import copy
from collections import defaultdict
# third party libs
from lxml import etree as ETREE
import xml.etree.ElementTree as ET
from netaddr import IPAddress
from netaddr.core import AddrFormatError
from pyIOSXR import IOSXR
from pyIOSXR.exceptions import ConnectError
from pyIOSXR.exceptions import TimeoutError
from pyIOSXR.exceptions import InvalidInputError
# napalm_base
from napalm_base.helpers import convert, find_txt, mac, ip
from napalm_base.base import NetworkDriver
from napalm_base.utils import string_parsers
from napalm_base.exceptions import ConnectionException, MergeConfigException, ReplaceConfigException,\
    CommandErrorException, CommandTimeoutException
class IOSXRDriver(NetworkDriver):

    def __init__(self, hostname, username, password, timeout=60, optional_args=None):
        self.hostname = hostname
        self.username = username
        self.password = password
        self.timeout = timeout
        self.pending_changes = False
        self.replace = False
        if optional_args is None:
            optional_args = {}
        self.port = optional_args.get('port', 22)
        self.lock_on_connect = optional_args.get('config_lock', True)
        self.device = IOSXR(hostname, username, password, timeout=timeout, port=self.port, lock=self.lock_on_connect)

    def open(self):
        try:
            self.device.open()
        except ConnectError as conn_err:
            raise ConnectionException(conn_err.message)

    def close(self):
        self.device.close()

    def load_replace_candidate(self, filename=None, config=None):
        self.pending_changes = True
        self.replace = True
        if not self.lock_on_connect:
            self.device.lock()
        try:
            self.device.load_candidate_config(filename=filename, config=config)
        except InvalidInputError as e:
            self.pending_changes = False
            self.replace = False
            raise ReplaceConfigException(e.message)

    def load_merge_candidate(self, filename=None, config=None):
        self.pending_changes = True
        self.replace = False
        if not self.lock_on_connect:
            self.device.lock()
        try:
            self.device.load_candidate_config(filename=filename, config=config)
        except InvalidInputError as e:
            self.pending_changes = False
            self.replace = False
            raise MergeConfigException(e.message)

    def compare_config(self):
        if not self.pending_changes:
            return ''
        elif self.replace:
            return self.device.compare_replace_config().strip()
        else:
            return self.device.compare_config().strip()

    def commit_config(self):
        if self.replace:
            self.device.commit_replace_config()
        else:
            self.device.commit_config()
        self.pending_changes = False
        if not self.lock_on_connect:
            self.device.unlock()

    def discard_config(self):
        self.device.discard_config()
        self.pending_changes = False
        if not self.lock_on_connect:
            self.device.unlock()

    def rollback(self):
        self.device.rollback()

    # perhaps both should be moved in napalm_base.helpers at some point
    @staticmethod
    def _find_txt(xml_tree, path, default=''):
        try:
            return xml_tree.find(path).text.strip()
        except Exception:
            return default

    @staticmethod
    def _convert(to, who, default=u''):
        if who is None:
            return default
        try:
            return to(who)
        except:
            return default

    def get_facts(self):
        facts = {
            'vendor': u'Cisco',
            'os_version': u'',
            'hostname': u'',
            'uptime': -1,
            'serial_number': u'',
            'fqdn': u'',
            'model': u'',
            'interface_list': []
        }

        facts_rpc_request = (
            '<Get>'
            '<Operational>'
            '<SystemTime/>'
            '<PlatformInventory/>'
            '</Operational>'
            '</Get>'
        )

        facts_rpc_reply = ETREE.fromstring(self.device.make_rpc_call(facts_rpc_request))
        system_time_xpath = './/SystemTime/Uptime'
        platform_attr_xpath = './/RackTable/Rack/Attributes/BasicInfo'
        system_time_tree = facts_rpc_reply.xpath(system_time_xpath)[0]
        platform_attr_tree = facts_rpc_reply.xpath(platform_attr_xpath)[0]

        hostname = convert(unicode, find_txt(system_time_tree, 'Hostname'))
        uptime = convert(int, find_txt(system_time_tree, 'Uptime'), -1)
        serial = convert(unicode, find_txt(platform_attr_tree, 'SerialNumber'))
        os_version = convert(unicode, find_txt(platform_attr_tree, 'SoftwareRevision'))
        model = convert(unicode, find_txt(platform_attr_tree, 'ModelName'))
        interface_list = self.get_interfaces().keys()

        facts.update({
            'os_version': os_version,
            'hostname': hostname,
            'model': model,
            'uptime': uptime,
            'serial_number': serial,
            'fqdn': hostname,
            'interface_list': interface_list
        })

        return facts
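
    # --- Added note (not part of the original driver): get_facts() above
    # --- returns the NAPALM-standard plain dict, e.g.
    # ---   {'vendor': u'Cisco', 'os_version': u'...', 'hostname': u'...',
    # ---    'fqdn': u'...', 'model': u'...', 'serial_number': u'...',
    # ---    'uptime': <seconds as int>, 'interface_list': [...]}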
def get_interfaces(self):
interfaces = {}
INTERFACE_DEFAULTS = {
'is_enabled': False,
'is_up': False,
'mac_address': u'',
'description': u'',
'speed': -1,
'last_flapped': -1.0
}
interfaces_rpc_request = '<Get><Operational><Interfaces/></Operational></Get>'
interfaces_rpc_reply = ETREE.fromstring(self.device.make_rpc_call(interfaces_rpc_request))
for interface_tree in interfaces_rpc_reply.xpath('.//Interfaces/InterfaceTable/Interface'):
interface_name = find_txt(interface_tree, 'Naming/InterfaceName')
if not interface_name:
continue
is_up = (find_txt(interface_tree, 'LineState') == 'IM_STATE_UP')
is_enabled = (find_txt(interface_tree, 'LineState') == 'IM_STATE_UP')
mac_address = mac(find_txt(interface_tree, 'MACAddress/Address'))
speed = int(convert(int, find_txt(interface_tree, 'Bandwidth'), 0) * 1e-3)
description = find_txt(interface_tree, 'Description')
interfaces[interface_name] = copy.deepcopy(INTERFACE_DEFAULTS)
interfaces[interface_name].update({
'is_up': is_up,
'speed': speed,
'is_enabled': is_enabled,
'mac_address': mac_address,
'description': description
})
return interfaces
def get_interfaces_counters(self):
rpc_command = "<Get><Operational><Interfaces><InterfaceTable></InterfaceTable></Interfaces></Operational></Get>"
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
interface_counters = dict()
for interface in result_tree.iter('Interface'):
interface_name = interface.find('InterfaceHandle').text
interface_stats = dict()
if interface.find('InterfaceStatistics') is None:
continue
else:
interface_stats = dict()
interface_stats['tx_multicast_packets'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/MulticastPacketsSent').text)
interface_stats['tx_discards'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/OutputDrops').text)
interface_stats['tx_octets'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/BytesSent').text)
interface_stats['tx_errors'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/OutputErrors').text)
interface_stats['rx_octets'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/BytesReceived').text)
interface_stats['tx_unicast_packets'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/PacketsSent').text)
interface_stats['rx_errors'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/InputErrors').text)
interface_stats['tx_broadcast_packets'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/BroadcastPacketsSent').text)
interface_stats['rx_multicast_packets'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/MulticastPacketsReceived').text)
interface_stats['rx_broadcast_packets'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/BroadcastPacketsReceived').text)
interface_stats['rx_discards'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/InputDrops').text)
interface_stats['rx_unicast_packets'] = int(interface.find(
'InterfaceStatistics/FullInterfaceStats/PacketsReceived').text)
interface_counters[interface_name] = interface_stats
return interface_counters
def get_bgp_neighbors(self):
def generate_vrf_query(vrf_name):
"""
Helper to provide XML-query for the VRF-type we're interested in.
"""
if vrf_name == "global":
rpc_command = """<Get>
<Operational>
<BGP>
<InstanceTable>
<Instance>
<Naming>
<InstanceName>
default
</InstanceName>
</Naming>
<InstanceActive>
<DefaultVRF>
<GlobalProcessInfo>
</GlobalProcessInfo>
<NeighborTable>
</NeighborTable>
</DefaultVRF>
</InstanceActive>
</Instance>
</InstanceTable>
</BGP>
</Operational>
</Get>"""
else:
rpc_command = """<Get>
<Operational>
<BGP>
<InstanceTable>
<Instance>
<Naming>
<InstanceName>
default
</InstanceName>
</Naming>
<InstanceActive>
<VRFTable>
<VRF>
<Naming>
%s
</Naming>
<GlobalProcessInfo>
</GlobalProcessInfo>
<NeighborTable>
</NeighborTable>
</VRF>
</VRFTable>
</InstanceActive>
</Instance>
</InstanceTable>
</BGP>
</Operational>
</Get>""" % vrf_name
return rpc_command
"""
Initial run to figure out what VRF's are available
Decided to get this one from Configured-section because bulk-getting all instance-data to do the same could get ridiculously heavy
Assuming we're always interested in the DefaultVRF
"""
active_vrfs = ["global"]
rpc_command = """<Get>
<Operational>
<BGP>
<ConfigInstanceTable>
<ConfigInstance>
<Naming>
<InstanceName>
default
</InstanceName>
</Naming>
<ConfigInstanceVRFTable>
</ConfigInstanceVRFTable>
</ConfigInstance>
</ConfigInstanceTable>
</BGP>
</Operational>
</Get>"""
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for node in result_tree.iter('ConfigVRF'):
active_vrfs.append(str(node.find('Naming/VRFName').text))
result = dict()
for vrf in active_vrfs:
rpc_command = generate_vrf_query(vrf)
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
this_vrf = dict()
this_vrf['peers'] = dict()
if vrf == "global":
this_vrf['router_id'] = unicode(result_tree.find(
'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/DefaultVRF/GlobalProcessInfo/VRF/RouterID').text)
else:
this_vrf['router_id'] = unicode(result_tree.find(
'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/VRFTable/VRF/GlobalProcessInfo/VRF/RouterID').text)
neighbors = dict()
for neighbor in result_tree.iter('Neighbor'):
this_neighbor = dict()
this_neighbor['local_as'] = int(neighbor.find('LocalAS').text)
this_neighbor['remote_as'] = int(neighbor.find('RemoteAS').text)
this_neighbor['remote_id'] = unicode(neighbor.find('RouterID').text)
try:
this_neighbor['description'] = unicode(neighbor.find('Description').text)
except AttributeError:
this_neighbor['description'] = u''
# 'is' compares identity, not equality; use == for the string check
this_neighbor['is_enabled'] = (str(neighbor.find('ConnectionAdminStatus').text) == "1")
if str(neighbor.find('ConnectionState').text) == "BGP_ST_ESTAB":
this_neighbor['is_up'] = True
this_neighbor['uptime'] = int(neighbor.find('ConnectionEstablishedTime').text)
else:
this_neighbor['is_up'] = False
this_neighbor['uptime'] = -1
this_neighbor['address_family'] = dict()
if neighbor.find('ConnectionRemoteAddress/AFI').text == "IPv4":
this_afi = "ipv4"
elif neighbor.find('ConnectionRemoteAddress/AFI').text == "IPv6":
this_afi = "ipv6"
else:
this_afi = neighbor.find('ConnectionRemoteAddress/AFI').text
this_neighbor['address_family'][this_afi] = dict()
try:
this_neighbor['address_family'][this_afi][
"received_prefixes"] = int(neighbor.find('AFData/Entry/PrefixesAccepted').text) + int(
neighbor.find('AFData/Entry/PrefixesDenied').text)
this_neighbor['address_family'][this_afi][
"accepted_prefixes"] = int(neighbor.find('AFData/Entry/PrefixesAccepted').text)
this_neighbor['address_family'][this_afi][
"sent_prefixes"] = int(neighbor.find('AFData/Entry/PrefixesAdvertised').text)
except AttributeError:
this_neighbor['address_family'][this_afi]["received_prefixes"] = -1
this_neighbor['address_family'][this_afi]["accepted_prefixes"] = -1
this_neighbor['address_family'][this_afi]["sent_prefixes"] = -1
try:
neighbor_ip = unicode(neighbor.find('Naming/NeighborAddress/IPV4Address').text)
except AttributeError:
neighbor_ip = unicode(neighbor.find('Naming/NeighborAddress/IPV6Address').text)
neighbors[neighbor_ip] = this_neighbor
this_vrf['peers'] = neighbors
result[vrf] = this_vrf
return result
def get_environment(self):
def get_module_xml_query(module,selection):
return """<Get>
<AdminOperational>
<EnvironmentalMonitoring>
<RackTable>
<Rack>
<Naming>
<rack>0</rack>
</Naming>
<SlotTable>
<Slot>
<Naming>
<slot>%s</slot>
</Naming>
%s
</Slot>
</SlotTable>
</Rack>
</RackTable>
</EnvironmentalMonitoring>
</AdminOperational>
</Get>""" % (module,selection)
environment_status = dict()
environment_status['fans'] = dict()
environment_status['temperature'] = dict()
environment_status['power'] = dict()
environment_status['cpu'] = dict()
environment_status['memory'] = int()
# finding slots with equipment we're interested in
rpc_command = """<Get>
<AdminOperational>
<PlatformInventory>
<RackTable>
<Rack>
<Naming>
<Name>0</Name>
</Naming>
<SlotTable>
</SlotTable>
</Rack>
</RackTable>
</PlatformInventory>
</AdminOperational>
</Get>"""
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
active_modules = defaultdict(list)
for slot in result_tree.iter("Slot"):
for card in slot.iter("CardTable"):
# find enabled slots, figure out the type and save it for later
if card.find('Card/Attributes/FRUInfo/ModuleAdministrativeState').text == "ADMIN_UP":
slot_name = slot.find('Naming/Name').text
module_type = re.sub("\d+", "", slot_name)
if len(module_type) > 0:
active_modules[module_type].append(slot_name)
else:
active_modules["LC"].append(slot_name)
#
# PSU's
#
for psu in active_modules['PM']:
if psu in ["PM6", "PM7"]: # Cisco bug, chassis difference V01<->V02
continue
rpc_command = get_module_xml_query(psu,'')
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
psu_status = dict()
psu_status['status'] = False
psu_status['capacity'] = float()
psu_status['output'] = float()
# initialise readings in case a sensor is missing from the reply
this_psu_voltage = this_psu_current = this_psu_capacity = 0.0
for sensor in result_tree.iter('SensorName'):
if sensor.find('Naming/Name').text == "host__VOLT":
this_psu_voltage = float(sensor.find('ValueBrief').text)
elif sensor.find('Naming/Name').text == "host__CURR":
this_psu_current = float(sensor.find('ValueBrief').text)
elif sensor.find('Naming/Name').text == "host__PM":
this_psu_capacity = float(sensor.find('ValueBrief').text)
if this_psu_capacity > 0:
psu_status['capacity'] = this_psu_capacity
psu_status['status'] = True
if this_psu_current and this_psu_voltage:
psu_status['output'] = (this_psu_voltage * this_psu_current) / 1000000.0
environment_status['power'][psu] = psu_status
#
# Memory
#
rpc_command = "<Get><AdminOperational><MemorySummary></MemorySummary></AdminOperational></Get>"
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
available_ram = free_ram = 0 # guard against no matching RSP node
for node in result_tree.iter('Node'):
if node.find('Naming/NodeName/Slot').text == active_modules['RSP'][0]: # first enabled RSP
available_ram = int(node.find('Summary/SystemRAMMemory').text)
free_ram = int(node.find('Summary/FreeApplicationMemory').text)
break # we're only looking at one of the RSPs
if available_ram and free_ram:
used_ram = available_ram - free_ram
memory = dict()
memory['available_ram'] = available_ram
memory['used_ram'] = used_ram
environment_status['memory'] = memory
#
# Fans
#
for fan in active_modules['FT']:
rpc_command = get_module_xml_query(fan,'')
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for module in result_tree.iter('Module'):
for sensortype in module.iter('SensorType'):
for sensorname in sensortype.iter('SensorNameTable'):
if sensorname.find('SensorName/Naming/Name').text == "host__FanSpeed_0":
environment_status['fans'][fan] = {'status': int(sensorname.find(
'SensorName/ValueDetailed/Status').text) == 1} # == instead of 'is' for value comparison
#
# CPU
#
cpu = dict()
rpc_command = "<Get><Operational><SystemMonitoring></SystemMonitoring></Operational></Get>"
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for module in result_tree.iter('CPUUtilization'):
this_cpu = dict()
this_cpu["%usage"] = float(module.find('TotalCPUFiveMinute').text)
rack = module.find('Naming/NodeName/Rack').text
slot = module.find('Naming/NodeName/Slot').text
instance = module.find('Naming/NodeName/Instance').text
position = "%s/%s/%s" % (rack,slot,instance)
cpu[position] = this_cpu
environment_status["cpu"] = cpu
#
# Temperature
#
temperature = dict()
slot_list = set()
for category, slot in active_modules.iteritems():
slot_list |= set(slot)
for slot in slot_list:
rpc_command = get_module_xml_query(slot,'')
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for sensor in result_tree.findall(".//SensorName"):
if not sensor.find('Naming/Name').text == "host__Inlet0":
continue
this_reading = dict()
this_reading['temperature'] = float(sensor.find('ValueBrief').text)
threshold_value = [float(x.text) for x in sensor.findall("ThresholdTable/Threshold/ValueBrief")]
this_reading['is_alert'] = threshold_value[2] <= this_reading['temperature'] <= threshold_value[3]
this_reading['is_critical'] = threshold_value[4] <= this_reading['temperature'] <= threshold_value[5]
this_reading['temperature'] = this_reading['temperature']/10
environment_status["temperature"][slot] = this_reading
return environment_status
def get_lldp_neighbors(self):
# init result dict
lldp = {}
sh_lldp = self.device.show_lldp_neighbors().splitlines()[5:-3]
for n in sh_lldp:
local_interface = n.split()[1]
if local_interface not in lldp.keys():
lldp[local_interface] = list()
lldp[local_interface].append({'hostname': unicode(n.split()[0]), 'port': unicode(n.split()[4]), })
return lldp
def get_lldp_neighbors_detail(self, interface = ''):
lldp_neighbors = dict()
rpc_command = '<Get><Operational><LLDP></LLDP></Operational></Get>'
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for neighbor in result_tree.findall('.//Neighbors/DetailTable/Detail/Entry'):
if neighbor is None:
continue
try:
interface_name = unicode(neighbor.find('ReceivingInterfaceName').text)
parent_interface = unicode(neighbor.find('ReceivingParentInterfaceName').text)
device_id = unicode(neighbor.find('DeviceID').text)
chassis_id = unicode(neighbor.find('ChassisID').text)
port_id = unicode(neighbor.find('PortIDDetail').text)
port_descr = unicode(neighbor.find('Detail/PortDescription').text)
system_name = unicode(neighbor.find('Detail/SystemName').text)
system_descr = unicode(neighbor.find('Detail/SystemDescription').text)
system_capabilities = unicode(neighbor.find('Detail/SystemCapabilities').text)
enabled_capabilities= unicode(neighbor.find('Detail/EnabledCapabilities').text)
# few other optional...
# time_remaining = neighbor.find('Detail/TimeRemaining').text
# media_attachement_unit_type = neighbor.find('Detail/MediaAttachmentUnitType').text
# port_vlan_id = neighbor.find('Detail/PortVlanID').text
if interface_name not in lldp_neighbors.keys():
lldp_neighbors[interface_name] = list()
lldp_neighbors[interface_name].append({
'parent_interface' : parent_interface,
'remote_chassis_id' : chassis_id,
'remote_port' : port_id,
'remote_port_description' : port_descr,
'remote_system_name' : system_name,
'remote_system_description' : system_descr,
'remote_system_capab' : system_capabilities,
'remote_system_enable_capab' : enabled_capabilities
})
except Exception:
continue # jump to next neighbor
return lldp_neighbors
def cli(self, commands = None):
cli_output = dict()
if not isinstance(commands, list):
raise TypeError('Please enter a valid list of commands!')
for command in commands:
try:
cli_output[unicode(command)] = unicode(self.device._execute_show(command))
except TimeoutError:
cli_output[unicode(command)] = 'Execution of command "{command}" took too long! Please adjust your params!'.format(
command = command
)
raise CommandTimeoutException(str(cli_output))
except Exception as e:
cli_output[unicode(command)] = 'Unable to execute command "{cmd}": {err}'.format(
cmd = command,
err = e
)
raise CommandErrorException(str(cli_output))
return cli_output
def get_bgp_config(self, group = '', neighbor = ''):
bgp_config = {}
# a helper
def build_prefix_limit(af_table, limit, prefix_percent, prefix_timeout):
prefix_limit = dict()
inet = False
inet6 = False
prefix_type = 'inet'
# normalise the case: the AFName leaf mixes 'IPv4'/'IPV4' style spellings
if 'IPV4' in af_table.upper():
inet = True
if 'IPV6' in af_table.upper():
inet6 = True
prefix_type = 'inet6'
if inet or inet6:
prefix_limit = {
prefix_type: {
af_table[4:].lower(): {
'limit': limit,
'teardown': {
'threshold': prefix_percent,
'timeout' : prefix_timeout
}
}
}
}
return prefix_limit
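# Shape check for the helper above (hypothetical values; AFName strings on
# the device look like 'IPv4Unicast'):
# build_prefix_limit('IPv4Unicast', 100, 80, 30) ->
# {'inet': {'unicast': {'limit': 100,
#                       'teardown': {'threshold': 80, 'timeout': 30}}}}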
# here begins actual method...
rpc_command = '''
<Get>
<Configuration>
<BGP>
<Instance>
<Naming>
<InstanceName>
default
</InstanceName>
</Naming>
</Instance>
</BGP>
</Configuration>
</Get>
'''
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
group = group.lower()
neighbor = neighbor.lower()
if not group:
neighbor = ''
bgp_group_neighbors = {}
for bgp_neighbor in result_tree.iter('Neighbor'):
group_name = self._find_txt(bgp_neighbor, 'NeighborGroupAddMember')
peer = self._find_txt(bgp_neighbor, 'Naming/NeighborAddress/IPV4Address') or self._find_txt(bgp_neighbor, 'Naming/NeighborAddress/IPV6Address')
if neighbor and peer != neighbor:
continue
description = unicode(self._find_txt(bgp_neighbor, 'Description'))
peer_as = int(self._find_txt(bgp_neighbor, 'RemoteAS/AS_YY', 0))
local_as = int(self._find_txt(bgp_neighbor, 'LocalAS/AS_YY', 0))
af_table = self._find_txt(bgp_neighbor, 'NeighborAFTable/NeighborAF/Naming/AFName')
prefix_limit = int(self._find_txt(bgp_neighbor, 'NeighborAFTable/NeighborAF/MaximumPrefixes/PrefixLimit', 0))
prefix_percent = int(self._find_txt(bgp_neighbor, 'NeighborAFTable/NeighborAF/MaximumPrefixes/WarningPercentage', 0))
prefix_timeout = int(self._find_txt(bgp_neighbor, 'NeighborAFTable/NeighborAF/MaximumPrefixes/RestartTime', 0))
import_policy = unicode(self._find_txt(bgp_neighbor, 'NeighborAFTable/NeighborAF/RoutePolicyIn'))
export_policy = unicode(self._find_txt(bgp_neighbor, 'NeighborAFTable/NeighborAF/RoutePolicyOut'))
local_address = unicode(self._find_txt(bgp_neighbor, 'LocalAddress/LocalIPAddress/IPV4Address') or self._find_txt(bgp_neighbor, 'LocalAddress/LocalIPAddress/IPV6Address'))
password = unicode(self._find_txt(bgp_neighbor, 'Password/Password/Password'))
nhs = False
route_reflector= False
if group_name not in bgp_group_neighbors.keys():
bgp_group_neighbors[group_name] = dict()
bgp_group_neighbors[group_name][peer] = {
'description' : description,
'remote_as' : peer_as,
'prefix_limit' : build_prefix_limit(af_table, prefix_limit, prefix_percent, prefix_timeout),
'export_policy' : export_policy,
'import_policy' : import_policy,
'local_address' : local_address,
'local_as' : local_as,
'authentication_key' : password,
'nhs' : nhs,
'route_reflector_client': route_reflector
}
if neighbor and peer == neighbor:
break
for bgp_group in result_tree.iter('NeighborGroup'):
group_name = self._find_txt(bgp_group, 'Naming/NeighborGroupName')
if group and group != group_name:
continue
bgp_type = 'external' # by default external
# must check
description = unicode(self._find_txt(bgp_group, 'Description'))
import_policy = unicode(self._find_txt(bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/RoutePolicyIn'))
export_policy = unicode(self._find_txt(bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/RoutePolicyOut'))
multipath = eval(self._find_txt(bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/Multipath', 'false').title())
peer_as = int(self._find_txt(bgp_group, 'RemoteAS/AS_YY', 0))
local_as = int(self._find_txt(bgp_group, 'LocalAS/AS_YY', 0))
multihop_ttl = int(self._find_txt(bgp_group, 'EBGPMultihop/MaxHopCount', 0))
local_address = unicode(self._find_txt(bgp_group, 'LocalAddress/LocalIPAddress/IPV4Address') or self._find_txt(bgp_group, 'LocalAddress/LocalIPAddress/IPV6Address'))
af_table = self._find_txt(bgp_group, 'NeighborAFTable/NeighborAF/Naming/AFName')
prefix_limit = int(self._find_txt(bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/PrefixLimit', 0))
prefix_percent= int(self._find_txt(bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/WarningPercentage', 0))
prefix_timeout= int(self._find_txt(bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/RestartTime', 0))
remove_private= True # is it specified in the XML?
bgp_config[group_name] = {
'apply_groups' : [], # on IOS-XR will always be empty list!
'description' : description,
'local_as' : local_as,
'type' : unicode(bgp_type),
'import_policy' : import_policy,
'export_policy' : export_policy,
'local_address' : local_address,
'multipath' : multipath,
'multihop_ttl' : multihop_ttl,
'remote_as' : peer_as,
'remove_private_as' : remove_private,
'prefix_limit' : build_prefix_limit(af_table, prefix_limit, prefix_percent, prefix_timeout),
'neighbors' : bgp_group_neighbors.get(group_name, {})
}
if group and group == group_name:
break
return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
bgp_neighbors_detail = {}
active_vrfs = ['default']
active_vrfs_rpc_request = (
'<Get>'
'<Operational>'
'<BGP>'
'<ConfigInstanceTable>'
'<ConfigInstance>'
'<Naming>'
'<InstanceName>'
'default'
'</InstanceName>'
'</Naming>'
'<ConfigInstanceVRFTable/>'
'</ConfigInstance>'
'</ConfigInstanceTable>'
'</BGP>'
'</Operational>'
'</Get>'
)
active_vrfs_rpc_reply = ETREE.fromstring(self.device.make_rpc_call(active_vrfs_rpc_request))
active_vrfs_tree = active_vrfs_rpc_reply.xpath('.//ConfigVRF')
for active_vrf_tree in active_vrfs_tree:
active_vrfs.append(find_txt(active_vrf_tree, 'Naming/VRFName'))
unique_active_vrfs = set(active_vrfs)
bgp_neighbors_vrf_all_rpc = (
'<Get>'
'<Operational>'
'<BGP>'
'<InstanceTable>'
'<Instance>'
'<Naming>'
'<InstanceName>'
'default'
'</InstanceName>'
'</Naming>'
)
for active_vrf in unique_active_vrfs:
vrf_rpc = (
'<InstanceActive>'
'<VRFTable>'
'<VRF>'
'<Naming>'
'{vrf_name}'
'</Naming>'
'<GlobalProcessInfo/>'
'<NeighborTable/>'
'</VRF>'
'</VRFTable>'
'</InstanceActive>'
)
bgp_neighbors_vrf_all_rpc += vrf_rpc.format(vrf_name=active_vrf)
bgp_neighbors_vrf_all_rpc += (
'</Instance>'
'</InstanceTable>'
'</BGP>'
'</Operational>'
'</Get>'
)
bgp_neighbors_vrf_all_tree = ETREE.fromstring(self.device.make_rpc_call(bgp_neighbors_vrf_all_rpc))
_BGP_STATE_ = {
'0': 'Unknown',
'1': 'Idle',
'2': 'Connect',
'3': 'OpenSent',
'4': 'OpenConfirm',
'5': 'Active',
'6': 'Established'
}
instance_active_list = bgp_neighbors_vrf_all_tree.xpath('.//InstanceTable/Instance/InstanceActive/VRFTable/VRF')
for vrf_tree in instance_active_list:
vrf_name = find_txt(vrf_tree, 'Naming/VRFName')
vrf_keepalive = convert(int, find_txt(vrf_tree, 'GlobalProcessInfo/VRF/KeepAliveTime')) # read from this VRF's subtree, not the whole list
vrf_holdtime = convert(int, find_txt(vrf_tree, 'GlobalProcessInfo/VRF/HoldTime'))
if vrf_name not in bgp_neighbors_detail.keys():
bgp_neighbors_detail[vrf_name] = {}
for neighbor in vrf_tree.xpath('NeighborTable/Neighbor'):
up = (find_txt(neighbor, 'ConnectionState') == 'BGP_ST_ESTAB')
local_as = convert(int, find_txt(neighbor, 'LocalAS', 0))
remote_as = convert(int, find_txt(neighbor, 'RemoteAS', 0))
router_id = ip(find_txt(neighbor, 'RouterID'))
remote_address = ip(find_txt(neighbor, 'Naming/NeighborAddress/IPV4Address')) \
or ip(find_txt(neighbor, 'Naming/NeighborAddress/IPV6Address'))
local_address_configured = eval(find_txt(neighbor, 'IsLocalAddressConfigured', 'false').title())
local_address = ip(find_txt(neighbor, 'ConnectionLocalAddress/IPV4Address')) \
or ip(find_txt(neighbor, 'ConnectionLocalAddress/IPV6Address'))
local_port = convert(int, find_txt(neighbor, 'ConnectionLocalPort'))
remote_address = ip(find_txt(neighbor, 'ConnectionRemoteAddress/IPV4Address')) \
or ip(find_txt(neighbor, 'ConnectionRemoteAddress/IPV6Address'))
remote_port = convert(int, find_txt(neighbor, 'ConnectionRemotePort'))
multihop = eval(find_txt(neighbor, 'IsExternalNeighborNotDirectlyConnected', 'false').title())
remove_private_as = eval(find_txt(neighbor, 'AFData/Entry/RemovePrivateASFromUpdates', 'false').title())
multipath = eval(find_txt(neighbor, 'AFData/Entry/SelectiveMultipathEligible', 'false').title())
import_policy = find_txt(neighbor, 'AFData/Entry/RoutePolicyIn')
export_policy = find_txt(neighbor, 'AFData/Entry/RoutePolicyOut')
input_messages = convert(int, find_txt(neighbor, 'MessgesReceived', 0)) # tag spelling ('Messges') left as-is; it may mirror the device schema
output_messages = convert(int, find_txt(neighbor, 'MessagesSent', 0))
connection_up_count = convert(int, find_txt(neighbor, 'ConnectionUpCount', 0))
connection_down_count = convert(int, find_txt(neighbor, 'ConnectionDownCount', 0))
messages_queued_out = convert(int, find_txt(neighbor, 'MessagesQueuedOut', 0))
connection_state = find_txt(neighbor, 'ConnectionState').replace('BGP_ST_', '').title()
if connection_state == u'Estab':
connection_state = u'Established'
previous_connection_state = unicode(_BGP_STATE_.get(find_txt(neighbor, 'PreviousConnectionState', '0')))
active_prefix_count = convert(int, find_txt(neighbor, 'AFData/Entry/NumberOfBestpaths', 0))
accepted_prefix_count = convert(int, find_txt(neighbor, 'AFData/Entry/PrefixesAccepted', 0))
suppressed_prefix_count = convert(int, find_txt(neighbor, 'AFData/Entry/PrefixesDenied', 0))
received_prefix_count = accepted_prefix_count + suppressed_prefix_count # not quite right...
advertised_prefix_count = convert(int, find_txt(neighbor, 'AFData/Entry/PrefixesAdvertised', 0))
suppress_4byte_as = eval(find_txt(neighbor, 'Suppress4ByteAs', 'false').title())
local_as_prepend = not eval(find_txt(neighbor, 'LocalASNoPrepend', 'false').title())
holdtime = convert(int, find_txt(neighbor, 'HoldTime', 0)) or vrf_holdtime
configured_holdtime = convert(int, find_txt(neighbor, 'ConfiguredHoldTime', 0))
keepalive = convert(int, find_txt(neighbor, 'KeepAliveTime', 0)) or vrf_keepalive
configured_keepalive = convert(int, find_txt(neighbor, 'ConfiguredKeepalive', 0))
flap_count = connection_down_count / 2
if up:
flap_count -= 1
if remote_as not in bgp_neighbors_detail[vrf_name].keys():
bgp_neighbors_detail[vrf_name][remote_as] = []
bgp_neighbors_detail[vrf_name][remote_as].append({
'up': up,
'local_as': local_as,
'remote_as': remote_as,
'router_id': router_id,
'local_address': local_address,
'routing_table': vrf_name,
'local_address_configured': local_address_configured,
'local_port': local_port,
'remote_address': remote_address,
'remote_port': remote_port,
'multihop': multihop,
'multipath': multipath,
'import_policy': import_policy,
'export_policy': export_policy,
'input_messages': input_messages,
'output_messages': output_messages,
'input_updates': 0,
'output_updates': 0,
'messages_queued_out': messages_queued_out,
'connection_state': connection_state,
'previous_connection_state': previous_connection_state,
'last_event': u'',
'remove_private_as': remove_private_as,
'suppress_4byte_as': suppress_4byte_as,
'local_as_prepend': local_as_prepend,
'holdtime': holdtime,
'configured_holdtime': configured_holdtime,
'keepalive': keepalive,
'configured_keepalive': configured_keepalive,
'active_prefix_count': active_prefix_count,
'received_prefix_count': received_prefix_count,
'accepted_prefix_count': accepted_prefix_count,
'suppressed_prefix_count': suppressed_prefix_count,
'advertised_prefix_count': advertised_prefix_count,
'flap_count': flap_count
})
return bgp_neighbors_detail
def get_arp_table(self):
arp_table = list()
rpc_command = '<Get><Operational><ARP></ARP></Operational></Get>'
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for arp_entry in result_tree.findall('.//EntryTable/Entry'):
try:
interface = unicode(arp_entry.find('.//InterfaceName').text)
ip = unicode(arp_entry.find('.//Address').text)
age = float(arp_entry.find('.//Age').text)
mac_raw = arp_entry.find('.//HardwareAddress').text
mac_all = mac_raw.replace('.', '').replace(':', '')
mac_format= unicode(':'.join([mac_all[i:i+2] for i in range(12)[::2]]))
arp_table.append(
{
'interface' : interface,
'mac' : mac_format,
'ip' : ip,
'age' : age
}
)
except Exception:
continue
return arp_table
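# MAC normalisation example (illustrative only, not part of the driver):
# both the Cisco dotted form and the plain colon form collapse to the same
# canonical string that get_arp_table() builds above, e.g.
# '0011.2233.4455' -> '001122334455' -> '00:11:22:33:44:55'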
def get_ntp_peers(self):
ntp_peers = {}
rpc_command = '<Get><Configuration><NTP></NTP></Configuration></Get>'
result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command))
for version in ['IPV4', 'IPV6']:
for peer in result_tree.findall('.//Peer{version}Table/Peer{version}'.format(version=version)):
peer_type = find_txt(peer, 'PeerType{version}/Naming/PeerType'.format(version=version))
if peer_type != 'Peer':
continue
peer_address = find_txt(peer, 'Naming/Address{version}'.format(version=version))
if not peer_address:
continue
ntp_peers[peer_address] = {}
return ntp_peers
def get_ntp_servers(self):
ntp_servers = {}
rpc_command = '<Get><Configuration><NTP></NTP></Configuration></Get>'
result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command))
for version in ['IPV4', 'IPV6']:
for peer in result_tree.xpath('.//Peer{version}Table/Peer{version}'.format(version=version)):
peer_type = find_txt(peer, 'PeerType{version}/Naming/PeerType'.format(version=version))
if peer_type != 'Server':
continue
server_address = find_txt(peer, 'Naming/Address{version}'.format(version=version))
if not server_address:
continue
ntp_servers[server_address] = {}
return ntp_servers
def get_ntp_stats(self):
ntp_stats = list()
rpc_command = '<Get><Operational><NTP><NodeTable></NodeTable></NTP></Operational></Get>'
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for node in result_tree.findall('.//NodeTable/Node/Associations/PeerSummaryInfo/Entry/PeerInfoCommon'):
try:
synchronized = eval(self._find_txt(node, 'IsSysPeer', 'false').title())
address = unicode(self._find_txt(node, 'Address'))
if address == 'DLRSC node':
continue
referenceid = unicode(self._find_txt(node, 'ReferenceID'))
hostpoll = int(self._find_txt(node, 'HostPoll', '0'))
reachability = int(self._find_txt(node, 'Reachability', '0'))
stratum = int(self._find_txt(node, 'Stratum', '0'))
delay = float(self._find_txt(node, 'Delay', '0.0'))
offset = float(self._find_txt(node, 'Offset', '0.0'))
jitter = float(self._find_txt(node, 'Dispersion', '0.0'))
ntp_stats.append({
'remote' : address,
'synchronized' : synchronized,
'referenceid' : referenceid,
'stratum' : stratum,
'type' : u'',
'when' : u'',
'hostpoll' : hostpoll,
'reachability' : reachability,
'delay' : delay,
'offset' : offset,
'jitter' : jitter
})
except Exception:
continue
return ntp_stats
def get_interfaces_ip(self):
interfaces_ip = dict()
rpc_command_ipv4 = '<Get><Operational><IPV4Network></IPV4Network></Operational></Get>'
ipv4_tree = ET.fromstring(self.device.make_rpc_call(rpc_command_ipv4))
for interface in ipv4_tree.findall('.//InterfaceTable/Interface'):
try:
interface_name = unicode(interface.find('Naming/InterfaceName').text)
primary_ip = unicode(interface.find('VRFTable/VRF/Detail/PrimaryAddress').text)
primary_prefix = int(interface.find('VRFTable/VRF/Detail/PrefixLength').text)
if interface_name not in interfaces_ip.keys():
interfaces_ip[interface_name] = dict()
if u'ipv4' not in interfaces_ip[interface_name].keys():
interfaces_ip[interface_name][u'ipv4'] = dict()
if primary_ip not in interfaces_ip[interface_name].get(u'ipv4', {}).keys():
interfaces_ip[interface_name][u'ipv4'][primary_ip] = {
u'prefix_length': primary_prefix
}
for secondary_address in interface.findall('VRFTable/VRF/Detail/SecondaryAddress/Entry'):
secondary_ip = unicode(secondary_address.find('Address').text)
secondary_prefix = int(secondary_address.find('PrefixLength').text)
if secondary_ip not in interfaces_ip[interface_name]:
interfaces_ip[interface_name][u'ipv4'][secondary_ip] = {
u'prefix_length': secondary_prefix
}
except Exception:
continue
rpc_command_ipv6 = '<Get><Operational><IPV6Network></IPV6Network></Operational></Get>'
ipv6_tree = ET.fromstring(self.device.make_rpc_call(rpc_command_ipv6))
for interface in ipv6_tree.findall('.//InterfaceData/VRFTable/VRF/GlobalDetailTable/GlobalDetail'):
interface_name = unicode(interface.find('Naming/InterfaceName').text)
if interface_name not in interfaces_ip.keys():
interfaces_ip[interface_name] = dict()
if u'ipv6' not in interfaces_ip[interface_name].keys():
interfaces_ip[interface_name][u'ipv6'] = dict()
for address in interface.findall('AddressList/Entry'):
address_ip = unicode(address.find('Address').text)
address_prefix = int(address.find('PrefixLength').text)
if address_ip not in interfaces_ip[interface_name].get(u'ipv6', {}).keys():
interfaces_ip[interface_name][u'ipv6'][address_ip] = {
u'prefix_length': address_prefix
}
return interfaces_ip
def get_mac_address_table(self):
mac_table = list()
rpc_command = '<Get><Operational><L2VPNForwarding></L2VPNForwarding></Operational></Get>'
result_tree = ET.fromstring(self.device.make_rpc_call(rpc_command))
for mac_entry in result_tree.findall('.//L2FIBMACDetailTable/L2FIBMACDetail'):
try:
mac_raw = mac_entry.find('Naming/Address').text
# will throw error in case not found
# and jump to next entry
mac_str = mac_raw.replace('.', '').replace(':', '')
mac_format = unicode(':'.join([ mac_str[i:i+2] for i in range(12)[::2] ]))
vlan = int(self._find_txt(mac_entry, 'Naming/Name', '').replace('vlan', ''))
interface = unicode(self._find_txt(mac_entry, 'Segment/AC/InterfaceHandle', u''))
mac_table.append(
{
'mac' : mac_format,
'interface' : interface,
'vlan' : vlan,
'active' : True,
'static' : False,
'moves' : 0,
'last_move' : 0.0
}
)
except Exception:
continue
return mac_table
def get_route_to(self, destination = '', protocol = ''):
routes = {}
if not isinstance(destination, str):
raise TypeError('Please specify a valid destination!')
if not isinstance(protocol, str) or protocol.lower() not in ['static', 'bgp', 'isis']:
raise TypeError("Protocol not supported: {protocol}.".format(
protocol = protocol
))
protocol = protocol.lower()
dest_split = destination.split('/')
network = dest_split[0]
prefix_tag = ''
if len(dest_split) == 2:
prefix_tag = '''
<PrefixLength>
{prefix_length}
</PrefixLength>
'''.format(prefix_length = dest_split[1])
route_info_rpc_command = '''
<Get>
<Operational>
<RIB>
<VRFTable>
<VRF>
<Naming>
<VRFName>
default
</VRFName>
</Naming>
<AFTable>
<AF>
<Naming>
<AFName>
IPv4
</AFName>
</Naming>
<SAFTable>
<SAF>
<Naming>
<SAFName>
Unicast
</SAFName>
</Naming>
<IP_RIBRouteTable>
<IP_RIBRoute>
<Naming>
<RouteTableName>
default
</RouteTableName>
</Naming>
<RouteTable>
<Route>
<Naming>
<Address>
{network}
</Address>
{prefix}
</Naming>
</Route>
</RouteTable>
</IP_RIBRoute>
</IP_RIBRouteTable>
</SAF>
</SAFTable>
</AF>
</AFTable>
</VRF>
</VRFTable>
</RIB>
</Operational>
</Get>
'''.format(
network = network,
prefix = prefix_tag
)
routes_tree = ET.fromstring(self.device.make_rpc_call(route_info_rpc_command))
for route in routes_tree.iter('Route'):
route_details = dict()
try:
address = route.find('Prefix').text
length = route.find('PrefixLength').text
distance = int(route.find('Distance').text)
protocol = unicode(route.find('ProtocolName').text.upper())
priority = int(route.find('Priority').text)
age = int(route.find('RouteAge').text)
destination = unicode('{prefix}/{length}'.format(
prefix = address,
length = length
))
if destination not in routes.keys():
routes[destination] = list()
except Exception:
continue
route_details = {
'current_active' : False,
'last_active' : False,
'age' : age,
'next_hop' : u'',
'protocol' : protocol,
'outgoing_interface': u'',
'preference' : priority,
'selected_next_hop' : False,
'inactive_reason' : u'',
'routing_table' : u'default',
'protocol_attributes': {}
}
# for BGP routes we try to get some more information
if protocol.lower() == 'bgp':
# looks like IOS-XR does not filter correctly
# !IMPORTANT
bgp_route_info_rpc_command = '''
<Get>
<Operational>
<BGP>
<Active>
<DefaultVRF>
<AFTable>
<AF>
<Naming>
<AFName>
IPv4Unicast
</AFName>
</Naming>
<PathTable>
<Path>
<Naming>
<Network>
<IPV4Address>
{network}
</IPV4Address>
<IPV4PrefixLength>
{prefix_len}
</IPV4PrefixLength>
</Network>
</Naming>
</Path>
</PathTable>
</AF>
</AFTable>
</DefaultVRF>
</Active>
</BGP>
</Operational>
</Get>
'''.format(
network = network,
prefix_len = dest_split[-1]
)
bgp_route_tree = ET.fromstring(self.device.make_rpc_call(bgp_route_info_rpc_command))
for bgp_path in bgp_route_tree.iter('Path'):
try:
best_path = eval(self._find_txt(bgp_path,'PathInformation/IsBestPath', 'false').title())
backup = eval(self._find_txt(bgp_path,'PathInformation/IsPathBackup', 'false').title())
local_preference = int(
self._find_txt(bgp_path, 'AttributesAfterPolicyIn/CommonAttributes/LocalPreference', '0')
)
metric = int(
self._find_txt(bgp_path, 'AttributesAfterPolicyIn/CommonAttributes/Metric', '0')
)
remote_as = int(
self._find_txt(bgp_path, 'AttributesAfterPolicyIn/CommonAttributes/NeighborAS', '0')
)
remote_address = unicode(self._find_txt(bgp_path, 'PathInformation/NeighborAddress/IPV4Address') \
or self._find_txt(bgp_path, 'PathInformation/NeighborAddress/IPV6Address'))
as_path = ' '.join(
[bgp_as.text for bgp_as in bgp_path.findall('AttributesAfterPolicyIn/CommonAttributes/NeighborAS/Entry')]
)
next_hop = unicode(self._find_txt(bgp_path, 'PathInformation/NextHop/IPV4Address') \
or self._find_txt(bgp_path, 'PathInformation/NextHop/IPV6Address') )
except Exception:
continue
single_route_details = route_details.copy()
single_route_details['current_active'] = best_path
single_route_details['next_hop'] = next_hop
single_route_details['protocol_attributes'] = {
'local_preference' : local_preference,
'as_path' : as_path,
'remote_as' : remote_as,
'remote_address' : remote_address
}
routes[destination].append(single_route_details)
else:
first_route = True
for route_entry in route.findall('RoutePath/Entry'):
# get all possible entries
try:
next_hop = unicode(route_entry.find('Address').text)
except Exception:
continue
single_route_details = route_details.copy()
single_route_details.update({
'current_active': first_route,
'next_hop' : next_hop
})
routes[destination].append(single_route_details)
first_route = False
return routes
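# Shape of the structure returned by get_route_to() (hypothetical values):
# {u'10.0.0.0/24': [{'current_active': True,
#                    'age': 1234,
#                    'next_hop': u'192.168.0.1',
#                    'protocol': u'BGP',
#                    'preference': 200,
#                    'routing_table': u'default',
#                    'protocol_attributes': {'local_preference': 100,
#                                            'as_path': '65001',
#                                            'remote_as': 65001,
#                                            'remote_address': u'192.168.0.1'}}]}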
def get_snmp_information(self):
snmp_information = dict()
snmp_rpc_command = '<Get><Configuration><SNMP></SNMP></Configuration></Get>'
snmp_result_tree = ET.fromstring(self.device.make_rpc_call(snmp_rpc_command))
_PRIVILEGE_MODE_MAP_ = {
'ReadOnly': u'ro',
'ReadWrite': u'rw'
}
snmp_information = {
'chassis_id': unicode(self._find_txt(snmp_result_tree, './/ChassisID')),
'contact': unicode(self._find_txt(snmp_result_tree, './/Contact')),
'location': unicode(self._find_txt(snmp_result_tree, './/Location')),
'community': {}
}
for community in snmp_result_tree.iter('DefaultCommunity'):
name = unicode(self._find_txt(community, 'Naming/CommunityName'))
privilege = self._find_txt(community, 'Priviledge')
acl = unicode(self._find_txt(community, 'AccessList'))
snmp_information['community'][name] = {
'mode': _PRIVILEGE_MODE_MAP_.get(privilege, u''),
'acl' : acl
}
return snmp_information
def get_probes_config(self):
sla_config = dict()
_PROBE_TYPE_XML_TAG_MAP_ = {
'ICMPEcho': u'icmp-ping',
'UDPEcho': u'udp-ping',
'ICMPJitter': u'icmp-ping-timestamp',
'UDPJitter': u'udp-ping-timestamp'
}
sla_config_rpc_command = '<Get><Configuration><IPSLA></IPSLA></Configuration></Get>'
sla_config_result_tree = ET.fromstring(self.device.make_rpc_call(sla_config_rpc_command))
for probe in sla_config_result_tree.findall('.//Definition'):
probe_name = unicode(self._find_txt(probe, 'Naming/OperationID'))
operation_type = probe.find('OperationType').getchildren()[0].tag
probe_type = _PROBE_TYPE_XML_TAG_MAP_.get(operation_type, u'')
operation = probe.find('OperationType').find(operation_type)
test_name = unicode(self._find_txt(operation, 'Tag'))
source = unicode(self._find_txt(operation, 'SourceAddress'))
target = unicode(self._find_txt(operation, 'DestAddress'))
test_interval = int(self._find_txt(operation, 'Frequency', '0')) # defined in seconds
probe_count = int(self._find_txt(operation, 'History/Buckets', '0'))
if probe_name not in sla_config.keys():
sla_config[probe_name] = dict()
if test_name not in sla_config[probe_name]:
sla_config[probe_name][test_name] = dict()
sla_config[probe_name][test_name] = {
'probe_type': probe_type,
'source': source,
'target': target,
'probe_count': probe_count,
'test_interval': test_interval
}
return sla_config
def get_probes_results(self):
sla_results = dict()
_PROBE_TYPE_XML_TAG_MAP_ = {
'ICMPEcho': u'icmp-ping',
'UDPEcho': u'udp-ping',
'ICMPJitter': u'icmp-ping-timestamp',
'UDPJitter': u'udp-ping-timestamp'
}
sla_results_rpc_command = '<Get><Operational><IPSLA></IPSLA></Operational></Get>'
sla_results_tree = ET.fromstring(self.device.make_rpc_call(sla_results_rpc_command))
probes_config = self.get_probes_config() # need to retrieve also the configuration
# source and tag/test_name not provided
for probe in sla_results_tree.findall('.//Operation'):
probe_name = unicode(self._find_txt(probe, 'Naming/OperationID'))
test_name = probes_config.get(probe_name).keys()[0]
target = unicode(self._find_txt(probe, 'History/Target/LifeTable/Life/BucketTable/Bucket[0]/TargetAddress/IPv4AddressTarget'))
source = probes_config.get(probe_name).get(test_name, {}).get('source', '')
probe_type = _PROBE_TYPE_XML_TAG_MAP_.get(self._find_txt(probe, 'Statistics/Latest/Target/SpecificStats/op_type'))
test_interval = int(self._find_txt(probe, 'Common/OperationalState/Frequency')) * 1e-3 # Frequency is reported in milliseconds here
probe_count = probes_config.get(probe_name).get(test_name, {}).get('probe_count', 0)
# rtt = float(self._find_txt(probe, 'Statistics/Aggregated/HourTable/Hour/Distributed/Target/DistributionIntervalTable/DistributionInterval/CommonStats/ResponseTime'))
response_times = probe.findall('History/Target/LifeTable/Life[last()]/BucketTable/Bucket/ResponseTime')
response_times = [int(self._find_txt(response_time, '.', '0')) for response_time in response_times]
rtt = 0.0
if len(response_times):
rtt = sum(response_times, 0.0)/len(response_times)
return_codes = probe.findall('History/Target/LifeTable/Life[last()]/BucketTable/Bucket/ReturnCode')
return_codes = [self._find_txt(return_code, '.') for return_code in return_codes]
last_test_loss = 0.0
if len(return_codes):
last_test_loss = int(100*(1-return_codes.count('ipslaRetCodeOK')/float(len(return_codes))))
rms = float(self._find_txt(probe, 'Statistics/Aggregated/HourTable/Hour/Distributed/Target/DistributionIntervalTable/DistributionInterval/CommonStats/Sum2ResponseTime'))
global_test_updates = float(self._find_txt(probe, 'Statistics/Aggregated/HourTable/Hour/Distributed/Target/DistributionIntervalTable/DistributionInterval/CommonStats/UpdateCount'))
jitter = 0.0
if global_test_updates: # avoid division by zero when no updates were recorded
jitter = rtt - (rms / global_test_updates) ** 0.5
# jitter = max(rtt - max(response_times), rtt - min(response_times))
current_test_min_delay = 0.0 # no stats for undergoing test :(
current_test_max_delay = 0.0
current_test_avg_delay = 0.0
last_test_min_delay = float(self._find_txt(probe, 'Statistics/Latest/Target/CommonStats/MinResponseTime'))
last_test_max_delay = float(self._find_txt(probe, 'Statistics/Latest/Target/CommonStats/MaxResponseTime'))
last_test_sum_delay = float(self._find_txt(probe, 'Statistics/Latest/Target/CommonStats/SumResponseTime'))
last_test_updates = float(self._find_txt(probe, 'Statistics/Latest/Target/CommonStats/UpdateCount'))
last_test_avg_delay = 0.0
if last_test_updates:
last_test_avg_delay = last_test_sum_delay/last_test_updates
global_test_min_delay = float(self._find_txt(probe, 'Statistics/Aggregated/HourTable/Hour/Distributed/Target/DistributionIntervalTable/DistributionInterval/CommonStats/MinResponseTime'))
global_test_max_delay = float(self._find_txt(probe, 'Statistics/Aggregated/HourTable/Hour/Distributed/Target/DistributionIntervalTable/DistributionInterval/CommonStats/MaxResponseTime'))
global_test_sum_delay = float(self._find_txt(probe, 'Statistics/Aggregated/HourTable/Hour/Distributed/Target/DistributionIntervalTable/DistributionInterval/CommonStats/SumResponseTime'))
global_test_avg_delay = 0.0
if global_test_updates:
global_test_avg_delay = global_test_sum_delay/global_test_updates
if probe_name not in sla_results.keys():
sla_results[probe_name] = dict()
sla_results[probe_name][test_name] = {
'target': target,
'source': source,
'probe_type': probe_type,
'probe_count': probe_count,
'rtt': rtt,
'round_trip_jitter': jitter,
'last_test_loss': last_test_loss,
'current_test_min_delay': current_test_min_delay,
'current_test_max_delay': current_test_max_delay,
'current_test_avg_delay': current_test_avg_delay,
'last_test_min_delay': last_test_min_delay,
'last_test_max_delay': last_test_max_delay,
'last_test_avg_delay': last_test_avg_delay,
'global_test_min_delay': global_test_min_delay,
'global_test_max_delay': global_test_max_delay,
'global_test_avg_delay': global_test_avg_delay
}
return sla_results
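# Note on the jitter estimate used above (illustrative): Sum2ResponseTime /
# UpdateCount is the mean of the squared RTTs, so (rms / updates) ** 0.5 is an
# RMS response time, and rtt - rms_rtt gives a rough (signed) spread measure.
# E.g. for RTTs [10, 12, 11] ms: mean = 11.0, rms ~= 11.03, jitter ~= -0.03;
# the magnitude grows with the spread of the samples.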
def traceroute(self, destination, source='', ttl=0, timeout=0):
traceroute_result = dict()
ipv = 4
try:
ipv = IPAddress(destination).version
except AddrFormatError:
return {'error': 'Wrong destination IP Address!'}
source_tag = ''
ttl_tag = ''
timeout_tag = ''
if source:
source_tag = '<Source>{source}</Source>'.format(source = source)
if ttl:
ttl_tag = '<MaxTTL>{maxttl}</MaxTTL>'.format(maxttl = ttl)
if timeout:
timeout_tag = '<Timeout>{timeout}</Timeout>'.format(timeout = timeout) # was mistyped 'timout_tag', which left the tag empty
else:
timeout = 5 # seconds
traceroute_rpc_command = '''
<Set>
<Action>
<TraceRoute>
<IPV{version}>
<Destination>
{destination}
</Destination>
{source_tag}
{ttl_tag}
{timeout_tag}
</IPV{version}>
</TraceRoute>
</Action>
</Set>
'''.format(
version=ipv,
destination=destination,
source_tag=source_tag,
ttl_tag=ttl_tag,
timeout_tag=timeout_tag
)
xml_tree_txt = self.device.make_rpc_call(traceroute_rpc_command)
traceroute_tree = ET.fromstring(xml_tree_txt)
results_tree = traceroute_tree.find('.//Results')
# check for an empty reply before reading from the tree
if results_tree is None or not len(results_tree):
return {'error': 'Device returned empty results.'}
results_error = self._find_txt(results_tree, 'Error')
if results_error:
return {'error': results_error}
traceroute_result['success'] = {}
last_hop_index = 1
last_probe_index = 1
last_probe_ip_address = '*'
last_probe_host_name = ''
last_hop_dict = {'probes': {}}
for thanks_cisco in results_tree.getchildren():
tag_name = thanks_cisco.tag
tag_value = thanks_cisco.text
if tag_name == 'HopIndex':
new_hop_index = int(self._find_txt(thanks_cisco, '.', '-1'))
if last_hop_index and last_hop_index != new_hop_index:
traceroute_result['success'][last_hop_index] = copy.deepcopy(last_hop_dict)
last_hop_dict = {'probes': {}}
last_probe_ip_address = '*'
last_probe_host_name = ''
last_hop_index = new_hop_index
continue
tag_value = unicode(self._find_txt(thanks_cisco, '.', ''))
if tag_name == 'ProbeIndex':
last_probe_index = self._convert(int, tag_value, 0) + 1
if last_probe_index not in last_hop_dict.get('probes').keys():
last_hop_dict['probes'][last_probe_index] = {}
if not last_probe_host_name:
last_probe_host_name = last_probe_ip_address
last_hop_dict['probes'][last_probe_index] = {
'ip_address': unicode(last_probe_ip_address),
'host_name': unicode(last_probe_host_name),
'rtt': timeout * 1000.0
}
continue
if tag_name == 'HopAddress':
last_probe_ip_address = tag_value
continue
if tag_name == 'HopHostName':
last_probe_host_name = tag_value
continue
if tag_name == 'DeltaTime':
last_hop_dict['probes'][last_probe_index]['rtt'] = self._convert(float, tag_value, 0.0)
continue
if last_hop_index:
traceroute_result['success'][last_hop_index] = last_hop_dict
return traceroute_result
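# Shape of a successful traceroute reply (hypothetical values):
# {'success': {1: {'probes': {1: {'ip_address': u'10.0.0.1',
#                                 'host_name': u'gw.example.net',
#                                 'rtt': 1.23}}}}}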
def get_users(self):
users = dict()
_CISCO_GROUP_TO_CISCO_PRIVILEGE_MAP = {
'root-system': 15,
'operator': 5,
'sysadmin': 1,
'serviceadmin': 1,
'root-lr': 15
}
_DEFAULT_USER_DETAILS = {
'level': 0,
'password': '',
'sshkeys': []
}
users_xml_req = '<Get><Configuration><AAA></AAA></Configuration></Get>'
users_xml_reply = ET.fromstring(self.device.make_rpc_call(users_xml_req))
for user_entry in users_xml_reply.findall('.//Username'):
username = unicode(self._find_txt(user_entry, 'Naming/Name'))
group = self._find_txt(user_entry, 'UsergroupsUnderUsername/UsergroupUnderUsername/Naming/Name', '')
level = _CISCO_GROUP_TO_CISCO_PRIVILEGE_MAP.get(group, 0)
password = self._find_txt(user_entry, 'Password/Password')
user_details = _DEFAULT_USER_DETAILS.copy()
user_details.update({
'level': level,
'password': password
})
users[username] = user_details
return users
| 45.848272 | 198 | 0.518572 | 6,912 | 78,263 | 5.601563 | 0.120226 | 0.025311 | 0.023865 | 0.014102 | 0.422 | 0.351749 | 0.279172 | 0.227388 | 0.191384 | 0.148846 | 0 | 0.00528 | 0.38538 | 78,263 | 1,706 | 199 | 45.875147 | 0.799634 | 0.025644 | 0 | 0.271296 | 0 | 0.000716 | 0.309241 | 0.09473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02577 | false | 0.005727 | 0.015032 | 0.000716 | 0.067287 | 0.000716 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f01e48d0c802a8abbb9c8391e045c7b8aa15816 | 411 | py | Python | src/calPixels.py | imohamadhoseins/ObjectSegmentCNN | 755d7cf80139f48da1accba6e42bea0b381aaa64 | [
"MIT"
] | null | null | null | src/calPixels.py | imohamadhoseins/ObjectSegmentCNN | 755d7cf80139f48da1accba6e42bea0b381aaa64 | [
"MIT"
] | null | null | null | src/calPixels.py | imohamadhoseins/ObjectSegmentCNN | 755d7cf80139f48da1accba6e42bea0b381aaa64 | [
"MIT"
] | null | null | null | import os
import cv2
image_name = 'Binary/ILSVRC2012_test_00096192.jpg'
image = cv2.imread(image_name)
height, width, channel = image.shape
counter = 0
for j in range(height):
for k in range(width):
color = image[j][k]
if color[0] == 0 and color[1] == 0 and color[2] == 0:
counter += 1
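# Vectorized alternative (sketch): once the image is loaded, NumPy can count
# the same black pixels without the nested Python loops above; 'black' is a
# name introduced here and should match 'counter'.
import numpy as np
black = int(np.all(image == 0, axis=2).sum())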
print('counter', counter)
print('area', width * height)
print('ratio', counter/(width * height)) | 22.833333 | 56 | 0.654501 | 63 | 411 | 4.206349 | 0.47619 | 0.067925 | 0.067925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066869 | 0.199513 | 411 | 18 | 57 | 22.833333 | 0.738602 | 0 | 0 | 0 | 0 | 0 | 0.129114 | 0.088608 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f09854540cbe2f68e01e0a4c67c7ba05b8bfc38 | 5,556 | py | Python | AI/w2v clustering.py | osamhack2021/APP_AI_MMIS_teamMMIS | 6055f6dffda2ec09ed37251a8228e7371a22d206 | [
"MIT"
] | 2 | 2021-09-12T13:33:48.000Z | 2021-09-13T11:00:53.000Z | AI/w2v clustering.py | osamhack2021/APP_WEB_AI_MMIS_teamMMIS | b39d889126c80416acaeb48ebfa895fbe41321e0 | [
"MIT"
] | null | null | null | AI/w2v clustering.py | osamhack2021/APP_WEB_AI_MMIS_teamMMIS | b39d889126c80416acaeb48ebfa895fbe41321e0 | [
"MIT"
] | 2 | 2021-09-16T10:56:01.000Z | 2021-09-29T09:52:34.000Z | #%%
# Load packages
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from pandas.core.frame import DataFrame
from gensim.models import Word2Vec
# Morphological analysis packages
from konlpy.tag import Okt
from konlpy.tag import Komoran
from sklearn.utils.validation import indexable
# Load the data
data= pd.read_csv("All Menu (Various Versions)/국방부메뉴_v2.1.csv", encoding="UTF-8")
# Remove all non-Korean characters with a regular expression
data['메뉴이름'] = data['메뉴이름'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]","")
data['메뉴이름'] = data['메뉴이름'].str.replace(" ","")
# Drop duplicate menu names
data = data.drop_duplicates(['메뉴이름'], ignore_index=True)
# Normalization
for i in data:
if(i in ['계란류', '우유', '메밀', '땅콩', '대두', '밀', '고등어', '게', '새우', '돼지고기', '복숭아', '토마토', '아황산류', '호두', '닭고기', '쇠고기', '오징어', '조개류', '잣']):
data[i] = data[i]/100
if i in ['열량', '탄수화물', '지방', '단백질', '나트륨', '콜레스트롤']:
data[i] = ((data[i]-data[i].mean())/data[i].std())/500
#
# Tokenization with Komoran / Okt
# Okt and Komoran both work here (neither is strictly better); morphs() is used
# kkma takes too long; hannanum performs worse than the two above
komoran = Komoran()
okt=Okt()
tokenized_data=[]
for menu in data['메뉴이름']:
temp_X = okt.morphs(menu) # tokenize
tokenized_data.append(temp_X)
# Train word2vec
model = Word2Vec(sentences=tokenized_data, vector_size=200 , window=3, min_count=0, workers=4, sg=0)
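# Quick sanity check of the trained embeddings (illustrative; the query token
# must exist in the vocabulary — '김치' is just an example):
try:
    print(model.wv.most_similar('김치', topn=5))
except KeyError:
    pass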
#
# Function that computes the mean vector for a menu item
def get_sentence_mean_vector(morphs):
vector=[]
for i in morphs:
try:
vector.append(model.wv[i])
except KeyError as e:
pass
try:
return np.mean(vector, axis=0).tolist()
except IndexError as e:
pass
#
# Append the sentence vectors to the dataframe
sentence_vector=[]
for vectors in tokenized_data:
temp_X = get_sentence_mean_vector(vectors)
sentence_vector.append(temp_X)
data.insert(3, 'wv',sentence_vector)
#
# Clustering -> used to visualize whether recommendations can work well
dataList=[]
for i in range(len(data)): # iterate over all rows instead of a hard-coded count
vectorData=data['wv'][i]
for j in ['계란류', '우유', '메밀', '땅콩', '대두', '밀', '고등어', '게', '새우', '돼지고기', '복숭아', '토마토', '아황산류', '호두', '닭고기', '쇠고기', '오징어', '조개류', '잣', '열량', '탄수화물', '지방', '단백질', '나트륨', '콜레스트롤']:
vectorData.append(data[j][i])
dataList.append(vectorData)
from sklearn.cluster import KMeans
num_clusters = 10
k_means_clustering = KMeans(n_clusters=num_clusters)
idx = k_means_clustering.fit_predict(dataList)
data['category']=idx
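# The number of clusters (10) above is picked by hand; a quick elbow-curve
# sketch to sanity-check that choice could look like this (illustrative only):
inertias = [KMeans(n_clusters=k).fit(dataList).inertia_ for k in range(2, 15)]
plt.plot(range(2, 15), inertias)
plt.xlabel('k'); plt.ylabel('inertia')
plt.show()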
#
# Imports for embedding & visualization
from sklearn.manifold import TSNE
import os.path
import pickle
# Dimensionality reduction (down to 2D)
X = data['wv'].tolist()
y = data['category'].tolist()
tsne_filepath = 'tnse3000(w2v).pkl'
# File Cache
if not os.path.exists(tsne_filepath):
tsne = TSNE(random_state=42)
tsne_points = tsne.fit_transform(X)
with open(tsne_filepath, 'wb+') as f:
pickle.dump(tsne_points, f)
else: # cache hit
with open(tsne_filepath, 'rb') as f:
tsne_points = pickle.load(f)
# the cached points are used directly; recomputing TSNE here would defeat the cache
tsne_df = pd.DataFrame(tsne_points, index=range(len(X)), columns=['x_coord', 'y_coord'])
tsne_df['menu_name']=data['메뉴이름'].tolist()
tsne_df['cluster_number'] = y
# 2D plotting
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import HoverTool, ColumnDataSource, value
from bokeh.palettes import brewer
output_notebook()
# Get the number of colors we'll need for the plot.
colors = brewer["Spectral"][len(tsne_df['cluster_number'].unique())]
# Create a map between factor and color.
colormap = {i: colors[i] for i in tsne_df['cluster_number'].unique()}
# Create a list of colors for each value that we will be looking at.
colors = [colormap[x] for x in tsne_df['cluster_number']]
tsne_df['color']=colors
# Build the Bokeh DataSource
plot_data = ColumnDataSource(
data=tsne_df.to_dict(orient='list')
)
# Create the plot (canvas)
tsne_plot = figure(
# title='TSNE Twitter BIO Embeddings',
plot_width = 650,
plot_height = 650,
active_scroll='wheel_zoom',
output_backend="webgl",
)
# Create the hover tooltip
tsne_plot.add_tools(
HoverTool(
tooltips='@menu_name'
)
)
tsne_plot.circle(
source=plot_data,
x='x_coord',
y='y_coord',
line_alpha=0.3,
fill_alpha=0.2,
size=10,
fill_color='color',
line_color='color',
)
# Apply the remaining style settings
tsne_plot.title.text_font_size = '16pt'
tsne_plot.xaxis.visible = False
tsne_plot.yaxis.visible = False
tsne_plot.grid.grid_line_color = None
tsne_plot.outline_line_color = None
# Ta-da!
show(tsne_plot)
#
# Similar-menu recommendation
from sklearn.metrics.pairwise import cosine_similarity
import warnings; warnings.filterwarnings('ignore')
#
# Compute cosine similarity
menuNameSimilarity = cosine_similarity(dataList, dataList)
# Sort indices by similarity
menu_sim_sorted_idx = menuNameSimilarity.argsort()[:, ::-1]
#
# Similar-menu recommendation function
def find_sim_menu(data, sorted_idx, name, number=10):
title_menu=data[data['메뉴이름']==name]
title_menu_idx = title_menu.index.values
top_sim_idx = sorted_idx[title_menu_idx, :number]
top_sim_idx = top_sim_idx.reshape(-1,)
similar_menu = data.iloc[top_sim_idx]['메뉴이름']
similar_menu_list =[]
for sim_menu in similar_menu:
similar_menu_list.append(sim_menu)
return similar_menu_list[1:4]
#
# Menu recommendation test
recommendMenus = ['청양마요치킨', '비엔나소시지찌개','햄치즈버거','탕수육','두부고추장찌개','낙지덮밥','꼬리곰탕','김장김치','콘형아이스크림']
for menu in recommendMenus:
print(menu, end=' : ')
print(find_sim_menu(data, menu_sim_sorted_idx, menu))
# %%
# Save the arrays needed by the service
np.save('/workspaces/APP_AI_MMIS_teamMMIS/AI/server/AI file/w2v_menu_sim_sorted_idx',menu_sim_sorted_idx)
data.to_csv('/workspaces/APP_AI_MMIS_teamMMIS/AI/server/AI file/data.csv')
# %%
| 23.948276 | 180 | 0.678906 | 859 | 5,556 | 4.222352 | 0.406286 | 0.019851 | 0.006617 | 0.020954 | 0.135925 | 0.118555 | 0.093741 | 0.076096 | 0.076096 | 0.053488 | 0 | 0.013049 | 0.172426 | 5,556 | 231 | 181 | 24.051948 | 0.77599 | 0.12023 | 0 | 0.062016 | 0 | 0 | 0.126394 | 0.029327 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015504 | false | 0.015504 | 0.139535 | 0 | 0.170543 | 0.015504 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f09abf5936e787e6ef87489854c7b6ad3be7773 | 8,324 | py | Python | src/model/efficient_disparity.py | JonasFrey96/FlowPose6D | 2297ab5fa0afd0c247d59c2f1c7f899f078e2893 | [
"MIT"
] | null | null | null | src/model/efficient_disparity.py | JonasFrey96/FlowPose6D | 2297ab5fa0afd0c247d59c2f1c7f899f078e2893 | [
"MIT"
] | null | null | null | src/model/efficient_disparity.py | JonasFrey96/FlowPose6D | 2297ab5fa0afd0c247d59c2f1c7f899f078e2893 | [
"MIT"
] | null | null | null | import torch
from efficientnet_pytorch import EfficientNet
from torch import nn
from torchvision import transforms
def deconv(in_planes, out_planes, bias=False):
return nn.Sequential(
nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4,
stride=2, padding=1, bias=bias),
nn.LeakyReLU(0.1, inplace=True)
)
def predict_flow(in_planes):
return nn.Conv2d(in_planes, 2, kernel_size=3, stride=1, padding=1, bias=False)
def cat(x, y):
if x == None:
return y
else:
return torch.cat( [x,y], dim= 1)
class EfficientDisparity(nn.Module):
def __init__(self, num_classes = 22, backbone= 'efficientnet-b1', seperate_flow_head= False, pred_flow_pyramid=True, pred_flow_pyramid_add=True, ced_real=1, ced_render=1, ced_render_d=1,ced_real_d=1):
# tested with b6
super().__init__()
self.feature_extractor = EfficientNet.from_pretrained(backbone)
self.size = self.feature_extractor.get_image_size( backbone )
self.seperate_flow_head = seperate_flow_head
self.ced_real = ced_real
self.ced_render = ced_render
self.ced_real_d = ced_real_d
self.ced_render_d = ced_render_d
self.pred_flow_pyramid_add = pred_flow_pyramid_add
self.pred_flow_pyramid = pred_flow_pyramid
idxs, feats, res = self.feature_extractor.layer_info( torch.ones( (4,3,self.size, self.size)))
if ced_render_d > 0 or ced_real_d > 0:
self.depth_backbone = True
else:
self.depth_backbone = False
if self.depth_backbone:
self.feature_extractor_depth = EfficientNet.from_name(backbone, in_channels=1)
r = res[0]
self.idx_extract = []
self.feature_sizes = []
for i in range(len(idxs)):
if r != res[i]:
self.idx_extract.append(i-1)
r = res[i]
self.feature_sizes.append( feats[i-1] )
self.idx_extract.append(len(idxs)-1)
self.feature_sizes.append( feats[len(idxs)-1] )
self._num_classes = num_classes
dc = []
pred_flow_pyramid = []
upsample_flow_layers = []
self.feature_sizes = [8] + self.feature_sizes
label_feat = [16,8, num_classes]
label_layers = []
label_i = -1
for i in range( 1, len(self.feature_sizes) ):
if i == 1:
inc_feat_0 = (int(ced_real>0) + int(ced_render>0) + int(ced_render_d>0) + int(ced_real_d>0)) * self.feature_sizes[-i ]
else:
inc_feat_0 = (int(ced_real>=i) + int(ced_render>=i) + int(ced_render_d>=i) + int(ced_real_d>=i) + 1 ) * self.feature_sizes[-i]
if self.pred_flow_pyramid_add and self.pred_flow_pyramid:
inc_feat_0 += 2
out_feat = self.feature_sizes[- (i+1) ] # keep this output width constant for now
dc.append( deconv( inc_feat_0 , out_feat ) )
print( 'Network inp:', inc_feat_0, ' out: ', out_feat )
if i > len(self.feature_sizes)-len(label_feat):
if label_i == -1:
inc_feat_label = inc_feat_0
else:
inc_feat_label = label_feat[label_i]
label_i += 1
out_feat_label = label_feat[label_i]
label_layers.append( deconv( inc_feat_label , out_feat_label, bias=True ) )
if self.pred_flow_pyramid:
pred_flow_pyramid.append( predict_flow( inc_feat_0 ) )
upsample_flow_layers.append( nn.ConvTranspose2d(
2, 2, 4, 2, 1, bias=False))
label_layers.append( deconv(label_feat[-2], label_feat[-1], bias=True) )
self.label_layers = nn.ModuleList(label_layers)
self.deconvs = nn.ModuleList(dc)
pred_flow_pyramid.append( predict_flow( self.feature_sizes[0]) )
if self.pred_flow_pyramid:
self.pred_flow_pyramid= nn.ModuleList( pred_flow_pyramid )
self.upsample_flow_layers = nn.ModuleList(upsample_flow_layers)
self.up_in = torch.nn.UpsamplingBilinear2d(size=(self.size, self.size))
self.input_trafos = transforms.Compose([
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
self.norm_depth = transforms.Normalize([0.485,0.485], [0.229,0.229])
self.up_out = torch.nn.UpsamplingNearest2d(size=(480, 640))
self.up_out_bl = torch.nn.UpsamplingBilinear2d(size=(480, 640))
self.up_nn_in= torch.nn.UpsamplingNearest2d(size=(self.size, self.size))
def forward(self, data, idx=False, label=None):
"""Forward pass
Args:
data ([torch.tensor]): BS,C,H,W (C=6) if self.depth_backbone: C = 8 else: C = 6
idx ([torch.tensor]): BS,1 starting for first object with 0 endind with num_classes-1
label ([type], optional): [description]. Defaults to None.
Returns:
flow ([torch.tensor]): BS,2,H,W
segmentation ([torch.tensor]): BS,num_classes,H,W
"""
# Is it smart to have the residual skip connections only for the real image?
# The information should of course be given for the real image, but the
# network then needs to learn to fully encode the rendered image correctly.
# data BS, C, H, W
BS,C,H,W = data.shape
real = self.up_in(data[:,:3] )
render = self.up_in(data[:,3:6] )
if self.depth_backbone:
data[:,6:] = data[:,6:]/10000
for i in range(BS):
real[i] = self.input_trafos( real[i] )
render[i] = self.input_trafos( render[i] )
if self.depth_backbone:
real_d = self.up_nn_in(data[:,6][:,None,:,:] )
render_d = self.up_nn_in(data[:,7][:,None,:,:] )
feat_real_d = self.feature_extractor_depth.extract_features_layerwise( real_d , idx_extract = self.idx_extract[-(self.ced_real_d):])
feat_render_d = self.feature_extractor_depth.extract_features_layerwise( render_d , idx_extract = self.idx_extract[-(self.ced_render_d):])
feat_real = self.feature_extractor.extract_features_layerwise( real , idx_extract = self.idx_extract)
feat_render = self.feature_extractor.extract_features_layerwise( render, idx_extract = self.idx_extract)
pred_flow_pyramid_feat = []
x = None
for j in range( 1,len( self.deconvs)+1 ):
# calculate input:
# accumulate input to each layer
if j-1 < self.ced_real:
x = cat( x, feat_real[-j] )
if j-1 < self.ced_render:
x = cat( x, feat_render[-j])
if j-1 < self.ced_real_d:
x = cat( x, feat_real_d[-j])
if j-1 < self.ced_render_d:
x = cat( x, feat_render_d[-j])
if j > 1 and self.pred_flow_pyramid_add:
dim = x.shape[3]
# upsample flow
f_up = self.upsample_flow_layers[j-2]( pred_flow_pyramid_feat[-1]) [:,:,:dim,:dim]
x = cat( x, f_up )
# predict flow at each level
if self.pred_flow_pyramid:
pred_flow_pyramid_feat.append( self.pred_flow_pyramid[ j-1 ](x) )
                try:
                    dim = feat_real[-(j+1)].shape[3]
                    pred_flow_pyramid_feat[-1] = pred_flow_pyramid_feat[-1][:,:,:dim,:dim]
                except IndexError:
                    # coarsest level: no finer feature map to crop against
                    pass
            if j == len(self.deconvs) - len(self.label_layers) + 2:
                # clone features for mask prediction
                # (note: the label-layer deconvs use a bias term)
                segmentation = x.clone()
            # apply deconv (upconv) layer
x = self.deconvs[j-1](x)
            try:
                dim = feat_real[-(j+1)].shape[3]
                x = x[:,:,:dim,:dim]
            except IndexError:
                # coarsest level: no finer feature map to crop against
                pass
# predict label
for l in self.label_layers:
segmentation = l(segmentation)
segmentation = self.up_out(segmentation)
# predict flow
pred_flow_pyramid_feat.append( self.pred_flow_pyramid[-1](x) )
pred_flow_pyramid_feat.append( self.up_out_bl( pred_flow_pyramid_feat[-1] ) )
        if label is None:
            # fall back to the predicted segmentation (label is currently not returned)
            label = segmentation.argmax(dim=1)
return pred_flow_pyramid_feat, segmentation
if __name__ == "__main__":
model = EfficientDisparity(num_classes = 22, backbone= 'efficientnet-b2', seperate_flow_head= False, pred_flow_pyramid=True, pred_flow_pyramid_add=True, ced_real=3, ced_render=3, ced_render_d=2,ced_real_d=2)
BS = 2
H = 480
W = 640
C = 8
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = torch.ones( (BS,C,H,W), device=device )
model = model.to(device)
idx = torch.linspace(0,BS-1,BS)[:,None]
out = model(data, idx = idx)
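    # Quick sanity check of the outputs: forward() returns the flow pyramid
    # (upsampled full-resolution prediction last) and the segmentation logits.
    pred_flow_pyramid_feat, segmentation = out
    print('final flow:', pred_flow_pyramid_feat[-1].shape, 'segmentation:', segmentation.shape)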
# for i in range(0,7):
# model = EfficientDisparity(num_classes = 22, backbone= f'efficientnet-b{i}', connections_encoder_decoder = 2, depth_backbone = True)
| 38.183486 | 229 | 0.652811 | 1,255 | 8,324 | 4.074104 | 0.163347 | 0.046939 | 0.088011 | 0.040876 | 0.319382 | 0.200665 | 0.132408 | 0.097203 | 0.05398 | 0.036769 | 0 | 0.028023 | 0.224051 | 8,324 | 218 | 230 | 38.183486 | 0.763586 | 0.121696 | 0 | 0.115385 | 0 | 0 | 0.008679 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032051 | false | 0.012821 | 0.025641 | 0.012821 | 0.096154 | 0.00641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f09cfb8f6847b0ac4050794ebf1bdbab43941f3 | 843 | py | Python | cnap_v2/celery_app.py | qbrc-cnap/cnap | 624683e91a64c3b4934b578c59db850242d2f94c | [
"MIT"
] | 1 | 2021-07-08T14:06:04.000Z | 2021-07-08T14:06:04.000Z | cnap_v2/celery_app.py | qbrc-cnap/cnap | 624683e91a64c3b4934b578c59db850242d2f94c | [
"MIT"
] | 12 | 2020-02-12T00:10:53.000Z | 2021-06-10T21:24:45.000Z | cnap_v2/celery_app.py | qbrc-cnap/cnap | 624683e91a64c3b4934b578c59db850242d2f94c | [
"MIT"
] | null | null | null | import os
from celery import Celery
from django.conf import settings
from django.apps import apps
from celery.schedules import crontab
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cnap_v2.settings')
app = Celery('cnap_v2')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: [n.name for n in apps.get_app_configs()])
app.conf.beat_schedule = {
'check_jobs':{
'task': 'check_job',
'schedule': 60.0
},
'manage_file': {
'task': 'manage_files',
'schedule': crontab(hour=8, minute=15)
}
}
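# Note: the beat entries above refer to tasks registered by name ('check_job',
# 'manage_files') in the Django apps' own tasks modules. A minimal, hypothetical
# sketch of such a registration (the real bodies live elsewhere):
#
#   @app.task(name='check_job')
#   def check_job():
#       ...  # poll running jobs and update their status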
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| 26.34375 | 72 | 0.705813 | 121 | 843 | 4.793388 | 0.570248 | 0.034483 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012839 | 0.168446 | 843 | 31 | 73 | 27.193548 | 0.814551 | 0.185053 | 0 | 0 | 0 | 0 | 0.221083 | 0.032211 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.227273 | 0 | 0.272727 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f09fa31a27e27c6677b9ba18770a56c391f691a | 1,059 | py | Python | DecryptLogin/modules/clients/xiaomihealth.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
] | null | null | null | DecryptLogin/modules/clients/xiaomihealth.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
] | null | null | null | DecryptLogin/modules/clients/xiaomihealth.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
] | null | null | null | '''
Function:
    Xiaomi Health client
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
Last updated:
    2022-03-11
'''
from .baseclient import BaseClient
'''Xiaomi Health client'''
class XiaomiHealthClient(BaseClient):
def __init__(self, reload_history=True, **kwargs):
super(XiaomiHealthClient, self).__init__(website_name='xiaomihealth', reload_history=reload_history, **kwargs)
    '''Check whether the session has expired; returns True if it has'''
def checksessionstatus(self, session, infos_return):
login_token = infos_return['token_info']['login_token']
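        # Probe the app_tokens endpoint with the stored login_token: a still-valid
        # token yields an app_token in the response, i.e. the session is alive.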
url = 'https://account-cn.huami.com/v1/client/app_tokens'
headers = {'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 9; MI 6 MIUI/20.6.18)'}
params = {
'app_name': 'com.xiaomi.hm.health',
'dn': 'api-user.huami.com%2Capi-mifit.huami.com%2Capp-analytics.huami.com',
'login_token': login_token,
}
response = self.session.get(url, params=params, headers=headers)
if response.json().get('token_info', {}).get('app_token', ''):
return False
return True | 34.16129 | 118 | 0.641171 | 126 | 1,059 | 5.206349 | 0.603175 | 0.060976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02506 | 0.208687 | 1,059 | 31 | 119 | 34.16129 | 0.757757 | 0.080264 | 0 | 0 | 0 | 0.117647 | 0.292026 | 0.071121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f0a17f2452f2173197a297e25bb4a1a971989ed | 6,401 | py | Python | pbsmmapi/show/models.py | WGBH/django-pbsmmapi | d20d01d2724715379adb2c754ed2537688a1dd1f | [
"MIT"
] | null | null | null | pbsmmapi/show/models.py | WGBH/django-pbsmmapi | d20d01d2724715379adb2c754ed2537688a1dd1f | [
"MIT"
] | null | null | null | pbsmmapi/show/models.py | WGBH/django-pbsmmapi | d20d01d2724715379adb2c754ed2537688a1dd1f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from uuid import UUID
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from ..abstract.gatekeeper import can_object_page_be_shown
from ..abstract.helpers import time_zone_aware_now
from ..abstract.models import PBSMMGenericShow
from ..api.api import get_PBSMM_record
from ..api.helpers import check_pagination
from ..asset.ingest_asset import process_asset_record
from ..asset.models import PBSMMAbstractAsset
from .ingest_show import process_show_record
from .ingest_children import process_seasons, process_specials
PBSMM_SHOW_ENDPOINT = 'https://media.services.pbs.org/api/v1/shows/'
class PBSMMAbstractShow(PBSMMGenericShow):
ingest_seasons = models.BooleanField(
_('Ingest Seasons'),
default=False,
help_text='Also ingest all Seasons',
)
ingest_specials = models.BooleanField(
_('Ingest Specials'),
default=False,
help_text='Also ingest all Specials',
)
ingest_episodes = models.BooleanField(
_('Ingest Episodes'),
default=False,
help_text='Also ingest all Episodes (for each Season)',
)
class Meta:
verbose_name = 'PBS MM Show'
verbose_name_plural = 'PBS MM Shows'
db_table = 'pbsmm_show'
abstract = True
def get_absolute_url(self):
return reverse('show-detail', args=[self.slug])
def __unicode__(self):
if self.title:
return self.title
return "ID %d: unknown" % self.id
def __object_model_type(self):
# This handles the correspondence to the "type" field in the PBSMM JSON
# object
return 'show'
object_model_type = property(__object_model_type)
def __available_to_public(self):
return can_object_page_be_shown(None, self)
available_to_public = property(__available_to_public)
class PBSMMShow(PBSMMAbstractShow):
pass
class PBSMMShowAsset(PBSMMAbstractAsset):
show = models.ForeignKey(
PBSMMShow,
related_name='assets',
on_delete=models.CASCADE, # required for Django 2.0
)
class Meta:
verbose_name = 'PBS MM Show - Asset'
verbose_name_plural = 'PBS MM Shows - Assets'
db_table = 'pbsmm_show_asset'
def __unicode__(self):
return "%s: %s" % (self.show, self.title)
def process_show_assets(endpoint, this_show):
keep_going = True
scraped_object_ids = []
while keep_going:
(status, json) = get_PBSMM_record(endpoint)
data = json['data']
for item in data:
object_id = item.get('id')
scraped_object_ids.append(UUID(object_id))
try:
instance = PBSMMShowAsset.objects.get(object_id=object_id)
except PBSMMShowAsset.DoesNotExist:
instance = PBSMMShowAsset()
instance = process_asset_record(item, instance, origin='show')
# For now - borrow from the parent object
instance.last_api_status = status
instance.date_last_api_update = time_zone_aware_now()
instance.show = this_show
instance.ingest_on_save = True
# This needs to be here because otherwise it never updates...
instance.save()
(keep_going, endpoint) = check_pagination(json)
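    # prune: drop any stored assets that the API no longer reports for this show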
for asset in PBSMMShowAsset.objects.filter(show=this_show):
if asset.object_id not in scraped_object_ids:
asset.delete()
################################
# PBS MediaManager API interface
################################
# The interface/access is done with a 'pre_save' receiver based on the value of 'ingest_on_save'
# That way, one can force a reingestion from the Admin OR one can do it from a management script
# by simply getting the record, setting ingest_on_save on the record, and calling save().
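# For example, a management script can force a re-ingest like this (the slug is
# a placeholder):
#
#   show = PBSMMShow.objects.get(slug='some-show')
#   show.ingest_on_save = True
#   show.save()  # fires scrape_PBSMMAPI() below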
@receiver(models.signals.pre_save, sender=PBSMMShow)
def scrape_PBSMMAPI(sender, instance, **kwargs):
if instance.__class__ is not PBSMMShow:
return
    # If this is a new record, then someone has started it in the Admin using
    # a PBSMM slug. New vs. edited records need slightly different checks below,
    # but either way the retrieval endpoint is built from the slug.
if instance.pk and instance.slug and str(instance.slug).strip():
# Object is being edited
if not instance.ingest_on_save:
            return  # do nothing - a re-ingest was not requested
else: # object is being added
if not instance.slug:
return # do nothing - can't get an ID to look up!
url = "{}{}/".format(PBSMM_SHOW_ENDPOINT, instance.slug)
# OK - get the record from the API
(status, json) = get_PBSMM_record(url)
instance.last_api_status = status
# Update this record's time stamp (the API has its own)
instance.date_last_api_update = time_zone_aware_now()
# If we didn't get a record, abort (there's no sense crying over spilled
# bits)
if status != 200:
return
# Process the record (code is in ingest.py)
instance = process_show_record(json, instance)
# continue saving, but turn off the ingest_on_save flag
instance.ingest_on_save = False # otherwise we could end up in an infinite loop!
# We're done here - continue with the save() operation
return
@receiver(models.signals.post_save, sender=PBSMMShow)
def handle_child_objects(sender, instance, *args, **kwargs):
if instance.last_api_status != 200:
return
this_json = instance.json
# ALWAYS GET CHILD ASSETS
assets_endpoint = this_json['links'].get('assets')
if assets_endpoint:
process_show_assets(assets_endpoint, instance)
if instance.ingest_seasons:
seasons_endpoint = this_json['links'].get('seasons')
if seasons_endpoint:
process_seasons(seasons_endpoint, instance)
if instance.ingest_specials:
specials_endpoint = this_json['links'].get('specials')
if specials_endpoint:
process_specials(specials_endpoint, instance)
    # This is a tricky way to unset the ingest flags without calling save():
    # QuerySet.update() writes straight to the database, bypassing save() and
    # these signal handlers, so no re-ingestion is triggered.
rec = PBSMMShow.objects.filter(pk=instance.id)
rec.update(ingest_seasons=False, ingest_specials=False, ingest_episodes=False)
return
| 31.377451 | 100 | 0.678488 | 825 | 6,401 | 5.049697 | 0.294545 | 0.011522 | 0.017283 | 0.011522 | 0.153385 | 0.086654 | 0.073692 | 0.036006 | 0.036006 | 0.016323 | 0 | 0.002037 | 0.233089 | 6,401 | 203 | 101 | 31.53202 | 0.846608 | 0.204343 | 0 | 0.145161 | 0 | 0 | 0.071586 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0.008065 | 0.120968 | 0.032258 | 0.379032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f0ef6a186ee50f63722efe4ce641d17f8fe26d8 | 1,109 | py | Python | DeadlockAvoid.py | sid-146/OS_Programs | 501b38e9a667590ae5094294dfcd37c0ea851957 | [
"MIT"
] | 1 | 2021-12-06T12:06:35.000Z | 2021-12-06T12:06:35.000Z | DeadlockAvoid.py | sid-146/OS_Programs | 501b38e9a667590ae5094294dfcd37c0ea851957 | [
"MIT"
] | null | null | null | DeadlockAvoid.py | sid-146/OS_Programs | 501b38e9a667590ae5094294dfcd37c0ea851957 | [
"MIT"
] | null | null | null | from threading import Thread
import time
file = "Sudhanwa Kaveeshwar"
s = 1
r = 1
reader_count = 0
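# s: binary flag ("semaphore") that locks the shared file for the writer
# r: binary flag guarding reader_count
# Note: these busy-wait flags are not atomic; a production version would use
# threading.Lock/Semaphore instead.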
def waitc():
    # busy-wait "P" (acquire) on the writer flag s
    global s
    while s == 0:
        pass
    s = 0


def goc():
    # "V" (release) on the writer flag s
    global s
    s = 1


def wait_reader():
    # busy-wait "P" (acquire) on the reader-count flag r
    global r
    while r == 0:
        pass
    r = 0


def go_reader():
    # "V" (release) on the reader-count flag r
    global r
    r = 1
def reader(reader_id):
    # reader_id is 1 or 2 (renamed from r to avoid shadowing the global flag r)
    global reader_count
    for _ in range(3):
        wait_reader()
        reader_count = reader_count + 1
        if reader_count == 1:  # first reader in locks out the writer
            waitc()
        go_reader()
        print("reader {0} reading file : {1} ".format(reader_id, file))
        wait_reader()
        reader_count = reader_count - 1
        if reader_count == 0:  # last reader out releases the writer
            goc()
        go_reader()
        time.sleep(1)
def writer():
for i in range(2):
global file
waitc()
file = input("write content in file : ")
print("writer writes : ", file)
goc()
time.sleep(1)
writer_thread = Thread(target=writer)  # renamed so the writer function itself is not shadowed
reader1 = Thread(target=reader, args=(1,))
reader2 = Thread(target=reader, args=(2,))
reader1.start()
reader2.start()
writer_thread.start()
| 14.402597 | 63 | 0.536519 | 146 | 1,109 | 3.979452 | 0.294521 | 0.151463 | 0.061962 | 0.037866 | 0.158348 | 0.158348 | 0.158348 | 0.158348 | 0.158348 | 0.158348 | 0 | 0.03453 | 0.34716 | 1,109 | 76 | 64 | 14.592105 | 0.767956 | 0 | 0 | 0.392157 | 0 | 0 | 0.080252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.039216 | 0.039216 | 0 | 0.156863 | 0.039216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f112bc551065f305bd9473b6d2119a4048f2dfe | 2,796 | py | Python | cka/gram.py | nzw0301/cka | ecc431fcfe273d7a240c9615dc316a80799396d2 | [
"Apache-2.0"
] | null | null | null | cka/gram.py | nzw0301/cka | ecc431fcfe273d7a240c9615dc316a80799396d2 | [
"Apache-2.0"
] | 1 | 2021-11-30T12:42:08.000Z | 2021-11-30T12:42:08.000Z | cka/gram.py | nzw0301/cka | ecc431fcfe273d7a240c9615dc316a80799396d2 | [
"Apache-2.0"
] | null | null | null | """
The source code comes from
https://colab.research.google.com/github/google-research/google-research/blob/master/representation_similarity/Demo.ipynb
by Kornblith, Simon and Norouzi, Mohammad and Lee, Honglak and Hinton, Geoffrey.
The modifications are as follows:
1. Apply `black` & PyCharm's formatter
2. Rename `center_gram` with `_center_gram`
"""
import numpy as np
def gram_linear(x):
"""Compute Gram (kernel) matrix for a linear kernel.
Args:
x: A num_examples x num_features matrix of features.
Returns:
A num_examples x num_examples Gram matrix of examples.
"""
return x.dot(x.T)
def gram_rbf(x, threshold=1.0):
"""Compute Gram (kernel) matrix for an RBF kernel.
Args:
x: A num_examples x num_features matrix of features.
threshold: Fraction of median Euclidean distance to use as RBF kernel
bandwidth. (This is the heuristic we use in the paper. There are other
possible ways to set the bandwidth; we didn't try them.)
Returns:
A num_examples x num_examples Gram matrix of examples.
"""
dot_products = x.dot(x.T)
sq_norms = np.diag(dot_products)
sq_distances = -2 * dot_products + sq_norms[:, None] + sq_norms[None, :]
sq_median_distance = np.median(sq_distances)
return np.exp(-sq_distances / (2 * threshold ** 2 * sq_median_distance))
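# A quick usage sketch (illustrative only):
#   X = np.random.randn(100, 64)        # 100 examples, 64 features
#   K_lin = gram_linear(X)              # (100, 100) linear Gram matrix
#   K_rbf = gram_rbf(X, threshold=1.0)  # (100, 100) RBF Gram matrix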
def _center_gram(gram, unbiased=False):
"""Center a symmetric Gram matrix.
This is equivalent to centering the (possibly infinite-dimensional) features
induced by the kernel before computing the Gram matrix.
Args:
gram: A num_examples x num_examples symmetric matrix.
unbiased: Whether to adjust the Gram matrix in order to compute an unbiased
estimate of HSIC. Note that this estimator may be negative.
Returns:
A symmetric matrix with centered columns and rows.
"""
if not np.allclose(gram, gram.T):
raise ValueError("Input must be a symmetric matrix.")
gram = gram.copy()
if unbiased:
# This formulation of the U-statistic, from Szekely, G. J., & Rizzo, M.
# L. (2014). Partial distance correlation with methods for dissimilarities.
# The Annals of Statistics, 42(6), 2382-2412, seems to be more numerically
# stable than the alternative from Song et al. (2007).
n = gram.shape[0]
np.fill_diagonal(gram, 0)
means = np.sum(gram, 0, dtype=np.float64) / (n - 2)
means -= np.sum(means) / (2 * (n - 1))
gram -= means[:, None]
gram -= means[None, :]
np.fill_diagonal(gram, 0)
else:
means = np.mean(gram, 0, dtype=np.float64)
means -= np.mean(means) / 2
gram -= means[:, None]
gram -= means[None, :]
return gram
| 33.686747 | 121 | 0.659514 | 401 | 2,796 | 4.516209 | 0.416459 | 0.048592 | 0.033131 | 0.035892 | 0.22529 | 0.154611 | 0.112645 | 0.112645 | 0.112645 | 0.112645 | 0 | 0.018475 | 0.244993 | 2,796 | 82 | 122 | 34.097561 | 0.839413 | 0.585479 | 0 | 0.222222 | 0 | 0 | 0.03167 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.037037 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f11ec092d947c7aa54059d8d6ebfb6bd8f528d1 | 811 | py | Python | VB_Classes/PhotoShop_sepia.py | bobdavies2000/OpenCVB | 1d339a94643a97e2d34f82dc7776677a8566d71d | [
"MIT"
] | 69 | 2019-07-17T21:20:37.000Z | 2022-03-23T08:38:03.000Z | VB_Classes/PhotoShop_sepia.py | bobdavies2000/OpenCVB | 1d339a94643a97e2d34f82dc7776677a8566d71d | [
"MIT"
] | 5 | 2021-02-05T05:48:50.000Z | 2022-03-12T01:43:15.000Z | VB_Classes/PhotoShop_sepia.py | bobdavies2000/OpenCVB | 1d339a94643a97e2d34f82dc7776677a8566d71d | [
"MIT"
] | 6 | 2019-12-24T05:36:52.000Z | 2021-02-19T15:55:13.000Z | import cv2
import numpy as np
# https://github.com/spmallick/learnopencv/tree/master/
def sepia(img):
res = img.copy()
res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB) # converting to RGB as sepia matrix is for RGB
res = np.array(res, dtype=np.float64)
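    # Each row of the 3x3 kernel below produces one output channel (R, G, B)
    # as a weighted combination of the input RGB values - the classic sepia weights.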
    res = cv2.transform(res, np.array([[0.393, 0.769, 0.189],
                                       [0.349, 0.686, 0.168],
                                       [0.272, 0.534, 0.131]]))  # np.matrix is deprecated; np.array behaves the same here
res[np.where(res > 255)] = 255 # clipping values greater than 255 to 255
res = np.array(res, dtype=np.uint8)
res = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)
cv2.imshow("original", img)
cv2.imshow("Sepia", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
img = cv2.imread("../Data/image.jpg")
sepia(img)
| 36.863636 | 93 | 0.585697 | 117 | 811 | 3.974359 | 0.504274 | 0.077419 | 0.060215 | 0.073118 | 0.193548 | 0.193548 | 0 | 0 | 0 | 0 | 0 | 0.10906 | 0.265105 | 811 | 21 | 94 | 38.619048 | 0.671141 | 0.17016 | 0 | 0 | 0 | 0 | 0.056801 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f1215272d49157aa4f4344e17235518f74e6ff9 | 15,607 | py | Python | plot_cov2ensemble.py | FIDUCEO/FCDR_ST_ensemble | 47962346de1db624ee23bfd478fa4f75fb49719a | [
"MIT"
] | null | null | null | plot_cov2ensemble.py | FIDUCEO/FCDR_ST_ensemble | 47962346de1db624ee23bfd478fa4f75fb49719a | [
"MIT"
] | null | null | null | plot_cov2ensemble.py | FIDUCEO/FCDR_ST_ensemble | 47962346de1db624ee23bfd478fa4f75fb49719a | [
"MIT"
] | 1 | 2019-08-29T11:23:59.000Z | 2019-08-29T11:23:59.000Z | # !/usr/bin/env python
# Code include segment
# ========================================
# Version 0.5
# 28 July, 2019
# https://patternizer.github.io/
# michael.taylor AT reading DOT ac DOT uk
# ========================================
#---------------------------------------------------------------------------
# PLOT LIST (alphabetical):
#---------------------------------------------------------------------------
# plot_crs(): Constrained random sampling (CRS) demo
# plot_eigenspectrum(ev): Eigenspectrum + cumulative relative variance with nPC label
# plot_ensemble_closure(da,draws,har): Harmonisation a_cov and a_u versus ensemble-calculated values
# plot_ensemble_decile_distribution(Z, decile, npop, nens): Draws with decile per parameter
# plot_ensemble_decile_selection(Z_norm, ensemble_idx, nens): Draws with decile selection per parameter
# plot_ensemble_deltas(da): Ensemble deltas (not normalised)
# plot_ensemble_deltas_an(da,a_u): Ensemble deltas / parameter uncertainty per a(n)
# plot_ensemble_deltas_normalised(da,a_u): Ensemble deltas / parameter uncertainty
# plot_ensemble_deltas_pc12(da_pc12, a_u): Project ensemble onto PC1 and PC2
# plot_ensemble_diff_BT_scatterplot(BT_ens,BT_mmd): Ensemble BT versus BT_mmd (in 10K bands)
# plot_ensemble_diff_BT_timeseries(BT_ens,BT): Ensemble BT minus BT [nens,n]
# plot_ensemble_diff_L_timeseries(L_ens,L): Ensemble L minus L [nens,n]
# plot_orbit_var(lat, lon, var, vmin, vmax, projection, filestr, titlestr, varstr): Swathe plot of variable with given lat and lon arrays
#---------------------------------------------------------------------------

# Imports needed to run this include segment stand-alone (in the original these
# come from the including driver script; `plotstem` is assumed to be the output
# file suffix set by that script).
import math
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import seaborn as sns
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER

plotstem = '.png'  # assumed default
def plot_crs():
'''
Constrained versus unconstrained random sampling demo
'''
from ensemble_func import generate_n_single
from ensemble_func import generate_n
for n in np.array([10,50,100,500,1000,5000,10000,50000]):
random_numbers_unconstrained = generate_n_single(n)
random_numbers_constrained = generate_n(n)
fig,ax = plt.subplots(1,2)
labelstr_constrained = 'n=' + str(n) + ': constrained'
labelstr_unconstrained = 'n=' + str(n) + ': unconstrained'
ax[0].plot(np.sort(np.array(random_numbers_unconstrained)), label=labelstr_unconstrained)
ax[0].plot(np.sort(np.array(random_numbers_constrained)), label=labelstr_constrained)
ax[0].legend(loc=2, fontsize=8)
ax[0].set_ylim(-5,5)
ax[0].set_ylabel('z-score')
ax[0].set_xlabel('rank')
ax[0].set_title(r'Sorted random sample from $erf^{-1}(x)$')
ax[1].hist(random_numbers_unconstrained,bins=100,alpha=0.3,label='unconstrained')
ax[1].hist(random_numbers_constrained,bins=100,alpha=0.3,label='constrained')
ax[1].set_xlim(-5,5)
ax[1].set_xlabel('z-score')
ax[1].set_ylabel('count')
plotstr = 'random_numbers_n_' + str(n) + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_eigenspectrum(ev):
'''
Eigenspectrum + cumulative relative variance with nPC label [npar]
'''
nPC = ev['nPC']
fig,ax = plt.subplots()
    plt.plot(ev['eigenvalues_rank'], ev['eigenvalues_norm'], linestyle='-', marker='.', color='b', label=r'$\lambda/\sum\lambda$')
plt.plot(ev['eigenvalues_rank'][nPC], ev['eigenvalues_norm'][nPC], marker='o', color='k', mfc='none',label=None)
plt.plot(ev['eigenvalues_rank'], ev['eigenvalues_cumsum'], linestyle='-', marker='.', color='r',label='cumulative')
labelstr = 'n(PC)='+str(nPC+1)+' var='+"{0:.5f}".format(ev['nPC_variance'])
plt.plot(ev['eigenvalues_rank'][nPC], ev['eigenvalues_cumsum'][nPC], marker='o', color='k', mfc='none',label=labelstr)
plt.legend(loc='right', fontsize=10)
plt.xlabel('rank')
plt.ylabel('relative variance')
plotstr = 'eigenspectrum' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_closure(da,draws,har):
'''
Harmonisation a_cov and a_u versus ensemble-calculated values [npar,npar] & [npar]
'''
a_u = np.array(har.parameter_uncertainty)
a_cov = np.array(har.parameter_covariance_matrix)
da_cov = np.cov(draws.T) # [npar,npar]
da_u = np.sqrt(np.diag(da_cov)) # [npar]
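    # Frobenius-norm differences between harmonisation and ensemble estimates
    # (computed as closure diagnostics; not drawn on the figure)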
norm_u = np.linalg.norm(a_u - da_u)
norm_cov = np.linalg.norm(a_cov - da_cov)
umin = np.min([a_u,da_u])
umax = np.max([a_u,da_u])
covmin = np.min([a_cov,da_cov])
covmax = np.max([a_cov,da_cov])
fig,ax = plt.subplots(2,2)
g = sns.heatmap(a_cov - da_cov,ax=ax[0,0])
ax[0,0].set_xlabel('parameter, a(n)')
ax[0,0].set_ylabel('parameter, a(n)')
ax[0,1].plot(np.arange(1,len(a_u)+1), a_u - da_u,'k.',markersize=10,alpha=0.2)
ax[0,1].set_xlabel('parameter, a(n)')
ax[0,1].set_ylabel('HAR-MNS: u(n)')
ax[1,0].plot(a_cov.ravel(), da_cov.ravel(),'k.',markersize=10,alpha=0.2)
ax[1,0].plot([covmin,covmax],[covmin,covmax], '-', color='red')
ax[1,0].set_xlabel('HAR: cov(n,n)')
ax[1,0].set_ylabel('MNS: cov(n,n)')
ax[1,1].plot(a_u, da_u,'k.',markersize=10,alpha=0.2)
ax[1,1].plot([umin,umax],[umin,umax], '-', color='red')
ax[1,1].set_xlabel('HAR: u(n)')
ax[1,1].set_ylabel('MNS: u(n)')
plotstr = 'ensemble_closure' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_decile_distribution(Z, decile, npop, nens):
'''
Draws with decile per parameter
'''
npar = decile.shape[1]
if npop > 10000:
krange = np.linspace(0,npop-1,10000).astype('int')
else:
krange = range(npop)
fig,ax = plt.subplots()
for k in krange:
plt.plot(np.arange(1,npar+1),Z[k,:],'.',alpha=0.2)
for i in range(nens):
plt.plot(np.arange(1,npar+1),decile[i,:],'-',alpha=1.0,label='decile('+\
str(i+1)+')')
plt.ylim(-5,5)
plt.xlabel('harmonisation parameter')
plt.ylabel('multinormal draw z-score')
plt.legend(loc=2,fontsize=8, ncol=5)
plotstr = 'ensemble_decile_distribution' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_decile_selection(Z_norm, ensemble_idx, nens):
'''
Draws with decile selection per parameter
'''
fig,ax = plt.subplots()
for k in range(nens):
labelstr = 'decile('+str(k+1)+')'
plt.plot(Z_norm[k,:],label=labelstr)
plt.plot(ensemble_idx[k],Z_norm[k,ensemble_idx[k]],marker='o',color='k'\
,mfc='none',label=None)
plt.ylim(0,25)
plt.xlabel('multinormal draw')
plt.ylabel(r'norm distance of multinormal draw from $k^{th}$ decile')
plt.legend(loc=2,fontsize=8, ncol=5)
    plotstr = 'ensemble_decile_selection' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_deltas(da):
'''
Ensemble deltas (not normalised) [nens,npar]
'''
n = int(da.shape[0]/2)
npar = da.shape[1]
fig,ax = plt.subplots()
for i in range(2*n):
labelstr_c = 'ens(' + str(i+1) + ')'
plt.plot(np.arange(1,npar+1), da[i,:], lw=2, label=labelstr_c)
if n <= 5:
plt.legend(loc=2, fontsize=8, ncol=5)
plt.ylim(-0.0020,0.0020)
plt.xlabel('parameter, a(n)')
plt.ylabel(r'$\delta a(n)$')
plotstr = 'npc_deltas' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_deltas_an(da, a_u):
'''
Ensemble deltas / parameter uncertainty by parameter a(n) [nens,nsensor]
'''
n = int(da.shape[0]/2)
nensemble = da.shape[0]
nparameters = da.shape[1]
if nparameters > 27:
for i in range(4):
fig,ax = plt.subplots()
idx = np.arange(i,nparameters-1,4) # -1 --> MTA:N12 (excl N11)
for k in range(len(idx)-1):
for l in range(nensemble):
labelstr = 'ens('+str(l+1)+')'
if k == 0:
plt.plot(k, da[l,idx[k]] / a_u[idx[k]],'.',label=labelstr)
else:
plt.plot(k, da[l,idx[k]] / a_u[idx[k]],'.',label=None)
if n <= 5:
plt.legend(loc=2, fontsize=8, ncol=5)
plt.ylim(-5,5)
plt.ylabel(r'$\delta a(n)/u(n)$')
plt.xlabel('sensor')
plotstr = 'ensemble_a' + str(i) + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
else:
for i in range(3):
fig,ax = plt.subplots()
idx = np.arange(i,nparameters-1,3) # -1 --> MTA:N12 (excl N11)
for k in range(len(idx)-1):
for l in range(nensemble):
labelstr = 'ens('+str(l+1)+')'
if k == 0:
plt.plot(k, da[l, idx[k]] / a_u[idx[k]],'.',label=labelstr)
else:
plt.plot(k, da[l, idx[k]] / a_u[idx[k]],'.',label=None)
if n <= 5:
plt.legend(loc=2, fontsize=8, ncol=5)
plt.ylim(-5,5)
plt.ylabel(r'$\delta a(n)/u(n)$')
plt.xlabel('sensor')
plotstr = 'ensemble_a' + str(i) + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_deltas_normalised(da, a_u):
'''
Ensemble deltas / parameter uncertainty [nens,npar]
'''
n = int(da.shape[0]/2)
npar = da.shape[1]
fig,ax = plt.subplots()
for i in range(2*n):
labelstr_c = 'ens(' + str(i+1) + ')'
plt.plot(np.arange(1,npar+1), da[i,:] / a_u, lw=2, label=labelstr_c)
if n <= 5:
plt.legend(loc=2, fontsize=8, ncol=5)
plt.ylim(-5,5)
plt.xlabel('parameter, a(n)')
plt.ylabel(r'$\delta a(n)/u(n)$')
plotstr = 'npc_deltas_over_Xu' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_deltas_pc12(da_pc12, a_u):
'''
Project ensemble onto PC1 and PC2
'''
n = int(da_pc12['da_pc1'].shape[0]/2)
fig,ax = plt.subplots()
for i in range(2*n):
labelstr = 'PC1: ens(' + str(i+1) + ')'
plt.plot(da_pc12['da_pc1'][i,:] / a_u, lw=2, label=labelstr)
if n <= 5:
plt.legend(loc=2, fontsize=6, ncol=2)
plt.xlim(-5,5)
plt.ylim(-5,5)
plt.xlabel('parameter, a(n)')
plt.ylabel(r'$\delta a(n)/u(n)$')
plotstr = 'pc1_deltas_over_Xu' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
fig,ax = plt.subplots()
for i in range(2*n):
        labelstr = 'PC2: ens(' + str(i+1) + ')'
        plt.plot(da_pc12['da_pc2'][i,:] / a_u, lw=2, label=labelstr)  # fix: second figure projects onto PC2 (assumes da_pc12 provides 'da_pc2' alongside 'da_pc1')
if n <= 5:
plt.legend(loc=2, fontsize=6, ncol=2)
plt.xlim(-5,5)
plt.ylim(-5,5)
plt.xlabel('parameter, a(n)')
plt.ylabel(r'$\delta a(n)/u(n)$')
plotstr = 'pc2_deltas_over_Xu' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_diff_BT_scatterplot(BT_ens,BT_mmd):
'''
Ensemble BT versus BT_mmd (in 10K bands) [nens,n_mmd]
'''
n = int(BT_ens.shape[1]/2)
fig,ax = plt.subplots()
for i in range(2*n):
labelstr = 'ens(' + str(i+1) + ')'
gd = BT_ens[:,i] > 0
plt.plot(BT_mmd[gd], BT_ens[gd,i], '.', markersize=2, alpha=0.2, label=labelstr)
plt.plot([220,310],[220,310], '--', color='black', label=None)
plt.xlim(220,310)
plt.ylim(220,310)
if n <= 5:
plt.legend(loc=2, fontsize=8, ncol=5)
plt.xlabel(r'brightness temperature, BT / $K$')
plt.ylabel(r'ensemble brightness temperature, ens(BT) / $K$')
plotstr = 'bt_deltas' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
BT_vec = np.arange(230,310,10)
for k in range(len(BT_vec)-1):
fig,ax = plt.subplots()
for i in range(2*n):
labelstr = 'ens(' + str(i+1) + ')'
domain = (BT_mmd >= BT_vec[k]) & (BT_mmd < BT_vec[k+1])
gd = (BT_ens[:,i] > 0) & domain
plt.plot(BT_mmd[gd],BT_ens[gd,i], '.', markersize=2, alpha=0.2, label=labelstr)
plt.plot([BT_vec[k],BT_vec[k+1]],[BT_vec[k],BT_vec[k+1]], '--', color='black', label=None)
plt.xlim(BT_vec[k],BT_vec[k+1])
plt.ylim(BT_vec[k],BT_vec[k+1])
if n <= 5:
plt.legend(loc=2, fontsize=8, ncol=5)
plt.xlabel(r'brightness temperature, BT / $K$')
plt.ylabel(r'ensemble brightness temperature, ens(BT) / $K$')
plotstr = 'bt_deltas' + '_' + str(BT_vec[k]) + '_' + str(BT_vec[k+1]) + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_diff_BT_timeseries(BT_ens,BT):
'''
Ensemble BT minus BT [nens,n]
'''
n = int(BT_ens.shape[1]/2)
fig, ax = plt.subplots()
for k in range(2*n):
label_str = 'Ens(' + str(k+1) + ')'
plt.plot(BT_ens[:,k] - BT, linewidth=1.0, label=label_str)
plt.legend(fontsize=10, ncol=1)
ax.set_ylabel('BT difference / K', fontsize=12)
plotstr = 'bt_deltas' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_ensemble_diff_L_timeseries(L_ens,L):
'''
Ensemble L versus L [nens,n]
'''
n = int(L_ens.shape[1]/2)
fig, ax = plt.subplots()
for k in range(2*n):
label_str = 'Ens(' + str(k+1) + ')'
plt.plot(L_ens[:,k] - L, linewidth=1.0, label=label_str)
plt.legend(fontsize=10, ncol=1)
ax.set_ylabel('Radiance difference', fontsize=12)
plotstr = 'l_deltas' + plotstem
plt.tight_layout()
plt.savefig(plotstr)
plt.close('all')
def plot_orbit_var(lat, lon, var, vmin, vmax, projection, filestr, titlestr, varstr):
'''
Swathe plot of variable with given lat and lon arrays
'''
x = lon[::10,::10]
y = lat[::10,::10]
z = var[::10,::10]
cmap = 'viridis'
fig = plt.figure()
if projection == 'platecarree':
p = ccrs.PlateCarree(central_longitude=0)
threshold = 0
if projection == 'mollweide':
p = ccrs.Mollweide(central_longitude=0)
threshold = 1e6
if projection == 'robinson':
p = ccrs.Robinson(central_longitude=0)
threshold = 0
ax = plt.axes(projection=p)
ax.coastlines()
g = ccrs.Geodetic()
# trans = ax.projection.transform_points(g, x.values, y.values)
trans = ax.projection.transform_points(g, x, y)
x0 = trans[:,:,0]
x1 = trans[:,:,1]
if projection == 'platecarree':
ax.set_extent([-180, 180, -90, 90], crs=p)
gl = ax.gridlines(crs=p, draw_labels=True, linewidth=1, color='gray', alpha=0.5, linestyle='-')
gl.xlabels_top = False
gl.ylabels_right = False
gl.xlines = True
gl.ylines = True
gl.xlocator = mticker.FixedLocator([-180,-120,-60,0,60,120,180])
gl.ylocator = mticker.FixedLocator([-90,-60,-30,0,30,60,90])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# im = ax.pcolor(x, y, z, transform=ax.projection, cmap=cmap)
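        # draw the swath in two halves split at `threshold` so that cells
        # straddling the projection seam do not smear across the whole map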
for mask in (x0>threshold,x0<=threshold):
im = ax.pcolor(ma.masked_where(mask, x), ma.masked_where(mask, y), ma.masked_where(mask, z), vmin=vmin, vmax=vmax, transform=ax.projection, cmap='seismic')
else:
for mask in (x0>threshold,x0<=threshold):
im = ax.pcolor(ma.masked_where(mask, x0), ma.masked_where(mask, x1), ma.masked_where(mask, z), vmin=vmin, vmax=vmax, transform=ax.projection, cmap='seismic')
cb = plt.colorbar(im, orientation="horizontal", extend='both', label=varstr)
plt.title(titlestr)
plt.savefig(filestr)
plt.close('all')
| 37.426859 | 169 | 0.589095 | 2,316 | 15,607 | 3.845423 | 0.130397 | 0.00494 | 0.019762 | 0.026948 | 0.658657 | 0.638783 | 0.608691 | 0.572423 | 0.554233 | 0.538738 | 0 | 0.035726 | 0.221631 | 15,607 | 416 | 170 | 37.516827 | 0.697399 | 0.155123 | 0 | 0.498403 | 0 | 0 | 0.110657 | 0.006019 | 0.00639 | 0 | 0 | 0 | 0 | 1 | 0.041534 | false | 0 | 0.00639 | 0 | 0.047923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f12a05f3d5e3cccdbc328f63c0a7e69fb06f09c | 3,422 | py | Python | Day28-Pomodoro_GUI_App/main.py | the-whiz84/Python_Projects | 35d6c3cef9b4d90e6cb7cbf1dd88de3a7fe5dd0c | [
"MIT"
] | 1 | 2022-01-05T10:54:06.000Z | 2022-01-05T10:54:06.000Z | Day28-Pomodoro_GUI_App/main.py | the-whiz84/Python_Projects | 35d6c3cef9b4d90e6cb7cbf1dd88de3a7fe5dd0c | [
"MIT"
] | null | null | null | Day28-Pomodoro_GUI_App/main.py | the-whiz84/Python_Projects | 35d6c3cef9b4d90e6cb7cbf1dd88de3a7fe5dd0c | [
"MIT"
] | null | null | null | import math
from tkinter import *
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#019267"
YELLOW = "#F2FA5A"
FONT_NAME = "Courier"
WORK_MIN = 50
SHORT_BREAK_MIN = 10
LONG_BREAK_MIN = 30
reps = 0
timer = None
def count_down(count):
"""Create the countdown mechanism inside the canvas.
Counts down by each second for the given interval (work, short break or long break).
Adds a checkmark for each working interval completed.
Args:
count (int): Number of minutes to countdown from based on the Constants set.
"""
count_min = math.floor(count / 60)
count_sec = count % 60
if count_sec < 10:
count_sec = f"0{count_sec}"
if count_min < 10:
count_min = f"0{count_min}"
canvas.itemconfig(timer_text, text=f"{count_min}:{count_sec}")
if count > 0:
global timer
timer = window.after(1000, count_down, count - 1)
else:
start_timer()
marks = ""
work_sessions = math.floor(reps / 2)
for _ in range(work_sessions):
marks += "✔"
checkmark_label.config(text=marks)
def start_timer():
"""Add the Start Button functionality to start the countdown.
Changes the countdown timer between Work interval and Break.
Updates the Title label to show which interval is currently on.
"""
global reps
reps += 1
work_sec = WORK_MIN * 60
short_break_sec = SHORT_BREAK_MIN * 60
long_break_sec = LONG_BREAK_MIN * 60
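    # reps parity drives the schedule: odd reps are work sessions, even reps are
    # short breaks, and every 8th rep (after 4 work sessions) is the long break.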
if reps % 8 == 0:
title_label.config(text="Break", fg=RED)
count_down(long_break_sec)
focus_window("on")
elif reps % 2 == 0:
title_label.config(text="Break", fg=PINK)
count_down(short_break_sec)
focus_window("on")
else:
title_label.config(text="Work", fg=GREEN)
count_down(work_sec)
focus_window("off")
def reset_timer():
"""Add the Reset Button functionality to reset the countdown and all the text on the GUI.
"""
global reps
reps = 0
window.after_cancel(timer)
title_label.config(text="Timer")
canvas.itemconfig(timer_text, text="00:00")
checkmark_label.config(text="")
def focus_window(option):
"""Enable the Tkinter window to show on top of other windows when minimized.
Args:
option (str): Set the function to 'on' or 'off'
"""
if option == "on":
window.deiconify()
window.focus_force()
window.attributes('-topmost', 1)
elif option == "off":
window.attributes('-topmost', 0)
window = Tk()
window.title("Pomodoro")
window.config(padx=100, pady=50, bg=YELLOW)
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato_img = PhotoImage(file="./tomato.png")
canvas.create_image(100, 112, image=tomato_img)
timer_text = canvas.create_text(100, 130, text="00:00", fill="white", font=(FONT_NAME, 28, "bold"))
canvas.grid(column=1, row=1)
title_label = Label(text="Timer", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 36, "normal"))
title_label.grid(column=1, row=0)
start_button = Button(text="Start", font=(FONT_NAME, 16, "normal"), highlightthickness=0, command=start_timer)
start_button.grid(column=0, row=2)
reset_button = Button(text="Reset", font=(FONT_NAME, 16, "normal"), highlightthickness=0, command=reset_timer)
reset_button.grid(column=2, row=2)
checkmark_label = Label(fg=GREEN, bg=YELLOW)
checkmark_label.grid(column=1, row=3)
window.mainloop()
| 30.553571 | 110 | 0.6654 | 492 | 3,422 | 4.477642 | 0.300813 | 0.031775 | 0.040853 | 0.036314 | 0.129823 | 0.067181 | 0.067181 | 0.041761 | 0 | 0 | 0 | 0.039318 | 0.212157 | 3,422 | 111 | 111 | 30.828829 | 0.777448 | 0.200175 | 0 | 0.101266 | 0 | 0 | 0.075797 | 0.00863 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0 | 0.025316 | 0 | 0.075949 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f1339e6f6813e3cd55c4c862dfb0e92d7b3f4a9 | 1,938 | py | Python | src/sentry/utils/pubsub.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2022-02-09T22:56:49.000Z | 2022-02-09T22:56:49.000Z | src/sentry/utils/pubsub.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 6 | 2018-10-19T10:04:23.000Z | 2019-12-09T20:29:12.000Z | src/sentry/utils/pubsub.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2020-07-03T00:52:19.000Z | 2020-07-03T00:52:19.000Z | from __future__ import absolute_import
import redis
import logging
import random
from django.conf import settings
from threading import Thread
from six.moves.queue import Queue, Full
class QueuedPublisher(object):
"""
A publisher that queues items locally and publishes them to a
remote pubsub service on a background thread.
Maintains a lossy internal queue for posting, will discard the
value if the queue is full or not immediately available. Will also
drop items if the publish operation to the remote service fails.
"""
def __init__(self, publisher):
self._started = False
self.publisher = publisher
def _start(self):
if self._started:
return True
self.q = q = Queue(maxsize=100)
def worker():
while True:
(channel, key, value) = q.get()
try:
self.publisher.publish(channel, key=key, value=value)
except Exception:
logger = logging.getLogger('sentry.errors')
logger.debug('could not submit event to pubsub')
finally:
q.task_done()
t = Thread(target=worker)
t.setDaemon(True)
t.start()
self._started = True
return True
def publish(self, channel, value, key=None):
if not self._start():
return
sample_channel = getattr(settings, 'PUBSUB_SAMPLING', 1.0)
if random.random() <= sample_channel:
try:
self.q.put((channel, key, value), block=False)
except Full:
return
class RedisPublisher(object):
def __init__(self, connection):
self.rds = None if connection is None else redis.StrictRedis(**connection)
def publish(self, channel, value, key=None):
if self.rds is not None:
self.rds.publish(channel, value)
| 28.086957 | 82 | 0.603199 | 229 | 1,938 | 5.008734 | 0.436681 | 0.034002 | 0.01918 | 0.036617 | 0.061029 | 0.061029 | 0.061029 | 0.061029 | 0 | 0 | 0 | 0.003788 | 0.318885 | 1,938 | 68 | 83 | 28.5 | 0.865152 | 0.156347 | 0 | 0.177778 | 0 | 0 | 0.03743 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.155556 | 0 | 0.422222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f147f9d332c127c0660526475168efc319163b9 | 960 | py | Python | modules/Utils.py | tum-msv/mimo-cnn-est | 8915a918c08c5ae61dc2208352ebb9676395b3c8 | [
"Unlicense"
] | 2 | 2021-04-28T17:33:07.000Z | 2021-09-22T19:35:05.000Z | modules/Utils.py | tum-msv/mimo-cnn-est | 8915a918c08c5ae61dc2208352ebb9676395b3c8 | [
"Unlicense"
] | null | null | null | modules/Utils.py | tum-msv/mimo-cnn-est | 8915a918c08c5ae61dc2208352ebb9676395b3c8 | [
"Unlicense"
] | null | null | null | import time
from functools import wraps
import numpy as np
def crandn(*arg, rng=np.random.random.__self__):
#np.random.seed()
return np.sqrt(0.5) * (rng.randn(*arg) + 1j * rng.randn(*arg))
def timethis(func):
"""A decorator that prints the execution time.
Example:
Write @utils.timethis before a function definition:
@utils.timthis
def my_function():
pass
Then, every time my_function is called, the execution time is printed.
"""
@wraps(func)
def wrapper(*args, **kwargs):
tic = time.time()
result = func(*args, **kwargs)
toc = time.time()
# hours
h = (toc - tic) // (60 * 60)
s = (toc - tic) % (60 * 60)
print(
'elapsed time of {}(): '
'{:.0f} hour(s) | {:.0f} minute(s) | {:.5f} second(s).'
.format(func.__name__, h, s // 60, s % 60)
)
return result
return wrapper
| 24.615385 | 78 | 0.536458 | 121 | 960 | 4.173554 | 0.520661 | 0.031683 | 0.043564 | 0.039604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027565 | 0.319792 | 960 | 38 | 79 | 25.263158 | 0.745789 | 0.275 | 0 | 0 | 0 | 0.05 | 0.113636 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.15 | 0.05 | 0.45 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f14c81c5a0498d1d6e6cfa782b064dc1e2a8d46 | 2,993 | py | Python | wordlistgen.py | akumanatt/2600-wordle | 5f8d0f76a2db2c06ee59a42106ea3593e735641f | [
"MIT"
] | 1 | 2022-03-13T22:13:41.000Z | 2022-03-13T22:13:41.000Z | wordlistgen.py | akumanatt/2600-wordle | 5f8d0f76a2db2c06ee59a42106ea3593e735641f | [
"MIT"
] | null | null | null | wordlistgen.py | akumanatt/2600-wordle | 5f8d0f76a2db2c06ee59a42106ea3593e735641f | [
"MIT"
] | null | null | null | # usage: wordlistgen.py [words.txt] [answers.txt] [wordlist.asm]
# words.txt should be sorted by word frequency
# since later entries can be removed if there's not enough space
import sys
ATOZ = "abcdefghijklmnopqrstuvwxyz"
BANK_FREES =[0xa3c, 0xfe4, 0xfe4]
NUM_BANKS = len(BANK_FREES)
fulllist = [i.strip().lower() for i in open(sys.argv[1], "r")]
anslist = [i.strip().lower() for i in open(sys.argv[2], "r")]
outfile = open(sys.argv[3], "w")
output_asm = """
; generated by wordlistgen.py
"""
section_format = """
.section wordlist_data_{0}
{1}
.send
"""
section_1_format = """
_ptrs := ({0})
wordlist_1_ptrs_lo .byte <(_ptrs)
wordlist_1_ptrs_hi .byte >(_ptrs)
wordlist_2_ofs
"""
sizes = None
banks = None
bins = {}
remaining = []
for i in ATOZ:
for j in ATOZ:
bins[i+j] = []
# first iteration: fit banks
for word in fulllist:
key = word[:2]
bins[key].append(word)
# try fitting
try_sizes = [0 for i in range(NUM_BANKS)]
try_banks = [[] for i in range(NUM_BANKS)]
cur_bank = 0
exit = False
for i in ATOZ:
group_size = sum([len(bins[i+j]) for j in ATOZ])
while (try_sizes[cur_bank] + group_size) > (BANK_FREES[cur_bank] // 2):
cur_bank += 1
if cur_bank >= NUM_BANKS:
exit = True
break
if exit:
break
try_sizes[cur_bank] += group_size
try_banks[cur_bank].append(i)
if exit:
remaining.append(word)
bins[key].remove(word)
else:
sizes = try_sizes
banks = try_banks
# second iteration: fill gaps
for word in remaining:
for i in range(NUM_BANKS):
if word[0] not in banks[i]:
continue
if (sizes[i] + 2) <= (BANK_FREES[i] // 2):
print(word)
bins[word[:2]].append(word)
sizes[i] += 2
print(sum([len(bins[i]) for i in bins]))
# write results
for i in range(NUM_BANKS):
stxt = ""
if i == 0:
stxt += section_1_format.format(", ".join(["wordlist_3_"+i for i in ATOZ]))
for j in ATOZ:
stxt += " .byte {}\n".format(", ".join(["{:3}".format(len(bins[j+k])*2) for k in ATOZ]))
stxt += "\n"
for j in banks[i]:
stxt += "wordlist_3_"+j
linectr = 0
for k in ATOZ:
# reverse the list to aid searching, since the game checks from the last member first
for word in bins[j+k][::-1]:
if linectr % 16 == 0:
stxt += "\n .word "
else:
stxt += ", "
c2 = (ord(word[2]) - ord("a") + 1)
c3 = (ord(word[3]) - ord("a") + 1)
c4 = (ord(word[4]) - ord("a") + 1)
il = 0x8000 if word in anslist else 0
stxt += "${:04x}".format(c2 | (c3 << 5) | (c4 << 10) | il)
linectr += 1
stxt += "\n"
output_asm += section_format.format(i + 1, stxt)
outfile.write(output_asm)
| 27.458716 | 103 | 0.537254 | 428 | 2,993 | 3.633178 | 0.285047 | 0.025723 | 0.038585 | 0.028296 | 0.141479 | 0.141479 | 0.061736 | 0.061736 | 0.036013 | 0 | 0 | 0.030303 | 0.316405 | 2,993 | 108 | 104 | 27.712963 | 0.729717 | 0.111928 | 0 | 0.197674 | 0 | 0 | 0.107588 | 0.009815 | 0 | 0 | 0.007928 | 0 | 0 | 1 | 0 | false | 0 | 0.011628 | 0 | 0.011628 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f14f700cd145b96aa86b15d1a9499221f0c3690 | 519 | py | Python | examples/exampleXeFF.py | ogorton/dmfortfactor | 879e747a3c839687a729091f811282fdb9264869 | [
"MIT"
] | 1 | 2022-02-28T20:58:51.000Z | 2022-02-28T20:58:51.000Z | examples/exampleXeFF.py | ogorton/dmfortfactor | 879e747a3c839687a729091f811282fdb9264869 | [
"MIT"
] | 1 | 2022-01-24T20:35:32.000Z | 2022-02-28T21:51:53.000Z | examples/exampleXeFF.py | ogorton/dmfortfactor | 879e747a3c839687a729091f811282fdb9264869 | [
"MIT"
] | null | null | null | import sys
sys.path.append("../python")
import dmfortfactor as dm
import numpy as np
import matplotlib.pyplot as plt
import random
cwords = {
"wimpmass" : 150.0,
"usemomentum": 1}
Wfunc = dm.NucFormFactor(
Z = 54,
N = 77,
dres = "../data/Xe/xe131gcn",
controlwords = cwords,
epmin = 0.001,
epmax = 10.0,
epstep = 0.001,
exec_path='../bin/dmfortfactor')
q = 0.001
print("q = %10.5f"%q)
print("W_i^{tau,tau_prime}(q) = ")
print(Wfunc(q))
| 19.961538 | 40 | 0.568401 | 71 | 519 | 4.112676 | 0.633803 | 0.041096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.079787 | 0.27553 | 519 | 25 | 41 | 20.76 | 0.696809 | 0 | 0 | 0 | 0 | 0 | 0.194605 | 0.042389 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.227273 | 0 | 0.227273 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f17142a4a1c75960aa84eea1a7efd1b0957036a | 2,166 | py | Python | texapi/settings/local.py | blumug/texapi | 3caf1dd3f0c641a06964a33f7d3046bdace24eeb | [
"MIT"
] | null | null | null | texapi/settings/local.py | blumug/texapi | 3caf1dd3f0c641a06964a33f7d3046bdace24eeb | [
"MIT"
] | null | null | null | texapi/settings/local.py | blumug/texapi | 3caf1dd3f0c641a06964a33f7d3046bdace24eeb | [
"MIT"
] | null | null | null | """Development settings and globals."""
from os.path import join, normpath
from base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
LOCAL_MODE = True
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'mailtrap.io'
EMAIL_HOST_USER = '4537fd1a8eca5802'
EMAIL_HOST_PASSWORD = '0dfa077b609a5a'
EMAIL_PORT = '2525'
EMAIL_USE_TLS = True
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'texapi',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'ATOMIC_REQUESTS': True,
}
}
########## END DATABASE CONFIGURATION
#
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
#Activate this cache if you want to use rosetta
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'LOCATION': 'texapi'
# }
# }
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1', '0.0.0.0', '::1')
def custom_show_toolbar(self):
return False # True
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'texapi.settings.local.custom_show_toolbar',
}
########## END TOOLBAR CONFIGURATION
HOST='http://localhost:8009'
| 23.543478 | 93 | 0.668513 | 244 | 2,166 | 5.815574 | 0.409836 | 0.05074 | 0.042283 | 0.08809 | 0.336857 | 0.336857 | 0.336857 | 0.276251 | 0.276251 | 0.245243 | 0 | 0.020675 | 0.151431 | 2,166 | 91 | 94 | 23.802198 | 0.75136 | 0.435365 | 0 | 0.04878 | 0 | 0 | 0.367701 | 0.212591 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0.04878 | 0.04878 | 0.02439 | 0.097561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f173212856013758f53735b264f6510bc98fc61 | 658 | py | Python | try_json.py | victorai60/Spider | e4f50186a382fa507ca8398af10ca81fb06ddb4a | [
"Apache-2.0"
] | null | null | null | try_json.py | victorai60/Spider | e4f50186a382fa507ca8398af10ca81fb06ddb4a | [
"Apache-2.0"
] | null | null | null | try_json.py | victorai60/Spider | e4f50186a382fa507ca8398af10ca81fb06ddb4a | [
"Apache-2.0"
] | null | null | null | import requests
import json
url = "https://m.douban.com/rexxar/api/v2/subject_collection/filter_tv_american_hot/items?os=ios&for_mobile=1&start=0&count=18&loc_id=108288&_=0"
headers = {
"User-Agent": "User-Agent: Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
"Referer": "https://m.douban.com/tv/american"
}
response = requests.get(url, headers=headers)
json_str = response.content.decode()
dict_ret = json.loads(json_str)
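# equivalently, requests can parse the body directly: dict_ret = response.json()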
print(dict_ret)
with open("douban.json", "w", encoding="utf-8") as f:
f.write(json.dumps(dict_ret, ensure_ascii=False, indent=2))
| 41.125 | 168 | 0.737082 | 113 | 658 | 4.168142 | 0.654867 | 0.044586 | 0.050955 | 0.063694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 0.100304 | 658 | 15 | 169 | 43.866667 | 0.733108 | 0 | 0 | 0 | 0 | 0.153846 | 0.532725 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f196729e7a123d28ff556fb7d1a262f4235c43f | 715 | py | Python | src/models/network.py | schalappe/kenyan_sign_language_classification | a578e55c96e8eced1d23d31bb2019f8be308c899 | [
"MIT"
] | null | null | null | src/models/network.py | schalappe/kenyan_sign_language_classification | a578e55c96e8eced1d23d31bb2019f8be308c899 | [
"MIT"
] | null | null | null | src/models/network.py | schalappe/kenyan_sign_language_classification | a578e55c96e8eced1d23d31bb2019f8be308c899 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Set of classes for fine-tuning
"""
import tensorflow as tf
from .addons import models
from .head_net import NormHeadNetV2
class FineTuneModel:
@staticmethod
def build(model_name: str, dims: tuple, num_class: int, hidden_unit):
# load reference model
head = models[model_name](
input_shape=dims,
include_top=False,
weights="imagenet",
)
# Freeze the pretrained weights
head.trainable = False
# Add top to reference model
outputs = NormHeadNetV2.build(
base_model=head, len_class=num_class, dense_unit=hidden_unit
)
return tf.keras.Model(head.input, outputs)
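
# A minimal usage sketch (the model key and sizes below are assumptions; valid
# keys depend on the `models` registry in .addons):
#
#   model = FineTuneModel.build(
#       model_name="efficientnetb0",
#       dims=(224, 224, 3),
#       num_class=9,
#       hidden_unit=256,
#   )
#   model.compile(optimizer="adam", loss="categorical_crossentropy")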
| 23.833333 | 73 | 0.627972 | 85 | 715 | 5.141176 | 0.623529 | 0.061785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005882 | 0.286713 | 715 | 29 | 74 | 24.655172 | 0.85098 | 0.177622 | 0 | 0 | 0 | 0 | 0.013865 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f1a874facaa326df6344fbfb3ea914616f645d8 | 7,789 | py | Python | scraper/ssb_base.py | VolVox99/OpenCourseAPI | edfe51c8ee050ad0bf99c03f5ed421ce247cc01f | [
"MIT"
] | 9 | 2020-10-02T00:10:46.000Z | 2022-01-06T00:48:59.000Z | scraper/ssb_base.py | VolVox99/OpenCourseAPI | edfe51c8ee050ad0bf99c03f5ed421ce247cc01f | [
"MIT"
] | 8 | 2020-09-06T22:13:36.000Z | 2020-12-15T20:37:17.000Z | scraper/ssb_base.py | VolVox99/OpenCourseAPI | edfe51c8ee050ad0bf99c03f5ed421ce247cc01f | [
"MIT"
] | 4 | 2020-09-08T02:26:56.000Z | 2022-03-11T20:43:55.000Z | from os import makedirs
from os.path import join, exists
from collections import defaultdict
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from tinydb import TinyDB
from marshmallow import ValidationError as MarshValidationError
from logger import log, log_info, log_err, log_trace
from data.models import classDataSchema, classTimeSchema
SOUP_PARSER = 'lxml'
class BaseHooks:
DATE_FORMAT = '%b %d, %Y' # '%d-%b-%Y'
@staticmethod
def transform_depts(depts):
return depts
@staticmethod
def transform_class(class_data):
return class_data
@classmethod
def parse_date(cls, date_str):
return datetime.strftime(datetime.strptime(date_str, cls.DATE_FORMAT), '%m/%d/%Y')
@staticmethod
def clean_units_str(units_str):
if 'TO' in units_str:
splitted = units_str.split('TO')
return splitted[-1].strip()
elif 'OR' in units_str:
splitted = units_str.split('OR')
return splitted[-1].strip()
else:
return units_str
class BaseSSBScraper:
PREFIX = ''
def __init__(self, ssb_url, db_dir, cache_dir, hooks=None, login=None, ssb_campus=None, max_terms=-1, start_term=None, use_cache=True, trace=False):
self.ssb_url = ssb_url
self.db_dir = db_dir
self.cache_dir = cache_dir
self.login = login
self.ssb_campus = ssb_campus
self.hooks = hooks or BaseHooks
self.max_terms = max_terms
self.start_term = start_term
self.use_cache = use_cache
self.trace = trace
self.loggedIn = False
self.session = requests.session()
def run(self):
# Create db dir (ex. 'db/') and cache dir (ex. 'db/.cache/scrape_advanced')
for folder in [self.db_dir, self.cache_dir]:
if not exists(folder):
makedirs(folder, exist_ok=True)
# Get all term codes (hits FHDA endpoint)
codes = self.mine_term_codes()
# Debug utilities to limit the terms mined
if self.start_term and codes.index(self.start_term):
codes = codes[codes.index(self.start_term):]
if self.max_terms > 0 and len(codes) > self.max_terms:
codes = codes[:self.max_terms]
log_info(f'Loaded {len(codes)} term codes')
for term in codes:
# Mine department data
# Hits FHDA endpoint to get all departments for the term
log(term, 'magenta', 'Mining departments... ', end='\r')
depts = self.mine_dept_data(term)
# Mine and process class data
# Hits FHDA endpoint to get all classes for the term
log(term, 'magenta', 'Mining classes... ', end='\r')
classes = self.mine_campus_term(term, depts)
# Create / load a DB for the term
log(term, 'magenta', 'Writing data to db... ', end='\r')
campus_prefix = f'{self.ssb_campus.lower()}_' if self.ssb_campus else ''
db = TinyDB(join(self.db_dir, f'{self.PREFIX}{campus_prefix}{term}_database.json'))
with db:
# Write the dept and class data to the DB
self.save_classes(db, depts, classes)
# Get counts of mined data (for logging)
dept_count = len(db.table('departments'))
course_count = len(db.table('courses'))
class_count = len(db.table('classes'))
# that's it! move on to the next term code...
info = f'Mined {dept_count} depts, {course_count} courses, and {class_count} classes'
log(term, 'magenta', f'{info} ')
def mine_term_codes(self):
        '''
        Grab all available term codes from the dynamic schedule page.
        :return: (list) list of term code strings
        '''
html = self.fetch_and_cache(
'bwckschd.p_disp_dyn_sched',
'all-terms.html',
)
soup = BeautifulSoup(html, SOUP_PARSER)
term_select = soup.find('select', {'name': 'p_term'})
options = term_select.find_all('option')
return [opt['value'] for opt in options if opt['value']]
def mine_dept_data(self, term: str):
        '''
        Grab the department IDs and names for a given term.
        :param term: (str) the term to mine
        :return: (dict) mapping of department ID to department name
        '''
data = [('p_calling_proc', 'bwckschd.p_disp_dyn_sched'), ('p_term', term)]
html = self.fetch_and_cache(
'bwckgens.p_proc_term_date',
f'{term}-depts.html',
data=data,
)
soup = BeautifulSoup(html, SOUP_PARSER)
dept_select = soup.find('select', {'id': 'subj_id'})
options = dept_select.find_all('option')
depts = {}
for option in options:
dept_id = option['value']
if dept_id:
depts[dept_id] = option.get_text().strip() or ''
return self.hooks.transform_depts(depts)
def save_classes(self, db, depts, classes):
db_depts = []
db_courses = []
db_classes = []
depts = {k.replace(' ', ''): v for k, v in depts.items()}
for dept, t in classes.items():
db_depts.append({
'id': dept,
'name': depts[dept],
})
for course, section in t.items():
course_classes = []
course_titles = set()
for cl in section.values():
try:
data = classDataSchema.load(cl)
classTimes = [classTimeSchema.load(time) for time in cl['times']]
except MarshValidationError as e:
print(e, cl)
continue
data['times'] = classTimes
db_classes.append(data)
course_titles.add(data['title'])
course_classes.append(data['CRN'])
if len(course_titles) > 1:
log_err(f'Multiple course titles for "{dept} {course}" {str(course_titles)}')
db_courses.append({
'dept': dept,
'course': course,
'title': course_titles.pop(),
'classes': course_classes
})
db.drop_tables()
db.table('departments').insert_multiple(db_depts)
db.table('courses').insert_multiple(db_courses)
db.table('classes').insert_multiple(db_classes)
def fetch_and_cache(self, url: str, filename: str, authenticated=False, data=None):
full_filename = join(self.cache_dir, filename)
if self.use_cache:
try:
                # Read bytes so cache hits return the same type as res.content below
                with open(full_filename, 'rb') as f:
if self.trace:
log_trace(f'Loaded {url} from cache')
return f.read()
except FileNotFoundError:
pass
if self.trace:
log_trace(f'Loading {url} from network...')
if authenticated:
self.do_login()
obj = self.session if authenticated else requests
res = obj.post(self.ssb_url + url, data=data) if data else obj.get(self.ssb_url + url)
res.raise_for_status()
with open(full_filename, 'wb') as file:
file.write(res.content)
return res.content
def do_login(self):
if not self.loggedIn and self.login:
self.login(self.session)
self.loggedIn = True
log_info('Logged in')
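# Minimal usage sketch (assumptions: a concrete subclass supplies
# mine_campus_term(), and the SSB base URL below is a placeholder):
#
#   class MyScraper(BaseSSBScraper):
#       PREFIX = 'demo_'
#
#       def mine_campus_term(self, term, depts):
#           return {}  # dept -> course -> CRN -> class data
#
#   MyScraper('https://ssb.example.edu/', db_dir='db',
#             cache_dir='db/.cache', max_terms=1).run()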
| 33.144681 | 152 | 0.563615 | 948 | 7,789 | 4.469409 | 0.232068 | 0.013217 | 0.009441 | 0.009205 | 0.144914 | 0.08119 | 0.066084 | 0.024074 | 0.024074 | 0.024074 | 0 | 0.001153 | 0.331878 | 7,789 | 234 | 153 | 33.286325 | 0.813028 | 0.103222 | 0 | 0.094937 | 0 | 0 | 0.110498 | 0.021635 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06962 | false | 0.006329 | 0.063291 | 0.018987 | 0.221519 | 0.006329 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f1d969537ae4549e68774c788f1e46159431fb4 | 2,981 | py | Python | backend/apps/recruting/routers.py | RafaelOO/FARM-Intro | 14b241353f8d1a2506f6404ba7f09fe373430e33 | [
"MIT"
] | null | null | null | backend/apps/recruting/routers.py | RafaelOO/FARM-Intro | 14b241353f8d1a2506f6404ba7f09fe373430e33 | [
"MIT"
] | null | null | null | backend/apps/recruting/routers.py | RafaelOO/FARM-Intro | 14b241353f8d1a2506f6404ba7f09fe373430e33 | [
"MIT"
] | null | null | null | from fastapi import APIRouter, Body, Request, HTTPException, status
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
from .models import Candidate, UpdateCandidateModel
router = APIRouter()
@router.post("/", response_description="Add new candidate")
async def create_candidate(request: Request, candidate: Candidate = Body(...)):
candidate = jsonable_encoder(candidate)
new_candidate = await request.app.mongodb["candidates"].insert_one(candidate)
created_candidate = await request.app.mongodb["candidates"].find_one(
{"_id": new_candidate.inserted_id}
)
return JSONResponse(status_code=status.HTTP_201_CREATED, content=created_candidate)
@router.get("/", response_description="List all candidates")
async def list_candidates(request: Request):
candidates = []
for doc in await request.app.mongodb["candidates"].find().to_list(length=100):
candidates.append(doc)
return candidates
@router.get("/{id}", response_description="Get a single candidate from their id")
async def show_candidate(id: str, request: Request):
    # The update/delete routes below query by the raw string id, so do the same
    # here (assumes Candidate documents carry string _id values, as produced by
    # the jsonable_encoder insert above)
    if (candidate := await request.app.mongodb["candidates"].find_one({"_id": id})) is not None:
return candidate
raise HTTPException(status_code=404, detail=f"Candidate {id} not found")
@router.get("/surname/{surname}", response_description="Get a single candidate from their surname")
async def show_candidate_by_surname(surname: str, request: Request):
if (candidate := await request.app.mongodb["candidates"].find_one({"name.surname": surname})) is not None:
return candidate
raise HTTPException(status_code=404, detail=f"Candidate {surname} not found")
@router.put("/{id}", response_description="Update a candidate")
async def update_candidate(id: str, request: Request, candidate: UpdateCandidateModel = Body(...)):
candidate = {k: v for k, v in candidate.dict().items() if v is not None}
if len(candidate) >= 1:
update_result = await request.app.mongodb["candidates"].update_one(
{"_id": id}, {"$set": candidate}
)
if update_result.modified_count == 1:
if (
updated_candidate := await request.app.mongodb["candidates"].find_one({"_id": id})
) is not None:
return updated_candidate
if (
existing_candidate := await request.app.mongodb["candidates"].find_one({"_id": id})
) is not None:
return existing_candidate
raise HTTPException(status_code=404, detail=f"Candidate {id} not found")
@router.delete("/{id}", response_description="Delete a candidate")
async def delete_candidate(id: str, request: Request):
delete_result = await request.app.mongodb["candidates"].delete_one({"_id": id})
if delete_result.deleted_count == 1:
return JSONResponse(status_code=status.HTTP_204_NO_CONTENT)
raise HTTPException(status_code=404, detail=f"Candidate {id} not found")
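# Wiring sketch (assumption: main.py creates the FastAPI app, attaches a Motor
# client as app.mongodb, and mounts this router, e.g.):
#   app.include_router(router, tags=["candidates"], prefix="/candidate")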
| 39.746667 | 110 | 0.711506 | 371 | 2,981 | 5.568733 | 0.234501 | 0.052275 | 0.065344 | 0.095837 | 0.46999 | 0.450145 | 0.339303 | 0.339303 | 0.293804 | 0.268635 | 0 | 0.009635 | 0.164374 | 2,981 | 74 | 111 | 40.283784 | 0.819751 | 0 | 0 | 0.169811 | 0 | 0 | 0.134854 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.09434 | 0 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f1f36d1e0f9a1ebd133c2aae0975c9487b7d756 | 3,008 | py | Python | Scripts/Wordle_Answers_For_Input.py | krystianpietryka/Wordle_Assistant | caad82fdc780fda50931de7c51fa776ce395ccae | [
"MIT"
] | 1 | 2022-03-18T08:37:14.000Z | 2022-03-18T08:37:14.000Z | Scripts/Wordle_Answers_For_Input.py | krystianpietryka/Wordle_Assistant | caad82fdc780fda50931de7c51fa776ce395ccae | [
"MIT"
] | 1 | 2022-03-18T08:37:00.000Z | 2022-03-18T09:30:20.000Z | Scripts/Wordle_Answers_For_Input.py | krystianpietryka/Wordle_Assistant | caad82fdc780fda50931de7c51fa776ce395ccae | [
"MIT"
] | null | null | null | import re
# returns False if any index holds a letter in both inputs -- a letter cannot
# be green and yellow at the same position
def Same_Place_Letters_Check(green, yellow):
    for i in range(len(green)):
        if green[i].isalpha() and yellow[i].isalpha():
            return False
    return True
# checks that every character is alphabetic or a "." placeholder
def Valid_Symbol_Check(combined_inputs):
    for letter in combined_inputs:
        if not (letter == "." or letter.isalpha()):
            return False
    return True
# converts empty letters in string to dots
def Convert_Empty_Letters(letters):
result = ""
for letter in letters:
if not letter:
result += "."
else:
result += letter
return result
# Returns the possible 5 letter answers for the given green/yellow/excluded constraints
def Display_Possible_Answers(
possible_answers, excluded_letters, green_letters_input, yellow_letters
):
answers_to_delete = []
# Loop through the 5 letter words, filter by green_letters and excluded_letters
for line in possible_answers:
search_result = re.search(green_letters_input, line)
# If search result matches regex, mark for deletion if excluded letters are contained in the word
if search_result:
for letter in line:
if letter in excluded_letters:
answers_to_delete.append(line)
else:
answers_to_delete.append(line)
# Loop through filtered answers, mark possible answers for deletion if letters do not contain all of the yellow letters
for answer in possible_answers:
for letter in yellow_letters:
if letter != ".":
if letter not in answer:
answers_to_delete.append(answer)
# If a string contains the same yellow letter and green letter
# It must contain 2 of the same letter, so if the answer
# does not contain 2 of the same letters, mark it for deletion
elif letter in green_letters_input:
count = 0
for answer_letter in answer:
if answer_letter == letter:
count += 1
if count != 2:
answers_to_delete.append(answer)
# Exclude answers with same letter in the same index as yellow letters
for answer in possible_answers:
for i in range(0, 5):
if yellow_letters[i] == answer[i]:
answers_to_delete.append(answer)
# mark for deletion wordle answers used in the past
with open("Text_Files/past_answers.txt", "r") as past_answers:
for past_answer in past_answers:
if past_answer in possible_answers:
answers_to_delete.append(past_answer)
# Deleting answers marked for deletion
for marked_answer in answers_to_delete:
try:
possible_answers.remove(marked_answer)
except:
pass
possible_answers.sort()
return possible_answers
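# Hypothetical call (word list, excluded letters, green pattern, yellow pattern),
# assuming Text_Files/past_answers.txt exists and lists none of these words:
#   Display_Possible_Answers(["crane", "slate", "shale"], "ru", "s...e", "..h..")
# keeps only "shale": it matches the green pattern s...e, contains the yellow
# 'h' at a position other than index 2, and avoids the excluded letters r/u.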
| 34.574713 | 123 | 0.627327 | 386 | 3,008 | 4.73057 | 0.264249 | 0.082147 | 0.065717 | 0.069003 | 0.159365 | 0.046002 | 0.046002 | 0.046002 | 0 | 0 | 0 | 0.006354 | 0.319814 | 3,008 | 86 | 124 | 34.976744 | 0.886119 | 0.27859 | 0 | 0.224138 | 0 | 0 | 0.014378 | 0.012523 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0.017241 | 0.017241 | 0 | 0.189655 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f1fc5d68577311a61dffb534d8ff8b6e87cd185 | 5,239 | py | Python | data/transforms.py | vshipitsin/Ultrasound | deb1e7f0edee023748e675300573656f81e8a5b7 | [
"MIT"
] | 3 | 2021-05-12T06:32:06.000Z | 2021-06-15T10:58:24.000Z | data/transforms.py | vshipitsin/Ultrasound | deb1e7f0edee023748e675300573656f81e8a5b7 | [
"MIT"
] | null | null | null | data/transforms.py | vshipitsin/Ultrasound | deb1e7f0edee023748e675300573656f81e8a5b7 | [
"MIT"
] | null | null | null | import random
import math
import torch
import torchvision
import torchvision.transforms.functional as TF
class ToTensor(object):
def __init__(self, encode_map=False):
self.encode_map = encode_map
self.transform = torchvision.transforms.ToTensor()
@staticmethod
def encode_segmentation_map(mask):
labels_map = torch.zeros(mask.shape)
labels_map[mask > 0] = 1
return labels_map.to(dtype=torch.int64)
def __call__(self, sample):
image, mask = sample
if self.encode_map:
return self.transform(image), self.encode_segmentation_map(self.transform(mask))
else:
return self.transform(image), self.transform(mask)
class Resize(object):
def __init__(self, size):
self.resize = torchvision.transforms.Resize(size,
interpolation=torchvision.transforms.InterpolationMode.BILINEAR)
def __call__(self, sample):
image, mask = sample
return self.resize(image), self.resize(mask)
class HorizontalFlip(object):
    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, sample):
        image, mask = sample
        # decide once per sample so the image and its mask stay aligned
        if random.random() < self.p:
            return TF.hflip(image), TF.hflip(mask)
        return image, mask
class RandomRotation(object):
    def __init__(self, degrees):
        self.degrees = degrees

    def __call__(self, sample):
        image, mask = sample
        # sample a fresh angle per call and apply it to both image and mask
        angle = torchvision.transforms.RandomRotation.get_params((-self.degrees, self.degrees))
        return TF.rotate(image, angle), TF.rotate(mask, angle)
class RandomScale(object):
    def __init__(self, scale):
        # stored under a distinct name so it does not shadow the scale() method
        self.scale_range = scale

    def scale(self, image, params):
        # resample/fillcolor were replaced by interpolation/fill in current torchvision
        return TF.affine(image, *params,
                         interpolation=torchvision.transforms.InterpolationMode.NEAREST,
                         fill=0)

    def __call__(self, sample):
        image, mask = sample
        # draw one set of affine parameters and apply it to both image and mask
        params = torchvision.transforms.RandomAffine.get_params(
            (0, 0), None, self.scale_range, None, image.size)
        return self.scale(image, params), self.scale(mask, params)
class BrightContrastJitter(object):
def __init__(self, brightness=0, contrast=0):
self.transform = torchvision.transforms.ColorJitter(brightness, contrast, 0, 0)
def __call__(self, sample):
image, mask = sample
return self.transform(image), mask
class GaussianNoise(object):
def __init__(self, standard_deviation):
self.standard_deviation = standard_deviation
@staticmethod
def noise_overlay(tensor, standard_deviation):
if type(standard_deviation) is tuple:
min_value = standard_deviation[0] / 255.0
max_value = standard_deviation[1] / 255.0
else:
min_value = standard_deviation / 255.0
max_value = standard_deviation / 255.0
return torch.clamp(tensor +
torch.empty_like(tensor).normal_(mean=0.0, std=1.0) *
torch.empty_like(tensor).uniform_(min_value, max_value),
min=0.0, max=1.0)
def __call__(self, sample):
image, clean_image = sample
return self.noise_overlay(image, self.standard_deviation), clean_image
class RicianNoise(object):
def __init__(self, variance=(0, 0.1)):
self.variance = variance
@staticmethod
def noise_overlay(tensor, variance):
        if type(variance) is tuple:
            variance = random.uniform(variance[0], variance[1])
return torch.clamp(torch.sqrt(torch.pow(tensor +
torch.empty_like(tensor).normal_(mean=0.0, std=variance), 2) +
torch.pow(torch.empty_like(tensor).normal_(mean=0.0, std=variance), 2)),
min=0.0, max=1.0)
def __call__(self, sample):
image, clean_image = sample
return self.noise_overlay(image, self.variance), clean_image
class RandomErasing(object):
    def __init__(self, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1
def noise_overlay(self, tensor):
for attempt in range(100):
area = tensor.shape[1] * tensor.shape[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < tensor.shape[2] and h < tensor.shape[1]:
x1 = random.randint(0, tensor.shape[1] - h)
y1 = random.randint(0, tensor.shape[2] - w)
if tensor.shape[0] == 3:
tensor[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
tensor[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
tensor[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
tensor[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return tensor
return tensor
def __call__(self, sample):
image, clean_image = sample
return self.noise_overlay(image), clean_image
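# Composition sketch (assumption: samples are (image, mask) PIL pairs, so the
# standard torchvision Compose can chain these paired transforms):
#   pipeline = torchvision.transforms.Compose([
#       Resize((256, 256)),
#       HorizontalFlip(p=0.5),
#       ToTensor(encode_map=True),
#   ])
#   image_tensor, labels_map = pipeline((image, mask))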
| 33.369427 | 116 | 0.60355 | 646 | 5,239 | 4.698142 | 0.182663 | 0.032949 | 0.03855 | 0.050412 | 0.336079 | 0.27084 | 0.25173 | 0.241186 | 0.204942 | 0.163427 | 0 | 0.03237 | 0.286505 | 5,239 | 156 | 117 | 33.583333 | 0.779561 | 0 | 0 | 0.267241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.198276 | false | 0 | 0.043103 | 0 | 0.456897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f2033a2f3225dea614853557dc3ac6f05fe992a | 1,289 | py | Python | tests/nutter/test_resultreports.py | cganta/dbtest | 37b8020cae9218ce9d9e79e92d2d9419ae62f74c | [
"MIT"
] | 130 | 2020-02-13T17:20:58.000Z | 2022-03-29T20:27:49.000Z | tests/nutter/test_resultreports.py | cganta/dbtest | 37b8020cae9218ce9d9e79e92d2d9419ae62f74c | [
"MIT"
] | 27 | 2020-02-25T05:04:28.000Z | 2022-03-07T23:27:44.000Z | tests/nutter/test_resultreports.py | cganta/dbtest | 37b8020cae9218ce9d9e79e92d2d9419ae62f74c | [
"MIT"
] | 21 | 2020-02-13T19:33:42.000Z | 2022-03-18T02:36:38.000Z | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
import pytest
from common.testresult import TestResults, TestResult
from common.resultreports import JunitXMLReportWriter
from common.resultreports import TagsReportWriter
def test_junitxmlreportwriter_add_result__invalid_params__raises_valueerror():
writer = JunitXMLReportWriter()
with pytest.raises(ValueError):
writer.add_result(None, None)
def test_tagsreportwriter_add_result__invalid_params__raises_valueerror():
writer = TagsReportWriter()
with pytest.raises(ValueError):
writer.add_result(None, None)
def test_tagsreportwriter_add_result__1_test_result__1_valid_row():
writer = TagsReportWriter()
test_results = TestResults()
test_name = 'case1'
duration = 10
tags = ['hello', 'hello']
test_result = TestResult(test_name, True, duration, tags)
test_results.append(test_result)
notebook_name = 'test_mynotebook'
writer.add_result(notebook_name, test_results)
assert len(writer._rows) == 1
row = writer._rows[0]
assert row.notebook_name == notebook_name
assert row.test_name == test_name
assert row.passed_str == 'PASSED'
assert row.duration == duration
assert row.tags == row._to_tag_string(tags)
| 28.644444 | 78 | 0.754849 | 153 | 1,289 | 6.019608 | 0.339869 | 0.058632 | 0.095548 | 0.062975 | 0.261672 | 0.261672 | 0.261672 | 0.175896 | 0.175896 | 0.175896 | 0 | 0.006475 | 0.161365 | 1,289 | 44 | 79 | 29.295455 | 0.845513 | 0.052754 | 0 | 0.206897 | 0 | 0 | 0.029678 | 0 | 0 | 0 | 0 | 0 | 0.206897 | 1 | 0.103448 | false | 0.034483 | 0.137931 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f242ef44351b6946ec51913b2511ceb3f37212d | 648 | py | Python | trajectory/management/commands/WayPointsDatabaseLoad.py | RobertPastor/flight-profile | bdc3bb9defeb347db26f96f7accd4d06cad1e33b | [
"MIT"
] | null | null | null | trajectory/management/commands/WayPointsDatabaseLoad.py | RobertPastor/flight-profile | bdc3bb9defeb347db26f96f7accd4d06cad1e33b | [
"MIT"
] | null | null | null | trajectory/management/commands/WayPointsDatabaseLoad.py | RobertPastor/flight-profile | bdc3bb9defeb347db26f96f7accd4d06cad1e33b | [
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from trajectory.management.commands.WayPoints.WayPointsDatabaseFile import WayPointsDatabase
from trajectory.models import WayPoint
class Command(BaseCommand):
    help = 'Reads the way points data file and loads the WayPoint table'
def handle(self, *args, **options):
WayPoint.objects.all().delete()
wayPointsBD = WayPointsDatabase()
if (wayPointsBD.exists()):
print("acBD exists")
ret = wayPointsBD.read()
print ("read wayPoints database result = {0}".format(ret))
else:
print("wayPoints database does not exists") | 38.117647 | 93 | 0.683642 | 68 | 648 | 6.514706 | 0.691176 | 0.063205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001984 | 0.222222 | 648 | 17 | 94 | 38.117647 | 0.876984 | 0 | 0 | 0 | 0 | 0 | 0.203704 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.428571 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f25e763c359b1bdd3aa8e71bc6771b962bedaf9 | 938 | py | Python | effort/src/datasets/handler.py | rahlk/Bellwether | 39e0e63504a6dfdeeb5d6e8d733e708d1485ecd9 | [
"Unlicense"
] | 9 | 2017-07-27T10:32:48.000Z | 2021-07-01T11:51:51.000Z | effort/src/datasets/handler.py | rahlk/Bellwether | 39e0e63504a6dfdeeb5d6e8d733e708d1485ecd9 | [
"Unlicense"
] | 11 | 2016-03-15T16:27:47.000Z | 2019-09-05T02:25:08.000Z | effort/src/datasets/handler.py | rahlk/Bellwether | 39e0e63504a6dfdeeb5d6e8d733e708d1485ecd9 | [
"Unlicense"
] | 5 | 2017-01-28T22:45:34.000Z | 2019-12-04T13:15:10.000Z | from __future__ import print_function, division
from pdb import set_trace
from effort import *
import pandas
import os
import sys
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
from glob import glob
def pytocsv():
for mod in [coc81,Mystery1,Mystery2,cocomo,nasa93]:
inst = mod.run()
fname = mod.__name__.split('.')[-1]+'.csv'
head = inst.indep+[inst.less[0]]
print(fname, len(head), " ".join(head))
body = [elem.cells[:24] for elem in inst._rows]
dframe = pandas.DataFrame(body, columns = head)
dframe.to_csv(fname, index=False)
def get_all_datasets():
all = {}
files = glob(os.path.abspath(os.path.join(root, 'datasets', "*.csv")))
for f in files:
all.update({f.split("/")[-1].split('.')[0]: f})
return all
if __name__ == '__main__':
get_all_datasets()
| 26.8 | 74 | 0.608742 | 132 | 938 | 4.143939 | 0.484848 | 0.032907 | 0.036563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018056 | 0.232409 | 938 | 34 | 75 | 27.588235 | 0.741667 | 0 | 0 | 0 | 0 | 0 | 0.037313 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.259259 | 0 | 0.37037 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f272a14d6714e39b245a5ca529f31d3ab07a1d7 | 3,222 | py | Python | bot.py | gandalf3/distracticat | 22aaa9ecadea4b654f1c2f7c30ed11f10f9472a9 | [
"MIT"
] | 1 | 2022-03-08T23:30:40.000Z | 2022-03-08T23:30:40.000Z | bot.py | gandalf3/distracticat | 22aaa9ecadea4b654f1c2f7c30ed11f10f9472a9 | [
"MIT"
] | 1 | 2022-03-05T21:50:18.000Z | 2022-03-05T21:50:18.000Z | bot.py | gandalf3/distracticat | 22aaa9ecadea4b654f1c2f7c30ed11f10f9472a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
import os
import random
import sys
import discord
from dotenv import load_dotenv
import sqlalchemy as sa
from sqlalchemy import orm
from discord.ext import commands
from distracticat import chooser, model
from distracticat.config import Config
from distracticat.emotes import Reactions
logging.basicConfig(level=logging.INFO)
log = logging
load_dotenv()
def getenv(key: str) -> str:
if (value := os.getenv(key)) is not None:
return value
else:
exit(f"{key} environment variable not specified")
database_url = getenv("DATABASE_URL")
discord_secret_token = getenv("DISCORD_SECRET_TOKEN")
config = Config()
bot = commands.Bot(command_prefix=config.command_prefix)
engine: sa.engine.Engine = sa.create_engine(database_url, echo=True, future=True)
async def add_distraction(
channel: discord.PartialMessageable,
reply,
description: str,
guild_id: int,
author_id: int,
message_id: int | None = None,
):
await channel.send(Reactions.reaction())
distraction = model.Distraction(
guild_id=guild_id,
description=description,
author_id=author_id,
message_id=message_id,
)
embed = discord.Embed(
title="new distraction!",
description=distraction.description,
color=discord.Color.purple(),
)
embed.add_field(name="Suggested by", value=f"<@{author_id}>")
async with channel.typing():
with orm.Session(engine) as session:
session.add(distraction)
session.commit()
await reply(embed=embed)
@bot.command(name="distracticat")
async def distracticat_cmd(ctx: commands.Context, *, description: str):
await add_distraction(
ctx.channel,
ctx.reply,
description,
ctx.guild.id,
ctx.author.id,
ctx.message.id,
)
@bot.slash_command(name="distracticat", guild_ids=config.guild_ids())
async def distracticat_scmd(ctx: discord.ApplicationContext, description: str):
await add_distraction(
ctx.channel,
ctx.respond,
description,
ctx.guild.id,
ctx.author.id,
)
@bot.command(name="commitfelicide")
async def kill_cmd(ctx: commands.Context):
await ctx.reply("how could you do this? (ಡ‸ಡ)")
sys.exit()
@bot.slash_command(name="commitfelicide", guild_ids=config.guild_ids())
async def kill_scmd(ctx: commands.Context):
await ctx.respond("how could you do this? (ಡ‸ಡ)")
sys.exit()
@bot.command()
async def choose(ctx: commands.Context, *, choices_str: str):
choices, feedback = chooser.parse_choices(choices_str)
if feedback:
await ctx.send(feedback)
return
if len(choices) == 0:
await ctx.reply(
"That's a tough decision you're asking me to make you know. "
"Let me get back to you on that one."
)
return
if len(choices) == 1:
await ctx.reply(f"That's a sound decision {ctx.author}")
else:
chosen = random.choice(choices)
await ctx.reply(f"Hm.. :thinking: I say go with {chosen}.")
@bot.event
async def on_ready():
log.info(f"logged in as {bot.user}")
bot.run(discord_secret_token)
| 23.347826 | 81 | 0.66977 | 421 | 3,222 | 5.030879 | 0.330166 | 0.02644 | 0.033994 | 0.01983 | 0.154863 | 0.130312 | 0.130312 | 0.071766 | 0.028329 | 0.028329 | 0 | 0.001194 | 0.22005 | 3,222 | 137 | 82 | 23.518248 | 0.840828 | 0.006518 | 0 | 0.161616 | 0 | 0 | 0.129375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010101 | false | 0 | 0.121212 | 0 | 0.161616 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f27e3bf932ed138dde63efb1dd8abbc7155fad3 | 9,666 | py | Python | backend/group/views/oj.py | skku-npc/SKKU_Coding_Platform | 1d972e8922484cf94f6735fd08b2565e5d3517d0 | [
"MIT"
] | 1 | 2022-03-30T14:03:23.000Z | 2022-03-30T14:03:23.000Z | backend/group/views/oj.py | skku-npc/SKKU_Coding_Platform | 1d972e8922484cf94f6735fd08b2565e5d3517d0 | [
"MIT"
] | 56 | 2022-02-19T08:13:48.000Z | 2022-03-25T10:17:07.000Z | backend/group/views/oj.py | skku-npc/SKKU_Coding_Platform | 1d972e8922484cf94f6735fd08b2565e5d3517d0 | [
"MIT"
] | 1 | 2022-03-25T15:02:46.000Z | 2022-03-25T15:02:46.000Z | from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from group.serializers import CreateGroupMemberJoinSerializer, EditGroupMemberPermissionSerializer, GroupMemberJoinSerializer, GroupDetailSerializer, GroupMemberSerializer
from group.serializers import GroupRegistrationRequestSerializer, GroupSummarySerializer, CreateGroupRegistrationRequestSerializer
from utils.api import APIView, validate_serializer
from utils.decorators import check_group_admin
from django.db.models import Q
from ..models import GroupMemberJoin, GroupMember, GroupRegistrationRequest, Group
class GroupRegistrationRequestAPI(APIView):
@swagger_auto_schema(
request_body=CreateGroupRegistrationRequestSerializer,
operation_description="Request to register a group",
responses={200: GroupRegistrationRequestSerializer}
)
@validate_serializer(CreateGroupRegistrationRequestSerializer)
def post(self, request):
user = request.user
if not user.is_authenticated:
return self.error("Login First")
data = request.data
name = data["name"]
if GroupRegistrationRequest.objects.filter(name=name).exists() or Group.objects.filter(name=name).exists():
return self.error("Duplicate group name")
registration_request = GroupRegistrationRequest.objects.create(
name=name,
short_description=data["short_description"],
description=data["description"],
is_official=data["is_official"],
created_by=request.user
)
return self.success(GroupRegistrationRequestSerializer(registration_request).data)
class GroupAPI(APIView):
@swagger_auto_schema(
manual_parameters=[
openapi.Parameter(
name="id",
in_=openapi.IN_QUERY,
description="Unique ID of a group. if id is not given, return group list, else return detail of a group",
type=openapi.TYPE_INTEGER,
required=False
),
],
operation_description="Get group list or detail of a group"
)
def get(self, request):
user = request.user
if not user.is_authenticated:
return self.error("Login First")
group_id = request.GET.get("id")
# Group List
if not group_id:
groups_not_admin = Group.objects.filter(groupmember__is_group_admin=False, groupmember__user=user)
groups_admin = Group.objects.filter(groupmember__is_group_admin=True, groupmember__user=user)
other_groups = Group.objects.exclude(Q(members=user))
data = {}
data["admin_groups"] = GroupSummarySerializer(groups_not_admin, many=True).data
data["groups"] = GroupSummarySerializer(groups_admin, many=True).data
data["other_groups"] = GroupSummarySerializer(other_groups, many=True).data
return self.success(data)
# Group Detail
try:
group = Group.objects.get(id=group_id)
except Group.DoesNotExist:
return self.error("Group does not exist")
data = GroupDetailSerializer(group).data
data["members"] = GroupMemberSerializer(GroupMember.objects.filter(group=group_id), many=True).data
if GroupMember.objects.filter(is_group_admin=True, group=group, user=user).exists():
group_member_join = GroupMemberJoin.objects.filter(group=group)
data["group_member_join"] = GroupMemberJoinSerializer(group_member_join, many=True).data
return self.success(data)
class GroupMemberAPI(APIView):
# Change User Group Permission
@swagger_auto_schema(
request_body=EditGroupMemberPermissionSerializer,
operation_description="Change group member permission. only can change is_group_admin field.",
responses={200: GroupMemberSerializer}
)
@validate_serializer(EditGroupMemberPermissionSerializer)
@check_group_admin()
def put(self, request):
data = request.data
user = request.user
if data["is_group_admin"]:
try:
member = GroupMember.objects.get(user=data["user_id"], group=data["group_id"])
except GroupMember.DoesNotExist:
return self.error("Group Member does not exists")
member.is_group_admin = data["is_group_admin"] # True
member.save()
return self.success(GroupMemberSerializer(member).data)
# Only group creator can downgrade group admin's permission.
try:
group = Group.objects.get(id=data["group_id"])
except Group.DoesNotExist:
return self.error("Group does not exists")
if not (group.created_by.id == user.id):
return self.error("Only group creator can change group admin's permission")
try:
member = GroupMember.objects.get(user=data["user_id"], group=data["group_id"])
except GroupMember.DoesNotExist:
return self.error("Group member does not exist")
member.is_group_admin = data["is_group_admin"] # False
member.save()
return self.success(GroupMemberSerializer(member).data)
@swagger_auto_schema(
manual_parameters=[
openapi.Parameter(
name="user_id",
in_=openapi.IN_QUERY,
description="Unique ID of a user. not member_join(intermediary model) id.",
type=openapi.TYPE_INTEGER,
required=False
),
openapi.Parameter(
name="group_id",
in_=openapi.IN_QUERY,
description="Unique ID of a group",
type=openapi.TYPE_INTEGER,
required=False
),
],
operation_description="Get group list",
responses={200: "Member successfully removed from this group."}
)
@check_group_admin()
def delete(self, request):
user_id = request.GET.get("user_id")
group_id = request.GET.get("group_id")
try:
member = GroupMember.objects.get(user=user_id, group=group_id)
except GroupMember.DoesNotExist:
return self.error("group member does not exist")
if member.is_group_admin:
return self.error("Cannot remove admin member.")
member.delete()
return self.success("Member successfully removed from this group.")
class GroupMemberJoinAPI(APIView):
@swagger_auto_schema(
request_body=CreateGroupMemberJoinSerializer,
operation_description="Post a group member join",
responses={200: GroupMemberJoinSerializer}
)
@validate_serializer(CreateGroupMemberJoinSerializer)
def post(self, request):
user = request.user
if not user.is_authenticated:
return self.error("Login First")
group_id = request.data["group_id"]
description = request.data["description"]
if Group.objects.filter(id=group_id, members=user).exists():
return self.error("You are already a member of this group.")
if GroupMemberJoin.objects.filter(group=group_id, created_by=user).exists():
return self.error("You have already submitted your member join to this group.")
group_member_join = GroupMemberJoin.objects.create(
group_id=group_id,
description=description,
created_by=user
)
return self.success(GroupMemberJoinSerializer(group_member_join).data)
@swagger_auto_schema(
manual_parameters=[
openapi.Parameter(
name="group_id", in_=openapi.IN_QUERY,
type=openapi.TYPE_INTEGER,
description="Unique id of group.",
required=True
),
openapi.Parameter(
name="member_join_id", in_=openapi.IN_QUERY,
type=openapi.TYPE_INTEGER,
description="Unique id of member_join",
required=True
),
openapi.Parameter(
name="accept", in_=openapi.IN_QUERY,
type=openapi.TYPE_BOOLEAN,
description="true if accept else reject the member_join",
required=True
),
],
operation_description="Resolve group member join. accept=True -> accept the member to join our group. accept=False or not given -> reject the member",
responses={200: GroupDetailSerializer}
)
@check_group_admin()
def delete(self, request):
group_id = request.GET.get("group_id")
member_join_id = request.GET.get("member_join_id")
accept = request.GET.get("accept")
try:
group_member_join = GroupMemberJoin.objects.get(id=member_join_id)
except GroupMemberJoin.DoesNotExist:
self.error("Group member join does not exist")
if not accept:
group_member_join.delete()
return self.success("Successfully rejected a group member join")
group_member_join_created_by = group_member_join.created_by
try:
group = Group.objects.get(id=group_id)
except Group.DoesNotExist:
self.error("Group does not exist")
if group.members.filter(id=group_member_join_created_by.id).exists():
self.error("This user is already a member. This member_join may be already resolved.")
group.members.add(group_member_join_created_by)
group_member_join.delete()
return self.success(GroupDetailSerializer(group).data)
| 39.292683 | 171 | 0.650321 | 1,035 | 9,666 | 5.900483 | 0.133333 | 0.040937 | 0.039299 | 0.01572 | 0.457672 | 0.382184 | 0.328639 | 0.282463 | 0.216473 | 0.192893 | 0 | 0.002106 | 0.263294 | 9,666 | 245 | 172 | 39.453061 | 0.855498 | 0.012622 | 0 | 0.427861 | 0 | 0.00995 | 0.151201 | 0.002517 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.039801 | 0 | 0.199005 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f28c9a7a5cb162604cdc24efd8446cbf75109a5 | 2,277 | py | Python | python/cp1_code/cp1_broadcast_client.py | ntpdrop/ieeesp2021 | 084ac380774351cb032e9c1f48c5c6f7b58372fa | [
"MIT"
] | null | null | null | python/cp1_code/cp1_broadcast_client.py | ntpdrop/ieeesp2021 | 084ac380774351cb032e9c1f48c5c6f7b58372fa | [
"MIT"
] | null | null | null | python/cp1_code/cp1_broadcast_client.py | ntpdrop/ieeesp2021 | 084ac380774351cb032e9c1f48c5c6f7b58372fa | [
"MIT"
] | null | null | null | import logging
from scapy.layers.inet import IP, UDP
from scapy.packet import Packet
from scapy.sendrecv import send
from cp1_client import CP1Client
from cp1_package import CP1Package
from cp1_session import CP1Session
from ntp_mode import NTPMode
from ntp_utils import init_ntp_client_pck
class CP1BroadcastClient(CP1Client):
"""
OBSOLETE
"""
def __init__(self, address: str, static_key: str, sniff_interface: str = 'lo',
log=logging.getLogger('CP1Client-Logger')):
super().__init__(address, static_key, sniff_interface, log)
def send_init_pck(self, ip_address, cp1_address):
"""
Sends an init-package to the desired ip-address and files in the desired cp1-address.
:param ip_address:
:param cp1_address:
:return:
"""
self.send_session = CP1Session()
ntp_pck = self.send_session.generate_init_pck(cp1_address)
ntp_pck.orig = None
ntp_pck.recv = None
ntp_pck.mode = 5
# ntp_pck.show()
pck_to_send = IP(dst=ip_address) / UDP() / ntp_pck
send(pck_to_send)
self.log.debug("Init package successfully send to " + str(ip_address))
return pck_to_send
def send_next_pck(self, ip_address, ntp_mode: NTPMode = NTPMode.CLIENT) -> Packet:
"""
Sends the next chunk of payload bits to the destination.
:param ip_address:
:param ntp_mode: the mode of the ntp package to send.
:return: the bits just send.
"""
next_bits_to_send = self.send_session.secret_to_send.next_bits(self.payload_size)
self.log.debug("Next payload bits to send: " + str(next_bits_to_send))
ntp_pck = CP1Package(ntp_pck=init_ntp_client_pck())
ntp_pck.add_payload(next_bits_to_send)
ntp_pck_ntp = ntp_pck.ntp()
ntp_pck_ntp.orig = None
ntp_pck_ntp.recv = None
ntp_pck_ntp.mode = 5
pck_to_send = IP(dst=ip_address) / UDP() / ntp_pck_ntp
send(pck_to_send)
self.log.debug("Payload package successfully send to " + str(ip_address))
if not self.send_session.secret_to_send.has_next_bits():
self.log.debug("Sending complete. Terminating sending session.")
return pck_to_send
| 33.985075 | 93 | 0.669302 | 323 | 2,277 | 4.421053 | 0.232198 | 0.063025 | 0.037815 | 0.029412 | 0.212185 | 0.212185 | 0.131653 | 0.044818 | 0.044818 | 0.044818 | 0 | 0.009889 | 0.245059 | 2,277 | 66 | 94 | 34.5 | 0.820826 | 0.139218 | 0 | 0.105263 | 0 | 0 | 0.087568 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.236842 | 0 | 0.394737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f2aa8135e4df5892926d93df4dea8a4e0467627 | 8,103 | py | Python | backend/routes.py | ThodoriKapouranis/ECE464-Databases-Final-Quizlet | 1231b5a4a683c4dffd354d044dff02100edb9b97 | [
"MIT"
] | null | null | null | backend/routes.py | ThodoriKapouranis/ECE464-Databases-Final-Quizlet | 1231b5a4a683c4dffd354d044dff02100edb9b97 | [
"MIT"
] | null | null | null | backend/routes.py | ThodoriKapouranis/ECE464-Databases-Final-Quizlet | 1231b5a4a683c4dffd354d044dff02100edb9b97 | [
"MIT"
] | null | null | null | import json
import secrets
import os
from pprint import pprint
from bson import json_util
from bson.objectid import ObjectId
from flask import request, send_from_directory
from bson.json_util import dumps, loads
from werkzeug.datastructures import FileStorage
from main import app
from mongodb import users, decks, auths, cards
# 200 code :: Good
# 400 code :: Bad Request (use as generic error code)
# 404 code :: Requested information DNE (good response, but db object DNE)
MEDIA_PATH = "media/"
@app.route("/")
def hello_world():
return "<p>Hello, World!</p>"
# Register page
@app.route("/register", methods=['POST'])
def registerUser():
data = request.json
res = users.createUser(data["email"], data["username"], data["password"])
return {"status": 200 } if res!=-1 else {"status": 400 }
# Login Page
@app.route("/login", methods=["POST"])
def login():
data = request.json
res = users.attemptLogin(data["email"], data["password"])
if (res != None):
return {"status":200, "username": res[0], "token": res[1] }
else:
return {"status":400 }
# Logout, deletes token from db
@app.route("/logout", methods=["POST"])
def logout():
data = request.json
res = auths.deleteToken( data["token"] )
return {"status":200} if res else {"status":400}
@app.route("/checkToken", methods=["POST"])
def checkToken():
    '''
    Called when the header is re-rendered and the token hasn't been
    checked for at least 2 minutes. Checks whether the token is still valid.
    '''
data = request.json
res = auths.checkUserExist( data["token"] )
if ( res != None):
return {"status":200}
else:
return {"status":400}
# Get users whose names match the given string
# e.g. /search/users/jsmith
@app.route("/search/users/<username>", methods=["GET"])
def searchUsers(username):
res = users.getUsersByName(username)
if (res != None):
res = json.loads( dumps(res) )
return {"status":200, "users": res}
else:
return {"status":400}
@app.route("/search/decks/", methods=["GET"])
def searchDecks():
    name = request.args.get('name', '')
    tags = request.args.get('tags', '').split(",")
    # default to '' so a missing query parameter doesn't raise on split()/int()
    rating = request.args.get('rating', '')
    if rating != "":
        rating = int(rating)
    pprint(name)
    pprint(tags)
    pprint(rating)
res = decks.searchDecks(name=name, tags=tags, rating=rating)
if ( res != None):
res = json.loads( dumps(res) )
return { "status":200, "decks": res }
else:
return { "status":400 }
@app.route("/search/email/", methods=["GET"])
def searchEmail():
email = request.args.get("email")
name = users.getUserByEmail(email)
return "<p> email </p>"
@app.route("/deck/create", methods=["POST"])
def createDeck():
data = request.json
tags = data["tags"].split(" ")
did = decks.createDeck(data["name"] , tags, data["token"], data["privacy"])
if (did != -1 and did != None):
return {"status":200, "did": str(did)}
else:
return {"status":400}
# https://stackoverflow.com/questions/16586180/typeerror-objectid-is-not-json-serializable
# ObjectIDs cannot be sent through JSON easily.
# The solution is to use bson.json_util.dumps to convert the document to a string,
# which breaks up the ObjectId. Then we rebuild it back to JSON using json.loads().
@app.route("/user/<username>/decks", methods=["GET"])
def requestUserDecks(username):
res = decks.getUsersDecks(username)
if (res != None):
dids_created = [json.loads(dumps(i)) for i in res[0]]
dids_favorited = [json.loads(dumps(i)) for i in res[1]]
return {"status" : 200,
"created_decks": dids_created,
"favorite_decks": dids_favorited
}
else:
return {"status":400}
# Single Deck view (Deck, comments, ratings)
# url: did | json: token
@app.route("/deck/<did>", methods=["POST"])
def getDeckInfo(did):
# Get the user's tokens to figure out their authorization level
# so that the proper things are returned to the frontend
# for better user-specific rendering.
data = request.json
utoken = data["token"]
uid = auths.getUid(utoken)
deck = decks.getDeck( ObjectId(did) )
if (deck != None and uid != None):
authLevel = decks.userAuthorizationLevel(ObjectId(did), uid)
deckJson = json.loads( dumps(deck) )
avgRating = decks.getRating( ObjectId(did) )
return {"status":200, "deck": deckJson, "authLevel":authLevel, "rating": avgRating}
else:
return {"status":400}
@app.route("/deck/<did>/comment", methods=["POST"])
def addComment(did):
data = request.json
comments = data["comment"]
token = request.headers["token"]
# def addComment ( did:str, utoken:str, content:str ):
res = decks.addComment(ObjectId(did), token, comments)
return {"status": res}
@app.route("/deck/<did>/favorite", methods=["POST"])
def addToFavorite(did):
data = request.json
token = data["token"]
res = users.toggleFavorite(ObjectId(did), token)
if (res != -1):
return {"status":200}
else:
return {"status":400}
# Add rating (visible on single deck page)
# url: did | json: rating, token
@app.route("/deck/<did>/rate", methods=["POST"])
def addDeckRating(did):
data = request.json
token = data["token"]
rating = data["rating"]
res = decks.addRating(ObjectId(did), token, rating)
return {"status": res}
# Promote or demote a member's authorization level for a deck
@app.route("/deck/<did>/authorize", methods=["POST"])
def authorizeUser(did):
data = request.json
token = data["token"]
user = data["username"]
level = data["level"]
res = decks.authorizeUser(ObjectId(did), token, user, level)
if (res != -1):
return {"status": 200}
else:
return {"status": 400}
# Field keys like "ftxt0" or "fimg34" encode the card side, field type,
# and numeric field id.
class Tag:
def __init__(self, tag):
self.side = tag[0]
self.type = tag[1:4]
self.id = tag[4:]
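# e.g. Tag("ftxt0")  -> side "f", type "txt", id "0"
#      Tag("bimg34") -> side "b", type "img", id "34"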
@app.route("/deck/<did>/add", methods=["POST"])
def addCard(did):
data = request.form
files = request.files
token = request.headers["token"]
front = {}
back = {}
pprint(token)
pprint(data)
    # These are all the text fields
    for key in data:
        tag = Tag(key)
        field = {tag.type: data[key]}  # e.g. {"txt": "<field text>"}
        if (tag.side == "f"):
            front[tag.id] = field  # e.g. {"0": {"txt": "<field text>"}}
        elif (tag.side == "b"):
            back[tag.id] = field
    # Uploaded media files are saved under a random hex token and referenced by it
    for key in files:
        tag = Tag(key)
        secret = secrets.token_hex()
        file = files[key]
        field = {tag.type: secret}
        file.save(MEDIA_PATH + secret)
        if (tag.side == "f"):
            front[tag.id] = field  # e.g. {"0": {"img": "<media token>"}}
        elif (tag.side == "b"):
            back[tag.id] = field
pprint(front)
pprint(back)
# Use ORM function to actually create this object
res = cards.createCard( ObjectId(did), token, front, back)
if res != -1:
return {"status": 200}
else:
return {'status': 400}
@app.route('/media/<path:path>')
def send_media(path):
return send_from_directory( MEDIA_PATH, path, as_attachment=True )
@app.route("/deck/<did>/study", methods=["GET"])
def getFullDeck(did):
token = request.headers["token"]
    # getDecksCards returns (user auth level, cursor of cards or -1);
    # unpack instead of mutating the result in place
    auth, deck_cards = cards.getDecksCards(ObjectId(did), token)
    if (deck_cards != -1):
        return {"status": 200, "auth": auth, "cards": json.loads( dumps(deck_cards) )}
    else:
        return {"status": 400}
@app.route("/deck/<did>", methods = ["DELETE"])
def deckDelete(did):
utoken = request.headers ["token"]
uid = auths.getUid(utoken)
res = decks.deleteDeck( ObjectId(did), uid )
    if (res == 0):
        return {"status": 200}
else:
return {"status": res}
@app.route("/deck/<did>/card/<cid>", methods=["DELETE"])
def cardDelete(did, cid):
utoken = request.headers["token"]
uid = auths.getUid(utoken)
res = cards.deleteCard(ObjectId(did), ObjectId(cid), uid)
if (res == 0):
return {"status": 200}
else:
return {"status": 400} | 27.75 | 97 | 0.642972 | 1,095 | 8,103 | 4.73516 | 0.23379 | 0.069431 | 0.043394 | 0.043973 | 0.261909 | 0.218322 | 0.191707 | 0.148505 | 0.104147 | 0.085632 | 0 | 0.020325 | 0.186351 | 8,103 | 292 | 98 | 27.75 | 0.766116 | 0.192398 | 0 | 0.364078 | 0 | 0 | 0.128585 | 0.013722 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101942 | false | 0.009709 | 0.053398 | 0.009709 | 0.320388 | 0.038835 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f2c193c99e6c3da6c18f1b4d2444bb931a8ef76 | 2,087 | py | Python | tests/test_guild.py | tylerbutler/battlenet | 5394d648c4562a711f21720499fb12ebfaf2de1d | [
"MIT"
] | null | null | null | tests/test_guild.py | tylerbutler/battlenet | 5394d648c4562a711f21720499fb12ebfaf2de1d | [
"MIT"
] | null | null | null | tests/test_guild.py | tylerbutler/battlenet | 5394d648c4562a711f21720499fb12ebfaf2de1d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import battlenet
import datetime
from battlenet import Guild
try:
import unittest2 as unittest
except ImportError:
import unittest as unittest
PUBLIC_KEY = os.environ.get('BNET_PUBLIC_KEY')
PRIVATE_KEY = os.environ.get('BNET_PRIVATE_KEY')
battlenet.Connection.setup(public_key=PUBLIC_KEY, private_key=PRIVATE_KEY)
class GuildTest(unittest.TestCase):
def test_general(self):
guild = Guild(battlenet.UNITED_STATES, 'Nazjatar', 'Excellence')
self.assertEqual(guild.name, 'Excellence')
self.assertEqual(str(guild), 'Excellence')
self.assertEqual(guild.get_realm_name(), 'Nazjatar')
self.assertEqual(guild.realm.name, 'Nazjatar')
self.assertEqual(str(guild.realm), 'Nazjatar')
def test_len(self):
guild = Guild(battlenet.UNITED_STATES, 'Nazjatar', 'Excellence', fields=[Guild.MEMBERS])
self.assertGreater(len(guild), 1)
def test_leader(self):
guild = Guild(battlenet.UNITED_STATES, 'Nazjatar', 'Excellence', fields=[Guild.MEMBERS])
character = guild.get_leader()
self.assertEqual(character.name, 'Clí')
def test_lazyload_member_character(self):
guild = Guild(battlenet.UNITED_STATES, 'Nazjatar', 'Excellence')
character = guild.get_leader()
self.assertRegexpMatches(character.get_full_class_name(), r'^(Holy|Protection|Retribution) Paladin$')
def test_achievements(self):
guild = Guild(battlenet.UNITED_STATES, 'Nazjatar', 'Excellence', fields=[Guild.ACHIEVEMENTS])
for id_, completed_ts in guild.achievements.items():
self.assertIsInstance(id_, int)
self.assertIsInstance(completed_ts, datetime.datetime)
def test_perks(self):
guild = Guild(battlenet.UNITED_STATES, 'Nazjatar', 'Excellence')
self.assertGreater(len(guild.perks), 1)
def test_rewards(self):
guild = Guild(battlenet.UNITED_STATES, 'Nazjatar', 'Excellence')
self.assertGreater(len(guild.rewards), 1)
if __name__ == '__main__':
unittest.main()
| 30.691176 | 109 | 0.695256 | 237 | 2,087 | 5.932489 | 0.28692 | 0.034851 | 0.069701 | 0.114509 | 0.446657 | 0.335704 | 0.335704 | 0.335704 | 0.298009 | 0.257468 | 0 | 0.002929 | 0.18208 | 2,087 | 67 | 110 | 31.149254 | 0.820738 | 0.010062 | 0 | 0.186047 | 0 | 0 | 0.121609 | 0.014535 | 0 | 0 | 0 | 0 | 0.27907 | 1 | 0.162791 | false | 0 | 0.162791 | 0 | 0.348837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f2c8fc217036dfe9c29102e972d0fbdddbef0ca | 7,954 | py | Python | src/RestrictedPython/Guards.py | rahulbahal7/restricted-python | c39cffe71dfc30630e946977735303d3a65b0383 | [
"ZPL-2.1"
] | null | null | null | src/RestrictedPython/Guards.py | rahulbahal7/restricted-python | c39cffe71dfc30630e946977735303d3a65b0383 | [
"ZPL-2.1"
] | null | null | null | src/RestrictedPython/Guards.py | rahulbahal7/restricted-python | c39cffe71dfc30630e946977735303d3a65b0383 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# This tiny set of safe builtins is extended by users of the module.
# AccessControl.ZopeGuards contains a large set of wrappers for builtins.
# DocumentTemplate.DT_UTil contains a few.
from RestrictedPython import _compat
if _compat.IS_PY2:
import __builtin__ as builtins
else:
# Do not attempt to use this package on Python2.7 as there
# might be backports for this package such as future.
import builtins
safe_builtins = {}
_safe_names = [
'None',
'False',
'True',
'abs',
'bool',
'callable',
'chr',
'complex',
'divmod',
'float',
'hash',
'hex',
'id',
'int',
'isinstance',
'issubclass',
'len',
'oct',
'ord',
'pow',
'range',
'repr',
'round',
'slice',
'str',
'tuple',
'zip'
]
_safe_exceptions = [
'ArithmeticError',
'AssertionError',
'AttributeError',
'BaseException',
'BufferError',
'BytesWarning',
'DeprecationWarning',
'EOFError',
'EnvironmentError',
'Exception',
'FloatingPointError',
'FutureWarning',
'GeneratorExit',
'IOError',
'ImportError',
'ImportWarning',
'IndentationError',
'IndexError',
'KeyError',
'KeyboardInterrupt',
'LookupError',
'MemoryError',
'NameError',
'NotImplementedError',
'OSError',
'OverflowError',
'PendingDeprecationWarning',
'ReferenceError',
'RuntimeError',
'RuntimeWarning',
'StopIteration',
'SyntaxError',
'SyntaxWarning',
'SystemError',
'SystemExit',
'TabError',
'TypeError',
'UnboundLocalError',
'UnicodeDecodeError',
'UnicodeEncodeError',
'UnicodeError',
'UnicodeTranslateError',
'UnicodeWarning',
'UserWarning',
'ValueError',
'Warning',
'ZeroDivisionError',
]
if _compat.IS_PY2:
_safe_names.extend([
'basestring',
'cmp',
'long',
'unichr',
'unicode',
'xrange',
])
_safe_exceptions.extend([
'StandardError',
])
else:
_safe_names.extend([
'__build_class__', # needed to define new classes
])
for name in _safe_names:
safe_builtins[name] = getattr(builtins, name)
for name in _safe_exceptions:
safe_builtins[name] = getattr(builtins, name)
# Wrappers provided by this module:
# delattr
# setattr
# Wrappers provided by ZopeGuards:
# __import__
# apply
# dict
# enumerate
# filter
# getattr
# hasattr
# iter
# list
# map
# max
# min
# sum
# all
# any
# Builtins that are intentionally disabled
# compile - don't let them produce new code
# dir - a general purpose introspector, probably hard to wrap
# execfile - no direct I/O
# file - no direct I/O
# globals - uncontrolled namespace access
# input - no direct I/O
# locals - uncontrolled namespace access
# open - no direct I/O
# raw_input - no direct I/O
# vars - uncontrolled namespace access
# There are several strings that describe Python. I think there's no
# point to including these, although they are obviously safe:
# copyright, credits, exit, help, license, quit
# Not provided anywhere. Do something about these? Several are
# related to new-style classes, which we are too scared of to support
# <0.3 wink>. coerce, buffer, and reload are esoteric enough that no
# one should care.
# buffer
# bytes
# bytearray
# classmethod
# coerce
# eval
# intern
# memoryview
# object
# property
# reload
# staticmethod
# super
# type
def _write_wrapper():
# Construct the write wrapper class
def _handler(secattr, error_msg):
# Make a class method.
def handler(self, *args):
try:
f = getattr(self.ob, secattr)
except AttributeError:
raise TypeError(error_msg)
f(*args)
return handler
class Wrapper(object):
def __init__(self, ob):
self.__dict__['ob'] = ob
__setitem__ = _handler(
'__guarded_setitem__',
'object does not support item or slice assignment')
__delitem__ = _handler(
'__guarded_delitem__',
'object does not support item or slice assignment')
__setattr__ = _handler(
'__guarded_setattr__',
'attribute-less object (assign or del)')
__delattr__ = _handler(
'__guarded_delattr__',
'attribute-less object (assign or del)')
return Wrapper
def _full_write_guard():
# Nested scope abuse!
# safetypes and Wrapper variables are used by guard()
safetypes = {dict, list}
Wrapper = _write_wrapper()
def guard(ob):
# Don't bother wrapping simple types, or objects that claim to
# handle their own write security.
if type(ob) in safetypes or hasattr(ob, '_guarded_writes'):
return ob
# Hand the object to the Wrapper instance, then return the instance.
return Wrapper(ob)
return guard
full_write_guard = _full_write_guard()
def guarded_setattr(object, name, value):
setattr(full_write_guard(object), name, value)
safe_builtins['setattr'] = guarded_setattr
def guarded_delattr(object, name):
delattr(full_write_guard(object), name)
safe_builtins['delattr'] = guarded_delattr
def safer_getattr(object, name, default=None, getattr=getattr):
"""Getattr implementation which prevents using format on string objects.
format() is considered harmful:
http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/
"""
if isinstance(object, _compat.basestring) and name == 'format':
raise NotImplementedError(
'Using format() on a %s is not safe.' % object.__class__.__name__)
if name.startswith('_'):
raise AttributeError(
'"{name}" is an invalid attribute name because it '
'starts with "_"'.format(name=name)
)
return getattr(object, name, default)
safe_builtins['_getattr_'] = safer_getattr
def guarded_iter_unpack_sequence(it, spec, _getiter_):
"""Protect sequence unpacking of targets in a 'for loop'.
The target of a for loop could be a sequence.
For example "for a, b in it"
=> Each object from the iterator needs guarded sequence unpacking.
"""
# The iteration itself needs to be protected as well.
for ob in _getiter_(it):
yield guarded_unpack_sequence(ob, spec, _getiter_)
def guarded_unpack_sequence(it, spec, _getiter_):
"""Protect nested sequence unpacking.
Protect the unpacking of 'it' by wrapping it with '_getiter_'.
Furthermore for each child element, defined by spec,
guarded_unpack_sequence is called again.
Have a look at transformer.py 'gen_unpack_spec' for a more detailed
explanation.
"""
# Do the guarded unpacking of the sequence.
ret = list(_getiter_(it))
# If the sequence is shorter then expected the interpreter will raise
# 'ValueError: need more than X value to unpack' anyway
# => No childs are unpacked => nothing to protect.
if len(ret) < spec['min_len']:
return ret
# For all child elements do the guarded unpacking again.
for (idx, child_spec) in spec['childs']:
ret[idx] = guarded_unpack_sequence(ret[idx], child_spec, _getiter_)
return ret
safe_globals = {'__builtins__': safe_builtins}
| 24.934169 | 78 | 0.642318 | 902 | 7,954 | 5.482262 | 0.436807 | 0.019414 | 0.0091 | 0.010111 | 0.072396 | 0.056623 | 0.016582 | 0.016582 | 0 | 0 | 0 | 0.003323 | 0.2434 | 7,954 | 318 | 79 | 25.012579 | 0.818378 | 0.386849 | 0 | 0.10241 | 0 | 0 | 0.265864 | 0.010066 | 0 | 0 | 0 | 0 | 0.006024 | 1 | 0.066265 | false | 0 | 0.03012 | 0 | 0.174699 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f2d2dbffe32c783e2898f2f8436bb00cc24fbbd | 1,896 | py | Python | tests/integration/generic/config.py | evsmithx/cosmpy | 7dfc81528b287f90190d6d4387942340f8ab88cf | [
"Apache-2.0"
] | 15 | 2021-09-08T05:27:14.000Z | 2022-03-29T06:48:08.000Z | tests/integration/generic/config.py | evsmithx/cosmpy | 7dfc81528b287f90190d6d4387942340f8ab88cf | [
"Apache-2.0"
] | 36 | 2021-09-01T08:58:33.000Z | 2022-03-30T11:40:56.000Z | tests/integration/generic/config.py | evsmithx/cosmpy | 7dfc81528b287f90190d6d4387942340f8ab88cf | [
"Apache-2.0"
] | 4 | 2021-10-04T09:29:56.000Z | 2022-03-18T15:43:06.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module with config used in Fetchd integration tests."""
import inspect
import os
from pathlib import Path
from cosmpy.crypto.address import Address
from cosmpy.crypto.keypairs import PrivateKey
from cosmpy.protos.cosmos.base.v1beta1.coin_pb2 import Coin
# Denomination and amount of transferred tokens
DENOM = "stake"
AMOUNT = 1
COINS = [Coin(amount=str(AMOUNT), denom=DENOM)]
# Node config
GRPC_ENDPOINT_ADDRESS = "localhost:9090"
REST_ENDPOINT_ADDRESS = "http://localhost:1317"
CHAIN_ID = "testing"
# Private key of sender account
VALIDATOR_PK = PrivateKey(
bytes.fromhex("0ba1db680226f19d4a2ea64a1c0ea40d1ffa3cb98532a9fa366994bb689a34ae")
)
VALIDATOR_ADDRESS = Address(VALIDATOR_PK)
# Private key of recipient account
BOB_PK = PrivateKey(
bytes.fromhex("439861b21d146e83fe99496f4998a305c83cfbc24717c77e32b06d224bf1e636")
)
BOB_ADDRESS = Address(BOB_PK)
# Cosmwasm
CUR_PATH = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore
CONTRACT_FILENAME = Path(
    os.path.join(CUR_PATH, "..", "..", "..", "contracts", "cw_erc1155.wasm")
)
TOKEN_ID = "444" # nosec
| 32.689655 | 85 | 0.691456 | 226 | 1,896 | 5.725664 | 0.615044 | 0.046368 | 0.020093 | 0.02473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069682 | 0.137131 | 1,896 | 57 | 86 | 33.263158 | 0.721271 | 0.506857 | 0 | 0 | 0 | 0 | 0.229834 | 0.141436 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.24 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f2dd08990b19e27a9643081b1bf9f583649ad3e | 505 | py | Python | __scraping__/thaitrade.com/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | __scraping__/thaitrade.com/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | __scraping__/thaitrade.com/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z |
# date: 2020.12.30
# https://stackoverflow.com/questions/65503481/scraping-hidden-link-using-selenium-or-requests#65503481
from selenium import webdriver
import time
link = 'https://www.thaitrade.com/store/9chemical'
driver = webdriver.Chrome()  # executable_path=r"C:/chromedriver.exe", options=chrome_options
driver.maximize_window()
driver.get(link)
#soup = BeautifulSoup(driver.page_source, 'html.parser')
time.sleep(5)
website = driver.find_element_by_id('seller_website').text
print(website)
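
# --- Illustrative alternative (added; not part of the original answer): an
# explicit wait is usually more robust than the fixed time.sleep(5) above,
# because it returns as soon as the element appears (or fails after a timeout).
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

element = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.ID, 'seller_website'))
)
print(element.text)
driver.quit()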
| 31.5625 | 103 | 0.786139 | 69 | 505 | 5.637681 | 0.753623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055437 | 0.071287 | 505 | 15 | 104 | 33.666667 | 0.773987 | 0.475248 | 0 | 0 | 0 | 0 | 0.214008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f303dbb7b5651e32ad52ebcac0ace31970f110f | 3,128 | py | Python | S4/S4 Library/simulation/interactions/jig_part_constraint_interaction.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/interactions/jig_part_constraint_interaction.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/interactions/jig_part_constraint_interaction.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from interactions.base.interaction import Interaction
from interactions.base.mixer_interaction import MixerInteraction
from interactions.base.super_interaction import SuperInteraction
from interactions.constraints import Transform, Nowhere
from sims4.tuning.tunable import Tunable, TunableSet, TunableEnumWithFilter
from sims4.utils import flexmethod
from tag import Tag
import services
import sims4
logger = sims4.log.Logger('JigPartConstraintInteraction', default_owner='cjiang')
class JigPartConstraintInteraction(SuperInteraction):

    def __init__(self, *args, jig_object=None, jig_part_index=0, **kwargs):
        super().__init__(*args, **kwargs)
        self._jig_object = jig_object
        self._jig_part_index = jig_part_index

    @flexmethod
    def _constraint_gen(cls, inst, sim, *args, **kwargs):
        yield from super()._constraint_gen(sim, *args, **kwargs)
        if inst is not None and inst._jig_object is not None:
            jig = inst._jig_object
            parts = jig.parts
            part_index = inst._jig_part_index
            if parts is None:
                logger.error("{} doesn't have parts tuned", jig)
                yield Nowhere('Exception while trying to get routing slot on the jig part.')
                return
            if part_index >= len(parts):
                logger.error('{} only has {} parts; index {} is out of range', jig, len(parts), part_index)
                yield Nowhere('Exception while trying to get routing slot on the jig part.')
                return
            part = parts[part_index]
            yield Transform(part.transform, routing_surface=jig.routing_surface)


class SynchMixerInteraction(MixerInteraction):
    INSTANCE_TUNABLES = {'virtual_actor_name': Tunable(description='\n The name of the virtual actor sims will be put in.\n ', tunable_type=str, default='x')}

    def get_asm(self, *args, **kwargs):
        asm = super().get_asm(*args, **kwargs)
        asm.remove_virtual_actors_by_name(self.virtual_actor_name)
        for sim in self.get_sims():
            if self.sim is not sim:
                asm.add_virtual_actor(self.virtual_actor_name, sim)
        return asm

    def _get_required_sims(self, *args, **kwargs):
        sims = super()._get_required_sims(*args, **kwargs)
        sims.update(self.get_sims())
        return sims

    def get_sims(self):
        raise NotImplementedError


class SynchInSituationMixerInteraction(SynchMixerInteraction):
    INSTANCE_TUNABLES = {'situation_tags': TunableSet(description='\n Tags for arbitrary groupings of situation types.\n ', tunable=TunableEnumWithFilter(tunable_type=Tag, filter_prefixes=['situation'], default=Tag.INVALID, pack_safe=True))}

    def get_sims(self):
        sim_list = []
        situation_manager = services.get_zone_situation_manager()
        situation_list = situation_manager.get_situations_by_tags(self.situation_tags)
        for situation in situation_list:
            if situation.is_sim_in_situation(self.sim):
                sim_list.extend(situation.all_sims_in_situation_gen())
        return sim_list
| 46.686567 | 263 | 0.685742 | 380 | 3,128 | 5.405263 | 0.3 | 0.035054 | 0.023369 | 0.025316 | 0.064265 | 0.064265 | 0.064265 | 0.064265 | 0.064265 | 0.064265 | 0 | 0.002066 | 0.226343 | 3,128 | 66 | 264 | 47.393939 | 0.846694 | 0 | 0 | 0.105263 | 0 | 0 | 0.131714 | 0.008951 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.438596 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f30b70b3f021de85c5431ed22d59f28be1101bb | 10,633 | py | Python | paxes_cinder/k2aclient/exceptions.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | paxes_cinder/k2aclient/exceptions.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | paxes_cinder/k2aclient/exceptions.py | windskyer/k_cinder | 000ee539ee4842a158071d26ee99d12c7c0a87da | [
"Apache-2.0"
] | null | null | null | #
#
# All Rights Reserved.
# Copyright 2010 Jacob Kaplan-Moss
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
"""
from paxes_cinder.k2aclient import _
from paxes_k2.k2operator import K2Error
from paxes_k2.k2operator import K2ConnectionError
from paxes_k2.k2operator import K2SSLError
from paxes_k2.k2operator import K2TimeoutError
class K2aException(Exception):
    '''Base class for all k2aclient exceptions'''

    def __init__(self, msg):
        self.msg = msg
        super(K2aException, self).__init__(msg)

    def __unicode__(self):
        return unicode(self.msg)


class K2aCrudException(K2aException):
    '''Exception due to issue with K2 response'''

    def __init__(self, msg, k2resp, exclogger=None):
        diagfspec = None
        if exclogger is not None:
            diagfspec = exclogger.emit("CRUD", msg, k2resp)
            msg = (_("%(msg)s, exception diagnostics have"
                     " been written to: >%(diagfspec)s<") %
                   {"msg": msg,
                    "diagfspec": diagfspec, })
        super(K2aCrudException, self).__init__(msg)
        self.k2resp = k2resp
        self.diagfspec = diagfspec


class K2aK2Other(K2aException):

    def __init__(self, e, addmsg=None):
        msg = _("Other Exception off of K2: >%s<") % e
        if addmsg is not None:
            msg = (_("%(msg)s, during: >%(addmsg)s<") %
                   {"msg": msg,
                    "addmsg": addmsg, })
        super(K2aK2Other, self).__init__(msg)
        self.e = e


class K2aK2SslError(K2aException):
    '''Exceptions due to k2 SSL processing'''

    def __init__(self, k2error, addmsg=None):
        msg = _("SSL exception off of K2: >%s<") % k2error
        if addmsg is not None:
            msg = (_("%(msg)s, during: >%(addmsg)s<") %
                   {"msg": msg,
                    "addmsg": addmsg, })
        super(K2aK2SslError, self).__init__(msg)
        self.k2error = k2error


class K2aK2ConnectionError(K2aException):
    '''Exceptions due to k2 connection processing'''

    def __init__(self, k2error, addmsg=None):
        msg = _("Connection exception off of K2: >%s<") % k2error
        if addmsg is not None:
            msg = (_("%(msg)s, during: >%(addmsg)s<") %
                   {"msg": msg,
                    "addmsg": addmsg, })
        super(K2aK2ConnectionError, self).__init__(msg)
        self.k2error = k2error


class K2aK2TimeoutError(K2aException):
    '''Exceptions due to k2 timeout'''

    def __init__(self, k2error, addmsg=None):
        msg = _("Timeout exception off of K2: >%s<") % k2error
        if addmsg is not None:
            msg = (_("%(msg)s, during: >%(addmsg)s<") %
                   {"msg": msg,
                    "addmsg": addmsg, })
        super(K2aK2TimeoutError, self).__init__(msg)
        self.k2error = k2error


# TODO: get list of status codes that can come off of K2
class K2aK2Error(K2aException):
    """
    Base class for K2Error exceptions with HTTP response codes coming off of K2
    """

    def __init__(self, status, k2error, addmsg=None, k2msg=None,
                 diagfspec=None):
        msg = (_("K2Error off of K2: >%(msg)s<,"
                 " Status: >%(status)d<, k2Error: >%(k2error)s<") %
               {"msg": self.__class__.msg,
                "status": status, "k2error": k2error, })
        if addmsg is not None:
            msg = (_("%(msg)s, during: >%(addmsg)s<") %
                   {"msg": msg,
                    "addmsg": addmsg, })
        super(K2aK2Error, self).__init__(msg)
        self.status = status
        self.k2error = k2error
        self.k2msg = k2msg
        self.diagfspec = diagfspec


class K2aK2ErrorBadRequest(K2aK2Error):
    """
    K2aK2Error HTTP 400 - The request was missing
    required input, had errors in the provided input,
    or included extraneous input. Additional information
    regarding the error is provided in an error response
    body that includes a reason code with additional information.
    """
    http_status = 400
    msg = _("Bad Request")


class K2aK2ErrorUnauthorized(K2aK2Error):
    """
    K2aK2Error HTTP 401 - Unauthorized: bad credentials.
    """
    http_status = 401
    msg = _("Unauthorized")


class K2aK2ErrorForbidden(K2aK2Error):
    """
    K2aK2Error HTTP 403 - Multiple error conditions
    result in this status code:
    - The request requires authentication but no X-API-Session
      header was provided, or one was provided but the session
      ID was invalid.
    - The user under which the API request was authenticated
      is not authorized to perform the requested operation.
    """
    http_status = 403
    msg = _("Forbidden")


class K2aK2ErrorNotFound(K2aK2Error):
    """
    K2aK2Error HTTP 404 - Multiple error conditions result
    in this status code:
    - The URI does not designate an extant resource, or
      designates a resource for which the API user does not
      have object-access permission.
    - The URI designates a resource or operation that is not
      supported by the MC because it is currently the alternate MC.
    """
    http_status = 404
    msg = _("Not Found")


class K2aK2ErrorMethodNotAllowed(K2aK2Error):
    """
    K2aK2Error HTTP 405 - The request specifies an HTTP
    method that is not valid for the designated URI.
    """
    http_status = 405
    msg = _("Method Not Allowed")


class K2aK2ErrorNotAcceptable(K2aK2Error):
    """
    K2aK2Error HTTP 406 - The Accept header for the request
    does not include at least one content representation
    supported by the Web Services API.
    """
    http_status = 406
    msg = _("Not Acceptable")


class K2aK2ErrorConflict(K2aK2Error):
    """
    K2aK2Error HTTP 409 - The managed resource is in an incorrect
    state (status) for performing the requested operation.
    Additional information regarding the error is provided in an error
    response body that includes a reason code with additional
    information.
    """
    http_status = 409
    msg = _("Conflict")


class K2aK2ErrorPreConditionFailed(K2aK2Error):
    """
    K2aK2Error HTTP 412 - PreCondition failed
    """
    http_status = 412
    msg = _("PreCondition failed")


class K2aK2ErrorRequestBodyTooLarge(K2aK2Error):
    """
    K2aK2Error HTTP 413 - The request includes a
    request body that is too large.
    """
    http_status = 413
    msg = _("Request Body Too Large")


class K2aK2ErrorUnsupportedMediaType(K2aK2Error):
    """
    K2aK2Error HTTP 415 - The Content-Type header for
    the request specifies a representation that is
    not supported by the Web Services API.
    """
    http_status = 415
    msg = _("Unsupported Media Type")


class K2aK2ErrorInternaServerError(K2aK2Error):
    """
    K2aK2Error HTTP 500 - A server error occurred
    during processing of the request.
    """
    http_status = 500
    msg = _("Internal Server Error")


class K2aK2ErrorNotImplemented(K2aK2Error):
    """
    K2aK2Error HTTP 501 - The request specifies
    an HTTP method that is not recognized by the
    server (for any resource).
    """
    http_status = 501
    msg = _("Not Implemented")


class K2aK2ServiceUnavailable(K2aK2Error):
    """
    K2aK2Error HTTP 503 - The request could not
    be carried out by the MC due to some
    temporary condition.
    """
    http_status = 503
    msg = _("Service Unavailable")


class K2aK2HttpVersionNotSupported(K2aK2Error):
    """
    K2aK2Error HTTP 505 - The request specifies an HTTP
    protocol version that is not supported by
    the Web Services API.
    """
    http_status = 505
    msg = _("HTTP Version Not Supported")


class K2aK2ErrorUnclassified(K2aK2Error):
    """
    HTTP ??? - Unclassified K2Error
    """
    http_status = -1
    msg = _("Not Classified")


_code_to_exception_map = dict((c.http_status, c)
                              for c in K2aK2Error.__subclasses__())

_EXCLUDED_EXCEPTIONS = [412]


def create_k2a_exception_from_k2o_exception(e, addmsg=None, exclogger=None):
    """
    Return an instance of a K2aK2Error or subclass
    based on a requests response. Optionally add an additional message.
    Optionally log details of the exception.
    """
    if not isinstance(e, K2Error):
        return K2aK2Other(e, addmsg)
    if isinstance(e, K2SSLError):
        return K2aK2SslError(e, addmsg)
    if isinstance(e, K2ConnectionError):
        return K2aK2ConnectionError(e, addmsg)
    if isinstance(e, K2TimeoutError):
        return K2aK2TimeoutError(e, addmsg)

    k2response = e.k2response
    # If there is no status code then use -1
    status = -1
    if k2response is not None and k2response.status is not None:
        status = k2response.status
    # if activated, place k2response in the exception log
    diagfspec = None
    if (k2response is not None
            and exclogger is not None
            and status not in _EXCLUDED_EXCEPTIONS):
        category = "UNC"
        if status > -1:
            category = str(status)
        diagfspec = exclogger.emit(category, addmsg, k2response, exc=e)
        addmsg += (_(", exception diagnostics have been written to: >%s<") %
                   diagfspec)
    cls = _code_to_exception_map.get(status, K2aK2ErrorUnclassified)
    k2msg = None
    if hasattr(k2response, 'k2err'):
        m = k2response.k2err.find('./Message')
        if m is not None:
            k2msg = m.text
    return cls(status, e, addmsg, k2msg=k2msg,
               diagfspec=diagfspec)


#################
class K2JobFailure(K2aException):
    """Raised when a K2 Job fails"""

    def __init__(self, msg, k2resp,
                 diagfspeci=None,
                 diagfspec=None):
        super(K2JobFailure, self).__init__(msg)
        self.k2resp = k2resp
        self.diagfspeci = diagfspeci
        self.diagfspec = diagfspec


#################
class UnsupportedVersion(K2aException):
    """Raised when an unsupported version of the k2aclient
    is requested."""
    pass


class CommandError(K2aException):
    pass


class NotFound(K2aException):
    pass


class NoUniqueMatch(K2aException):
    pass
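
# --- Illustrative usage (added; not part of the original module): how the
# status-code -> exception-class mapping above resolves, including the
# fallback to K2aK2ErrorUnclassified for unknown codes.
if __name__ == '__main__':
    for _status in (400, 404, 409, 999):
        _cls = _code_to_exception_map.get(_status, K2aK2ErrorUnclassified)
        print(_status, '->', _cls.__name__, '-', _cls.msg)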
| 29.372928 | 77 | 0.641117 | 1,214 | 10,633 | 5.497529 | 0.252883 | 0.012736 | 0.050345 | 0.015733 | 0.253971 | 0.203476 | 0.177405 | 0.153281 | 0.135301 | 0.10893 | 0 | 0.040808 | 0.264836 | 10,633 | 361 | 78 | 29.454294 | 0.812972 | 0.32512 | 0 | 0.241176 | 0 | 0 | 0.119593 | 0 | 0 | 0 | 0 | 0.00277 | 0 | 1 | 0.058824 | false | 0.023529 | 0.029412 | 0.005882 | 0.458824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f32f2246845e937b93ff16ba2dea08cdfb5f192 | 16,378 | py | Python | misc/util.py | NirDiamant/pytorch-glow | 2ab11f3a8486b86a279fe4fa64f25aa91226ee8a | [
"MIT"
] | null | null | null | misc/util.py | NirDiamant/pytorch-glow | 2ab11f3a8486b86a279fe4fa64f25aa91226ee8a | [
"MIT"
] | null | null | null | misc/util.py | NirDiamant/pytorch-glow | 2ab11f3a8486b86a279fe4fa64f25aa91226ee8a | [
"MIT"
] | 1 | 2020-04-29T15:27:39.000Z | 2020-04-29T15:27:39.000Z | import os
import re
import cv2
import sys
import glob
import json
import shutil
import numpy as np
import torch
from PIL import Image
from easydict import EasyDict
from torchvision.transforms import transforms
# Profile
def load_profile(filepath):
    """
    Load experiment profile as EasyDict
    :param filepath: path to profile
    :type filepath: str
    :return: hyper-parameters
    :rtype: EasyDict
    """
    if os.path.exists(filepath):
        with open(filepath) as f:
            return EasyDict(json.load(f))


# Device
def get_devices(devices, verbose=True):
    """
    Get devices for running model
    :param devices: list of devices from profile
    :type devices: list
    :param verbose: print log
    :type verbose: bool
    :return: list of usable devices according to desired and available hardware
    :rtype: list[str]
    """
    def parse_cuda_device(device):
        """
        Parse device into device id
        :param device: given device
        :type device: str or int
        :return: device id
        :rtype: int
        """
        origin = str(device)
        if isinstance(device, str) and re.search(r'cuda:([\d]+)', device):
            device = int(re.findall(r'cuda:([\d]+)', device)[0])
        if isinstance(device, int):
            if 0 <= device <= torch.cuda.device_count() - 1:
                return device
        _print('[Builder] Incorrect device "{}"'.format(origin), verbose=verbose)
        return

    # check isinstance() first so integer device ids never hit str.find()
    use_cpu = any([isinstance(d, str) and d.find('cpu') >= 0 for d in devices])
    use_cuda = any([(isinstance(d, int) or d.find('cuda') >= 0) for d in devices])
    assert not (use_cpu and use_cuda), 'CPU and GPU cannot be mixed.'

    if use_cuda:
        devices = [parse_cuda_device(d) for d in devices]
        devices = [d for d in devices if d is not None]
        if len(devices) == 0:
            _print('[Builder] No available GPU found, use CPU only', verbose=verbose)
            devices = ['cpu']

    return devices


# Logger
class OutputLogger(object):
    """Output logger"""

    def __init__(self):
        self.file = None
        self.buffer = ''

    def set_log_file(self, filename, mode='wt'):
        assert self.file is None
        self.file = open(filename, mode)
        if self.buffer is not None:
            self.file.write(self.buffer)
            self.buffer = None

    def write(self, data):
        if self.file is not None:
            self.file.write(data)
        if self.buffer is not None:
            self.buffer += data

    def flush(self):
        if self.file is not None:
            self.file.flush()


class TeeOutputStream(object):
    """Redirect output stream"""

    def __init__(self, child_streams, autoflush=False):
        self.child_streams = child_streams
        self.autoflush = autoflush

    def write(self, data):
        if isinstance(data, bytes):
            data = data.decode('utf-8')
        for stream in self.child_streams:
            stream.write(data)
        if self.autoflush:
            self.flush()

    def flush(self):
        for stream in self.child_streams:
            stream.flush()


output_logger = None


def init_output_logging():
    """
    Initialize output logger
    """
    global output_logger
    if output_logger is None:
        output_logger = OutputLogger()
        sys.stdout = TeeOutputStream([sys.stdout, output_logger], autoflush=True)
        sys.stderr = TeeOutputStream([sys.stderr, output_logger], autoflush=True)


def set_output_log_file(filename, mode='wt'):
    """
    Set file name of output log
    :param filename: file name of log
    :type filename: str
    :param mode: the mode in which the file is opened
    :type mode: str
    """
    if output_logger is not None:
        output_logger.set_log_file(filename, mode)


# Result directory
def create_result_subdir(result_dir, desc, profile):
    """
    Create and initialize result sub-directory
    :param result_dir: path to root of result directory
    :type result_dir: str
    :param desc: description of current experiment
    :type desc: str
    :param profile: profile
    :type profile: dict
    :return: path to result sub-directory
    :rtype: str
    """
    # NOTE: the result_dir argument is overridden with a hard-coded value here
    result_dir = 'results'
    # determine run id
    run_id = 0
    for fname in glob.glob(os.path.join(result_dir, '*')):
        fbase = os.path.basename(fname)
        finds = re.findall(r'^([\d]+)-', fbase)
        if len(finds) != 0:
            ford = int(finds[0])
            run_id = max(run_id, ford + 1)

    # create result sub-directory
    result_subdir = os.path.join(result_dir, '{:03d}-{:s}'.format(run_id, desc))
    if not os.path.exists(result_subdir):
        os.makedirs(result_subdir)
    set_output_log_file(os.path.join(result_subdir, 'log.txt'))
    print("[Builder] Saving results to {}".format(result_subdir))

    # export profile
    with open(os.path.join(result_subdir, 'config.json'), 'w') as f:
        json.dump(profile, f)

    return result_subdir


def locate_result_subdir(result_dir, run_id_or_result_subdir):
    """
    Locate result subdir by given run id or path
    :param result_dir: path to root of result directory
    :type result_dir: str
    :param run_id_or_result_subdir: run id or subdir path
    :type run_id_or_result_subdir: int or str
    :return: located result subdir
    :rtype: str
    """
    # if isinstance(run_id_or_result_subdir, str) and os.path.isdir(run_id_or_result_subdir):
    #     return run_id_or_result_subdir
    #
    # searchdirs = ['', 'results', 'networks']
    #
    # for searchdir in searchdirs:
    #     d = result_dir if searchdir == '' else os.path.join(result_dir, searchdir)
    #     # search directly by name
    #     d = os.path.join(d, str(run_id_or_result_subdir))
    #     if os.path.isdir(d):
    #         return d
    #     # search by prefix
    #     if isinstance(run_id_or_result_subdir, int):
    #         prefix = '{:03d}'.format(run_id_or_result_subdir)
    #     else:
    #         prefix = str(run_id_or_result_subdir)
    #     dirs = sorted(glob.glob(os.path.join(result_dir, searchdir, prefix + '-*')))
    #     dirs = [d for d in dirs if os.path.isdir(d)]
    #     if len(dirs) == 1:
    #         return dirs[0]
    # print('[Builder] Cannot locate result subdir for run: {}'.format(run_id_or_result_subdir))
    # return None
    return result_dir


def format_time(seconds):
    """
    Format seconds into desired format
    :param seconds: number of seconds
    :type seconds: float
    :return: formatted time
    :rtype: str
    """
    s = int(np.rint(seconds))
    if s < 60:
        return '{:d}s'.format(s)
    elif s < 60 * 60:
        return '{:d}m {:02d}s'.format(s // 60, s % 60)
    elif s < 24 * 60 * 60:
        return '{:d}h {:02d}m {:02d}s'.format(s // (60 * 60), (s // 60) % 60, s % 60)
    else:
        return '{:d}d {:02d}h {:02d}m'.format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)


# Model
def get_model_name(step):
    """
    Return filename of model snapshot by step
    :param step: global step of model
    :type step: int
    :return: model snapshot file name
    :rtype: str
    """
    return 'network-snapshot-{:06d}.pth'.format(step)


def get_best_model_name():
    """
    Return filename of best model snapshot
    :return: filename of best model snapshot
    :rtype: str
    """
    return 'network-snapshot-best.pth'


def get_last_model_name(result_subdir):
    """
    Return filename of the latest model snapshot
    :param result_subdir: path to result sub-directory
    :type result_subdir: str
    :return: filename of last model snapshot
    :rtype: str
    """
    latest = -1
    for f in os.listdir(result_subdir):
        if os.path.isfile(os.path.join(result_subdir, f)) and \
                re.search(r'network-snapshot-([\d]+).pth', f):
            f_step = int(re.findall(r'network-snapshot-([\d]+).pth', f)[0])
            if latest < f_step:
                latest = f_step
    return get_model_name(latest)


def save_model(result_subdir, step, graph, optimizer, seconds, is_best, criterion_dict=None):
    """
    Save model snapshot to result subdir
    :param result_subdir: path to result sub-directory
    :type result_subdir: str
    :param step: global step of model
    :type step: int
    :param graph: model graph
    :type graph: torch.nn.Module
    :param optimizer: optimizer
    :type optimizer: torch.optim.Optimizer
    :param seconds: seconds of running time
    :type seconds: float
    :param is_best: whether this model is best
    :type is_best: bool
    :param criterion_dict: dict of criterion
    :type criterion_dict: dict
    """
    # construct state
    state = {
        'step': step,
        # DataParallel wraps model in `module` attribute.
        'graph': graph.module.state_dict() if hasattr(graph, "module") else graph.state_dict(),
        'optimizer': optimizer.state_dict(),
        'criterion': {},
        'seconds': seconds
    }
    if criterion_dict is not None:
        state['criterion'] = {k: v.state_dict() for k, v in criterion_dict.items()}

    # save current state
    save_path = os.path.join(result_subdir, get_model_name(step))
    torch.save(state, save_path)

    # save best state
    if is_best:
        best_path = os.path.join(result_subdir, get_best_model_name())
        shutil.copy(save_path, best_path)


def load_model(result_subdir, step_or_model_path, graph, optimizer=None, criterion_dict=None, device=None):
    """
    Load model snapshot from result subdir
    :param result_subdir: path to result sub-directory
    :type result_subdir: str
    :param step_or_model_path: step or model path
    :type step_or_model_path: int or str
    :param graph: model graph
    :type graph: torch.nn.Module
    :param optimizer: optimizer
    :type optimizer: torch.optim.Optimizer
    :param criterion_dict: dict of criterion
    :type criterion_dict: dict
    :param device: device to run model
    :type device: str
    :return: state
    :rtype: dict
    """
    # check existence of model file
    model_path = step_or_model_path
    if isinstance(step_or_model_path, int):
        model_path = get_model_name(step_or_model_path)
    if step_or_model_path == 'best':
        model_path = get_best_model_name()
    if step_or_model_path == 'latest':
        # resolve 'latest' to the most recent snapshot in result_subdir
        model_path = get_last_model_name(result_subdir)
    if not os.path.exists(model_path):
        model_path = os.path.join(result_subdir, model_path)
    if not os.path.exists(model_path):
        raise FileNotFoundError('Failed to find model snapshot with {}'.format(step_or_model_path))

    # load model snapshot
    if isinstance(device, int):
        device = 'cuda:{}'.format(device)
    state = torch.load(model_path, map_location=device)
    step = state['step']
    graph.load_state_dict(state['graph'])
    graph.set_actnorm_inited()
    if optimizer is not None:
        optimizer.load_state_dict(state['optimizer'])
    if criterion_dict is not None:
        for k in criterion_dict.keys():
            criterion_dict[k].load_state_dict(state['criterion'][k])
    print('[Builder] Load model snapshot successfully from {}'.format(model_path))
    return state


# Dataset
def is_image(filepath):
    """
    Determine whether file is an image or not
    :param filepath: file path
    :type filepath: str
    :return: whether file is an image
    :rtype: bool
    """
    image_extensions = ['.png', '.jpg', '.jpeg']
    basename = os.path.basename(filepath)
    _, extension = os.path.splitext(basename)
    return extension.lower() in image_extensions


def tensor_to_ndarray(tensor):
    """
    Convert float tensor into numpy image
    :param tensor: input tensor
    :type tensor: torch.Tensor
    :return: numpy image
    :rtype: np.ndarray
    """
    tensor_np = tensor.permute(1, 2, 0).cpu().numpy()
    tensor_np = tensor_np.astype(np.float32)
    tensor_np = (tensor_np * 255).astype(np.uint8)
    return tensor_np


def tensor_to_pil(tensor):
    """
    Convert float tensor into PIL image
    :param tensor: input tensor
    :type tensor: torch.Tensor
    :return: PIL image
    :rtype: Image.Image
    """
    transform = transforms.ToPILImage()
    tensor = tensor.cpu()
    return transform(tensor)


def ndarray_to_tensor(img, shape=(128, 128, 3), bgr2rgb=True):
    """
    Convert numpy image to float tensor
    :param img: numpy image
    :type img: np.ndarray
    :param shape: image shape in (H, W, C)
    :type shape: tuple or list
    :param bgr2rgb: convert color space from BGR to RGB
    :type bgr2rgb: bool
    :return: tensor
    :rtype: torch.Tensor
    """
    if bgr2rgb:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # cv2.resize expects (width, height) while shape is (H, W, C)
    img = cv2.resize(img, (shape[1], shape[0]))
    img = (img / 255.0).astype(np.float32)
    img = torch.Tensor(img).permute(2, 0, 1)
    return img


def pil_to_tensor(img, shape=(128, 128, 3), transform=None):
    """
    Convert PIL image to float tensor
    :param img: PIL image
    :type img: Image.Image
    :param shape: image shape in (H, W, C)
    :type shape: tuple or list
    :param transform: image transform
    :return: tensor
    :rtype: torch.Tensor
    """
    if transform is None:
        transform = transforms.Compose((
            transforms.Resize(shape[0]),
            transforms.ToTensor()
        ))
    return transform(img)


def image_to_tensor(img, shape=(128, 128, 3), bgr2rgb=True):
    """
    Convert image to torch tensor
    :param img: image
    :type img: Image.Image or np.ndarray
    :param shape: image shape in (H, W, C)
    :type shape: tuple or list
    :param bgr2rgb: convert color space from BGR to RGB
    :type bgr2rgb: bool
    :return: image tensor
    :rtype: torch.Tensor
    """
    if isinstance(img, Image.Image):
        return pil_to_tensor(img, shape)
    if isinstance(img, np.ndarray):
        return ndarray_to_tensor(img, shape, bgr2rgb)
    else:
        raise NotImplementedError('Unsupported image type: {}'.format(type(img)))


def save_deltaz(deltaz, save_dir):
    """
    Save deltaz as numpy
    :param deltaz: delta vector of attributes in latent space
    :type deltaz: np.ndarray
    :param save_dir: directory to save
    :type save_dir: str
    """
    check_path(save_dir)
    np.save(os.path.join(save_dir, 'deltaz.npy'), deltaz)


def load_deltaz(path):
    """
    Load deltaz as numpy
    :param path: path to numpy file
    :type path: str
    :return: delta vector of attributes in latent space
    :rtype: np.ndarray
    """
    if os.path.exists(path):
        return np.load(path)


# Misc
def manual_seed(seed):
    """
    Set manual random seed
    :param seed: random seed
    :type seed: int
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    # torch.cuda.manual_seed_all(seed)


def _print(*args, verbose=True, **kwargs):
    """
    Print with condition
    :param verbose: whether to print or not
    :type verbose: bool
    """
    if verbose:
        print(*args, **kwargs)


def check_path(path):
    """
    Check existence of directory path. If not, then create it.
    :param path: path to directory
    :type path: str
    """
    if not os.path.exists(path):
        os.makedirs(path)


def make_batch(tensor, batch_size):
    """
    Generate fake batch
    :param tensor: input tensor
    :type tensor: torch.Tensor
    :param batch_size: batch size
    :type batch_size: int
    :return: fake batch
    :rtype: torch.Tensor
    """
    assert len(tensor.shape) == 3, 'Assume 3D input tensor'
    return tensor.unsqueeze(0).repeat(batch_size, 1, 1, 1)


def make_interpolation_vector(num_classes, step=0.25,
                              minimum=-1., maximum=1.):
    """
    Generate interpolation vector
    :param num_classes: number of classes
    :type num_classes: int
    :param step: increasing step
    :type step: float
    :param minimum: minimum value
    :type minimum: float
    :param maximum: maximum value
    :type maximum: float
    :return: interpolation vector
    :rtype: np.ndarray
    """
    num_levels = int((maximum - minimum) / step) + 1
    # levels run from `minimum` up to `maximum` in increments of `step`
    levels = [minimum + step * i for i in range(num_levels)]
    interpolation_vector = np.zeros([num_classes, num_levels, num_classes])
    for cls in range(num_classes):
        for lv in range(num_levels):
            interpolation_vector[cls, lv, cls] = levels[lv]
    return interpolation_vector
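
# --- Illustrative usage (added; not part of the original module): small smoke
# tests for a few of the helpers above. Values are made up.
if __name__ == '__main__':
    # format_time rolls seconds up into s / m / h / d buckets
    print(format_time(42))        # -> 42s
    print(format_time(3671))      # -> 1h 01m 11s
    # one interpolation vector per (class, level): 3 classes at step 0.5
    vec = make_interpolation_vector(num_classes=3, step=0.5)
    print(vec.shape)              # -> (3, 5, 3)
    # get_devices validates the requested devices against the hardware
    print(get_devices(['cpu']))   # -> ['cpu']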
| 27.665541 | 107 | 0.634632 | 2,221 | 16,378 | 4.550203 | 0.135525 | 0.045122 | 0.009005 | 0.01415 | 0.304176 | 0.229765 | 0.177123 | 0.133881 | 0.116564 | 0.110133 | 0 | 0.011561 | 0.255343 | 16,378 | 591 | 108 | 27.712352 | 0.817071 | 0.368665 | 0 | 0.081081 | 0 | 0 | 0.069106 | 0.011589 | 0 | 0 | 0 | 0 | 0.013514 | 1 | 0.148649 | false | 0 | 0.054054 | 0 | 0.31982 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f331a2bbebd94fbbed32bbf0c22269547b1ac25 | 5,096 | py | Python | icinga2api/base.py | wftech/python-icinga2api | f47b8e5903f5035353082f07f8d5077fecdbd5c9 | [
"BSD-2-Clause"
] | null | null | null | icinga2api/base.py | wftech/python-icinga2api | f47b8e5903f5035353082f07f8d5077fecdbd5c9 | [
"BSD-2-Clause"
] | null | null | null | icinga2api/base.py | wftech/python-icinga2api | f47b8e5903f5035353082f07f8d5077fecdbd5c9 | [
"BSD-2-Clause"
] | 1 | 2020-05-05T11:37:12.000Z | 2020-05-05T11:37:12.000Z | # -*- coding: utf-8 -*-
'''
Copyright 2017 fmnisme@gmail.com
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Icinga 2 API client base
'''
from __future__ import print_function
import logging
import sys
import requests
# pylint: disable=import-error,no-name-in-module
if sys.version_info >= (3, 0):
    from urllib.parse import urljoin
else:
    from urlparse import urljoin
# pylint: enable=import-error,no-name-in-module
from icinga2api.exceptions import Icinga2ApiException
LOG = logging.getLogger(__name__)
class Base(object):
    '''
    Icinga 2 API Base class
    '''

    base_url_path = None  # set by subclasses

    def __init__(self, manager):
        '''
        initialize object
        '''
        self.manager = manager
        self.stream_cache = ""

    def _create_session(self, method='POST'):
        '''
        create a session object
        '''
        session = requests.Session()
        # prefer certificate authentication
        if self.manager.certificate and self.manager.key:
            # certificate and key are in different files
            session.cert = (self.manager.certificate, self.manager.key)
        elif self.manager.certificate:
            # certificate and key are in the same file
            session.cert = self.manager.certificate
        elif self.manager.username and self.manager.password:
            # use username and password
            session.auth = (self.manager.username, self.manager.password)
        session.headers = {
            'User-Agent': 'Python-icinga2api/{0}'.format(self.manager.version),
            'X-HTTP-Method-Override': method.upper(),
            'Accept': 'application/json'
        }
        return session

    def _request(self, method, url_path, payload=None, stream=False):
        '''
        make the request and return the body
        :param method: the HTTP method
        :type method: string
        :param url_path: the requested url path
        :type url_path: string
        :param payload: the payload to send
        :type payload: dictionary
        :returns: the response as json
        :rtype: dictionary
        '''
        request_url = urljoin(self.manager.url, url_path)
        LOG.debug("Request URL: %s", request_url)

        # create session
        session = self._create_session(method)

        # create arguments for the request
        request_args = {
            'url': request_url,
            'timeout': self.manager.timeout,
        }
        if payload:
            request_args['json'] = payload
        if self.manager.ca_certificate:
            request_args['verify'] = self.manager.ca_certificate
        else:
            request_args['verify'] = False
        if stream:
            request_args['stream'] = True

        # do the request
        response = session.post(**request_args)

        if not stream:
            session.close()

        # # for debugging
        # from pprint import pprint
        # pprint(request_url)
        # pprint(payload)
        # pprint(response)

        if not 200 <= response.status_code <= 299:
            raise Icinga2ApiException(
                'Request "{}" failed with status {}: {}'.format(
                    response.url,
                    response.status_code,
                    response.text,
                ))

        if stream:
            return response
        else:
            return response.json()

    @staticmethod
    def _get_message_from_stream(stream):
        '''
        yield complete messages read from the event stream
        :param stream: the streamed response
        :type stream: requests.Response
        :returns: the next message
        :rtype: bytes
        '''
        # TODO: test iter_lines()
        message = []
        for char in stream.iter_content():
            if char == b'\n':
                yield b''.join(message)
                message = []
            else:
                message.append(char)
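
# --- Illustrative subclass (added; not part of the original module). The real
# client ships its own endpoint classes; `_ExampleStatus` is only a sketch and
# the 'v1/status' path is an assumption about the Icinga 2 API.
class _ExampleStatus(Base):
    '''
    example endpoint wrapper built on Base (illustrative)
    '''

    base_url_path = 'v1/status'

    def list(self):
        '''
        fetch all status information via a method-override GET
        '''
        return self._request('GET', self.base_url_path)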
| 31.652174 | 79 | 0.634419 | 596 | 5,096 | 5.347315 | 0.385906 | 0.058676 | 0.027612 | 0.014434 | 0.129903 | 0.080326 | 0.064638 | 0.064638 | 0.042673 | 0.042673 | 0 | 0.006061 | 0.287677 | 5,096 | 160 | 80 | 31.85 | 0.871901 | 0.431515 | 0 | 0.117647 | 0 | 0 | 0.062196 | 0.016111 | 0 | 0 | 0 | 0.00625 | 0 | 1 | 0.058824 | false | 0.029412 | 0.102941 | 0 | 0.235294 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f36583faead235efbf9e5d323d62677a00d5973 | 1,866 | py | Python | decode.py | somdipdey/SmartNoshWaste | 77150e7a3d308ae466fd1bbc676f3e1f4a39fb2e | [
"MIT"
] | 1 | 2021-04-27T18:01:09.000Z | 2021-04-27T18:01:09.000Z | decode.py | somdipdey/SmartNoshWaste | 77150e7a3d308ae466fd1bbc676f3e1f4a39fb2e | [
"MIT"
] | null | null | null | decode.py | somdipdey/SmartNoshWaste | 77150e7a3d308ae466fd1bbc676f3e1f4a39fb2e | [
"MIT"
] | null | null | null | # pip install pyzbar
from pyzbar.pyzbar import decode
from PIL import Image
import hashlib
# function to return key for any value
def get_key(val, mapping):
    for key, value in mapping.items():
        if val == value:
            return key
    return "key doesn't exist"


# create the Food class
class Food:
    def __init__(self, name, variety, farm, size, production_date, expiry_date):
        self.name = name
        self.variety = variety
        self.farm = farm
        self.size = size
        self.production_date = production_date
        self.expiry_date = expiry_date
        self.info = name + ";" + variety + ";" + farm + ";" + size + ";" + production_date + ";" + expiry_date


# create a dictionary of farmers with their unique hash code
farmers_dict = {
    "BOYDELLS DAIRY FARM": str(hashlib.sha256("BOYDELLS DAIRY FARM".encode()).hexdigest()),
    "Foxes Farm Produce": str(hashlib.sha256("Foxes Farm Produce".encode()).hexdigest()),
    "Spinningdale Farm (Essex) Ltd": str(hashlib.sha256("Spinningdale Farm (Essex) Ltd".encode()).hexdigest())
}

#qr = qrtools.QR()
#qr.decode("qrcode_apple.png")
qr = decode(Image.open('qrcode_milk.png'))
print(qr[0].data)

#data = "apple;gala apple;640bf572c70d06fd1d92137c5b6f69bf6f098842993032f0ca7585323407387a;2020-07-11;2020-08-10;f23f6da1e096620df2db706f55e5d9f4a59ec30f8eb3580b23a68ca15157930e"
# qr[0].data is bytes; decode it before splitting
decoded_item = qr[0].data.decode("utf-8").split(";")
farm = get_key(decoded_item[2], farmers_dict)
food_item = Food(decoded_item[0], decoded_item[1], farm, decoded_item[3], decoded_item[4], decoded_item[5])

# print info of the food item
print("Item name: " + food_item.name + "\n")
print("Variety: " + food_item.variety + "\n")
print("Farm: " + food_item.farm + "\n")
print("Size: " + food_item.size + "\n")
print("Production date: " + food_item.production_date + "\n")
print("Expiry date: " + food_item.expiry_date + "\n")
| 37.32 | 178 | 0.692926 | 250 | 1,866 | 5.036 | 0.352 | 0.06116 | 0.03336 | 0.030183 | 0.068308 | 0.068308 | 0.068308 | 0.068308 | 0 | 0 | 0 | 0.076874 | 0.163451 | 1,866 | 49 | 179 | 38.081633 | 0.72966 | 0.205788 | 0 | 0 | 0 | 0 | 0.165648 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.090909 | 0 | 0.242424 | 0.212121 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f3998d39a2c170485e2967bb1cce29713b51dd3 | 2,528 | py | Python | tests/inverse_kinematics_solutions.py | dmklee/nuro-arm | 78a21e17e0140ed73c022bd5e5caef8a71470f21 | [
"MIT"
] | 4 | 2021-12-29T20:34:39.000Z | 2022-01-30T22:41:33.000Z | tests/inverse_kinematics_solutions.py | dmklee/learning-robotics | bd8f6d3db97f3f6db78c16e228a4b8e7770554d5 | [
"MIT"
] | 19 | 2021-05-02T00:34:18.000Z | 2021-07-16T21:19:51.000Z | tests/inverse_kinematics_solutions.py | dmklee/nuro-arm | 78a21e17e0140ed73c022bd5e5caef8a71470f21 | [
"MIT"
] | 4 | 2021-08-24T18:25:04.000Z | 2022-02-27T19:54:16.000Z | import pybullet as pb
import time
from neu_ro_arm.robot.robot_arm import RobotArm
from neu_ro_arm.constants import cube_size
PI = 3.141592653589793
robot = RobotArm('sim')
robot.set_gripper_state(0.5)
client = robot.controller._client
pb.setGravity(0,0,0,client)
# create a semi-transparent cube that marks the commanded hand position
id_ = pb.createVisualShape(pb.GEOM_BOX,
                           halfExtents=3*[cube_size/2],
                           rgbaColor=[0.1, 0.1, 0.8, 0.5])
pos_body = [0, 0, 0]
body = pb.createMultiBody(1, -1, id_, pos_body)

# rangeMin > rangeMax turns the debug slider into a button in pybullet
d_toggle = pb.addUserDebugParameter('toggle orientation specification',
                                    1, 0, 0,
                                    physicsClientId=client)
dbg_params = {
    'pitch': pb.addUserDebugParameter('gripper_pitch', rangeMin=0, rangeMax=PI,
                                      startValue=2*PI, physicsClientId=client),
    'roll': pb.addUserDebugParameter('gripper_roll', rangeMin=-PI/2, rangeMax=PI/2,
                                     startValue=0.0, physicsClientId=client),
    'x': pb.addUserDebugParameter('cube_x', rangeMin=0., rangeMax=0.25,
                                  startValue=0.15, physicsClientId=client),
    'y': pb.addUserDebugParameter('cube_y', rangeMin=-0.15, rangeMax=0.15,
                                  startValue=0.0, physicsClientId=client),
    'z': pb.addUserDebugParameter('cube_z', rangeMin=cube_size/2, rangeMax=0.45,
                                  startValue=0.2, physicsClientId=client),
}
dbg_values = {d: pb.readUserDebugParameter(i, physicsClientId=client)
              for d, i in dbg_params.items()}

while True:
    button_val = pb.readUserDebugParameter(d_toggle, physicsClientId=client)
    reset_cube = False
    move_arm = False
    for name, prm in dbg_params.items():
        new_val = pb.readUserDebugParameter(prm, physicsClientId=client)
        if abs(new_val - dbg_values[name]) > 1e-4:
            dbg_values[name] = new_val
            if name in 'xyz':
                reset_cube = True
                move_arm = True
            elif name in ('pitch', 'roll') and button_val % 2 == 1:
                move_arm = True
    pos = (dbg_values['x'], dbg_values['y'], dbg_values['z'])
    if reset_cube:
        pb.resetBasePositionAndOrientation(body, pos, (0, 0, 0, 1), physicsClientId=client)
    if move_arm:
        if button_val % 2 == 0:
            robot.move_hand_to(pos)
        else:
            robot.move_hand_to(pos, (dbg_values['pitch'], dbg_values['roll']))
        time.sleep(1)
        print('done')
    time.sleep(0.1)
| 38.892308 | 87 | 0.599288 | 310 | 2,528 | 4.722581 | 0.283871 | 0.143443 | 0.006148 | 0.047131 | 0.069672 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04355 | 0.282437 | 2,528 | 64 | 88 | 39.5 | 0.763506 | 0.002373 | 0 | 0.072727 | 0 | 0 | 0.046825 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.072727 | 0 | 0.072727 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f3c5bd2ec280fd81609df6042a1145ce063d8ea | 3,360 | py | Python | komadu_client/models/model_creator.py | Data-to-Insight-Center/CKN | 8cb8f54119061386f016535612f290a0dee86b02 | [
"Apache-2.0"
] | null | null | null | komadu_client/models/model_creator.py | Data-to-Insight-Center/CKN | 8cb8f54119061386f016535612f290a0dee86b02 | [
"Apache-2.0"
] | null | null | null | komadu_client/models/model_creator.py | Data-to-Insight-Center/CKN | 8cb8f54119061386f016535612f290a0dee86b02 | [
"Apache-2.0"
] | null | null | null | from komadu_client.models.ingest_models import entityType, fileType, activityType, serviceInformationType, \
    instanceOfType, \
    usageType, activityEntityType, generationType, addAttributesType
from komadu_client.util.association_enums import AssociationEnum
from komadu_client.util.util import get_node_id, get_attributes
from komadu_client.util.constants import DUMMY_MD5
from datetime import datetime
def create_file_entity(filename, file_uri, attributes=None, location=None, created_date=None, owner=None,
                       size=None):
    entity = entityType()
    file = fileType()
    file.fileName = filename
    file.fileURI = str(file_uri)
    file.md5sum = DUMMY_MD5
    if created_date is not None:
        file.createDate = created_date
    else:
        file.createDate = datetime.now()
    if owner is not None:
        file.ownerDN = owner
    if size is not None:
        file.size = size
    entity.file = file
    if location is not None:
        entity.location = location
    if attributes is not None:
        entity.attributes = attributes
    return entity


def create_workflow_activity(workflow_id, node_id, service_id, instance_workflow, instance_version,
                             instance_creation_time, location, attributes=None):
    activity = activityType()
    activity.location = location

    instance_of = instanceOfType()
    instance_of.creationTime = instance_creation_time
    instance_of.instanceOfID = instance_workflow
    instance_of.version = instance_version

    service_info = serviceInformationType()
    service_info.instanceOf = instance_of
    service_info.serviceID = service_id
    service_info.workflowID = workflow_id
    service_info.workflowNodeID = node_id
    if attributes is not None:
        service_info.attributes = attributes

    activity.serviceInformation = service_info
    return activity


def get_activity_entity(activity, entity, timestamp, activity_id, entity_id, type=AssociationEnum.USAGE,
                        attributes=None):
    relationship = activityEntityType()
    relationship.activity = activity
    relationship.entity = entity
    if type is AssociationEnum.GENERATION:
        generation = generationType()
        __populate_relation(activity_id, entity_id, generation, timestamp, attributes)
        relationship.generation = generation
    elif type is AssociationEnum.USAGE:
        usage = usageType()
        __populate_relation(activity_id, entity_id, usage, timestamp, attributes)
        relationship.usage = usage
    return relationship


def __populate_relation(activity_id, entity_id, relation, timestamp, attributes=None):
    relation.activityID = activity_id
    relation.entityID = entity_id
    relation.timestamp = timestamp
    if attributes is not None:
        relation.attributes = attributes


def add_attributes_activity(workflow_id, node_id, key, value, attributes=None):
    workflow_node_id = get_node_id(workflow_id, node_id)
    new_attr_doc = addAttributesType()
    new_attr_doc.objectID = workflow_node_id
    new_attr_doc.objectType = "ACTIVITY"
    new_attr_doc.notificationTimestamp = datetime.now()
    if attributes is None:
        new_attributes = {key: value}
        new_attr_doc.attributes = get_attributes(new_attributes)
    else:
        new_attr_doc.attributes = attributes
    return new_attr_doc
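
# --- Illustrative usage (added; not part of the original module): wiring the
# helpers above together. All identifiers below are made-up example values.
if __name__ == '__main__':
    example_entity = create_file_entity('run.log', 'file:///tmp/run.log', size=1024)
    example_activity = create_workflow_activity(
        'example_workflow', 'example_node', 'example_service',
        'example_template', '1.0', datetime.now(), 'example-host')
    example_relation = get_activity_entity(
        example_activity, example_entity, datetime.now(),
        'example_workflow_node', 'run.log', type=AssociationEnum.GENERATION)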
| 35 | 108 | 0.732738 | 379 | 3,360 | 6.242744 | 0.224274 | 0.020287 | 0.026627 | 0.030431 | 0.10355 | 0.043111 | 0 | 0 | 0 | 0 | 0 | 0.001122 | 0.204464 | 3,360 | 95 | 109 | 35.368421 | 0.884025 | 0 | 0 | 0.064935 | 0 | 0 | 0.002381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064935 | false | 0 | 0.064935 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f3d939e1f04e87cf10ce18e1ac2a71fb2a8f407 | 2,204 | py | Python | tests/test_docs.py | TimSimpson/frontdoor | b4770ed6c66366383b479975ff53abd50d2acd96 | [
"CC0-1.0",
"MIT"
] | null | null | null | tests/test_docs.py | TimSimpson/frontdoor | b4770ed6c66366383b479975ff53abd50d2acd96 | [
"CC0-1.0",
"MIT"
] | null | null | null | tests/test_docs.py | TimSimpson/frontdoor | b4770ed6c66366383b479975ff53abd50d2acd96 | [
"CC0-1.0",
"MIT"
] | 1 | 2017-02-23T18:20:35.000Z | 2017-02-23T18:20:35.000Z | import os
import sys
import pytest
ROOT = os.path.dirname(os.path.realpath(__file__))
if sys.version_info[0] >= 3:

    def from_root(path):
        # type: (str) -> str
        """Returns a path relative to the root directory."""
        if os.name == 'nt':
            path = path.replace('/', '\\')
        return os.path.join(ROOT, path)

    def get_code_text_from_in_readme_md(ext):
        result = []
        record = False
        with open(from_root('../README.md'), 'r') as f:
            for line in f.readlines():
                if '```' in line:
                    if '```{}'.format(ext) in line:
                        record = True
                    else:
                        record = False
                elif record:
                    result.append(line.strip())
        return '\n'.join(result)

    def get_ci_py_code_in_readme_md():
        return get_code_text_from_in_readme_md('py3')

    def read_ci_py():
        with open(from_root('../ci.py'), 'r') as f:
            return '\n'.join(line.strip() for line in f.readlines())

    def test_readme_python_snippet_is_correct():
        """
        The stand alone ci.py is run through pep8 and mypy, so it's best to
        make sure README.md matches it.
        """
        expected = read_ci_py()
        actual = get_ci_py_code_in_readme_md()
        assert expected == actual

    def get_ci_py_no_arg_output_in_readme_md():
        return get_code_text_from_in_readme_md('bash')

    def get_actual_ci_py_no_arg_output(monkeypatch, capsys):
        import ci
        monkeypatch.setattr(sys, 'argv', ['ci.py'])
        with pytest.raises(SystemExit):
            ci.main()
        out, err = capsys.readouterr()
        return '\n'.join(
            ['$ python ci.py'] + [line.strip() for line in out.split('\n')]
        )

    def test_readme_example_output_is_correct(monkeypatch, capsys):
        """
        This runs the actual ci.py script with no args to make sure the output
        matches what's shown in README.md.
        """
        expected = get_ci_py_no_arg_output_in_readme_md()
        actual = get_actual_ci_py_no_arg_output(monkeypatch, capsys)
        assert expected == actual
| 31.042254 | 78 | 0.57441 | 296 | 2,204 | 4.006757 | 0.344595 | 0.043845 | 0.067454 | 0.030354 | 0.255481 | 0.225126 | 0.225126 | 0.177066 | 0.177066 | 0.06914 | 0 | 0.002635 | 0.311252 | 2,204 | 70 | 79 | 31.485714 | 0.778656 | 0.123412 | 0 | 0.086957 | 0 | 0 | 0.039163 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.173913 | false | 0 | 0.086957 | 0.043478 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f3d9e11e63c47d832dad04123d10ac7e6934e64 | 2,065 | py | Python | private_sdk/signature.py | teambge/bge-private-sdk | b27d4a6caf35bcb89a260938260fd75dba173311 | [
"MIT"
] | null | null | null | private_sdk/signature.py | teambge/bge-private-sdk | b27d4a6caf35bcb89a260938260fd75dba173311 | [
"MIT"
] | null | null | null | private_sdk/signature.py | teambge/bge-private-sdk | b27d4a6caf35bcb89a260938260fd75dba173311 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
from base64 import b64encode
from datetime import datetime, timezone, timedelta
from uuid import uuid4
try:
    from urllib import quote, quote_plus
except ImportError:
    from urllib.parse import quote, quote_plus
import string
import hmac


class Signature(object):

    salt = string.ascii_letters

    def __init__(self, client_secret, expiration_time=300):
        self.client_secret = client_secret
        self.expiration_time = expiration_time

    def get_timestamp(self):
        return datetime.now(tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')

    def is_expired(self, timestamp):
        now = datetime.now(tz=timezone.utc)
        timestamp = datetime.strptime(
            timestamp, '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
        return now > (timestamp + timedelta(seconds=self.expiration_time))

    def get_sign_nonce(self):
        return uuid4().hex

    def _get_stringtosign(self, params):
        t = []
        items = list(params.items())
        items.sort(key=lambda i: i[0])
        for key, value in items:
            if value is None:
                continue
            key = quote_plus(key)
            value = quote_plus(str(value))
            value = value.replace('%7E', '~').replace('+', '%20')
            t.append('%s=%s' % (key, value))
        qs = '&'.join(t)
        qs = quote_plus(qs).replace('%7E', '~').replace('+', '%20')
        return qs

    def _make_signed_string(self, params):
        text = self._get_stringtosign(params)
        message = '&'.join([self.salt, text])
        key = (self.client_secret + '&').encode('utf-8')
        message = message.encode('utf-8')
        h = hmac.new(key, message, digestmod='sha1')
        return b64encode(h.digest()).decode('utf-8')

    def sign(self, params):
        return self._make_signed_string(params)

    def verify(self, params, signed_string):
        timestamp = params['timestamp']
        if self.is_expired(timestamp):
            return False
        return self._make_signed_string(params) == signed_string
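
# --- Illustrative usage (added; not part of the original module). The secret
# and parameters below are made-up example values.
if __name__ == '__main__':
    signer = Signature(client_secret='example-secret')
    params = {
        'nonce': signer.get_sign_nonce(),
        'timestamp': signer.get_timestamp(),
        'user_id': 42,
    }
    signed = signer.sign(params)
    assert signer.verify(params, signed)  # fails once the timestamp expires
    print('signature:', signed)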
| 31.287879 | 75 | 0.614528 | 257 | 2,065 | 4.789883 | 0.354086 | 0.036556 | 0.038993 | 0.032494 | 0.103981 | 0.064988 | 0 | 0 | 0 | 0 | 0 | 0.014848 | 0.249879 | 2,065 | 65 | 76 | 31.769231 | 0.779858 | 0.010169 | 0 | 0 | 0 | 0 | 0.043074 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.156863 | false | 0 | 0.156863 | 0.058824 | 0.509804 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f3e7e1d153c48efd2c180d30f0e1eb79462c023 | 1,610 | py | Python | src/post_comment/app.py | datasetu/eventbridge-integration-solution-zendesk-attachment-processing | d3bf1d69154adfe6acc8b44cb0d8d27bc1eef95f | [
"MIT-0"
] | 2 | 2020-11-23T16:35:13.000Z | 2021-10-30T17:42:25.000Z | src/post_comment/app.py | datasetu/eventbridge-integration-solution-zendesk-attachment-processing | d3bf1d69154adfe6acc8b44cb0d8d27bc1eef95f | [
"MIT-0"
] | null | null | null | src/post_comment/app.py | datasetu/eventbridge-integration-solution-zendesk-attachment-processing | d3bf1d69154adfe6acc8b44cb0d8d27bc1eef95f | [
"MIT-0"
] | 3 | 2020-06-27T04:49:03.000Z | 2021-10-30T17:42:13.000Z | import os
import json
from zenpy.lib.api_objects import Comment
from zenpy import Zenpy #importing zenpy (https://github.com/facetoe/zenpy)
def lambda_handler(event, context):
output = merge_branch_output(event)
text_detected_body, images_detected_body = assemble_message_body(output)
zenpy_update_ticket(output, text_detected_body, images_detected_body)
return
def merge_branch_output(event):
text_branch_output = event[0]
image_branch_output = event[1]
output = {**text_branch_output, **image_branch_output}
return output
def assemble_message_body(output):
if output['image_labels_detected'] == True:
images_detected_body = f"Main images detected: {output['attachment_images']}"
else:
images_detected_body = "No images detected"
if output['text_detected'] == True:
text_detected_body = f"Text detected: {output['attachment_text']}"
else:
text_detected_body = "No text detected"
return text_detected_body, images_detected_body
def zenpy_update_ticket(output, text_detected_body, images_detected_body):
credentials = {
'email': os.environ['ZENDESK_EMAIL'],
'token': os.environ['ZENDESK_TOKEN'],
'subdomain': os.environ['ZENDESK_SUBDOMAIN']
}
zenpy_client = Zenpy(**credentials)
ticket = zenpy_client.tickets(id=output['attachment_data']['ticket_id'])
ticket.comment = Comment(body=text_detected_body, html_body='<h4>Attachment processed by Amazon Textract & Amazon Rekognition</h4><p>{}<p><p>{}</p>'.format(images_detected_body, text_detected_body), public=False)
zenpy_client.tickets.update(ticket)
return | 29.272727 | 214 | 0.754037 | 211 | 1,610 | 5.440758 | 0.298578 | 0.156794 | 0.111498 | 0.076655 | 0.200348 | 0.158537 | 0.099303 | 0.099303 | 0.099303 | 0.099303 | 0 | 0.002872 | 0.134783 | 1,610 | 55 | 215 | 29.272727 | 0.821249 | 0.031056 | 0 | 0.114286 | 0 | 0.028571 | 0.213462 | 0.070513 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.114286 | 0 | 0.342857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f3eb5d4fbfa76e8121ceb71f9823d54a1848d77 | 4,031 | py | Python | ytutils/Video.py | SanjayDevTech/ytutils | 775aa5dfa5552ad165086d08e5a0bdd2c06a167c | [
"MIT"
] | null | null | null | ytutils/Video.py | SanjayDevTech/ytutils | 775aa5dfa5552ad165086d08e5a0bdd2c06a167c | [
"MIT"
] | null | null | null | ytutils/Video.py | SanjayDevTech/ytutils | 775aa5dfa5552ad165086d08e5a0bdd2c06a167c | [
"MIT"
] | null | null | null | import requests
import urllib.parse
import re
class Video:
"""Video class for accessing YouTube video information."""
def __init__(self, api_key):
self.__pattern__ = r"^(?:http(?:s)?:\/\/)?(?:www\.)?(?:m\.)?(?:youtu\.be\/|youtube\.com\/(?:(?:watch)?\?(?:.*&)?v(?:i)?=|(?:embed)\/))([^\?&\"'>]+)"
self.api_key = api_key
self.__dictChart = {}
	def set_key(self, api_key):
		"""Change the api_key that is used to fetch details."""
		self.api_key = api_key
def start(self, video_url=None, video_id=None):
"""Pass video_url or video_id, two is not mandatory but atleast one should be given.
It will raise
SyntaxError => if none of the parameters were given
KeyError => if video url is not matching the pattern
ConnectionError => if network connection failed"""
if video_url is None and video_id is None:
raise SyntaxError('There must be given video_url or video_id')
elif video_url is None:
self.video_id = video_id
else:
id_pattern = re.search(self.__pattern__, video_url)
if id_pattern is None:
raise KeyError('Invalid Video Url')
self.video_id = id_pattern[1]
self.__URL ='https://www.googleapis.com/youtube/v3/videos?part=snippet,statistics&id='+self.video_id+'&key='+self.api_key
		try:
			self.__response = requests.get(self.__URL)
		except requests.exceptions.RequestException:
			raise ConnectionError('Network connection failed')
self.__json = self.__response.json()
if 'error' not in self.__json:
if int(self.__json['pageInfo']['totalResults']) > 0:
self.__dictChart['result'] = 'OK'
self.__dictChart['code'] = 200
self.__dictChart['message'] = ''
self.__dictChart['reason'] = ''
self.__dictChart['extended_help'] = ''
self.__dictChart['title'] = self.__json['items'][0]['snippet']['title']
self.__dictChart['des'] = self.__json['items'][0]['snippet']['description']
self.__dictChart['thumbnails'] = self.__json['items'][0]['snippet']['thumbnails']
self.__dictChart['channelId'] = self.__json['items'][0]['snippet']['channelId']
self.__dictChart['publishedAt'] = self.__json['items'][0]['snippet']['publishedAt']
self.__dictChart['channelTitle'] = self.__json['items'][0]['snippet']['channelTitle']
self.__dictChart['viewCount'] = self.__json['items'][0]['statistics']['viewCount']
self.__dictChart['commentCount'] = self.__json['items'][0]['statistics']['commentCount']
self.__dictChart['likeCount'] = self.__json['items'][0]['statistics']['likeCount']
self.__dictChart['dislikeCount'] = self.__json['items'][0]['statistics']['dislikeCount']
else:
self.__dictChart['result'] = 'FAILURE'
self.__dictChart['code'] = 0
self.__dictChart['message'] = 'Please check your video id'
self.__dictChart['reason'] = 'emptyResult'
self.__dictChart['extended_help'] = ''
else:
self.__dictChart['result'] = 'FAILURE'
self.__dictChart['code'] = int(self.__json['error']['code'])
self.__dictChart['message'] = self.__json['error']['message']
self.__dictChart['reason'] = self.__json['error']['errors'][0]['reason']
self.__dictChart['extended_help'] = 'Use this link to know the meaning of the error code:- https://developers.google.com/youtube/v3/docs/videos/list?hl=en-US#errors_1'
def result(self):
"""Returns the YT video details"""
return self.__dictChart
| 48.566265 | 180 | 0.554701 | 424 | 4,031 | 4.95283 | 0.316038 | 0.167143 | 0.061905 | 0.066667 | 0.235238 | 0.044762 | 0.044762 | 0.044762 | 0 | 0 | 0 | 0.007022 | 0.293476 | 4,031 | 83 | 181 | 48.566265 | 0.730337 | 0.09551 | 0 | 0.157895 | 0 | 0.017544 | 0.273116 | 0.034247 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
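A short usage sketch for the Video class above; the API key and video URL are placeholders, and a real YouTube Data API v3 key is required for the request to succeed.

from ytutils.Video import Video

video = Video("YOUR_API_KEY")  # placeholder, not a real key
video.start(video_url="https://www.youtube.com/watch?v=dQw4w9WgXcQ")
info = video.result()
if info["result"] == "OK":
    print(info["title"], info["viewCount"])
else:
    print("lookup failed:", info["message"])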
0f3efcbaeed1949a3f0c19cb7137d4131273fc97 | 9,761 | py | Python | smlcs/training/classifier.py | AnandBhatUpb/smlcs-final-submission | 54d810d2ed1dc5907973ce55d93d318bec0d4011 | [
"Apache-2.0"
] | null | null | null | smlcs/training/classifier.py | AnandBhatUpb/smlcs-final-submission | 54d810d2ed1dc5907973ce55d93d318bec0d4011 | [
"Apache-2.0"
] | null | null | null | smlcs/training/classifier.py | AnandBhatUpb/smlcs-final-submission | 54d810d2ed1dc5907973ce55d93d318bec0d4011 | [
"Apache-2.0"
] | null | null | null | """This script trains --clf classifier for multiclass classification.
Usage:
classifier.py --env=<run_environment> --job=<jobid> --subjob=<subjobid> --clf=<classifier> --cw=<classweight>
classifier.py (-h | --help)
classifier.py
Options:
-h --help Show this screen.
--env=<run_environment> specifies the running environment cluster/PC
--job=<jobid> specifies cluster job id
--subjob=<subjobid> specifies cluster subjob id
--clf=<classifier> specifies classifier to train
--cw=<classweight> specifies class weight strategy applied
"""
import datetime
import logging
import json
from docopt import docopt
import numpy as np
from joblib import dump
from smlcs.helper.read_data import ReadData
from smlcs.evaluation.metrics import CalculateMetrics
from smlcs.evaluation.plotters import PlotResults
from smlcs.helper.preprocessing import Preprocessing
from smlcs.helper.write_training_result import WriteToCSV
#from imblearn.over_sampling import SMOTE
from collections import Counter
from sklearn.preprocessing import StandardScaler
from sklearn import svm, ensemble
from skopt import BayesSearchCV
class Classifier:
    @staticmethod
    def local_training(environment, clf, X, Y, outercv, logger):
        logger.warning('Local training is not implemented yet')
        raise NotImplementedError('Local training is not implemented yet')
    @staticmethod
    def cluster_training(environment, clf, job_id, subjob_id, cw, logger):
try:
logger.info('Training environment: {}'.format(environment))
logger.info('Classifier selected: {}'.format(clf))
logger.info('Class balance strategy selected: {}'.format(cw))
with open('../configurations/outer_fold_data_clf.txt') as json_file:
data = json.load(json_file)
datasource = data['datasource']
outer_split_strategy = data['outer_split_strategy']
logger.info('Data source selected for training: {}'.format(datasource))
X, Y, pgm_features = ReadData(datasource, logger).read_clf_data(logger) # Read data from local/remote
X[:, 0:42], imputerobject = Preprocessing().handle_missing_data(X[:, 0:42], logger) # Handle missing data
onehotcoded_data, config_features = Preprocessing().encode_categorical_data(X[:, 42:51],
logger) # OneHotCode categorical data
feature_names = pgm_features + config_features
X = np.delete(X, np.s_[42:51], axis=1)
logger.info('Shape of the onehotcoded data: {}'.format(onehotcoded_data.shape))
logger.info('Shape of the program feature data: {}'.format(X.shape))
logger.info('Feature names after onehotencoding: {}'.format(feature_names))
X = np.concatenate((X, onehotcoded_data), axis=1)
logger.info('Shape of the final processed data: {}'.format(X.shape))
Y = Preprocessing().encode_labels(Y, logger) # Encoding class labels
for f in data['folds']:
if int(subjob_id) == int(f['foldId']):
X_train, X_test = X[f['outer_train_index']], X[f['outer_test_index']]
y_train, y_test = Y[f['outer_train_index']], Y[f['outer_test_index']]
#if cw == 'smote':
# logger.info('Original dataset shape before smote: {}'.format(Counter(y_train)))
# sm = SMOTE(random_state=42)
# X_train, y_train = sm.fit_resample(X_train, y_train)
# logger.info('Dataset shape after smote: {}'.format(Counter(y_train)))
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
dump(scaler, '../../models_persisted/clf_scalar_' + clf + '_' + job_id + '_' + subjob_id + '.joblib')
dump(imputerobject, '../../models_persisted/clf_imputer_' + clf + '_' + job_id + '_' + subjob_id + '.joblib')
with open('../configurations/clf_config.txt') as json_file:
clf_config = json.load(json_file)
innercvfolds = int(clf_config['innercv_folds'])
logger.info('Inner cross validation number of folds: {}'.format(innercvfolds))
estimator = None
tuning_parameters = None
            class_weight = None  # default, covers 'imbalanced' and 'smote'
            if cw == 'balanced':
                class_weight = 'balanced'
            elif cw == 'classweight':
                class_weight = {0: 5.0,
                                1: 1.0}
for c in clf_config['classifiers']:
if clf == c['clf_name']:
if clf == 'rf':
if cw == 'smote':
estimator = ensemble.RandomForestClassifier(random_state=0)
else:
estimator = ensemble.RandomForestClassifier(class_weight=class_weight, random_state=1)
tuning_parameters = c['clf_parameters']
break
elif clf == 'svc':
if cw == 'smote' or cw == 'imbalanced':
estimator = svm.SVC(random_state=0)
else:
estimator = svm.SVC(class_weight=class_weight, random_state=1)
tuning_parameters = c['clf_parameters']
break
else:
estimator = ensemble.GradientBoostingClassifier(random_state=1)
tuning_parameters = c['clf_parameters']
break
logger.info('estimator is : {}'.format(estimator))
logger.info('Tunning parameters are: {}'.format(tuning_parameters))
start_time = datetime.datetime.now()
logger.info('Started Skopt CV at: {}'.format(start_time))
opt_clf = BayesSearchCV(estimator, tuning_parameters, cv=innercvfolds)
opt_clf.fit(X_train, y_train)
end_time = datetime.datetime.now()
logger.info('Ended Skopt CV at: {}'.format(end_time))
logger.info('Total time for parameter search: {}'.format(end_time-start_time))
metrics = CalculateMetrics(opt_clf)
metrics.grid_models_metrics(logger, job_id, subjob_id)
best_params = metrics.grid_best_params(logger)
best_estimator = metrics.grid_best_estimator(logger)
grid_score = metrics.grid_score(logger)
test_score = metrics.test_score(X_test, y_test, logger)
important_features = []
if clf == 'rf':
important_features = metrics.get_imprtant_features(logger)
log_path = './logs/log_'+str(job_id)+'_'+str(subjob_id)+'.log'
cm_path = './plots/cm_'+str(job_id)+'_'+str(subjob_id)+'.png'
fi_path = './plots/fi_' + str(job_id) + "_" + str(subjob_id) + '.png'
# dump all results into the training_result.csv file
writer = WriteToCSV()
writer.write_result_to_csv(logger, job_id, subjob_id, subjob_id, datetime.datetime.now(), clf, best_params,
grid_score, test_score, innercvfolds, outer_split_strategy, 'none', datasource,
start_time, end_time, end_time-start_time, X_train.shape, X_test.shape,
log_path, cm_path, fi_path)
logger.info('Saving trained model')
dump(opt_clf, '../../models_persisted/clf_'+clf+'_'+job_id+'_'+subjob_id+'.joblib')
logger.info('Saved model: {}'.format('clf_'+clf+'_'+job_id+'_'+subjob_id+'.joblib'))
if environment == 'local':
plot = PlotResults(opt_clf)
plot.plot_confusion_matrix(X_test, y_test, logger, job_id, subjob_id)
if clf == 'rf':
plot.plot_feature_imp(feature_names, logger, job_id, subjob_id)
logger.info('Done')
except Exception as e:
logger.error('Failed in cluster training: ' + str(e))
if __name__ == '__main__':
try:
arguments = docopt(__doc__, version=None)
if arguments['--env'] is None:
environment = 'cluster'
else:
environment = arguments['--env']
if arguments['--job'] is None:
job_id = -1
else:
job_id = arguments['--job']
if arguments['--subjob'] is None:
subjob_id = -1
else:
subjob_id = arguments['--subjob']
if arguments['--clf'] is None:
clf = 'rf'
else:
clf = arguments['--clf']
if arguments['--cw'] is None:
cw = 'balanced'
else:
cw = arguments['--cw']
logging.basicConfig(filename='../../logs/log_' + str(job_id) + '_' + str(subjob_id) + '.log', filemode='w',
format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger('Clf_training')
logger.info('Cluster job ID: {}'.format(job_id))
logger.info('Cluster sub job ID: {}'.format(subjob_id))
        Classifier.cluster_training(environment, clf, job_id, subjob_id, cw, logger)
except Exception as e:
logger.error('Failed in the main of classifier.py: ' + str(e))
| 45.826291 | 126 | 0.564184 | 1,056 | 9,761 | 5.004735 | 0.224432 | 0.039735 | 0.022895 | 0.024598 | 0.16859 | 0.126963 | 0.106149 | 0.078903 | 0.064901 | 0.045033 | 0 | 0.004684 | 0.321996 | 9,761 | 212 | 127 | 46.042453 | 0.793895 | 0.11925 | 0 | 0.149351 | 0 | 0 | 0.146305 | 0.019702 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012987 | false | 0 | 0.11039 | 0 | 0.12987 | 0.012987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
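The trainer above reads two JSON files from ../configurations. Below is a minimal, assumed example of clf_config.txt that is consistent with the keys the code accesses (innercv_folds, plus a classifiers list carrying clf_name and clf_parameters); the classifier names and hyper-parameter ranges are purely illustrative and follow the [low, high] / [low, high, prior] list form that skopt's BayesSearchCV accepts as search dimensions.

import json

clf_config = {
    "innercv_folds": 5,
    "classifiers": [
        {"clf_name": "rf",
         "clf_parameters": {"n_estimators": [50, 500], "max_depth": [2, 30]}},
        {"clf_name": "svc",
         "clf_parameters": {"C": [1e-3, 1e3, "log-uniform"]}},
        {"clf_name": "gb",
         "clf_parameters": {"learning_rate": [0.01, 0.3, "log-uniform"]}},
    ],
}
with open("clf_config.txt", "w") as f:
    json.dump(clf_config, f, indent=2)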
0f3f65e11551758b33fce5afba522f25f1619d03 | 758 | py | Python | examples/sharepoint/connect_with_azure_app.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | examples/sharepoint/connect_with_azure_app.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | examples/sharepoint/connect_with_azure_app.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | import os
from office365.sharepoint.client_context import ClientContext
from settings import settings
app_settings = {
'url': settings.get('team_site_url'),
'client_id': '51d03106-4726-442c-86db-70b32fa7547f',
'thumbprint': "6B36FBFC86FB1C019EB6496494B9195E6D179DDB",
'certificate_path': '{0}/selfsigncert.pem'.format(os.path.dirname(__file__))
}
ctx = ClientContext.connect_with_certificate(app_settings['url'],
app_settings['client_id'],
app_settings['thumbprint'],
app_settings['certificate_path'])
current_web = ctx.web
ctx.load(current_web)
ctx.execute_query()
print("{0}".format(current_web.url))
| 34.454545 | 80 | 0.635884 | 76 | 758 | 6.052632 | 0.513158 | 0.119565 | 0.06087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093146 | 0.24934 | 758 | 21 | 81 | 36.095238 | 0.71529 | 0 | 0 | 0 | 0 | 0 | 0.248021 | 0.100264 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.176471 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f3f6efc75fc8212e5ff0fedb24746071b27e2d1 | 2,933 | py | Python | apps/configuration/api/views.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 3 | 2019-02-24T14:24:43.000Z | 2019-10-24T18:51:32.000Z | apps/configuration/api/views.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 17 | 2017-03-14T10:55:56.000Z | 2022-03-11T23:20:19.000Z | apps/configuration/api/views.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 2 | 2016-02-01T06:32:40.000Z | 2019-09-06T04:33:50.000Z | from collections import OrderedDict
from django.http import Http404
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from api.views import PermissionMixin, LogUserMixin
from configuration.structure import ConfigurationStructure
from configuration.models import Configuration
class ConfigurationStructureView(PermissionMixin, LogUserMixin, GenericAPIView):
"""
Get the structure of the configuration of a questionnaire.
Return information about the categories, questiongroups and questions that
build a questionnaire.
``code``: The code of the configuration (e.g. "technologies").
``edition``: The edition of the configuration (e.g. "2018").
Optional request params:
``flat``: If present, the structure will be a flat list of questions.
"""
def get(self, request, *args, **kwargs) -> Response:
flat = request.GET.get('flat', False)
structure_obj = ConfigurationStructure(
code=kwargs['code'],
edition=kwargs['edition'],
flat=flat,
)
if structure_obj.error:
# No configuration was found for this code and edition.
raise Http404()
return Response(structure_obj.structure)
class ConfigurationView(PermissionMixin, LogUserMixin, GenericAPIView):
"""
Get available configurations.
Return the available configurations codes.
Optional request params:
``flat``: If present, the structure will be a flat list of configurations.
"""
def get(self, request) -> Response:
flat = request.GET.get('flat', False)
configurations_obj = Configuration.objects.all()
if not configurations_obj:
# No configurations were found
raise Http404()
configurations_obj = configurations_obj.values_list('code', flat=flat).distinct().order_by('code')
data = {"configurations": list(configurations_obj)}
# Return all available configurations
return Response(data)
class ConfigurationEditionView(PermissionMixin, LogUserMixin, GenericAPIView):
"""
Get available editions for the configuration.
Return the available editions in the configuration.
``code``: The code of the configuration (e.g. "technologies").
Optional request params:
``flat``: If present, the structure will be a flat list of questions.
"""
def get(self, request, *args, **kwargs) -> Response:
flat = request.GET.get('flat', False)
editions_obj = Configuration.objects.filter(code=kwargs['code'])
if not editions_obj:
# No editions were found for the code
raise Http404()
editions_obj = editions_obj.values_list('edition', flat=flat).distinct().order_by('edition')
data = {"editions": list(editions_obj)}
# Return all available configurations
return Response(data)
| 31.537634 | 106 | 0.685646 | 320 | 2,933 | 6.225 | 0.253125 | 0.048193 | 0.036145 | 0.066265 | 0.36496 | 0.278614 | 0.278614 | 0.261546 | 0.208333 | 0.165161 | 0 | 0.007039 | 0.225026 | 2,933 | 92 | 107 | 31.880435 | 0.869336 | 0.342994 | 0 | 0.277778 | 0 | 0 | 0.038904 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.194444 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
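A hypothetical urls.py fragment showing one way these three views could be routed; the code/edition kwargs mirror what the get() methods read, but the project's real route names may differ.

from django.urls import path

from configuration.api.views import (
    ConfigurationView, ConfigurationEditionView, ConfigurationStructureView)

urlpatterns = [
    path('configurations/', ConfigurationView.as_view()),
    path('configurations/<str:code>/', ConfigurationEditionView.as_view()),
    path('configurations/<str:code>/<str:edition>/',
         ConfigurationStructureView.as_view()),
]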
0f40804db63c1af07a13fce826b215bb90285b4b | 3,209 | py | Python | script/build.py | Jeket/electron | f41cce96a3afa8c4cf6fe57f9ec904502abed524 | [
"MIT"
] | 2 | 2019-07-17T08:09:02.000Z | 2021-10-04T04:44:42.000Z | script/build.py | Jeket/electron | f41cce96a3afa8c4cf6fe57f9ec904502abed524 | [
"MIT"
] | 1 | 2018-04-03T23:04:37.000Z | 2018-04-03T23:04:37.000Z | script/build.py | Jeket/electron | f41cce96a3afa8c4cf6fe57f9ec904502abed524 | [
"MIT"
] | 1 | 2021-10-04T04:47:00.000Z | 2021-10-04T04:47:00.000Z | #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import MIPS64EL_GCC, get_target_arch, build_env, \
enable_verbose_mode, is_verbose_mode
from lib.util import electron_gyp, import_vs_env
CONFIGURATIONS = ['Release', 'Debug']
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
LIBCC_SOURCE_ROOT = os.path.join(SOURCE_ROOT, 'vendor', 'libchromiumcontent')
LIBCC_DIST_MAIN = os.path.join(LIBCC_SOURCE_ROOT, 'dist', 'main')
GCLIENT_DONE = os.path.join(SOURCE_ROOT, '.gclient_done')
def main():
os.chdir(SOURCE_ROOT)
args = parse_args()
if args.verbose:
enable_verbose_mode()
# Update the VS build env.
import_vs_env(get_target_arch())
# decide which ninja executable to use
ninja_path = args.ninja_path
if not ninja_path:
ninja_path = os.path.join('vendor', 'depot_tools', 'ninja')
if sys.platform == 'win32':
ninja_path += '.exe'
# decide how to invoke ninja
ninja = [ninja_path]
if is_verbose_mode():
ninja.append('-v')
if args.libcc:
if ('D' not in args.configuration
or not os.path.exists(GCLIENT_DONE)
or not os.path.exists(os.path.join(LIBCC_DIST_MAIN, 'build.ninja'))):
sys.stderr.write('--libcc should only be used when '
'libchromiumcontent was built with bootstrap.py -d '
'--debug_libchromiumcontent' + os.linesep)
sys.exit(1)
script = os.path.join(LIBCC_SOURCE_ROOT, 'script', 'build')
subprocess.check_call([sys.executable, script, '-D', '-t',
get_target_arch()])
subprocess.check_call(ninja + ['-C', LIBCC_DIST_MAIN])
env = build_env()
for config in args.configuration:
build_path = os.path.join('out', config[0])
ret = subprocess.call(ninja + ['-C', build_path, args.target], env=env)
if ret != 0:
sys.exit(ret)
def parse_args():
parser = argparse.ArgumentParser(description='Build project')
parser.add_argument('-c', '--configuration',
help='Build with Release or Debug configuration',
nargs='+',
default=CONFIGURATIONS,
required=False)
parser.add_argument('-v', '--verbose',
action='store_true',
default=False,
help='Verbose output')
parser.add_argument('-t', '--target',
help='Build specified target',
default=electron_gyp()['project_name%'],
required=False)
parser.add_argument('--libcc',
help=(
'Build libchromiumcontent first. Should be used only '
'when libchromiumcontent as built with boostrap.py '
'-d --debug_libchromiumcontent.'
),
action='store_true', default=False)
parser.add_argument('--ninja-path',
help='Path of ninja command to use.',
required=False)
return parser.parse_args()
if __name__ == '__main__':
sys.exit(main())
| 34.138298 | 78 | 0.603615 | 382 | 3,209 | 4.861257 | 0.311518 | 0.042003 | 0.04308 | 0.025848 | 0.145396 | 0.054927 | 0 | 0 | 0 | 0 | 0 | 0.003007 | 0.27454 | 3,209 | 93 | 79 | 34.505376 | 0.794674 | 0.033967 | 0 | 0.041096 | 0 | 0 | 0.190245 | 0.017119 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027397 | false | 0 | 0.09589 | 0 | 0.136986 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f40b30ac69c7da5a7f9a164e6b58dd814dda554 | 8,496 | py | Python | edit/datasets/base_sr_dataset.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 28 | 2021-03-23T09:00:33.000Z | 2022-03-10T03:55:00.000Z | edit/datasets/base_sr_dataset.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 2 | 2021-04-17T20:08:55.000Z | 2022-02-01T17:48:55.000Z | edit/datasets/base_sr_dataset.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 5 | 2021-05-19T07:35:56.000Z | 2022-01-13T02:11:50.000Z | import shutil
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path as osp
import copy
from .base_dataset import BaseDataset
from pathlib import Path
from edit.utils import scandir, is_list_of, mkdir_or_exist, is_tuple_of, imread, imwrite
IMG_EXTENSIONS = ('.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm',
'.PPM', '.bmp', '.BMP')
class BaseSRDataset(BaseDataset):
"""Base class for image super resolution Dataset.
"""
def __init__(self, pipeline, scale, mode="train"):
super(BaseSRDataset, self).__init__(pipeline, mode)
self.scale = scale
@staticmethod
def scan_folder(path):
"""Obtain image path list (including sub-folders) from a given folder.
Args:
path (str | :obj:`Path`): Folder path.
Returns:
list[str]: image list obtained form given folder.
"""
if isinstance(path, (str, Path)):
path = str(path)
else:
raise TypeError("'path' must be a str or a Path object, "
f'but received {type(path)}.')
images = sorted(list(scandir(path, suffix=IMG_EXTENSIONS, recursive=True)))
images = [osp.join(path, v) for v in images]
assert images, f'{path} has no valid image file.'
return images
def __getitem__(self, idx):
"""Get item at each call.
Args:
idx (int): Index for getting each item.
"""
results = copy.deepcopy(self.data_infos[idx])
results['scale'] = self.scale
return self.pipeline(results)
def evaluate(self, results):
"""Evaluate with different metrics.
Args:
results (list of dict): for every dict, record metric -> value for one frame
Return:
dict: Evaluation results dict.
"""
assert is_list_of(results, dict), f'results must be a list of dict, but got {type(results)}'
assert len(results) >= len(self), "results length should >= dataset length, due to multicard eval"
self.logger.info("eval samples length: {}, dataset length: {}, only select front {} results".format(len(results), len(self), len(self)))
results = results[:len(self)]
eval_results = defaultdict(list) # a dict of list
for res in results:
for metric, val in res.items():
eval_results[metric].append(val)
for metric, val_list in eval_results.items():
assert len(val_list) == len(self), (
f'Length of evaluation result of {metric} is {len(val_list)}, '
f'should be {len(self)}')
# average the results
eval_results = {
metric: sum(values) / len(self)
for metric, values in eval_results.items()
}
return eval_results
class BaseVSRDataset(BaseDataset):
"""Base class for video super resolution Dataset.
"""
def __init__(self, pipeline, scale, mode="train"):
super(BaseVSRDataset, self).__init__(pipeline, mode)
self.scale = scale
def __getitem__(self, idx):
"""Get item at each call.
Args:
idx (int): Index for getting each item.
"""
results = copy.deepcopy(self.data_infos[idx])
results['scale'] = self.scale
return self.pipeline(results)
def test_aggre(self, save_path, padding_len = 4, start_index = 1):
clip_names = sorted(self.frame_num.keys()) # e.g. [`city`, `walk`]
frame_nums = [ self.frame_num[clip] for clip in clip_names ]
do_frames = 0
now_clip_idx = 0
total_deal = 0
for _ in range(len(self)):
do_frames += 1
if do_frames == frame_nums[now_clip_idx]:
clip_name = clip_names[now_clip_idx]
# move images to dir use shutil
save_dir_path = osp.join(save_path, clip_name)
mkdir_or_exist(save_dir_path)
# index from [total_deal, total_deal + do_frames)
for idx in range(total_deal, total_deal + do_frames):
ensemble_path_1 = osp.join(save_path, "idx_{}_epoch_1.png".format(idx))
desti_path = osp.join(save_dir_path, str(idx - total_deal + start_index).zfill(padding_len) + ".png")
if osp.exists(ensemble_path_1):
# get the content
path = osp.join(save_path, "idx_{}.png".format(idx))
sum_result = imread(path, flag='unchanged').astype(np.float32)
os.remove(path)
for e in range(1, 8):
path = osp.join(save_path, "idx_{}_epoch_{}.png".format(idx, e))
sum_result = sum_result + imread(path, flag='unchanged').astype(np.float32)
os.remove(path)
sum_result = sum_result / 8
                        # round to the nearest integer before casting back to uint8
sum_result = sum_result.round().astype(np.uint8)
# save
imwrite(sum_result, desti_path)
else:
# move
shutil.move(osp.join(save_path, "idx_" + str(idx) + ".png"), desti_path)
total_deal += do_frames
do_frames = 0
now_clip_idx += 1
def evaluate(self, results, save_path):
""" Evaluate with different metrics.
Args:
results (list of dict): for every dict, record metric -> value for one frame
Return:
dict: Evaluation results dict.
"""
save_SVG_path = osp.join(save_path, "SVG")
mkdir_or_exist(save_SVG_path)
assert is_list_of(results, dict), f'results must be a list of dict, but got {type(results)}'
assert len(results) >= len(self), "results length should >= dataset length, due to multicard eval"
self.logger.info("eval samples length: {}, dataset length: {}, only select front {} results".format(len(results), len(self), len(self)))
results = results[:len(self)]
clip_names = sorted(self.frame_num.keys()) # e.g. [`city`, `walk`]
frame_nums = [ self.frame_num[clip] for clip in clip_names ]
eval_results = defaultdict(list) # a dict of list
do_frames = 0
now_clip_idx = 0
eval_results_one_clip = defaultdict(list)
for res in results:
for metric, val in res.items():
eval_results_one_clip[metric].append(val)
do_frames += 1
            if do_frames == frame_nums[now_clip_idx]:  # finished processing one clip
clip_name = clip_names[now_clip_idx]
self.logger.info("{}: {} is ok".format(now_clip_idx, clip_name))
for metric, values in eval_results_one_clip.items():
# metric clip_name values to save an svg
average = sum(values) / len(values)
save_filename = clip_name + "_" + metric
title = "{} for {}, length: {}, average: {:.4f}".format(metric, clip_name, len(values), average)
plt.figure(figsize=(len(values) // 4 + 1, 8))
plt.plot(list(range(len(values))), values, label=metric) # promise that <= 10000
plt.title(title)
plt.xlabel('frame idx')
plt.ylabel('{} value'.format(metric))
plt.legend()
fig = plt.gcf()
fig.savefig(osp.join(save_SVG_path, save_filename + '.svg'), dpi=600, bbox_inches='tight')
# plt.show()
plt.clf()
plt.close()
eval_results[metric].append(average)
do_frames = 0
now_clip_idx += 1
eval_results_one_clip = defaultdict(list)
for metric, val_list in eval_results.items():
assert len(val_list) == len(clip_names), (
f'Length of evaluation result of {metric} is {len(val_list)}, '
f'should be {len(clip_names)}')
# average the results
eval_results = {
metric: sum(values) / len(values)
for metric, values in eval_results.items()
}
return eval_results
| 39.516279 | 144 | 0.555791 | 1,020 | 8,496 | 4.447059 | 0.209804 | 0.038801 | 0.019841 | 0.019841 | 0.576279 | 0.559965 | 0.526455 | 0.468254 | 0.452822 | 0.43254 | 0 | 0.006384 | 0.336276 | 8,496 | 214 | 145 | 39.700935 | 0.798014 | 0.125824 | 0 | 0.455882 | 0 | 0 | 0.120095 | 0 | 0 | 0 | 0 | 0 | 0.051471 | 1 | 0.058824 | false | 0 | 0.080882 | 0 | 0.191176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
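A minimal concrete subclass as a sketch, assuming BaseDataset leaves data_infos and __len__ to its children (the code above reads self.data_infos and len(self)); the lq_path annotation key is hypothetical and would have to match whatever the pipeline transforms expect.

class FolderSRDataset(BaseSRDataset):
    """Toy dataset: one low-quality image per sample from a flat folder."""

    def __init__(self, folder, pipeline, scale, mode="train"):
        super(FolderSRDataset, self).__init__(pipeline, scale, mode)
        # one dict per sample; the keys are consumed by the pipeline transforms
        self.data_infos = [dict(lq_path=p) for p in self.scan_folder(folder)]

    def __len__(self):
        return len(self.data_infos)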
0f40db84d7bca7d0656befc3d3837a41b5c794b2 | 13,400 | py | Python | rbotreborn.py | Bobscorn/rbotreborn | 4a16e577c349b1a3461e12643dbb583a35af50f5 | [
"MIT"
] | null | null | null | rbotreborn.py | Bobscorn/rbotreborn | 4a16e577c349b1a3461e12643dbb583a35af50f5 | [
"MIT"
] | null | null | null | rbotreborn.py | Bobscorn/rbotreborn | 4a16e577c349b1a3461e12643dbb583a35af50f5 | [
"MIT"
] | null | null | null | import discord
import asyncio
from discord.ext import commands
import praw
import logging
import config
from gfycat import Gfycat
from custom_embeds import *
from reddit import *
from exceptions import *
from processors import *
Config = config.Config('config.ini')
bot = commands.Bot(command_prefix=Config.bot_prefix,
description='R-BotReborn\n https://github.com/colethedj/rbotreborn')
@bot.command(pass_context=True, description="Get x amount of comments from the last post")
async def rcl(ctx, *comment_count:int):
await bot.delete_message(ctx.message)
if comment_count:
comment_count = comment_count[0]
if comment_count > Config.r_max_comment_count:
comment_count = Config.r_max_comment_count
else:
comment_count = Config.r_default_comment_count
loading_message = RedditLoadingEmbed()
loading_message.create_embed(subreddit='unknown', post_count=1,
comment_count=comment_count,
custom_message="Getting comments... This will take a moment")
bot_message = await bot.send_message(ctx.message.channel, embed=loading_message.get_embed())
try:
post_id = Config.r_last_post_url[ctx.message.server.id][ctx.message.channel.id]
getcomments = Reddit(reddit2)
comments = getcomments.get_comments(post_id, comment_count)
embed = RedditCommentEmbed()
embed.create_embed(comments=comments)
except KeyError:
embed = RedditErrorEmbed()
embed.create_embed(title=":warning: There is no last post from this channel saved",
)
except UnknownException as e:
embed = RedditErrorEmbed()
embed.create_embed(title=":warning: Error getting comments from that post: " + str(e),
)
# no saved post from this channel
await bot.edit_message(bot_message, embed=embed.get_embed())
@bot.command(pass_context=True, description="Get posts from a Reddit comments link. "
"Can also grab comments from that post")
async def ru(ctx, url: str, *comment_count:int):
if comment_count:
comment_count = comment_count[0]
if comment_count > Config.r_max_comment_count:
comment_count = Config.r_max_comment_count
else:
comment_count = 0
await reddit_handler(ctx, url=url, comment_num=comment_count)
@bot.command(pass_context=True, description="Get posts with comments from Reddit")
async def rc(ctx, subreddit: str, *comment_count: int):
if comment_count:
comment_count = comment_count[0]
if comment_count > Config.r_max_comment_count:
comment_count = Config.r_max_comment_count
else:
comment_count = Config.r_default_comment_count
await reddit_handler(ctx, subreddit=subreddit, post_count=Config.r_postcount, comment_num=comment_count)
@bot.command(pass_context=True, description="Get posts from reddit (any type)")
async def r(ctx, subreddit: str, *post_count: int):
if post_count:
post_count = post_count[0]
else:
post_count = Config.r_postcount
await reddit_handler(ctx, subreddit=subreddit, post_count=post_count, image=None)
@bot.command(pass_context=True, description="Get image-only posts from reddit")
async def ri(ctx, subreddit: str, *post_count: int):
if post_count:
post_count = post_count[0]
else:
post_count = Config.r_postcount
await reddit_handler(ctx, subreddit=subreddit, post_count=post_count, image=True)
@bot.command(pass_context=True, description="Get text-only posts from reddit")
async def rt(ctx, subreddit: str, *post_count: int):
# TODO
if post_count:
post_count = post_count[0]
else:
post_count = Config.r_postcount
await reddit_handler(ctx, subreddit=subreddit, post_count=post_count, image=False)
# where all the reddit commands use
# REQUEST TYPES: 'default', 'url'
async def reddit_handler(ctx, **kwargs):
subreddit = kwargs.get('subreddit', None)
url = kwargs.get('url', None)
post_count = int(kwargs.get('post_count', 1))
image = kwargs.get('image', None)
comment_num = int(kwargs.get('comment_num', 0))
request_type = 'default'
if subreddit is not None:
subreddit = subreddit.lower()
if url is not None:
request_type = 'url'
    # this is already done in reddit.py, but we want to show how many posts we are getting in chat
if post_count is not None:
if post_count > Config.r_maxpostcount:
post_count = Config.r_maxpostcount
else:
post_count = 1
# delete the request message
await bot.delete_message(ctx.message)
    # send a message to show the requester what's happening
loading_message = RedditLoadingEmbed()
loading_message.create_embed(subreddit=('unknown' if subreddit is None else subreddit), post_count=post_count, comment_count=comment_num)
bot_message = await bot.send_message(ctx.message.channel, embed=loading_message.get_embed())
    # check if this discord channel is marked as NSFW (servers without any entry default to SFW)
    nsfw = str(ctx.message.channel.id) in Config.nsfw_channels.get(ctx.message.server.id, [])
# start off with getting posts
red = Reddit(reddit2)
error_embed = None
try:
post, comments = await red.get(subreddit=subreddit,
post_count=post_count,
nsfw=nsfw,
get_image=image,
comment_count=comment_num,
request_type=request_type,
url=url)
except SubredditNotExist:
error_embed = RedditErrorEmbed()
error_embed.create_embed(title="r/" + str(subreddit) + " does not exist.",
description="check your spelling")
except SubredditIsNSFW:
error_embed = RedditErrorEmbed()
error_embed.create_embed(title="r/" + str(subreddit) + " is a NSFW subreddit",
description="This channel is not set as a NSFW channel. "
"If you want to add this channel as a NSFW channel, "
"use the command -addnsfw.")
except NoPostsReturned:
error_embed = RedditErrorEmbed()
error_embed.create_embed(title="No Posts Returned",
description="Maybe try again with larger post count. "
"If you are getting only images or only text,"
" some subreddits may not have e.g only images.")
except InvalidRedditURL:
error_embed = RedditErrorEmbed()
error_embed.create_embed(title="Invalid Reddit Submission URL entered",
description="Make sure the URL you entered is correct and links "
"to a post.")
except RedditOAuthException as e:
error_embed = RedditErrorEmbed()
error_embed.create_embed(title="Reddit Authentication Failure",
description="Make sure you have enter credentials and that they are correct"
"in the config file. Also make sure only application using your "
"API credentials at once. " + str(e))
except UnknownException as e:
error_embed = RedditErrorEmbed()
error_embed.create_embed(title="Unknown Error",
description="""R-BOT has not been programmed to handle this error.
Error Output: """ + str(e))
finally:
if error_embed is not None:
await bot.edit_message(bot_message, embed=error_embed.get_embed())
return
# TODO: ERROR HANDLERS
# handle the post types
post_type = post.get('post_type')
post_text = post.get('post_text')
post_id = post.get('post_id')
image_url = "NONE" # had issues with None being turned to a str type for some reason
if post_type != "link" and post_type != "reddit":
if post_type != "gif" and post_type != "image":
if post_type == "gfycat":
post_gfycat = Gfycat(Config.gfycat_client_id, Config.gfycat_client_secret)
gfyjson = await post_gfycat.get_gfy_info(str(post.get('post_url'))[19:(len(str(post.get('post_url'))))])
print(gfyjson) # TODO: fails if starts with http://
image_url = gfyjson['gfyItem']['max5mbGif']
# TODO: maybe some error handling here?
elif post_type == "imgur":
post['post_text'] = "R-BotReborn: Imgur Links are not supported yet"
else:
processing_embed = GfycatLoadingEmbed()
await bot.edit_message(bot_message, embed=processing_embed.get_embed())
image_url = await gfycat_url_handler(post.get('post_url'))
                if isinstance(image_url, GfycatErrorEmbed):
                    # gfycat lookup failed: show the returned error embed and stop
                    await bot.edit_message(bot_message, embed=image_url.get_embed())
                    return
elif post_type == "gif" or post_type == "image": # either gif or image
image_url = post.get('post_url')
elif post_type == "link":
if Config.enable_sumy:
# tldrify if user wants
# TODO: add this function
        # we are going to TLDRify the link (but only if there is no text to start with)
if post_text == "":
post_text = "**TL;DR:** " + await sumy_url(post.get('post_url'))
# create reddit embed
comment_embed = None
if len(comments) > 0:
comment_embed = RedditCommentEmbed()
comment_embed.create_embed(comments=comments)
post_embed = RedditPostEmbed()
post_embed.create_embed(title=str(post.get('post_title')),
url=str(post.get('post_permalink')),
author=str(post.get('post_author')),
nsfw=bool(post.get('nsfw')),
score=int(post.get('post_score')),
description=str(post_text),
image=str(image_url),
time=str(post.get('created_utc')) + " UTC",
subreddit=str(post.get('post_subreddit'))
)
await bot.edit_message(bot_message, embed=post_embed.get_embed())
if comment_embed is not None:
await bot.send_message(ctx.message.channel, embed=comment_embed.get_embed())
# now we will save the post id
if str(ctx.message.server.id) in Config.r_last_post_url:
Config.r_last_post_url[str(ctx.message.server.id)][str(ctx.message.channel.id)] = post_id
else:
Config.r_last_post_url[str(ctx.message.server.id)] = {str(ctx.message.channel.id): post_id}
@bot.command(pass_context=True, description="Allow NSFW on current channel")
async def addnsfw(ctx):
new_channels, message = config.UpdateConfig('config.ini').add_nsfw_channels(str(ctx.message.server.id), str(ctx.message.channel.id))
Config.nsfw_channels = new_channels
if message is None:
embed = discord.Embed(title=":underage: Added this channel as a NSFW Channel")
else:
embed = discord.Embed(title=":warning:" + str(message))
await bot.send_message(ctx.message.channel, embed=embed)
@bot.command(pass_context=True, description="Allow NSFW on current channel")
async def removensfw(ctx):
new_channels, message = config.UpdateConfig('config.ini').remove_nsfw_channels(str(ctx.message.server.id), str(ctx.message.channel.id))
Config.nsfw_channels = new_channels
if message is None:
embed = discord.Embed(title=":wastebasket: Removed this channel as a NSFW Channel")
else:
embed = discord.Embed(title=":warning: " + str(message))
await bot.send_message(ctx.message.channel, embed=embed)
@bot.event
# when ready display this shit
async def on_ready():
logging.info('Logged into discord as:')
logging.info(bot.user.name)
logging.info(bot.user.id)
    # avoid dumping vars()/dir() of the praw instance: it would log the reddit API credentials
    logging.debug('praw read-only mode: %s', reddit2.read_only)
logging.info("Logged into reddit as:")
#logging.info(reddit2.user.me())
logging.info('------')
await bot.change_presence(game=discord.Game(name=Config.bot_game))
# main method run when starting
def start():
    # connect to discord (never print or log the token itself)
bot.run(Config.discord_token)
# connect to reddit (instance)
def connect_reddit():
reddit = praw.Reddit(client_id=Config.r_client_id,
client_secret=Config.r_client_secret,
user_agent=Config.r_user_agent,
)
return reddit
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
reddit2 = connect_reddit()
start()
| 37.535014 | 141 | 0.623731 | 1,646 | 13,400 | 4.899757 | 0.176792 | 0.053565 | 0.020831 | 0.024551 | 0.451457 | 0.409423 | 0.380657 | 0.344947 | 0.303162 | 0.278611 | 0 | 0.00229 | 0.28306 | 13,400 | 356 | 142 | 37.640449 | 0.837202 | 0.068433 | 0 | 0.271255 | 0 | 0 | 0.146816 | 0 | 0 | 0 | 0 | 0.002809 | 0 | 1 | 0.008097 | false | 0.032389 | 0.044534 | 0 | 0.064777 | 0.016194 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f429fee86ffe0485046fceb6efe8634730b1750 | 2,954 | py | Python | eventbusk/brokers/base.py | Airbase/eventbusk | 704d50a4c9c1f7d332dba93ee04ab07afa59d216 | [
"BSD-3-Clause"
] | null | null | null | eventbusk/brokers/base.py | Airbase/eventbusk | 704d50a4c9c1f7d332dba93ee04ab07afa59d216 | [
"BSD-3-Clause"
] | 1 | 2021-06-13T18:08:50.000Z | 2021-06-13T18:08:50.000Z | eventbusk/brokers/base.py | Airbase/eventbusk | 704d50a4c9c1f7d332dba93ee04ab07afa59d216 | [
"BSD-3-Clause"
] | null | null | null | """
Base interface for event consumer and producers.
"""
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from contextlib import ContextDecorator
from types import TracebackType
from typing import Callable, Optional, Type, Union
from confluent_kafka import cimpl # type: ignore
logger = logging.getLogger(__name__)
__all__ = [
"BaseBrokerURI",
"BaseConsumer",
"BaseProducer",
]
# Type hints
# callback method `on_delivery` on the producer
DeliveryCallBackT = Callable[..., None]
MessageT = Union[str, bytes, cimpl.Message]
class BaseBrokerURI(ABC):
"""
Base class that defines the interface for all broker URIs
"""
@classmethod
@abstractmethod
def from_uri(cls, uri: str) -> BaseBrokerURI:
"""
Return a instance created from a URI
"""
class BaseConsumer(ContextDecorator, ABC):
"""
Base class for consumers
    All event consumers are exposed as a ContextDecorator, so they can be used via a
    `with` statement and any connections are automatically closed on exit.
"""
broker: BaseBrokerURI
topic: str
group: str
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__}("
f"broker=*, "
f"topic={self.topic}, "
f"group='{self.group}')>"
)
def __enter__(self) -> BaseConsumer:
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
exc_traceback: Optional[TracebackType],
) -> None:
pass
@abstractmethod
def poll(self, timeout: int) -> Optional[MessageT]: # type: ignore
"""
Poll for a specified time in seconds for new messages
"""
@abstractmethod
def ack(self, message: str) -> None:
"""
Acknowledge successful consumption of a message.
"""
class BaseProducer(ABC):
"""
Base class for producers
"""
def __repr__(self) -> str:
return f"<{self.__class__.__name__}(" f"broker=*>"
@abstractmethod
def __init__(self, broker: str):
super().__init__()
@abstractmethod
def produce( # type: ignore # pylint: disable=too-many-arguments
self,
topic: str,
value: MessageT,
flush: bool = True,
on_delivery: DeliveryCallBackT = None,
fail_silently: bool = False,
) -> None:
"""
Send a message on the specific topic.
Arguments
----------
topic:
The name of the topic
value:
Serialized message to send.
on_delivery:
Callback function on delivery of a message.
flush:
Flush any pending messages after every send.
Useful for brokers like Kafka which do batches.
fail_silently:
If True, ignore all delivery errors.
"""
| 23.822581 | 82 | 0.61002 | 315 | 2,954 | 5.52381 | 0.419048 | 0.048851 | 0.02069 | 0.017241 | 0.047126 | 0.047126 | 0.047126 | 0.047126 | 0.047126 | 0.047126 | 0 | 0 | 0.296886 | 2,954 | 123 | 83 | 24.01626 | 0.837747 | 0.317197 | 0 | 0.189655 | 0 | 0 | 0.08661 | 0.043305 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155172 | false | 0.017241 | 0.12069 | 0.051724 | 0.431034 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
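A deliberately tiny in-memory pair implementing the two interfaces above, useful for unit tests; this is a sketch under the assumption that concrete subclasses own their queueing and may ignore broker URIs, topics and delivery guarantees entirely.

from collections import deque


class InMemoryProducer(BaseProducer):
    def __init__(self, broker: str):
        super().__init__(broker)
        self.queue = deque()

    def produce(self, topic, value, flush=True, on_delivery=None,
                fail_silently=False) -> None:
        self.queue.append((topic, value))
        if on_delivery is not None:
            on_delivery(None, value)  # report success, kafka callback style


class InMemoryConsumer(BaseConsumer):
    topic = group = "in-memory"

    def __init__(self, producer: InMemoryProducer):
        self.queue = producer.queue

    def poll(self, timeout: int):
        return self.queue.popleft()[1] if self.queue else None

    def ack(self, message: str) -> None:
        pass  # nothing to commit for an in-memory queue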
0f44d6bc8334ac97aa9e714bc57dd06d1424b56c | 11,558 | py | Python | mw4/gui/mainWmixin/tabSettDome.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 16 | 2020-01-11T22:32:26.000Z | 2022-03-31T15:18:14.000Z | mw4/gui/mainWmixin/tabSettDome.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 196 | 2020-01-16T13:56:01.000Z | 2022-03-29T02:06:51.000Z | mw4/gui/mainWmixin/tabSettDome.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 6 | 2019-12-01T19:39:33.000Z | 2021-05-27T13:14:20.000Z | ############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
# Licence APL2.0
#
###########################################################
# standard libraries
# external packages
# local import
class SettDome(object):
"""
"""
def __init__(self):
self.ui.domeRadius.valueChanged.connect(self.setUseGeometry)
self.ui.offGEM.valueChanged.connect(self.setUseGeometry)
self.ui.offLAT.valueChanged.connect(self.setUseGeometry)
self.ui.domeEastOffset.valueChanged.connect(self.setUseGeometry)
self.ui.domeNorthOffset.valueChanged.connect(self.setUseGeometry)
self.ui.domeZoffGEM.valueChanged.connect(self.setZoffGEMInMount)
self.ui.domeZoff10micron.valueChanged.connect(self.setZoff10micronInMount)
self.ui.domeClearOpening.valueChanged.connect(self.setUseGeometry)
self.ui.domeOpeningHysteresis.valueChanged.connect(self.setUseGeometry)
self.ui.domeClearanceZenith.valueChanged.connect(self.setUseGeometry)
self.ui.useOvershoot.clicked.connect(self.setUseGeometry)
self.ui.settleTimeDome.valueChanged.connect(self.setDomeSettlingTime)
self.ui.useDomeGeometry.clicked.connect(self.setUseGeometry)
self.ui.useDynamicFollowing.clicked.connect(self.setUseGeometry)
self.ui.copyFromDomeDriver.clicked.connect(self.updateDomeGeometryToGui)
self.app.mount.signals.firmwareDone.connect(self.setUseGeometry)
self.app.mount.signals.firmwareDone.connect(self.setZoffGEMInMount)
self.ui.domeRadius.valueChanged.connect(self.tab1)
self.ui.domeNorthOffset.valueChanged.connect(self.tab2)
self.ui.domeEastOffset.valueChanged.connect(self.tab3)
self.ui.domeZoffGEM.valueChanged.connect(self.tab4)
self.ui.domeZoff10micron.valueChanged.connect(self.tab5)
self.ui.offGEM.valueChanged.connect(self.tab6)
self.ui.offLAT.valueChanged.connect(self.tab7)
self.ui.domeClearOpening.valueChanged.connect(self.tab8)
self.ui.domeOpeningHysteresis.valueChanged.connect(self.tab9)
self.ui.domeClearanceZenith.valueChanged.connect(self.tab10)
self.app.update1s.connect(self.updateShutterStatGui)
self.ui.domeAbortSlew.clicked.connect(self.domeAbortSlew)
self.ui.domeOpenShutter.clicked.connect(self.domeOpenShutter)
self.ui.domeCloseShutter.clicked.connect(self.domeCloseShutter)
def tab1(self):
self.ui.tabDomeExplain.setCurrentIndex(0)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab2(self):
self.ui.tabDomeExplain.setCurrentIndex(1)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab3(self):
self.ui.tabDomeExplain.setCurrentIndex(2)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab4(self):
self.ui.tabDomeExplain.setCurrentIndex(3)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab5(self):
self.ui.tabDomeExplain.setCurrentIndex(4)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab6(self):
self.ui.tabDomeExplain.setCurrentIndex(5)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab7(self):
self.ui.tabDomeExplain.setCurrentIndex(6)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab8(self):
self.ui.tabDomeExplain.setCurrentIndex(7)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab9(self):
self.ui.tabDomeExplain.setCurrentIndex(8)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def tab10(self):
self.ui.tabDomeExplain.setCurrentIndex(9)
self.ui.tabDomeExplain.setStyleSheet(self.getStyle())
def initConfig(self):
"""
        initConfig reads the keys out of the configuration dict and stores them in
        the gui elements. If any initialisation has to be performed with the loaded
        persistent data, it is launched in this method as well.
:return: True for test purpose
"""
config = self.app.config['mainW']
self.ui.domeClearOpening.setValue(config.get('domeClearOpening', 0.4))
self.ui.domeOpeningHysteresis.setValue(config.get('domeOpeningHysteresis',
0.0))
self.ui.domeClearanceZenith.setValue(config.get('domeClearanceZenith', 0.2))
self.ui.useOvershoot.setChecked(config.get('useOvershoot', False))
self.ui.domeNorthOffset.setValue(config.get('domeNorthOffset', 0))
self.ui.domeEastOffset.setValue(config.get('domeEastOffset', 0))
self.ui.domeZoffGEM.setValue(config.get('domeZoffGEM', 0))
self.ui.offGEM.setValue(config.get('offGEM', 0))
self.ui.offLAT.setValue(config.get('offLAT', 0))
self.ui.domeRadius.setValue(config.get('domeRadius', 1.5))
self.ui.useDomeGeometry.setChecked(config.get('useDomeGeometry', False))
self.ui.autoDomeDriver.setChecked(config.get('autoDomeDriver', False))
self.ui.useDynamicFollowing.setChecked(config.get('useDynamicFollowing', False))
self.ui.settleTimeDome.setValue(config.get('settleTimeDome', 0))
self.setUseGeometry()
return True
def storeConfig(self):
"""
        storeConfig writes the keys to the configuration dict and stores them. If
        anything has to be saved to persistent data, it is launched in this method
        as well.
:return: True for test purpose
"""
config = self.app.config['mainW']
config['domeRadius'] = self.ui.domeRadius.value()
config['domeClearOpening'] = self.ui.domeClearOpening.value()
config['domeOpeningHysteresis'] = self.ui.domeOpeningHysteresis.value()
config['domeClearanceZenith'] = self.ui.domeClearanceZenith.value()
config['useOvershoot'] = self.ui.useOvershoot.isChecked()
config['domeNorthOffset'] = self.ui.domeNorthOffset.value()
config['domeEastOffset'] = self.ui.domeEastOffset.value()
config['domeZoffGEM'] = self.ui.domeZoffGEM.value()
config['offGEM'] = self.ui.offGEM.value()
config['offLAT'] = self.ui.offLAT.value()
config['useDomeGeometry'] = self.ui.useDomeGeometry.isChecked()
config['autoDomeDriver'] = self.ui.autoDomeDriver.isChecked()
config['useDynamicFollowing'] = self.ui.useDynamicFollowing.isChecked()
config['settleTimeDome'] = self.ui.settleTimeDome.value()
return True
def setZoffGEMInMount(self):
"""
:return:
"""
self.app.mount.geometry.offVertGEM = self.ui.domeZoffGEM.value()
self.ui.domeZoff10micron.setValue(self.app.mount.geometry.offVert)
self.app.updateDomeSettings.emit()
return True
def setZoff10micronInMount(self):
"""
:return:
"""
self.app.mount.geometry.offVert = self.ui.domeZoff10micron.value()
self.ui.domeZoffGEM.setValue(self.app.mount.geometry.offVertGEM)
self.app.updateDomeSettings.emit()
return True
def setUseGeometry(self):
"""
setUseGeometry updates the mount class with the new setting if use
geometry for dome calculation should be used or not.
:return: true for test purpose
"""
if self.ui.autoDomeDriver.isChecked():
self.updateDomeGeometryToGui()
self.app.mount.geometry.domeRadius = self.ui.domeRadius.value()
self.app.dome.radius = self.ui.domeRadius.value()
self.app.mount.geometry.offGEM = self.ui.offGEM.value()
self.app.mount.geometry.offLAT = self.ui.offLAT.value()
self.app.mount.geometry.offNorth = self.ui.domeNorthOffset.value()
self.app.mount.geometry.offEast = self.ui.domeEastOffset.value()
clearOpening = self.ui.domeClearOpening.value()
self.app.dome.clearOpening = clearOpening
self.ui.domeOpeningHysteresis.setMaximum(clearOpening / 2.1)
self.app.dome.openingHysteresis = self.ui.domeOpeningHysteresis.value()
self.app.dome.clearanceZenith = self.ui.domeClearanceZenith.value()
useGeometry = self.ui.useDomeGeometry.isChecked()
self.app.dome.useGeometry = useGeometry
useDynamicFollowing = self.ui.useDynamicFollowing.isChecked()
self.app.dome.useDynamicFollowing = useDynamicFollowing
self.app.dome.overshoot = self.ui.useOvershoot.isChecked()
self.app.updateDomeSettings.emit()
return True
def updateDomeGeometryToGui(self):
"""
:return: true for test purpose
"""
value = float(self.app.dome.data.get('DOME_MEASUREMENTS.DM_OTA_OFFSET', 0))
self.ui.offGEM.setValue(value)
value = float(self.app.dome.data.get('DOME_MEASUREMENTS.DM_DOME_RADIUS', 0))
self.ui.domeRadius.setValue(value)
value = float(self.app.dome.data.get('DOME_MEASUREMENTS.DM_SHUTTER_WIDTH', 0))
self.ui.domeClearOpening.setValue(value)
value = float(self.app.dome.data.get('DOME_MEASUREMENTS.DM_NORTH_DISPLACEMENT', 0))
self.ui.domeNorthOffset.setValue(value)
value = float(self.app.dome.data.get('DOME_MEASUREMENTS.DM_EAST_DISPLACEMENT', 0))
self.ui.domeEastOffset.setValue(value)
value = float(self.app.dome.data.get('DOME_MEASUREMENTS.DM_UP_DISPLACEMENT', 0))
self.ui.domeZoffGEM.setValue(value)
return True
def setDomeSettlingTime(self):
"""
:return: true for test purpose
"""
self.app.dome.settlingTime = self.ui.settleTimeDome.value()
return True
def updateShutterStatGui(self):
"""
:return: True for test purpose
"""
value = self.app.dome.data.get('DOME_SHUTTER.SHUTTER_OPEN', None)
if value is True:
self.changeStyleDynamic(self.ui.domeOpenShutter, 'running', True)
self.changeStyleDynamic(self.ui.domeCloseShutter, 'running', False)
elif value is False:
self.changeStyleDynamic(self.ui.domeOpenShutter, 'running', False)
self.changeStyleDynamic(self.ui.domeCloseShutter, 'running', True)
else:
self.changeStyleDynamic(self.ui.domeOpenShutter, 'running', False)
self.changeStyleDynamic(self.ui.domeCloseShutter, 'running', False)
value = self.app.dome.data.get('Status.Shutter', None)
if value:
self.ui.domeShutterStatusText.setText(value)
return True
def domeAbortSlew(self):
"""
:return:
"""
suc = self.app.dome.abortSlew()
if not suc:
self.app.message.emit('Dome slew abort could not be executed', 2)
return suc
def domeOpenShutter(self):
"""
:return:
"""
suc = self.app.dome.openShutter()
if not suc:
self.app.message.emit('Dome open shutter could not be executed', 2)
return suc
def domeCloseShutter(self):
"""
:return:
"""
suc = self.app.dome.closeShutter()
if not suc:
self.app.message.emit('Dome close shutter could not be executed', 2)
return suc
| 41.278571 | 91 | 0.661447 | 1,209 | 11,558 | 6.303557 | 0.160463 | 0.085028 | 0.063378 | 0.045663 | 0.577352 | 0.433539 | 0.240257 | 0.123737 | 0.098281 | 0.098281 | 0 | 0.009336 | 0.212234 | 11,558 | 279 | 92 | 41.426523 | 0.827677 | 0.090587 | 0 | 0.186441 | 0 | 0 | 0.080084 | 0.027694 | 0 | 0 | 0 | 0 | 0 | 1 | 0.124294 | false | 0 | 0 | 0 | 0.19209 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f468cd48e78952f7f0e78c7dfbb72964775a0d2 | 2,179 | py | Python | lib/mixer.py | voc/multiview-monitor | b1435d6613882e3ebbc05589e5265fe596fbfed2 | [
"MIT"
] | 51 | 2016-02-02T00:51:24.000Z | 2022-02-03T21:46:20.000Z | lib/mixer.py | voc/multiview-monitor | b1435d6613882e3ebbc05589e5265fe596fbfed2 | [
"MIT"
] | null | null | null | lib/mixer.py | voc/multiview-monitor | b1435d6613882e3ebbc05589e5265fe596fbfed2 | [
"MIT"
] | 5 | 2017-02-03T11:23:18.000Z | 2021-06-21T15:49:28.000Z | #!/usr/bin/python3
import os, logging, gi, math
from gi.repository import Gst
# import library components
from lib.config import Config
class Mixer(object):
output_width = 0
output_height = 0
def __init__(self):
self.log = logging.getLogger('Mixer')
self.sources = []
def append(self, source):
self.sources.append(source)
def configure(self):
grid = Config.get('output', 'grid')
grid_width, grid_height = [int(n) for n in grid.split('x', 1)]
self.log.info('Configuring grid of %ux%u tiles', grid_width, grid_height)
# intervideosrc(es) -> videomixer -> intervideosink
pipeline = """
compositor name=mix
"""
pos_x = 0
pos_y = 0
col_w = 0
for tile_x in range(0, grid_width):
pos_y = 0
pos_x += col_w
col_w = 0
self.log.debug('')
for tile_y in range(0, grid_height):
index = tile_x * grid_height + tile_y
source = self.sources[index]
self.log.debug('Placing tile #%2u %u/%u of type %10s (size: %4u/%4upx) at %4u/%4upx in the viewport',
index, tile_x, tile_y,
source.type, source.width, source.height,
pos_x, pos_y)
pipeline += """
sink_{index}::xpos={x} sink_{index}::ypos={y} sink_{index}::width={width} sink_{index}::height={height}
""".format(
index=index,
x=pos_x,
y=pos_y,
width=source.width,
height=source.height,
)
pos_y += source.height
col_w = max(col_w, source.width)
self.log.debug('')
self.output_width = pos_x + col_w
self.output_height = pos_y
self.log.info('Calculated final grid-size to be %ux%upx',
self.output_width, self.output_height)
pipeline += """
! intervideosink channel=out
""".format(
)
for source in self.sources:
pipeline += """
intervideosrc channel=in_{name} !
video/x-raw,width={width},height={height} !
textoverlay text={name} font-desc="Normal 40" !
mix.
""".format(
name=source.name,
width=source.width,
height=source.height,
)
self.log.debug('Configured Mix-Pipeline:\n%s', pipeline)
self.pipeline = Gst.parse_launch(pipeline)
def start(self):
self.log.debug('Starting Mix-Pipeline')
self.pipeline.set_state(Gst.State.PLAYING)
| 22.936842 | 108 | 0.654429 | 320 | 2,179 | 4.309375 | 0.315625 | 0.040609 | 0.04351 | 0.027556 | 0.049311 | 0.049311 | 0 | 0 | 0 | 0 | 0 | 0.011448 | 0.198256 | 2,179 | 94 | 109 | 23.180851 | 0.777905 | 0.04268 | 0 | 0.231884 | 0 | 0.028986 | 0.263082 | 0.067691 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.043478 | 0 | 0.144928 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
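A sketch of driving the Mixer above; the sources are stand-ins exposing only the attributes the class reads (name, type, width, height), and it assumes Gst.init() has already run and that config.ini carries an output/grid entry such as 2x2.

from collections import namedtuple

# stand-in for the project's real source objects (attribute names taken from configure())
FakeSource = namedtuple('FakeSource', 'name type width height')

mixer = Mixer()
for name in ('cam1', 'cam2', 'slides', 'stream'):
    mixer.append(FakeSource(name, 'test', 640, 360))  # fills a 2x2 grid of 640x360 tiles

mixer.configure()  # builds and parses the compositor pipeline
mixer.start()      # sets the pipeline to PLAYING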
0f471508739ee756f36bead1b4722b0b4521114d | 2,747 | py | Python | src/parse/city_extractor.py | hse-ml-da/drink_cider | 96e76c8f1429b7776b3b9055ed98b4835dd2b6a9 | [
"Apache-2.0"
] | 1 | 2021-06-20T17:29:16.000Z | 2021-06-20T17:29:16.000Z | src/parse/city_extractor.py | hse-ml-da/drink_cider | 96e76c8f1429b7776b3b9055ed98b4835dd2b6a9 | [
"Apache-2.0"
] | null | null | null | src/parse/city_extractor.py | hse-ml-da/drink_cider | 96e76c8f1429b7776b3b9055ed98b4835dd2b6a9 | [
"Apache-2.0"
] | null | null | null | from os.path import join
from typing import Optional
from natasha import Doc
from natasha.grammars.addr import Settlement, INT, DOT, TITLE, NOUN, ADJF, DASH
from yargy import Parser, or_, rule, and_
from yargy.pipelines import morph_pipeline
from yargy.predicates import in_caseless, caseless, normalized, dictionary
from yargy.rule import InterpretationRule
class CityExtractor:
__city_abbreviations = {
"Москва": ["мск"],
"Санкт-Петербург": ["спб", "питер", "петербург", "расчленинград"],
"Новосибирск": ["нск"],
"Екатеринбург": ["екб"],
}
__simple_city_names_file = join("src", "resources", "simple_city_names.txt")
__complex_city_names_file = join("src", "resources", "complex_city_names.txt")
def __init__(self):
yargi_interpolation_rule = self.__rebuild_yargi_parser_rules()
self.__yargi_parser = Parser(yargi_interpolation_rule)
def extract_city(self, message: Doc) -> Optional[str]:
for span in message.spans:
if span.type == "LOC":
return self.__back_translation(span.normal)
for token in message.tokens:
if self.__yargi_parser.match(token.lemma) is not None:
return self.__back_translation(token.lemma)
return None
def __back_translation(self, city: str) -> str:
for name, abbreviation in self.__city_abbreviations.items():
if city.lower() in abbreviation:
return name
return city
def __rebuild_yargi_parser_rules(self) -> InterpretationRule:
with open(self.__simple_city_names_file, "r") as f:
simple_city_names = dictionary([name.strip() for name in f])
with open(self.__complex_city_names_file, "r") as f:
complex_city_names = morph_pipeline([name.strip() for name in f])
city_abbreviations = in_caseless(sum(self.__city_abbreviations.values(), []))
city_name = or_(rule(simple_city_names), complex_city_names, rule(city_abbreviations)).interpretation(
Settlement.name
)
simple_name = and_(TITLE, or_(NOUN, ADJF))
complex_name = or_(
rule(simple_name, DASH.optional(), simple_name),
rule(TITLE, DASH.optional(), caseless("на"), DASH.optional(), TITLE),
)
name = or_(rule(simple_name), complex_name)
maybe_city_name = or_(name, rule(name, "-", INT)).interpretation(Settlement.name)
city_words = or_(rule(normalized("город")), rule(caseless("г"), DOT.optional())).interpretation(
Settlement.type.const("город")
)
city = or_(rule(city_words, maybe_city_name), rule(city_words.optional(), city_name)).interpretation(Settlement)
return city
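# Usage sketch (illustrative): given a natasha Doc that already went through
# the usual segmentation/NER/morph steps, e.g. for a message containing "спб",
#   CityExtractor().extract_city(doc)  # -> "Санкт-Петербург"
# NER spans are tried first; otherwise token lemmas are matched by the parser.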
| 42.261538 | 120 | 0.666545 | 329 | 2,747 | 5.243161 | 0.303951 | 0.052174 | 0.043478 | 0.027826 | 0.129855 | 0.075362 | 0 | 0 | 0 | 0 | 0 | 0 | 0.220604 | 2,747 | 64 | 121 | 42.921875 | 0.805698 | 0 | 0 | 0.037037 | 0 | 0 | 0.061522 | 0.015653 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.148148 | 0 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f475cd22072cd673e6e509fb236783e4554da6a | 13,336 | py | Python | TEmarker_utils/TM_genos_combine_ref_close_loci.py | yanhaidong1/TEmarker | 120a9555d075c14db8b2c6a409d8df96e4acfead | [
"BSD-3-Clause"
] | 2 | 2022-01-17T19:29:58.000Z | 2022-02-23T02:03:12.000Z | TEmarker_utils/TM_genos_combine_ref_close_loci.py | yanhaidong1/TEmarker | 120a9555d075c14db8b2c6a409d8df96e4acfead | [
"BSD-3-Clause"
] | 1 | 2022-03-30T06:43:20.000Z | 2022-03-30T12:22:42.000Z | TEmarker_utils/TM_genos_combine_ref_close_loci.py | yanhaidong1/TEmarker | 120a9555d075c14db8b2c6a409d8df96e4acfead | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
##updation 122120 change the c and s cover case
##updation 121920 add the case when first line is the last one
##updation 110220 this version did not consider whether the s or c type is covered by the reference, so we need to check for that
##we need to generate a dic to store the s or c TEs that are covered by the o type and finally we will filter out these locations
##this script combines the close loci that have overlapped regions
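##worked example (made-up coordinates): two consecutive 'o' lines on chr1 at
##100-200 and 150-250 overlap, so they fall into the same dic and are merged
##into a single locus chr1 100-250; for every sample the genotype with the
##largest value among the merged lines is kept (1/1 > 0/1 > 0/0)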
##import modules
import re
def combine_close_loci (genotype_fl,working_dir):
########
##step 1: store the id line and store the same o to one dic
##generate a temp genotype file with id information
store_id_line_list = []
count = 0
with open (genotype_fl,'r') as ipt:
for eachline in ipt:
count += 1
eachline = eachline.strip('\n')
new_line = str(count) + '\t' + eachline
store_id_line_list.append(new_line)
with open (working_dir + '/temp_te_genotype_annot_add_id.txt','w+') as opt:
for eachline in store_id_line_list:
opt.write(eachline + '\n')
##initial a list contain all the TE situations
dic_list = []
##initial a dictionary to store the target lines
dic_te = {}
##get the last id
with open(working_dir + '/temp_te_genotype_annot_add_id.txt', 'r') as ipt_rmk_out:
last_line = ipt_rmk_out.readlines()[-1]
last_col = last_line.strip().split()
last_id = last_col[0]
##updation 110220
store_covered_c_with_o_loc_dic = {}
##store the te infor
with open(working_dir + '/temp_te_genotype_annot_add_id.txt', 'r') as ipt_rmk_out:
for line in ipt_rmk_out:
col = line.strip().split()
chr = col[1]
#te_nm = col[4]
id = col[0]
bg = col[2]
ed = col[3]
#dir = col[3]
#lib_bg = col[5]
#lib_ed = col[6]
#lib_left = col[7]
comb_type = col[4]
if id == str(1): ##if the id is 1, it will directly store in the dic_te
dic_te[id] = {'chr': chr, 'begin': bg, 'end': ed, 'comb_type':comb_type, 'line':line}
if id == last_id: ##should be out of the previous loop
dic_list.append(dic_te)
else: ##if the id is over 1
##if the chr is the same as the previous one
if dic_te[str(int(id) - 1)]['chr'] == chr:
if dic_te[str(int(id) - 1)]['comb_type'] == comb_type: ##if the comb_type is the same
if comb_type == 'o':
##detect whether they are overlapped
pre_st = dic_te[str(int(id) - 1)]['begin']
pre_ed = dic_te[str(int(id) - 1)]['end']
##it means there is a overlap
if int(ed) >= int(pre_st) and int(bg) <= int(pre_ed):
dic_te[id] = {'chr': chr, 'begin': bg, 'end': ed, 'comb_type': comb_type, 'line': line}
if id == last_id: ##should be out of the previous loop
dic_list.append(dic_te)
else: ##if there is no overlap we will store the previous dic and assign a new dic id
dic_list.append(dic_te)
dic_te = {}
dic_te[id] = {'chr': chr, 'begin': bg, 'end': ed, 'comb_type': comb_type, 'line': line}
if id == last_id: ##should be out of the previous loop
dic_list.append(dic_te)
else:
##directly store the previous dic and assign a new dic id
dic_list.append(dic_te)
dic_te = {}
dic_te[id] = {'chr': chr, 'begin': bg, 'end': ed, 'comb_type': comb_type, 'line': line}
if id == last_id: ##should be out of the previous loop
dic_list.append(dic_te)
##if the comb_type is not the same
##we also need to store
else:
##updation 110520
##it means the pre comb_type is o and current is c or s
pre_comb_type = dic_te[str(int(id) - 1)]['comb_type']
if pre_comb_type == 'o':
##the current is s or c
##or the pre comb_type is c or s and current o
##updation 110220 we need to check whether the c is covered with the o
##detect whether they are overlapped
pre_st = dic_te[str(int(id) - 1)]['begin']
pre_ed = dic_te[str(int(id) - 1)]['end']
##it means there is a overlap
if int(ed) >= int(pre_st) and int(bg) <= int(pre_ed):
loc_str = chr + '_' + bg + '_' + ed
store_covered_c_with_o_loc_dic[loc_str] = 1
else:
##updation 122120
##if the pre is s or c
##the current could be o
##or could be s or c
if comb_type == 'o':
##if the pre is s or c
##the current is o
pre_st = dic_te[str(int(id) - 1)]['begin']
pre_ed = dic_te[str(int(id) - 1)]['end']
if int(ed) >= int(pre_st) and int(bg) <= int(pre_ed):
loc_str = chr + '_' + pre_st + '_' + pre_ed
store_covered_c_with_o_loc_dic[loc_str] = 1
##if current is not o so it would be c or s
##the reason to case this case is because we enlarge the searching range from s and c case
##and allow there are some overlapping
##in this case, we need to follow the single case TE since the combined case
##there is no else since we will modify the searching range that allows there is no cover for the s and c
#else:
dic_list.append(dic_te)
dic_te = {}
dic_te[id] = {'chr': chr, 'begin': bg, 'end': ed, 'comb_type': comb_type, 'line': line}
if id == last_id: ##should be out of the previous loop
dic_list.append(dic_te)
else: ##if the chr is not the same as the previous one
                ##store the dic_te which has stored the te information
dic_list.append(dic_te)
dic_te = {}
dic_te[id] = {'chr': chr, 'begin': bg, 'end': ed, 'comb_type': comb_type, 'line': line}
if id == last_id: ##should be out of the previous loop
dic_list.append(dic_te)
#print(dic_list)
print(store_covered_c_with_o_loc_dic)
########
##step 2: combine the overlapped o ref loci
store_final_line_list = []
##updation 110220 write a file to show whether we remove some locations for the c and s
store_remove_c_s_line_list = []
#comb_count = 0
for each_te_dic in dic_list:
if len(each_te_dic.keys()) == 1:
final_line_no_id_str = ''
for eachid in each_te_dic: ##key is id 1,2,3,4,5
line = each_te_dic[eachid]['line']
col_line = line.strip().split()
first_item = col_line[1]
final_line_no_id_str = first_item
for eachid in each_te_dic: ##key is id 1,2,3,4,5
line = each_te_dic[eachid]['line']
col_line = line.strip().split()
for i in range (2,len(col_line)):
final_line_no_id_str = final_line_no_id_str + '\t' + col_line[i]
##updation 110220 check location of c or s TE
wrong_count = 0
for eachid in each_te_dic:
chr = each_te_dic[eachid]['chr']
bg = each_te_dic[eachid]['begin']
ed = each_te_dic[eachid]['end']
loc_infor = chr + '_' + bg + '_' + ed
if loc_infor in store_covered_c_with_o_loc_dic:
wrong_count += 1
if wrong_count == 0:
store_final_line_list.append(final_line_no_id_str)
else:
store_remove_c_s_line_list.append(final_line_no_id_str)
else:
#comb_count += 1
#print(len(each_te_dic.keys()) )
#print(each_te_dic)
##it means we need to decide a new o locus
##first we need to select the smallest bg
bg_list = []
ed_list = []
chr = ''
            ##since we need to generate a new genotype line we need to extract the first several col information
ori_id = ''
#ori_num = ''
#ori_total = ''
#ori_pro = ''
ori_geno = 'o'
#ori_sap_infor_str = ''
ori_te_nm = ''
for eachid in each_te_dic:
bg = int(each_te_dic[eachid]['begin'])
bg_list.append(bg)
ed = int(each_te_dic[eachid]['end'])
ed_list.append(ed)
chr = each_te_dic[eachid]['chr']
ori_line = each_te_dic[eachid]['line']
ori_line_col = ori_line.split()
ori_id = ori_line_col[0]
#ori_num = ori_line_col[4]
#ori_total = ori_line_col[5]
#ori_pro = ori_line_col[6]
ori_te_nm = ori_line_col[5]
#ori_sap_infor_str = ori_line_col[8]
smallest_bg = min(bg_list)
largest_ed = max(ed_list)
##second we need to store the name of the sample
store_sp_name_dic = {}
for eachid in each_te_dic:
loc_line = each_te_dic[eachid]['line']
loc_col = loc_line.split()
for i in range(6,len(loc_col)):
mt = re.match('(.+):.+',loc_col[i])
sp_nm = mt.group(1)
store_sp_name_dic[sp_nm] = 1
#print(store_sp_name_dic)
##third we need analyze on each sample
store_sp_str = '' ##DRR054229:0.0;0/15;0/0;Unknown_TE DRR054234:0.0;0/4;0/0;Unknown_TE
for eachsp in store_sp_name_dic:
#store_geno_value_list = [] ##1/1: 2, 0/1:1, 0/0:0
##then we need to compare the value of them and select the largest one
sp_id = 0 ##
store_same_sp_loc_dic = {}
##key is the sp_id (0 or 1 or 2...) and value has two part, first is the geno line (eg. DRR054241:0.0;0/7;0/0;Unknown_TE) and second is geno_value
for eachid in each_te_dic:
loc_line = each_te_dic[eachid]['line']
loc_col = loc_line.split()
for i in range(6, len(loc_col)):
mt = re.match('(.+):.+', loc_col[i])
sp_nm = mt.group(1)
if eachsp == sp_nm:
sp_id += 1
##so the sp_id is the location id since if we allow the eachsp == sp_nm so there is only one value from 6 to len(loc_col)
##since this is the genos file so there is no missing information
geno_line = loc_col[i]
geno_col = geno_line.split(';')
geno = geno_col[2]
geno_value = ''
if geno == '0/0':
geno_value = 0
if geno == '0/1':
geno_value = 1
if geno == '1/1':
geno_value = 2
#store_geno_value_list.append(geno_value)
store_same_sp_loc_dic[str(sp_id) + '_' + geno_line] = geno_value
##eg 1_DRR054229:0.0;0/15;0/0;LTR
max_geno_id = max(store_same_sp_loc_dic, key=store_same_sp_loc_dic.get)
#print(max_geno_id)
mt = re.match('.+?_(.+)',max_geno_id)
real_id = mt.group(1) ##DRR054229:0.0;0/15;0/0;LTR
#print(real_id)
store_sp_str = store_sp_str + '\t' + real_id
##generate the final line
final_line = chr + '\t' + str(smallest_bg) + '\t' + str(largest_ed) + '\t' + 'o' + '\t' + ori_te_nm + store_sp_str
store_final_line_list.append(final_line)
return (store_final_line_list,store_remove_c_s_line_list)
| 40.907975 | 162 | 0.484253 | 1,792 | 13,336 | 3.364397 | 0.128906 | 0.026538 | 0.031348 | 0.027368 | 0.458119 | 0.40073 | 0.354288 | 0.329574 | 0.322276 | 0.296401 | 0 | 0.026367 | 0.42269 | 13,336 | 325 | 163 | 41.033846 | 0.756722 | 0.270021 | 0 | 0.436364 | 0 | 0 | 0.04192 | 0.01069 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006061 | false | 0 | 0.006061 | 0 | 0.018182 | 0.006061 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f4b04a45100aa19af536e46188a9406fa130b44 | 15,027 | py | Python | MATTS/runner.py | mwesleyj/geo-deep-learning | 9bb8942156fc1a19f8f5ac911e237daa30740ca9 | [
"MIT"
] | null | null | null | MATTS/runner.py | mwesleyj/geo-deep-learning | 9bb8942156fc1a19f8f5ac911e237daa30740ca9 | [
"MIT"
] | null | null | null | MATTS/runner.py | mwesleyj/geo-deep-learning | 9bb8942156fc1a19f8f5ac911e237daa30740ca9 | [
"MIT"
] | null | null | null | import argparse, io, sys, os, h5py
from pathlib import Path
from ruamel_yaml import YAML
import csv
from PIL import Image
import numpy as np
from rich.console import Console, RenderGroup
from rich.panel import Panel
from rich.text import Text
from rich.table import Table
from rich.tree import Tree
from rich.columns import Columns
from rich import box
from torchsummary import summary as torch_summary
# from ray import tune
# from ray.tune import CLIReporter
# from ray.tune.schedulers import ASHAScheduler
# from utils.tracker import Tracker
from models.model_choice import net
from images_to_samples import main as IM_TO_SAMPLES_main
from train_segmentation import main as TRAIN_main
def make_csv_trckr(csv_filename):
"""
Open csv file and parse it, returning a list of dict.
- tif full path
- metadata yml full path (may be empty string if unavailable)
- gpkg full path
- attribute_name
- dataset (trn or tst)
"""
list_values = []
with open(csv_filename, 'r') as f:
reader = csv.reader(f)
for index, row in enumerate(reader):
row_length = len(row) if index == 0 else row_length
assert len(row) == row_length, "Rows in csv should be of same length"
row.extend([None] * (5 - len(row))) # fill row with None values to obtain row of length == 5
list_values.append({'tif': row[0], 'meta': row[1], 'gpkg': row[2], 'attribute_name': row[3], 'dataset': row[4]})
assert Path(row[0]).is_file(), f'Tif raster not found "{row[0]}"'
if row[2]:
assert Path(row[2]).is_file(), f'Gpkg not found "{row[2]}"'
assert isinstance(row[3], str)
try:
# Try sorting according to dataset name (i.e. group "train", "val" and "test" rows together)
list_values = sorted(list_values, key=lambda k: k['dataset'])
except TypeError:
        pass  # keep the original row order when some dataset fields are None
return list_values
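# Illustrative row (hypothetical paths): "/data/img1.tif,,/data/labels.gpkg,class,trn"
# parses to {'tif': '/data/img1.tif', 'meta': '', 'gpkg': '/data/labels.gpkg',
#            'attribute_name': 'class', 'dataset': 'trn'}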
def read_params(param_file):
    yaml = YAML()
    yaml.preserve_quotes = True
    # honour the path passed in (was hard-coded to the travis CI config)
    with open(param_file) as fp:
        data = yaml.load(fp)
    return data
def write_params(param_file, data):
    yaml = YAML()
    with open(param_file, 'w') as fp:
        yaml.dump(data, fp)
def save_samp_ims(data_location, experiment_dir, set):
dataset_ims_to_show = ('map_img', 'sat_img')
f = h5py.File(data_location + '\\' + experiment_dir + '\\' + set + '_samples.hdf5', 'r')
for dataset_name in dataset_ims_to_show:
dataset = f[dataset_name]
for imN in range(dataset.shape[0]):
print(dataset_name)
print(dataset[imN,...].shape[0])
sample_html = open('hello.html', 'w+')
sample_html.close()
if __name__ == '__main__':
# 0) read in params
parser = argparse.ArgumentParser(description='Sample preparation')
parser.add_argument('ParamFile', metavar='DIR',help='Path to training parameters stored in yaml')
args = parser.parse_args()
param_path = Path(args.ParamFile)
print(args.ParamFile)
params = read_params(args.ParamFile)
# 1) options
#-----------------------------------------------------------------------------------------------------------------------
OPTS = {'HPC' : False,
'show model layers' : False,
'vis_samples' : True,
'output_html' : 'test',
'out_html_dir' : ''}
# config = {"l1": tune.sample_from(lambda _: 2**np.random.randint(2, 9)),
# "l2": tune.sample_from(lambda _: 2**np.random.randint(2, 9)),
# "lr": tune.loguniform(1e-4, 1e-1),
# "batch_size": tune.choice([2, 4, 8, 16])}
path_name = 'samples'+ \
str(params['global']['samples_size'])+ \
'_overlap'+ \
str(params['sample']['overlap'])+ \
'_min-annot'+ \
str(params['sample']['sampling_method']['min_annotated_percent'])+ \
'_'+\
str(params['global']['number_of_bands'])+ \
'bands_'+ \
str(params['global']['mlflow_experiment_name'])
# 2) set up console
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
console = Console(record=True) # width=...
if OPTS['HPC']: sys.stdout = open(os.devnull, "w")
# 3) run im-samp & prints
#-----------------------------------------------------------------------------------------------------------------------
console.print(' DEBUG = ', params['global']['debug_mode'], style='bold purple', justify='left')
console.print(' ',justify='center',style='on #FFFFFF')
console.print(' ',justify='center',style='on #FFFFFF')
console.print(' ',justify='left',style='bold #FFFFFF on #000000')
console.print('STEP 1:',justify='center',style='bold #FFFFFF on #000000')
console.print('MAKE SAMPLES',justify='center',style='bold #FFFFFF on #000000')
console.print(' ',justify='left',style='bold #FFFFFF on #000000')
console.print(' ',justify='center',style='on #FFFFFF')
console.print(' ',justify='center',style='on #FFFFFF')
# info Panel
txt = Text('parent dir =\t')
txt.append(params['global']['data_path'])
txt.append('\nsmples dir =\t')
txt.append(path_name)
txt.append('\ncsv =\t')
txt.append(params['sample']['prep_csv_file'])
params['sample']['prep_csv_file'] = 'C:/Users/muzwe/Documents/GitHub'
if os.path.isdir(Path(params['global']['data_path']+'/'+path_name)):
console.print(Panel(txt,title='NOT, prcessing new Samples', style='red'))
else:
console.print(Panel(txt,title='YES, prcessing new Samples', style='green'))
IM_TO_SAMPLES_main(params, console)
# output data panel
trees = []
num_samples = {}
for sN, set in enumerate(['trn', 'tst', 'val']):
trees.append(Tree(set, style='color('+str(sN+2)+')'))
with h5py.File(params['global']['data_path'] + '/' + path_name + '/' + set + '_samples.hdf5', 'r') as f:
for dataset_name in ('map_img', 'sat_img'):
dataset = f[dataset_name]
new = trees[sN].add(dataset_name)
new.add('[white]'+str(dataset.shape[0]))
new.add(str(dataset.shape))
num_samples[set] = dataset.shape[0]
console.print(Panel(Columns((trees[0], trees[1], trees[2]), equal=True, expand=True), title='Smples output'))
# 4) run training
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
console.print(' ',justify='center',style='on #FFFFFF')
console.print(' ',justify='center',style='on #FFFFFF')
console.print(' ',justify='left',style='bold #FFFFFF on #000000')
console.print('STEP 2:',justify='center',style='bold #FFFFFF on #000000')
console.print('TRAIN',justify='center',style='bold #FFFFFF on #000000')
console.print(' ',justify='left',style='bold #FFFFFF on #000000')
console.print(' ',justify='center',style='on #FFFFFF')
console.print(' ',justify='center',style='on #FFFFFF')
# assert model info
# console.print(' ',justify='center',style='on #FFFFFF')
# console.print(' model = ',params['global']['model_name'],justify='center',style='bold #FFFFFF on #000000')
# console.print(' ',justify='center',style='on #FFFFFF')
#
# if params['global']['model_name'] == 'deeplabv3_resnet101':
# table = Table(title='[bold]deeplabv3_resnet101', show_lines=True, expand=True)
# table.add_column('required stat',justify="center", style="cyan", no_wrap=True)
# table.add_column('[#787878]op', justify="center", style="cyan", no_wrap=True)
# table.add_column('requirement',justify="center", style="cyan", no_wrap=True)
# table.add_column('assert',justify="center", style="cyan", no_wrap=True)
#
# table.add_row('range', '=', str([0, 1]), str([0, 1]==params['global']['scale_data']))
# table.add_row('mean', '=', str([0.485, 0.456, 0.406]))#, str(np.equal([0.485, 0.456, 0.406], params['global']['scale_data'])))
# table.add_row('std', '=', str([0.229, 0.224, 0.225]))#, str(np.equal([0.229, 0.224, 0.225], params['global']['scale_data'])))
# table.add_row('min_pixel_res', '>=', str(224))#,str(224>=0))
#
# console.print(table)
# console.print(' ',justify='center',style='on #FFFFFF')
# info Panel
list = Table(expand=True, show_lines=True, style="orchid1")
list.add_column('[bold]catagory',justify='center', no_wrap=False)
list.add_column('[bold]path',justify='center', no_wrap=False)
list.add_column('[bold]exists?',justify='center', no_wrap=False)
list.add_row('parent dir',
str(params['global']['data_path']),
str(Path(params['global']['data_path']).is_dir()))
list.add_row('samples dir',
str(path_name),
str(Path(params['global']['data_path']).joinpath(path_name).is_dir()))
list.add_row('model dir',
'model_' + str(params['global']['model_output_dir']),
str(Path(params['global']['data_path']).joinpath(path_name).joinpath('model_' + str(params['global']['model_output_dir'])).is_dir()))
txt = Text(justify='center')
# txt.append_text(Text('model = ' + str(params['global']['model_name']) + '\n', style='bold cyan2'))
# txt.append_text(Text('loss = ' + str(params['training']['loss_fn']) + '\n', style='bold cyan2'))
# txt.append_text(Text('optmzr = ' + str(params['training']['optimizer']) + '\n', style='bold cyan2'))
def list_layers(model, count):
modules = dict(model.named_modules())
keys = []
for key in modules:
if key == '': continue
console.print(key, justify='right')#, style='color('+str(count)+')')
console.print(modules[key], justify='left')#, style='color('+str(count)+')')
# console.print(dict(modules[key].named_modules()), justify='center', style='color('+str(count)+')')
console.print(count, justify='center', style='on color('+str(count)+')')
list_layers(modules[key], count+1)
console.print(count, justify='center', style='on color('+str(count)+')')
# model layers
if OPTS['show model layers']:
txt.append('\n\n')
model, model_name, criterion, optimizer, lr_scheduler = net(params, params['global']['num_classes']+1)
# console.print(len(dict(model.named_modules())))
# console.print(dict(model.named_modules()))
# console.print()
list_layers(model, 0)
sys.exit()
try:
summary = torch_summary(model, (params['global']['number_of_bands'], params['global']['samples_size'], params['global']['samples_size']))
table = Table(title=params['global']['model_name'], expand=True)
table.add_column("Layer (type)", justify="center", style="bright_cyan", no_wrap=True)
table.add_column("Output Shape", justify="center", style="bright_cyan", no_wrap=True)
table.add_column("Param #", justify="center", style="bright_cyan", no_wrap=True)
for layer in summary:
table.add_row(layer, str(summary[layer]["output_shape"]), "{0:,}".format(summary[layer]["nb_params"]))
console.print(Panel(RenderGroup(txt,table, summary['final_summary']),
title='pre-training info', box=box.DOUBLE_EDGE, style="magenta1"))
console.print(str(model))
except AttributeError:
console.print(Panel(RenderGroup(txt,Text('model = ' + str(params['global']['model_name']))),
title='pre-training info', box=box.DOUBLE_EDGE, style="magenta1"))
console.print(str(model))
else:
console.print(Panel(RenderGroup(list, txt),
title='pre-training info', box=box.DOUBLE_EDGE, style="magenta1"))
changes = {}
changes['learning_rate'] = [0.0001]
changes['weight_decay'] = [0]
changes['step_size'] = [4]
changes['gamma'] = [0.9]
experiments = Table('exp. num.', 'model', 'optimzier', 'loss func',
'learning_rate', 'weight_decay', 'step_size', 'gamma',
title='experiments', expand=True, style='purple')
experiments.add_row(str(1),
str(params['training']['learning_rate']),
str(params['training']['weight_decay']),
str(params['training']['step_size']),
str(params['training']['gamma']))
console.print(experiments)
for change in changes:
params['training'][change] = changes[change][0]
# if params['training']['loss_fn'] == 'Lovasz':
# params['training']['class_weights'] = None
trckr = h5py.File('output_path', 'w')
# for set in ['trn', 'tst', 'val']:
# trckr.create_group(set)
    # h5py needs a shape or data here; resizable empty 1-D datasets are an
    # assumed layout (the tracker presumably appends per-epoch metrics later)
    trckr.create_dataset('acc', shape=(0,), maxshape=(None,))
    trckr.create_dataset('pers', shape=(0,), maxshape=(None,))
    trckr.create_dataset('iou', shape=(0,), maxshape=(None,))
    trckr.create_dataset('fscore', shape=(0,), maxshape=(None,))
write_params(args.ParamFile, params)
TRAIN_main(params, param_path, console, trckr) # TODO: make sure model_NAME doesnt exist already
#-----------------------------------------------------------------------------------------------------------------------
# console.export_html(clear=False)
# console.save_html(OPTS['output_html']+'.html', clear=False)
# #-----------------------------------------------------------------------------------------------------------------------
#
# from rich.tree import Tree
# tree = Tree("Rich Tree")
# baz_tree = tree.add("baz")
# tree.add("baz")
# baz_tree.add("[red]Red").add("[green]Green").add("[blue]Blue")
# # console.print(tree)
#
# #-----------------------------------------------------------------------------------------------------------------------
#
# from rich.columns import Columns
# columns = Columns((tree, tree), equal=True, expand=True)
# # console.print(columns)
#
# #-----------------------------------------------------------------------------------------------------------------------
#
# from rich.panel import Panel
# panel = Panel(columns, title='[bold]just an example')
#
# console.print(panel)
#
# #-----------------------------------------------------------------------------------------------------------------------
#
# from rich.table import Table
# table = Table(title="[bold]\nThe Worst Star Wars[/bold] Movies", show_lines=True)
# table.add_column("Released", justify="center", style="cyan", no_wrap=True)
#
# table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker")
# table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
# console.print(table) | 43.68314 | 150 | 0.562854 | 1,780 | 15,027 | 4.620225 | 0.202247 | 0.062743 | 0.059095 | 0.031615 | 0.366367 | 0.307758 | 0.266537 | 0.234801 | 0.222398 | 0.177408 | 0 | 0.018736 | 0.211486 | 15,027 | 344 | 151 | 43.68314 | 0.675331 | 0.320689 | 0 | 0.151042 | 0 | 0 | 0.21223 | 0.017786 | 0 | 0 | 0 | 0.002907 | 0.020833 | 1 | 0.026042 | false | 0 | 0.088542 | 0 | 0.125 | 0.171875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f4fe424e452a2068dbc9a4317c9b75f67f3c367 | 1,157 | py | Python | vaegan/utils/progress.py | amirjaber/vaegan | 6b8f89f9c70b384d88158822f1a9beeaba5802f0 | [
"MIT"
] | 101 | 2016-03-20T04:29:16.000Z | 2022-02-16T05:00:43.000Z | vaegan/utils/progress.py | amirjaber/vaegan | 6b8f89f9c70b384d88158822f1a9beeaba5802f0 | [
"MIT"
] | 4 | 2017-02-14T01:20:49.000Z | 2018-06-04T04:17:33.000Z | vaegan/utils/progress.py | amirjaber/vaegan | 6b8f89f9c70b384d88158822f1a9beeaba5802f0 | [
"MIT"
] | 19 | 2016-07-29T12:32:39.000Z | 2021-03-04T11:53:17.000Z | #coding : utf-8
import sys
class Progress( object ):
def __init__( self, max_count, size ):
if size <= max_count:
self.__size = size
else:
self.__size = max_count
self.__max_count = max_count
self.__sep = int(max_count/size) + 1
self.__count = 0
def prog( self ):
if int( self.__count % self.__sep ) != 0:
self.__count += 1
return
p = int( self.__count / self.__sep ) + 1
s = u'|' + u'=' * p + u' ' * (self.__size-p) + u'| %d/%d' \
% (self.__count,self.__max_count)
sys.stdout.write("\r%s" % s)
sys.stdout.flush()
self.__count += 1
def end( self ):
self.__count = 0
p = self.__size
s = u'|' + u'=' * p + u' ' * (self.__size-p) + u'| %d/%d' \
% (self.__max_count ,self.__max_count)
sys.stdout.write("\r%s" % s)
sys.stdout.flush()
        sys.stdout.write("\n")  # final newline (version-agnostic form of the old py2 print)
if __name__ == '__main__':
a = range(1000000)
prog = Progress(len(a),50)
for e in range(10):
for i,v in enumerate(a):
prog.prog()
prog.end()
| 25.711111 | 67 | 0.491789 | 154 | 1,157 | 3.311688 | 0.298701 | 0.141176 | 0.117647 | 0.1 | 0.390196 | 0.270588 | 0.270588 | 0.270588 | 0.270588 | 0.270588 | 0 | 0.025435 | 0.354365 | 1,157 | 44 | 68 | 26.295455 | 0.657296 | 0.0121 | 0 | 0.285714 | 0 | 0 | 0.031524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.028571 | 0 | 0.171429 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f5147d3f4e60c38090cf067509503b19803e937 | 1,124 | py | Python | metadata-ingestion/tests/unit/stateful_ingestion/state/test_sql_common_state.py | bskim45/datahub | c10456d2bcc0f41d4b8361768e1e07ad0eb79f37 | [
"Apache-2.0"
] | 1,603 | 2016-03-03T17:21:03.000Z | 2020-01-22T22:12:02.000Z | metadata-ingestion/tests/unit/stateful_ingestion/state/test_sql_common_state.py | bskim45/datahub | c10456d2bcc0f41d4b8361768e1e07ad0eb79f37 | [
"Apache-2.0"
] | 1,157 | 2016-03-03T19:29:22.000Z | 2020-01-20T14:41:59.000Z | metadata-ingestion/tests/unit/stateful_ingestion/state/test_sql_common_state.py | bskim45/datahub | c10456d2bcc0f41d4b8361768e1e07ad0eb79f37 | [
"Apache-2.0"
] | 570 | 2016-03-03T17:21:05.000Z | 2020-01-21T06:54:10.000Z | from datahub.emitter.mce_builder import make_container_urn, make_dataset_urn
from datahub.ingestion.source.state.sql_common_state import (
BaseSQLAlchemyCheckpointState,
)
def test_sql_common_state() -> None:
state1 = BaseSQLAlchemyCheckpointState()
test_table_urn = make_dataset_urn("test_platform", "db1.test_table1", "test")
state1.add_table_urn(test_table_urn)
test_view_urn = make_dataset_urn("test_platform", "db1.test_view1", "test")
state1.add_view_urn(test_view_urn)
test_container_urn = make_container_urn("test_container")
state1.add_container_guid(test_container_urn)
state2 = BaseSQLAlchemyCheckpointState()
table_urns_diff = list(state1.get_table_urns_not_in(state2))
assert len(table_urns_diff) == 1 and table_urns_diff[0] == test_table_urn
view_urns_diff = list(state1.get_view_urns_not_in(state2))
assert len(view_urns_diff) == 1 and view_urns_diff[0] == test_view_urn
container_urns_diff = list(state1.get_container_urns_not_in(state2))
assert (
len(container_urns_diff) == 1 and container_urns_diff[0] == test_container_urn
)
| 38.758621 | 86 | 0.774021 | 160 | 1,124 | 4.96875 | 0.25 | 0.090566 | 0.05283 | 0.064151 | 0.260377 | 0.181132 | 0.090566 | 0.090566 | 0 | 0 | 0 | 0.021583 | 0.134342 | 1,124 | 28 | 87 | 40.142857 | 0.795478 | 0 | 0 | 0 | 0 | 0 | 0.068505 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f5202b60405893a1a16c170a0706a914d313c0f | 2,636 | py | Python | inn/inn_hotels/doctype/ar_city_ledger_invoice/ar_city_ledger_invoice.py | vinhnguyent090/front-desk | 7384642e9206e30855986465a7ef63c8fd76ef2a | [
"MIT"
] | 4 | 2021-08-19T03:33:36.000Z | 2021-08-28T16:37:52.000Z | inn/inn_hotels/doctype/ar_city_ledger_invoice/ar_city_ledger_invoice.py | vinhnguyent090/front-desk | 7384642e9206e30855986465a7ef63c8fd76ef2a | [
"MIT"
] | 98 | 2020-02-24T08:12:47.000Z | 2021-08-21T07:54:03.000Z | inn/inn_hotels/doctype/ar_city_ledger_invoice/ar_city_ledger_invoice.py | vinhnguyent090/front-desk | 7384642e9206e30855986465a7ef63c8fd76ef2a | [
"MIT"
] | 13 | 2021-01-24T18:08:43.000Z | 2022-03-29T09:23:25.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Core Initiative and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ARCityLedgerInvoice(Document):
pass
@frappe.whitelist()
def get_payments_accounts(mode_of_payment):
account = frappe.db.get_value('Mode of Payment Account', {'parent': mode_of_payment, 'company': frappe.get_doc(
"Global Defaults").default_company}, "default_account")
against = frappe.db.get_list('Account', filters={'account_number': '1133.001'})[0].name
return account, against
@frappe.whitelist()
def make_payment(id):
doc = frappe.get_doc('AR City Ledger Invoice', id)
arc_id = []
folio_list = doc.folio
if len(folio_list) == 0:
frappe.msgprint("Please add the Folio to be Collected first before making payment")
else:
for folio in folio_list:
arc_id.append(folio.ar_city_ledger_id)
payments = doc.get('payments')
return_status = 1
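		# each payment row becomes a balanced double-entry Journal Entry:
		# debit the mode-of-payment account, credit the AR control account
		# (1133.001 from get_payments_accounts), both tagged to the customer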
for payment in payments:
remark = 'AR City Ledger Invoice Payments: ' + payment.name
doc_je = frappe.new_doc('Journal Entry')
doc_je.title = payment.name
doc_je.voucher_type = 'Journal Entry'
doc_je.naming_series = 'ACC-JV-.YYYY.-'
doc_je.posting_date = payment.payment_reference_date
doc_je.company = frappe.get_doc('Global Defaults').default_company
doc_je.total_amount_currency = frappe.get_doc('Global Defaults').default_currency
doc_je.remark = remark
doc_je.user_remark = remark
doc_jea_debit = frappe.new_doc('Journal Entry Account')
doc_jea_debit.account = payment.account
doc_jea_debit.debit = payment.payment_amount
doc_jea_debit.debit_in_account_currency = payment.payment_amount
doc_jea_debit.party_type = 'Customer'
doc_jea_debit.party = doc.customer_id
doc_jea_debit.user_remark = remark
doc_jea_credit = frappe.new_doc('Journal Entry Account')
doc_jea_credit.account = payment.account_against
doc_jea_credit.credit = payment.payment_amount
doc_jea_credit.credit_in_account_currency = payment.payment_amount
doc_jea_credit.party_type = 'Customer'
doc_jea_credit.party = doc.customer_id
doc_jea_credit.user_remark = remark
doc_je.append('accounts', doc_jea_debit)
doc_je.append('accounts', doc_jea_credit)
doc_je.save()
doc_je.submit()
if frappe.db.get_value('Journal Entry', {'title': payment.name}, 'remark') == remark:
return_status = 2
if return_status == 1:
doc.status = 'Paid'
doc.save()
for arc in arc_id:
doc_arc_ledger = frappe.get_doc('AR City Ledger', arc)
doc_arc_ledger.is_paid = 1
doc_arc_ledger.save()
return return_status | 34.233766 | 112 | 0.76214 | 393 | 2,636 | 4.811705 | 0.272265 | 0.050767 | 0.046536 | 0.048652 | 0.328398 | 0.268112 | 0.13432 | 0.13432 | 0 | 0 | 0 | 0.007871 | 0.132398 | 2,636 | 77 | 113 | 34.233766 | 0.818977 | 0.046282 | 0 | 0.032258 | 0 | 0 | 0.162485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0.016129 | 0.048387 | 0 | 0.129032 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f541dfb90c1bf063d05a6e72bfe89bac2f84a44 | 1,572 | py | Python | examples/maths/__init__.py | tryal-ai/mnkytw | 28d78d0f378a985e4c5601bd28f8f5d2df57848e | [
"Apache-2.0"
] | null | null | null | examples/maths/__init__.py | tryal-ai/mnkytw | 28d78d0f378a985e4c5601bd28f8f5d2df57848e | [
"Apache-2.0"
] | null | null | null | examples/maths/__init__.py | tryal-ai/mnkytw | 28d78d0f378a985e4c5601bd28f8f5d2df57848e | [
"Apache-2.0"
] | null | null | null | import mnkytw
from examples.maths.IntegerMatch import IntegerMatch
from examples.maths.FloatMatch import FloatMatch
# Create a single unified matcher that attempts to identify
# an integer or a float
Constants = mnkytw.MatchAlternation([
FloatMatch(),
IntegerMatch()
])
# symbols
Symbols = mnkytw.MatchAlternation([
mnkytw.LiteralMatch("+"),
mnkytw.LiteralMatch("-"),
mnkytw.LiteralMatch("*"),
mnkytw.LiteralMatch("/")
])
class OperationMatch:
def __init__(self):
self.matcher = mnkytw.MatchAlternation([
# Either this is a chain of operations
mnkytw.MatchJoin([
Constants,
Symbols,
self
]),
# Or a constant
Constants
])
def parser(self, body : str, hard_fail = True):
result = self.matcher.parser(body, hard_fail)
if not result:
return result
#we can infer that it matched the "MatchJoin" if it's a list
if type(result[0]) is list:
# make a dictionary that shows the lhs and rhs and the symbol
return [{
'lhs': result[0][0],
'symbol': result[0][1],
'rhs': result[0][2]
}, result[1]]
else:
#Otherwise it matched the constant so just return the constant
return result
Operation = OperationMatch()
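# Grammar note: the join (Constants Symbols self) is right-recursive, so there
# is no operator precedence -- "3+4*5-6" parses as 3+(4*(5-6)), i.e. nested
# {'lhs', 'symbol', 'rhs'} dictionaries that associate to the right.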
print(mnkytw.peg_parse("3+4", Operation))
print(mnkytw.peg_parse("3+4+5", Operation))
print(mnkytw.peg_parse("3+4*5-6", Operation)) | 28.071429 | 74 | 0.585878 | 175 | 1,572 | 5.211429 | 0.445714 | 0.078947 | 0.078947 | 0.118421 | 0.169956 | 0.169956 | 0.067982 | 0.067982 | 0 | 0 | 0 | 0.015712 | 0.311705 | 1,572 | 56 | 75 | 28.071429 | 0.827172 | 0.20229 | 0 | 0.128205 | 0 | 0 | 0.02488 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.076923 | 0 | 0.230769 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f5698174d653f570858341752fa6fba2fa609e8 | 354 | py | Python | WEEKS/CD_Sata-Structures/general/practice/BeautifulText/solution.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/general/practice/BeautifulText/solution.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/general/practice/BeautifulText/solution.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | def beautifulText(inputString, l, r):
for w in range(l, r + 1):
i = w
while i < len(inputString):
if inputString[i] != " ":
break
i += w + 1
if i == len(inputString):
return True
return False
s = "Look at this example of a correct text"
print(beautifulText(s, 5, 15))
| 23.6 | 44 | 0.50565 | 47 | 354 | 3.808511 | 0.638298 | 0.022346 | 0.167598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022936 | 0.384181 | 354 | 14 | 45 | 25.285714 | 0.798165 | 0 | 0 | 0 | 0 | 0 | 0.110169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.25 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f591d0adbc9428bd5196057d6b311feb4ee4e6e | 20,015 | py | Python | api/stacking/stacking.py | ruizca/rapidxmm | 4b2dacefcb73464ad4dfd6d404b5795a15046ffc | [
"MIT"
] | 3 | 2021-06-24T07:53:15.000Z | 2022-03-18T12:03:26.000Z | api/stacking/stacking.py | ruizca/rapidxmm | 4b2dacefcb73464ad4dfd6d404b5795a15046ffc | [
"MIT"
] | null | null | null | api/stacking/stacking.py | ruizca/rapidxmm | 4b2dacefcb73464ad4dfd6d404b5795a15046ffc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 16:13:39 2021
@author: ruizca
"""
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord, FK5
from astropy.table import Table, unique, join
from astropy.utils.console import color_print
from astropy_healpix import HEALPix
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
from matplotlib.patches import Polygon
from mocpy import MOC
from mocpy.mocpy import flatten_pixels
from scipy.stats import median_abs_deviation
from tqdm.auto import tqdm
from .. import rapidxmm
from .ecf import ECF
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
#plt.rc('text', usetex=True)
plt.rcParams['mathtext.fontset'] = "stix"
plt.rcParams['mathtext.rm'] = "STIXGeneral"
plt.rcParams['font.family'] = "STIXGeneral"
plt.rcParams["axes.formatter.use_mathtext"] = True
# Numpy random number generator
rng = np.random.default_rng()
def get_neighbours(npixel, hp, level=5):
# The central pixel is the first one
# The output of hp.neighbours always follows the
# same order, starting SW and rotating clockwise
neighbours_level = [None] * (level + 1)
neighbours_level[0] = [npixel]
npixel_neighbours = [npixel]
for i in range(1, level + 1):
neighbours_level[i] = hp.neighbours(neighbours_level[i - 1]).flatten()
npixel_neighbours += list(neighbours_level[i])
sorted_neighbours = Table()
sorted_neighbours["npixel"] = npixel_neighbours
sorted_neighbours["order"] = range(len(npixel_neighbours))
sorted_neighbours = unique(sorted_neighbours, keys=["npixel"])
sorted_neighbours.sort("order")
return sorted_neighbours
def get_bkg_npixels(src_center, nside, npixels=100):
order = np.log2(nside).astype(int)
bkg_moc_outer = MOC.from_cone(src_center.ra, src_center.dec, 120*u.arcsec, order)
bkg_moc_inner = MOC.from_cone(src_center.ra, src_center.dec, 60*u.arcsec, order)
bkg_moc = bkg_moc_outer.difference(bkg_moc_inner)
bkg_npixels = flatten_pixels(bkg_moc._interval_set._intervals, order)
return rng.choice(bkg_npixels, size=npixels, replace=False).tolist()
def get_bkg_data(npixel, obsid, hp):
src_center = hp.healpix_to_skycoord(npixel)
bkg_npixels = get_bkg_npixels(src_center, hp.nside, npixels=100)
bkg_data = rapidxmm.query_npixels(
bkg_npixels, obstype="pointed", instrum="PN"
)
mask = bkg_data["obsid"] == obsid
bkg_data = bkg_data[mask]
if len(bkg_data) < 15:
bkg_data = None
return bkg_data
def stats_bootstrap(src, bkg, exp, eef, ecf, ac=None, nbkg=None, nsim=1000):
# Calculate median and MAD for the stack using bootstraping
nstack, npixels, nbands = src.shape
cr = np.zeros((nsim, npixels, nbands))
cr_err = np.zeros((nsim, npixels, nbands))
snr = np.zeros((nsim, npixels, nbands))
texp = np.zeros((nsim, npixels, nbands))
ecf_sample = np.zeros((nsim, nbands))
# msrc = np.zeros((nsim, npixels, nbands))
# mbkg = np.zeros((nsim, npixels, nbands))
# mexp = np.zeros((nsim, npixels, nbands))
for i in range(nsim):
idx_sample = np.random.randint(nstack, size=nstack)
S = np.sum(src[idx_sample, :, :], axis=0)
B = np.sum(bkg[idx_sample, :, :], axis=0)
t = np.sum(exp[idx_sample, :, :], axis=0)
if ac is None:
Bcorr = np.sum(bkg[idx_sample, :, :] / nbkg[idx_sample, :, :], axis=0)
ac = np.ones_like(bkg)
else:
Bcorr = np.sum(ac[idx_sample, :, :] * bkg[idx_sample, :, :], axis=0)
cr[i, :, :] = (
np.sum(src[idx_sample, :, :] / eef[idx_sample, :, :], axis=0) -
np.sum(bkg[idx_sample, :, :] / eef[idx_sample, :, :], axis=0)
) / t
cr_err[i, :, :] = np.sqrt(
np.sum(src[idx_sample, :, :] / eef[idx_sample, :, :]**2, axis=0) +
np.sum(ac[idx_sample, :, :] * bkg[idx_sample, :, :] / eef[idx_sample, :, :]**2, axis=0)
) / t
snr[i, :, :] = (S - B) / np.sqrt(S + Bcorr)
#snr[i, :, :] = cr[i, :, :] / cr_err[i, :, :]
ecf_sample[i, :] = np.mean(ecf[idx_sample, :], axis=0)
# msrc[i, :, :] = np.sum(src[idx_sample, :, :], axis=0)
# mbkg[i, :, :] = np.sum(bkg[idx_sample, :, :], axis=0)
# mexp[i, :, :] = np.sum(exp[idx_sample, :, :], axis=0)
texp[i, :, :] = t
cr_median = np.nanmedian(cr, axis=0)
snr_median = np.nanmedian(snr, axis=0)
ecf_median = np.nanmedian(ecf_sample, axis=0)
texp_median = np.nanmedian(texp, axis=0)
#cr_median = np.mean(cr, axis=0)
#snr_median = np.mean(snr, axis=0)
# src_median = np.median(msrc, axis=0)
# bkg_median = np.median(mbkg, axis=0)
# exp_median = np.median(mexp, axis=0)
# kk1 = (src_median - bkg_median) / exp_median
# kk2 = np.sqrt(src_median + bkg_median) / exp_median
# kk3 = (src_median - bkg_median) / np.sqrt(src_median)
cr_mad = np.zeros((npixels, nbands))
snr_mad = np.zeros((npixels, nbands))
for i in range(nbands):
cr_mad[:, i] = median_abs_deviation(cr[:, :, i], axis=0, nan_policy="omit", scale="normal")
snr_mad[:, i] = median_abs_deviation(snr[:, :, i], axis=0, nan_policy="omit", scale="normal")
return cr_median, cr_mad, snr_median, snr_mad, ecf_median, texp_median
def flux_bootstrap(src_flux, src_flux_err, bkg_flux, bkg_flux_err, nsim=1000):
nstack, nbands = src_flux.shape
flux = np.zeros((nsim, nbands))
flux_err = np.zeros((nsim, nbands))
for i in range(nsim):
idx_sample = np.random.randint(nstack, size=nstack)
ngood = np.zeros(nbands, dtype=int)
for j in range(nbands):
good_idx = np.where(np.isfinite(src_flux[idx_sample, j]))
ngood[j] = len(good_idx[0])
flux[i, :] = (
np.nansum(src_flux[idx_sample, :], axis=0) -
np.nansum(bkg_flux[idx_sample, :], axis=0)
) / ngood
flux_err[i, :] = np.sqrt(
np.nansum(
src_flux_err[idx_sample, :]**2 +
bkg_flux_err[idx_sample, :]**2,
axis=0
)
) / ngood
flux_median = np.median(flux, axis=0)
flux_err_median = np.median(flux_err, axis=0)
flux_mad = median_abs_deviation(flux, axis=0, scale="normal")
return flux_median, flux_mad
def print_stats(cr, cr_err, snr, snr_err, texp, flux, flux_err, ebands=["6", "7", "8"]):
color_print("\nStatistics", "yellow")
color_print("----------", "yellow")
for i, eband in enumerate(ebands):
idx_max = np.argmax(cr[:, i])
cr_peak = cr[idx_max, i]
cr_peak_mad = cr_err[idx_max, i]
texp_peak = texp[idx_max, i]
idx_max = np.argmax(snr[:, i])
snr_peak = snr[idx_max, i]
snr_peak_mad = snr_err[idx_max, i]
color_print(f"Energy band {eband}:", "white")
print(f"Median net CR at peak: {cr_peak:.01e} ± {cr_peak_mad:.01e} counts/s")
print(f"Median exposure time at peak: {texp_peak:.01e} s")
if flux is not None:
f, ferr = flux[i], flux_err[i]
print(f"Median flux: {f:.01e} ± {ferr:.01e} erg/s/cm-2")
print(f"Median SNR at peak: {snr_peak:.01f} ± {snr_peak_mad:.01f}\n")
def print_params(parnames, params):
color_print("\nAverage parameters", "yellow")
color_print("------------------", "yellow")
color_print("Weighted by number of repetitions in the stack")
average_params = np.median(params, axis=0)
for name, par in zip(parnames, average_params):
color_print(f"{name}: {par:.04f}", "white")
return average_params
def plot_stack(npixels, hp, cr, snr, filename=None, scale=None):
lon, lat = hp.healpix_to_lonlat(npixels)
boundaries = hp.boundaries_lonlat(npixels, 1)
patches = []
for blon, blat in zip(*boundaries):
patches.append(Polygon(np.array([blon.value, blat.value]).T, closed=True))
if not scale:
vmin_cr, vmax_cr = cr.flatten().min(), cr.flatten().max()
vmin_snr, vmax_snr = snr.flatten().min(), snr.flatten().max()
scale = [vmin_cr, vmax_cr, vmin_snr, vmax_snr]
else:
vmin_cr, vmax_cr = scale[0], scale[1]
vmin_snr, vmax_snr = scale[2], scale[3]
norm_cr = Normalize(vmin=vmin_cr, vmax=vmax_cr)
norm_snr = Normalize(vmin=vmin_snr, vmax=vmax_snr)
fig, axs = plt.subplots(2, 3, constrained_layout=False, figsize=(5.5, 4))
for i, eband in enumerate(["6", "7", "8"]):
# Count-rate "images"
pcm_cr = axs[0, i].scatter(
lon, lat, c=cr[:, i], s=1, vmin=vmin_cr, vmax=vmax_cr
)
p = PatchCollection(patches, alpha=1)
p.set_array(cr[:, i])
p.set_norm(norm_cr)
axs[0, i].add_collection(p)
axs[0, i].set_title(f"Energy band {eband}")
axs[0, i].set_xticks([])
axs[0, i].set_yticks([])
# signal-to-noise ratio "images"
pcm_snr = axs[1, i].scatter(
lon, lat, c=snr[:, i], s=1, vmin=vmin_snr, vmax=vmax_snr
)
p = PatchCollection(patches, alpha=1)
p.set_array(snr[:, i])
p.set_norm(norm_snr)
axs[1, i].add_collection(p)
axs[1, i].set_xticks([])
axs[1, i].set_yticks([])
if i == 0:
axs[0, i].set_ylabel("Stack net CR (median)")
axs[1, i].set_ylabel("Stack SNR (median)")
plt.tight_layout()
fig.colorbar(pcm_cr, ax=axs[0, :], shrink=0.6, location='bottom', pad=0.02)
fig.colorbar(pcm_snr, ax=axs[1, :], shrink=0.6, location='bottom', pad=0.02)
if filename:
fig.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
else:
plt.show()
return scale
def plot_radial(npixels, level, hp, cr, cr_err, snr, snr_err, filename=None):
radius = list(range(level + 1))
cr_radial = np.zeros((level + 1, 3))
cr_err_radial = np.zeros((level + 1, 3))
snr_radial = np.zeros((level + 1, 3))
snr_err_radial = np.zeros((level + 1, 3))
cr_radial[0, :] = cr[0, :]
cr_err_radial[0, :] = cr_err[0, :]
snr_radial[0, :] = snr[0, :]
snr_err_radial[0, :] = snr_err[0, :]
npixel_neighbours = [npixels[0]]
for i in range(1, level + 1):
npixel_neighbours = list(set(hp.neighbours(npixel_neighbours).flatten()))
mask = [p in npixel_neighbours for p in npixels]
cr_radial[i] = np.sum(cr[mask], axis=0) / len(npixels[mask])
cr_err_radial[i] = np.sqrt(np.sum(cr_err[mask]**2, axis=0)) / len(npixels[mask])
snr_radial[i] = np.sum(snr[mask], axis=0) / len(npixels[mask])
snr_err_radial[i] = np.sqrt(np.sum(snr_err[mask]**2, axis=0)) / len(npixels[mask])
cr_min = np.nanmin(cr_radial - 1.1*cr_err_radial)
cr_max = np.nanmax(cr_radial + 1.1*cr_err_radial)
snr_min = np.nanmin(snr_radial - 1.1*snr_err_radial)
snr_max = np.nanmax(snr_radial + 1.1*snr_err_radial)
# filename_npz = filename.parent.joinpath(filename.stem + "_radial.npz")
# np.savez(
# filename_npz,
# cr_radial=cr_radial,
# cr_err_radial=cr_err_radial,
# snr_radial=snr_radial,
# snr_err_radial=snr_err_radial,
# )
fig, axs = plt.subplots(
2, 3, sharex=True, constrained_layout=False, figsize=(5.5, 3.5)
)
for i, eband in enumerate(["6", "7", "8"]):
axs[0, i].errorbar(
radius, cr_radial[:, i], yerr=cr_err_radial[:, i], fmt="o", capsize=2
)
axs[0, i].set_title(f"Energy band {eband}", size="x-small")
axs[0, i].set_ylim(cr_min, cr_max)
axs[0, i].ticklabel_format(
axis="y", style="sci", scilimits=(0,0), useMathText=True
)
axs[0, i].xaxis.offsetText.set_fontsize(8)
axs[0, i].grid(color='gray', linestyle=':')
axs[1, i].errorbar(
radius, snr_radial[:, i], yerr=snr_err_radial[:, i], fmt="o", capsize=2
)
axs[1, i].set_ylim(snr_min, snr_max)
axs[1, i].grid(color='gray', linestyle=':')
if i == 0:
axs[0, i].set_ylabel("net counts / s / pixel")
axs[1, i].set_ylabel("SNR / pixel")
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("Distance to central npixel")
plt.tight_layout()
if filename:
filename = filename.parent.joinpath(filename.stem + "_radial" + filename.suffix)
fig.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
else:
plt.show()
def stack_npixels(
npixels,
level_neighbours=5,
params=None,
max_data=1000,
calc_flux=True,
use_flagged_pixels=False,
skip_detections=False,
custom_bkg=False,
moc_masked_sources=None,
order=16,
with_plots=False,
plotfile=None,
scale=None,
):
ecf_pn = {
"6": ECF.ecf_det_eband("PN", "6"),
"7": ECF.ecf_det_eband("PN", "7"),
"8": ECF.ecf_det_eband("PN", "8"),
}
num_neighbours = sum([8*k for k in range(level_neighbours + 1)]) + 1
ebands = ["6", "7", "8"]
src_stack = np.zeros((max_data, num_neighbours, len(ebands)))
bkg_stack = np.zeros((max_data, num_neighbours, len(ebands)))
exp_stack = np.zeros((max_data, num_neighbours, len(ebands)))
eef_stack = np.ones((max_data, num_neighbours, len(ebands)))
ac_stack = np.zeros((max_data, num_neighbours, len(ebands)))
npixels_bkg_stack = np.ones((max_data, num_neighbours, len(ebands)))
ecf_stack = np.zeros((max_data, len(ebands)))
if calc_flux:
src_flux_center = np.full((max_data, len(ebands)), np.nan)
bkg_flux_center = np.full((max_data, len(ebands)), np.nan)
src_flux_err_center = np.full((max_data, len(ebands)), np.nan)
bkg_flux_err_center = np.full((max_data, len(ebands)), np.nan)
if params:
params_stack = np.zeros((max_data, len(params.colnames)))
hp = HEALPix(nside=2 ** order, order="nested", frame=FK5())
n, nsrc = 0, 0
for j, npixel in enumerate(tqdm(npixels)):
sorted_neighbours = get_neighbours(npixel, hp, level=level_neighbours)
data = rapidxmm.query_npixels(
sorted_neighbours["npixel"], obstype="pointed", instrum="PN"
)
if len(data) == 0:
continue
nsrc += 1
data = data.group_by(["obsid", "instrum"])
for group in data.groups:
data_obs_order = join(
sorted_neighbours, group, keys=["npixel"], join_type="left"
)
data_obs_order.sort("order")
if skip_detections:
if np.any(data_obs_order["band8_flags"] >= 8):
continue
if custom_bkg:
bkg_data = get_bkg_data(npixel, group["obsid"][0], hp)
if bkg_data is None:
# We couldn't find a good background region for this npixel,
# so it's rejected from the stack
continue
for i, eband in enumerate(ebands):
if use_flagged_pixels:
mask = [True] * len(sorted_neighbours)
else:
mask = data_obs_order[f"band{eband}_flags"] == 0
src_stack[n, mask, i] = data_obs_order[f"band{eband}_src_counts"][mask]
exp_stack[n, mask, i] = data_obs_order[f"band{eband}_exposure"][mask]
eef_stack[n, mask, i] = data_obs_order["eef"][mask]
ac_stack[n, mask, i] = data_obs_order["area_ratio"][mask]
if custom_bkg:
mask_bkg = bkg_data[f"band{eband}_flags"] == 0
# The same average bkg value is assigned to all npixels in the detection
bkg_counts = bkg_data[f"band{eband}_bck_counts"][mask_bkg]
bkg_stack[n, mask, i] = np.mean(bkg_counts)
npixels_bkg_stack[n, mask, i] = len(bkg_counts)
else:
bkg_stack[n, mask, i] = data_obs_order[f"band{eband}_bck_counts"][mask]
if calc_flux and np.any(mask):
ecf_stack[n, i] = ecf_pn[eband][group["filt"][0]].get_ecf(params["NHGAL"][j], 1.9)
exp = np.mean(exp_stack[n, mask, i])
ngood = len(exp_stack[n, mask, i])
src_flux_center[n, i] = (
np.sum(src_stack[n, mask, i])
/ exp / ecf_stack[n, i] / 1e11 / ngood
)
src_flux_err_center[n, i] = (
np.sqrt(np.sum(src_stack[n, mask, i]))
/ exp / ecf_stack[n, i] / 1e11 / ngood
)
if custom_bkg:
exp_bkg = np.mean(bkg_data[f"band{eband}_exposure"][mask_bkg])
ngood_bkg = len(bkg_data[f"band{eband}_exposure"][mask_bkg])
bkg_flux_center[n, i] = (
np.sum(bkg_counts)
/ exp_bkg / ecf_stack[n, i] / 1e11 / ngood_bkg
)
bkg_flux_err_center[n, i] = (
np.sqrt(np.sum(bkg_counts))
/ exp_bkg / ecf_stack[n, i] / 1e11 / ngood_bkg
)
else:
bkg_flux_center[n, i] = (
np.sum(bkg_stack[n, mask, i])
/ exp / ecf_stack[n, i] / 1e11 / ngood
)
bkg_flux_err_center[n, i] = (
np.sqrt(np.sum(bkg_stack[n, mask, i]))
/ exp / ecf_stack[n, i] / 1e11 / ngood
)
if params:
for i, col in enumerate(params.colnames):
params_stack[n, i] = params[col][j]
n += 1
src_stack = src_stack[:n, :, :]
bkg_stack = bkg_stack[:n, :, :]
exp_stack = exp_stack[:n, :, :]
ecf_stack = ecf_stack[:n, :]
if custom_bkg:
# No need to take into account the area correction when using custom
# backgrounds, since counts are extracted in regions with the same size
ac_stack = None
npixels_bkg_stack = npixels_bkg_stack[:n, :]
else:
ac_stack = ac_stack[:n, :]
npixels_bkg_stack = None
if n < 2:
        # keep the arity consistent with the 8-tuple returned at the end
        return None, None, None, None, None, scale, n, nsrc
cr, cr_mad, snr, snr_mad, ecf, texp = stats_bootstrap(
src_stack, bkg_stack, exp_stack, eef_stack, ecf_stack, ac_stack, npixels_bkg_stack, nsim=1000
)
flux, flux_mad = None, None
flux2, flux2_mad = None, None
if calc_flux:
src_flux_center = src_flux_center[:n, :]
src_flux_err_center = src_flux_err_center[:n, :]
bkg_flux_center = bkg_flux_center[:n, :]
bkg_flux_err_center = bkg_flux_err_center[:n, :]
flux, flux_mad = flux_bootstrap(
src_flux_center,
src_flux_err_center,
bkg_flux_center,
bkg_flux_err_center,
nsim=1000
)
flux2 = np.mean(cr, axis=0) / ecf / 1e11
flux2_mad = np.sqrt(np.mean(cr_mad**2, axis=0)) / ecf / 1e11
if with_plots:
scale = plot_stack(
sorted_neighbours["npixel"], hp, cr, snr, plotfile, scale
)
plot_radial(
sorted_neighbours["npixel"],
level_neighbours,
hp,
cr,
cr_mad,
snr,
snr_mad,
plotfile
)
print_stats(
cr, cr_mad, snr, snr_mad, texp, flux, flux_mad
)
if params:
average_params = print_params(params.colnames, params_stack[:n, :])
else:
average_params = None
return flux, flux_mad, flux2, flux2_mad, average_params, scale, n, nsrc
| 35.299824 | 102 | 0.579216 | 2,814 | 20,015 | 3.911869 | 0.142502 | 0.016806 | 0.01399 | 0.016533 | 0.375908 | 0.27589 | 0.206577 | 0.16379 | 0.108648 | 0.073129 | 0 | 0.019578 | 0.277792 | 20,015 | 566 | 103 | 35.362191 | 0.74175 | 0.077242 | 0 | 0.143902 | 0 | 0.002439 | 0.057422 | 0.006187 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.039024 | 0 | 0.085366 | 0.039024 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f59d5843303a8468919fb4b9547b41d7cdf2bb8 | 2,575 | py | Python | saas/data_bean.py | yongli82/CodeGenerator | 4ca9255c3c4c5392e45815fd20f605ccbbfd2325 | [
"MIT"
] | null | null | null | saas/data_bean.py | yongli82/CodeGenerator | 4ca9255c3c4c5392e45815fd20f605ccbbfd2325 | [
"MIT"
] | null | null | null | saas/data_bean.py | yongli82/CodeGenerator | 4ca9255c3c4c5392e45815fd20f605ccbbfd2325 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
import sys
reload(sys)
sys.path.append("..")
sys.setdefaultencoding('utf-8')
from project_util import *

################################################################################
# Business functions
################################################################################

#############
# API-scanning functions
#############
def is_sql_map_xml(file_path, arguments):
    return file_path.endswith(".xml") and "/sqlmap/" in file_path


def handle_sql_map_xml(file_path, arguments):
    content = read_file(file_path)
    class_set = arguments[0]
    if "</sqlMap>" not in content:
        return
    logger.info(file_path)
    # result map
    class_list = re.findall(r"""<typeAlias\s+.*?\s*type="([\w\.]+)"/>""", content, flags=re.I)
    for class_name in class_list:
        logger.info(class_name)
        class_set.add(class_name)
    class_list = re.findall(r"""<resultMap\s+.*?\s*class="([\w\.]+)">""", content, flags=re.I)
    for class_name in class_list:
        if "." in class_name:
            logger.info(class_name)
            class_set.add(class_name)


def is_data_bean(file_path, arguments):
    if file_path.endswith(".java"):
        start_pos = file_path.find("com/dianping/ba/")
        if start_pos < 0:
            return False
        class_set = arguments[0]
        class_name = file_path[start_pos:-5].replace("/", ".")
        if class_name in class_set:
            logger.info("[DATA_BEAN] %s" % class_name)
            return True
        else:
            return False
    return False


def handle_data_bean(file_path, arguments):
    content = read_file(file_path)
    if "extends DataBase" in content:
        return
    logger.info("**************Handle %s " % file_path)
    matched = re.findall("((public\s+class\s+\w+)([\s\w]+\{))", content)[0]
    line = matched[0]
    content = content.replace(line, "%s extends DataBase %s" % (matched[1], matched[2]))
    content = content.replace("implements Serializable", "")
    content = content.replace("\n", "\n\nimport com.dianping.ba.finance.expense.api.base.DataBase;\n", 1)
    write_file(file_path, content)


#############
# main
#############
if __name__ == "__main__":
    data_bean_set = set()
    scan_module(module_name="expense",
                func_match_pattern=is_sql_map_xml,
                func_handler=handle_sql_map_xml,
                args=(data_bean_set,))
    scan_module(module_name="expense",
                func_match_pattern=is_data_bean,
                func_handler=handle_data_bean,
                args=(data_bean_set,))
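
# Added note (not part of the original script): handle_data_bean rewrites each
# matching Java bean in place, turning `public class Foo ... {` into
# `public class Foo extends DataBase ... {`, dropping `implements Serializable`,
# and injecting the DataBase import right after the first (package) line.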
| 31.402439 | 105 | 0.568544 | 318 | 2,575 | 4.336478 | 0.292453 | 0.081218 | 0.026106 | 0.034808 | 0.35388 | 0.266135 | 0.240754 | 0.240754 | 0.182741 | 0.126178 | 0 | 0.005416 | 0.211262 | 2,575 | 81 | 106 | 31.790123 | 0.67356 | 0.027184 | 0 | 0.339286 | 0 | 0.017857 | 0.151727 | 0.070398 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.053571 | 0.017857 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f5ba763d8dc076a6f987c5dcaf800e5147fe387 | 2,187 | py | Python | classes/gaussian.py | Vbtesh/easy_EM | 5b8e2dc07f7c63c74e4e3bf641a92ef0814ae622 | [
"MIT"
] | null | null | null | classes/gaussian.py | Vbtesh/easy_EM | 5b8e2dc07f7c63c74e4e3bf641a92ef0814ae622 | [
"MIT"
] | null | null | null | classes/gaussian.py | Vbtesh/easy_EM | 5b8e2dc07f7c63c74e4e3bf641a92ef0814ae622 | [
"MIT"
] | null | null | null | import numpy as np
class Gaussian_mean:
    def __init__(self, name, num_clusters, data, variance=None, means=None):
        self.name = name
        self.c = num_clusters
        self.type = 'gaussian_mean'
        self.n_iter = 0

        if variance:
            self.std = np.sqrt(variance)
        else:
            # Default is the standard error of the sample mean
            self.std = np.sqrt(np.var(data.flatten())) / np.sqrt(len(data.flatten()))

        # Can be a single parameter or a vector of parameters, usually the latter
        if not isinstance(means, np.ndarray):
            # If none are given, generate a vector of means normally distributed
            # around the sample mean with the standard error computed above
            self.params_init = np.random.normal(loc=np.mean(data), scale=self.std, size=self.c)
            self.params = self.params_init
        else:
            self.params_init = means
            self.params = means

        # Observations of the normal random variable: a length-n column vector,
        # where n is the number of observations
        self.data = data.reshape((len(data), 1))

        # Compute likelihood and log likelihood
        self.update()

    def get_likelihood(self, obs):
        # obs must be an integer or a column vector
        return 1 / np.sqrt(2 * np.pi * self.std**2) * np.exp(- (1/(2 * self.std**2)) * (obs - self.params)**2)

    def get_log_likelihood(self, obs):
        # obs must be an integer or a column vector
        return - 1 / (2 * self.std**2) * (obs - self.params)**2

    def maximise(self, q_h):
        self.params_old = self.params
        # Optimise the energy w.r.t. the mean parameters; q_h is the optimised
        # variational distribution output from the expectation step
        self.params = np.sum(q_h * self.data, axis=0) / np.sum(q_h, axis=0)
        self.update()
        self.n_iter += 1

    def update(self):
        # Likelihood of each observation given the current means
        self.likelihood = self.get_likelihood(self.data)
        # Log likelihood, up to proportionality, of each observation given the current means
        self.log_likelihood = self.get_log_likelihood(self.data)
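
# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal EM loop for a two-component, mean-only mixture with equal mixing
# weights and known unit variance. The E-step shown here (likelihoods
# normalised into responsibilities) is an assumption about how q_h is meant
# to be produced; the module itself only provides the M-step.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = np.concatenate([rng.normal(-2.0, 1.0, 100), rng.normal(3.0, 1.0, 100)])
    model = Gaussian_mean("demo", num_clusters=2, data=data, variance=1.0)
    for _ in range(25):
        # E-step: responsibilities proportional to the per-component likelihoods
        q_h = model.likelihood / model.likelihood.sum(axis=1, keepdims=True)
        # M-step: re-estimate the component means
        model.maximise(q_h)
    print(model.params)  # should approach [-2, 3] (in some order)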
| 32.161765 | 135 | 0.622314 | 310 | 2,187 | 4.309677 | 0.36129 | 0.067365 | 0.050898 | 0.019461 | 0.186377 | 0.186377 | 0.186377 | 0.186377 | 0.125 | 0.125 | 0 | 0.010224 | 0.284408 | 2,187 | 67 | 136 | 32.641791 | 0.84345 | 0.323731 | 0 | 0.129032 | 0 | 0 | 0.008874 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.032258 | 0.064516 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f5dd0b94acf376ad82df1f237683a4b69b1bdd7 | 3,451 | py | Python | prioritize_health.py | bryanhpchiang/Twilio-Backend | b1fc70e8404c211a2a39fd3f5f024ad738edbbe2 | [
"Apache-2.0"
] | null | null | null | prioritize_health.py | bryanhpchiang/Twilio-Backend | b1fc70e8404c211a2a39fd3f5f024ad738edbbe2 | [
"Apache-2.0"
] | null | null | null | prioritize_health.py | bryanhpchiang/Twilio-Backend | b1fc70e8404c211a2a39fd3f5f024ad738edbbe2 | [
"Apache-2.0"
] | null | null | null | import nltk
nltk.download('averaged_perceptron_tagger')
nltk.download('punkt')
import gensim
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, KeywordsOptions
from ibm_key import *


def is_useless(word):
    pos = nltk.pos_tag([word])[0][1]
    if pos in ["DT", "IN", "WRB", "RB"]:
        return True
    return False


def prioritize_health(sentence, model):
    priority_dict = {}
    with open("priority_dict.csv", "r") as f:
        for line in f:
            # print(line.strip().split(","))
            comma_split = line.strip().split(",")
            symptom = ",".join(comma_split[:-1])
            priority = int(comma_split[-1])
            print(symptom, priority)
            priority_dict[symptom] = priority
    symptoms = priority_dict.keys()

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12',
        iam_apikey=ibm_key,
        url='https://gateway.watsonplatform.net/natural-language-understanding/api'
    )
    result = natural_language_understanding.analyze(
        language="en",
        text=sentence,
        features=Features(keywords=KeywordsOptions())).get_result()
    print(result)
    keywords = [x['text'] for x in result['keywords']]
    print(keywords)

    # Find the keywords in the sentence; for each keyword, find the nearest
    # symptom based on the average cosine similarity score
    # print(symptoms)
    closest_symptoms = []  # add one for each keyword
    best_symptom = None
    for cur_keyword in keywords:
        best_avg_sim = 0
        print("Finding closest match for keyword={}".format(cur_keyword))
        test_keyword = cur_keyword.split(" ")[-1]
        print("Test keyword={}".format(test_keyword))
        for cur_symptom in symptoms:
            symptom_tokens = [x.lower() for x in nltk.word_tokenize(cur_symptom) if not is_useless(x) and x.isalpha()]
            total_sim = 0
            total_cnt = 0
            # print("Symptom={}".format(cur_symptom))
            # print("Symptom tokens={}".format(symptom_tokens))
            for token in symptom_tokens:
                # print("Token={}".format(token))
                if token not in model.vocab or token == test_keyword:
                    # print("Token not found in vocab, skipping")
                    continue
                # Compute the similarity of the keyword and the token
                cur_sim = model.similarity(test_keyword, token)
                # print("cur_sim={}".format(cur_sim))
                if cur_sim < -0.2 or cur_sim > 0.2:
                    total_sim += cur_sim
                    total_cnt += 1
            if total_cnt:
                avg_sim = float(total_sim) / total_cnt
                print("total_sim={},total_cnt={},avg_sim={}".format(total_sim, total_cnt, avg_sim))
                if avg_sim > best_avg_sim:
                    best_avg_sim = avg_sim
                    best_symptom = cur_symptom
    print(best_avg_sim, best_symptom)

    if best_symptom is not None:
        priority_score = priority_dict[best_symptom]
    else:
        priority_score = 0
    res = (keywords, priority_score)
    print("Result={}".format(res))
    return res


def main():
    sentence = "i have a small wound and broken leg"
    model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300-SLIM.bin', binary=True)
    # print("hello" in model.vocab)
    result = prioritize_health(sentence, model)
    # print(result)


if __name__ == '__main__':
    main()
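
# Added note (not part of the original script): prioritize_health returns a
# (keywords, priority_score) tuple, where priority_score is the integer looked
# up in priority_dict.csv for the best-matching symptom (0 if nothing matched).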
| 38.775281 | 116 | 0.63286 | 427 | 3,451 | 4.899297 | 0.330211 | 0.025813 | 0.053537 | 0.020076 | 0.033461 | 0.021033 | 0 | 0 | 0 | 0 | 0 | 0.011288 | 0.255578 | 3,451 | 89 | 117 | 38.775281 | 0.803036 | 0.131266 | 0 | 0 | 0 | 0 | 0.111186 | 0.033825 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.069444 | 0 | 0.152778 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f5f6c43c0a2b98dab6270e3ab4c4138890053eb | 9,266 | py | Python | dev/phonts/visualization/visualize_freq_vs_lifetime_Ar.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 4 | 2018-01-18T19:59:56.000Z | 2020-08-25T11:56:52.000Z | dev/phonts/visualization/visualize_freq_vs_lifetime_Ar.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 1 | 2018-04-22T23:02:13.000Z | 2018-04-22T23:02:13.000Z | dev/phonts/visualization/visualize_freq_vs_lifetime_Ar.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 1 | 2019-09-14T07:04:42.000Z | 2019-09-14T07:04:42.000Z | import os,copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict


class PhontsBteData(object):
    """
    Args:
        directory(str): the directory where the phonts simulation
            output files exist
        natoms(int): number of atoms in the simulation cell
    Attributes:
        directory(str): the directory where the phonts simulation
            output files exist
        ph_lt_file(pypospack.io.phonts.PhononLifetimeFile)
        ph_freq_file(pypospack.io.phonts.PhononFrequencyFile)
        natoms(int): number of atoms in the simulation cell
    """
    def __init__(self, directory, natoms):
        self.directory = directory
        self.ph_lt_file = None
        self.ph_freq_file = None
        self.ph_lt_filename = 'phon_lifetime.dat'
        self.ph_freq_filename = 'freq.dat'
        self.natoms = natoms
        self.data = None

    def read(self, directory=None):
        if directory is not None:
            self.directory = directory
        self.ph_lt_file = PhononLifetimeFile(
            natoms=self.natoms,
            filename=os.path.join(
                self.directory,
                self.ph_lt_filename))
        self.ph_freq_file = PhononFrequencyFile(
            natoms=self.natoms,
            filename=os.path.join(
                self.directory,
                self.ph_freq_filename))
        self.ph_lt_file.read()
        self.ph_freq_file.read()

    def build_data(self):
        self.data = OrderedDict()
        for temp in self.ph_lt_file.temp:
            print('temp={}'.format(temp))
            self.data[temp] = None
            self.build_data_at_temp(temp)

    def build_data_at_temp(self, temp):
        """
        This method consolidates the data contained in two different files
        so that we can compare phonon frequencies with phonon lifetimes.

        Args:
            temp(int): the temperature from the BTE calculation; this value
                must be in the list of values contained in ph_lt_file.temp
        """
        if self.data is None:
            self.data = OrderedDict()
        self.data[temp] = []  # initialize our list
        self.kpoint_keys_format = "{kp1:.6f}_{kp2:.6f}_{kp3:.6f}"
        freq_n_rows, freq_n_cols = self.ph_freq_file.data.shape
        lt_n_rows, lt_n_cols = self.ph_lt_file.data[temp].shape

        # These indices are the column indices for the columns kp1, kp2, kp3
        # in the phonon frequency file (ph_freq_file)
        freq_kp1_idx = self.ph_freq_file.col_names.index('kp1')
        freq_kp2_idx = self.ph_freq_file.col_names.index('kp2')
        freq_kp3_idx = self.ph_freq_file.col_names.index('kp3')

        # These indices are the column indices for the columns kp1, kp2, kp3
        # in the phonon lifetime file (ph_lt_file)
        lt_kp1_idx = self.ph_lt_file.col_names.index('kp1')
        lt_kp2_idx = self.ph_lt_file.col_names.index('kp2')
        lt_kp3_idx = self.ph_lt_file.col_names.index('kp3')

        for i in range(freq_n_rows):
            freq_kpoint_key = self.kpoint_keys_format.format(
                kp1=self.ph_freq_file.data[i, freq_kp1_idx],
                kp2=self.ph_freq_file.data[i, freq_kp2_idx],
                kp3=self.ph_freq_file.data[i, freq_kp3_idx])
            for j in range(lt_n_rows):
                lt_kpoint_key = self.kpoint_keys_format.format(
                    kp1=self.ph_lt_file.data[temp][j, lt_kp1_idx],
                    kp2=self.ph_lt_file.data[temp][j, lt_kp2_idx],
                    kp3=self.ph_lt_file.data[temp][j, lt_kp3_idx])
                if freq_kpoint_key == lt_kpoint_key:
                    for k in range(3 * self.natoms):
                        # Here we build the row pairing a phonon frequency
                        # with the lifetime associated with that phonon:
                        #   ph_id(int) - unique integer assigned to a phonon
                        #       for identification
                        #   kp1,kp2,kp3 - location of the k-point associated
                        #       with the phonon frequency, expressed in the
                        #       basis of the reciprocal lattice
                        #   fr - frequency of the phonon in meV
                        #   lt - phonon lifetime in ps
                        ph_id = len(self.data[temp])
                        kp1 = self.ph_lt_file.data[temp][j, lt_kp1_idx]
                        kp2 = self.ph_lt_file.data[temp][j, lt_kp2_idx]
                        kp3 = self.ph_lt_file.data[temp][j, lt_kp3_idx]
                        # We need the column index of the k-th phonon frequency
                        fr_idx = self.ph_freq_file.col_names.index(
                            "freq{}".format(k + 1))
                        # We need the column index of the k-th phonon lifetime
                        lt_idx = self.ph_lt_file.col_names.index(
                            "lt{}".format(k + 1))
                        fr = self.ph_freq_file.data[i, fr_idx]
                        lt = self.ph_lt_file.data[temp][j, lt_idx]
                        self.data[temp].append([
                            ph_id,
                            kp1, kp2, kp3,
                            fr, lt])
        self.data[temp] = np.array(self.data[temp])


class PhononLifetimeFile(object):
    def __init__(self, natoms, filename='phon_lifetime.dat'):
        self.col_names = ['index', 'kp1', 'kp2', 'kp3'] \
            + ['lt{}'.format(i + 1) for i in range(3 * natoms)]
        self.natoms = natoms
        self.filename = filename
        self.data = read_phon_lifetime(self.filename)
        self.temp = [k for k, v in self.data.items()]

    def read(self):
        self.data = read_phon_lifetime(self.filename)

    def print(self):
        for k, v in self.data.items():
            print(k, v.shape)


class PhononFrequencyFile(object):
    def __init__(self, natoms, filename='freq.dat'):
        self.col_names = ['index', 'kp1', 'kp2', 'kp3'] \
            + ['freq{}'.format(i + 1) for i in range(3 * natoms)]
        self.filename = filename
        self.data = None
        self.natoms = natoms

    def read(self, filename=None):
        if filename is not None:
            self.filename = filename
        try:
            with open(self.filename, 'r') as f:
                lines = f.readlines()
        except FileNotFoundError:
            raise
        values_all = []
        for i, line in enumerate(lines):
            args = line.strip().split()
            values_all.append([float(arg) for arg in args])
        self.data = np.array(values_all)

    def print(self):
        print(self.data.shape)


def get_data_from_phonts_file(filename):
    # def process_first_line(line):
    #     args = line.strip().split()
    #     args = [arg.strip() for arg in args]
    #     args = args[1:]
    #     return args
    # labels = None
    data = None
    data_all = None
    with open(filename) as f:
        lines = f.readlines()
    # initialize variables
    values_all = []
    for i, line in enumerate(lines):
        # if i == 0:
        #     labels = process_first_line(line)
        # else:
        args = line.strip().split()
        values_all.append([float(arg) for arg in args])
    data_all = np.array(values_all)
    return data_all


def get_freq_data(filename='freq.dat'):
    freq_data = get_data_from_phonts_file(filename)
    return freq_data


def read_phon_lifetime(filename='phon_lifetime.dat'):
    """
    Reads phonon lifetime information
    """
    def subselect_table_block(i_start, lines):
        i = i_start + 1
        table = []
        while lines[i].strip() != "":
            args = lines[i].split()
            args = [arg.strip() for arg in args]
            args = [float(arg) for arg in args]
            table.append(args)
            i += 1
        return np.array(table)

    line = None  # initialize
    with open(filename, 'r') as f:
        lines = f.readlines()
    lines = [s.strip() for s in lines]

    temperatures = []
    phon_lifetime = OrderedDict()
    for il, line in enumerate(lines):
        if line.startswith('# Temp:'):
            args = line.split(':')
            T = int(float(args[1].strip()))
            temperatures.append(T)
            phon_lifetime[T] = subselect_table_block(il, lines)
    return {k: v.copy() for k, v in phon_lifetime.items()}


if __name__ == "__main__":
    phonts_sim_dir = 'Ar_result'
    freq_data_filename = os.path.join(
        phonts_sim_dir,
        'freq.dat')
    phon_lifetime_data_filename = os.path.join(
        phonts_sim_dir,
        'phon_lifetime.dat')

    bte_data = PhontsBteData(natoms=4, directory=phonts_sim_dir)
    bte_data.read()
    bte_data.build_data_at_temp(temp=400)

    ph_freq = bte_data.data[400][:, 4]
    ph_lt = bte_data.data[400][:, 5]
    # Drop modes with zero lifetime before taking the inverse
    idx_not_zero = np.where(ph_lt != 0)[0]
    ph_freq = bte_data.data[400][idx_not_zero, 4]
    ph_inv_lt = 1 / bte_data.data[400][idx_not_zero, 5]
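
    # Illustrative plot (added sketch, not part of the original script):
    # scatter of phonon frequency against inverse lifetime at 400 K, assuming
    # the meV / ps units noted in the comments above.
    plt.scatter(ph_freq, ph_inv_lt, s=4)
    plt.xlabel('phonon frequency [meV]')
    plt.ylabel('inverse phonon lifetime [1/ps]')
    plt.title('Ar, T = 400 K')
    plt.show()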
| 34.70412 | 81 | 0.564213 | 1,200 | 9,266 | 4.143333 | 0.16 | 0.038616 | 0.030571 | 0.038616 | 0.441875 | 0.4107 | 0.359413 | 0.254224 | 0.16432 | 0.134956 | 0 | 0.014479 | 0.336607 | 9,266 | 266 | 82 | 34.834586 | 0.794371 | 0.185625 | 0 | 0.252941 | 0 | 0 | 0.031013 | 0.003927 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082353 | false | 0 | 0.029412 | 0 | 0.152941 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f5ffb83eb270ebe0264b1392d0c080b2974987c | 3,828 | py | Python | leprikon/forms/refundrequest.py | leprikon-cz/leprikon | b1bec36fb6bcf0220bffccca53b6f200f9e95910 | [
"BSD-3-Clause"
] | 4 | 2018-10-29T17:46:09.000Z | 2021-12-16T08:57:48.000Z | leprikon/forms/refundrequest.py | leprikon-cz/leprikon | b1bec36fb6bcf0220bffccca53b6f200f9e95910 | [
"BSD-3-Clause"
] | 68 | 2016-07-11T07:48:54.000Z | 2022-03-18T01:32:06.000Z | leprikon/forms/refundrequest.py | leprikon-cz/leprikon | b1bec36fb6bcf0220bffccca53b6f200f9e95910 | [
"BSD-3-Clause"
] | 2 | 2016-07-12T20:39:53.000Z | 2020-10-10T03:14:42.000Z | from django import forms
from django.utils.translation import ugettext_lazy as _

from ..models.courses import CourseRegistration
from ..models.events import EventRegistration
from ..models.orderables import OrderableRegistration
from ..models.refundrequest import RefundRequest
from ..models.transaction import Transaction
from ..utils import comma_separated, currency, first_upper
from .fields import ReadonlyField
from .form import FormMixin


class RefundRequestBaseForm(FormMixin, forms.ModelForm):
    def __init__(self, registration, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.registration = registration
        self.readonly_fields = [
            ReadonlyField(label=first_upper(registration.subject.subject_type.name), value=registration.subject.name)
        ]
        if registration.subject.registration_type_participants:
            if len(registration.all_participants) > 1:
                label = _("Participants")
            else:
                label = _("Participant")
            self.readonly_fields.append(
                ReadonlyField(label=label, value=comma_separated(registration.all_participants))
            )
        elif registration.subject.registration_type_groups:
            self.readonly_fields.append(ReadonlyField(label=_("Contact person"), value=registration.group.full_name))
            if registration.group.name:
                self.readonly_fields.append(ReadonlyField(label=_("Group name"), value=registration.group.name))
        self.readonly_fields.append(
            ReadonlyField(label=_("Overpaid amount"), value=currency(registration.payment_status.overpaid))
        )


class RefundRequestForm(RefundRequestBaseForm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.instance.registration = self.registration
        self.instance.requested_by_id = self.registration.user_id

    class Meta:
        model = RefundRequest
        fields = ["bank_account"]


class PaymentTransferForm(RefundRequestBaseForm):
    instance: Transaction

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        valid_target_registration_ids = [
            registration.id
            for Registration in (CourseRegistration, EventRegistration, OrderableRegistration)
            for registration in Registration.objects.filter(user_id=self.registration.user_id)
            if registration.payment_status.amount_due
        ]
        registration_choices = self.fields["target_registration"].widget.choices
        registration_choices.queryset = registration_choices.queryset.filter(id__in=valid_target_registration_ids)
        self.instance.source_registration = self.registration
        self.instance.accounted_by_id = self.registration.user_id
        self.instance.transaction_type = Transaction.TRANSFER

    def clean(self):
        self.cleaned_data = super().clean()
        target_registration = self.cleaned_data.get("target_registration")
        if target_registration:
            self.instance.amount = min(
                self.registration.payment_status.overpaid,
                target_registration.payment_status.amount_due,
            )
        return self.cleaned_data

    class Meta:
        model = Transaction
        fields = ["target_registration"]


class DonationForm(RefundRequestBaseForm):
    instance: Transaction

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.instance.source_registration = self.registration
        self.instance.accounted_by_id = self.registration.user_id
        self.instance.transaction_type = Transaction.DONATION_TRANSFER
        self.instance.amount = self.registration.payment_status.overpaid

    class Meta:
        model = Transaction
        fields = []
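
# Added note (not part of the original module): PaymentTransferForm caps the
# transferred amount at min(source overpayment, target amount due), so a
# transfer can never overdraw the source registration or overpay the target.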
| 40.723404 | 117 | 0.702456 | 379 | 3,828 | 6.831135 | 0.248021 | 0.06798 | 0.034762 | 0.029355 | 0.405176 | 0.286983 | 0.244496 | 0.23175 | 0.23175 | 0.183082 | 0 | 0.000331 | 0.210554 | 3,828 | 93 | 118 | 41.16129 | 0.856387 | 0 | 0 | 0.25641 | 0 | 0 | 0.034222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064103 | false | 0 | 0.128205 | 0 | 0.320513 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f62c371e97016904243e582e0e0941f4163bf85 | 1,922 | py | Python | tests/integration/states/service.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/integration/states/service.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2019-09-06T13:57:28.000Z | 2019-09-06T13:57:28.000Z | tests/integration/states/service.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2020-09-30T16:09:48.000Z | 2020-09-30T16:09:48.000Z | # -*- coding: utf-8 -*-
'''
Tests for the service state
'''
# Import python libs
from __future__ import absolute_import

# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import (
    ensure_in_syspath,
    destructiveTest
)
ensure_in_syspath('../../')

# Import salt libs
import integration
import salt.utils

INIT_DELAY = 5
SERVICE_NAME = 'crond'


@destructiveTest
@skipIf(salt.utils.which('crond') is None, 'crond not installed')
class ServiceTest(integration.ModuleCase,
                  integration.SaltReturnAssertsMixIn):
    '''
    Validate the service state
    '''
    def check_service_status(self, exp_return):
        '''
        helper method to check status of service
        '''
        check_status = self.run_function('service.status', name=SERVICE_NAME)
        if check_status is not exp_return:
            self.assertFalse('status of service is not returning correctly')

    def test_service_dead(self):
        '''
        test service.dead state module
        '''
        start_service = self.run_state('service.running', name=SERVICE_NAME)
        self.assertSaltTrueReturn(start_service)
        self.check_service_status(True)

        ret = self.run_state('service.dead', name=SERVICE_NAME)
        self.assertSaltTrueReturn(ret)
        self.check_service_status(False)

    def test_service_dead_init_delay(self):
        '''
        test service.dead state module with init_delay arg
        '''
        start_service = self.run_state('service.running', name=SERVICE_NAME)
        self.assertSaltTrueReturn(start_service)
        self.check_service_status(True)

        ret = self.run_state('service.dead', name=SERVICE_NAME,
                             init_delay=INIT_DELAY)
        self.assertSaltTrueReturn(ret)
        self.check_service_status(False)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(ServiceTest)
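
# Added note (not part of the original file): these are destructive
# integration tests; they actually start and stop the system's crond service,
# which is why they are marked @destructiveTest and skipped when crond is
# not installed.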
| 28.686567 | 77 | 0.67898 | 223 | 1,922 | 5.591928 | 0.304933 | 0.052927 | 0.072173 | 0.060946 | 0.384924 | 0.384924 | 0.336808 | 0.336808 | 0.2502 | 0.2502 | 0 | 0.00135 | 0.229448 | 1,922 | 66 | 78 | 29.121212 | 0.840648 | 0.135796 | 0 | 0.27027 | 0 | 0 | 0.098978 | 0 | 0 | 0 | 0 | 0 | 0.162162 | 1 | 0.081081 | false | 0 | 0.162162 | 0 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f63dc2f21bee87e13f8d7f4d271327d23cf44c1 | 763 | py | Python | 6.py | msatuqi/homework | c6110ce26cba4e279622667f02b06edb4308d26d | [
"MIT"
] | null | null | null | 6.py | msatuqi/homework | c6110ce26cba4e279622667f02b06edb4308d26d | [
"MIT"
] | null | null | null | 6.py | msatuqi/homework | c6110ce26cba4e279622667f02b06edb4308d26d | [
"MIT"
] | null | null | null | import sys
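# Added usage note (not part of the original script): pass one or more Python
# files on the command line; the script lists `def` statements that are not
# immediately preceded by a comment line, e.g.
#   python 6.py module_a.py module_b.py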
import os

if __name__ == "__main__":
    files = []
    if len(sys.argv) > 1:
        for i in range(1, len(sys.argv)):
            file = sys.argv[i]
            if os.path.isfile(file):
                files.append(file)
                continue
            print(f"File {file} doesn't exist")
    else:
        print("Not enough arguments!")
        sys.exit()

    funcs = []
    for i in files:
        with open(i, "r", encoding="utf-8") as f:
            a = f.readlines()
        for j in range(len(a)):
            if a[j].startswith("def "):
                # Skip functions immediately preceded by a comment; guard
                # j == 0 so the first line is not compared against a[-1]
                if j == 0 or not a[j - 1].startswith("#"):
                    funcs.append(f"file name: {i}, line: {j}, function name: {a[j][4:].strip()}")
    for i in funcs:
        print(i)
| 29.346154 | 89 | 0.454784 | 101 | 763 | 3.356436 | 0.445545 | 0.061947 | 0.053097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01087 | 0.397117 | 763 | 26 | 90 | 29.346154 | 0.726087 | 0 | 0 | 0 | 0 | 0.041667 | 0.156969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f6443bcf6ed89d7221f462318c15bab9a25357c | 2,107 | py | Python | mobilecoind/clients/python/cli/get_public_address.py | jgreat/mobilecoin | 7df58d88f67e3b92122b814acae9c08498429092 | [
"Apache-2.0"
] | 1 | 2022-01-17T21:12:44.000Z | 2022-01-17T21:12:44.000Z | mobilecoind/clients/python/cli/get_public_address.py | jgreat/mobilecoin | 7df58d88f67e3b92122b814acae9c08498429092 | [
"Apache-2.0"
] | 292 | 2020-10-22T00:34:35.000Z | 2022-03-29T09:29:14.000Z | mobilecoind/clients/python/cli/get_public_address.py | eranrund/mobilecoin | ef19480f5a2c5dd7f79aba5650138e0f730735b4 | [
"Apache-2.0"
] | 1 | 2022-03-26T20:34:00.000Z | 2022-03-26T20:34:00.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2018-2021 The MobileCoin Foundation

""" displays the b58 public address and URI that correspond to a master key """

import argparse
import os, sys
sys.path.insert(1, os.path.realpath(os.path.join(os.path.pardir, "lib")))
import mobilecoin

if __name__ == '__main__':
    # Connect to mobilecoind
    mobilecoind = mobilecoin.Client("localhost:4444", ssl=False)

    # Parse the arguments
    parser = argparse.ArgumentParser(
        description='Displays public address information for a provided master key, or for a random master key if no key is provided.')
    parser.add_argument('-k', '--key', help='account master key', type=str)
    parser.add_argument('-m', '--mnemonic', help='account key as mnemonic string', type=str)
    parser.add_argument('-s', '--subaddress', help='(optional) subaddress', nargs='?', const=mobilecoin.DEFAULT_SUBADDRESS_INDEX, type=int, default=mobilecoin.DEFAULT_SUBADDRESS_INDEX)
    args = parser.parse_args()

    # Create a monitor and use it to calculate the public address
    if args.key:
        entropy_bytes = bytes.fromhex(args.key) if args.key else mobilecoind.generate_entropy()
        account_key = mobilecoind.get_account_key(entropy_bytes).account_key
        entropy_display = entropy_bytes.hex()
    else:
        mnemonic = args.mnemonic if args.mnemonic else mobilecoind.generate_mnemonic()
        account_key = mobilecoind.get_account_key_from_mnemonic(mnemonic)
        entropy_display = mnemonic

    monitor_id = mobilecoind.add_monitor(account_key, first_subaddress=args.subaddress).monitor_id
    public_address = mobilecoind.get_public_address(monitor_id, subaddress_index=args.subaddress).public_address

    # Print the public address information
    print("\n")
    print(" {:<18}{}".format("Master Key:", entropy_display))
    print(" {:<18}{}".format("Subaddress Index:", args.subaddress))
    print(" {:<18}{}".format("Address Code:", public_address.b58_code))
    print(" {:<18}{}".format("Address URL:", "mob58://" + public_address.b58_code))
    print("\n")
| 46.822222 | 184 | 0.712862 | 271 | 2,107 | 5.365314 | 0.376384 | 0.080468 | 0.035763 | 0.022008 | 0.114168 | 0.046768 | 0 | 0 | 0 | 0 | 0 | 0.017465 | 0.15757 | 2,107 | 44 | 185 | 47.886364 | 0.80169 | 0.14523 | 0 | 0.071429 | 0 | 0.035714 | 0.197427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.107143 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f66c650220248159eeb9079ee30655ab1b2a320 | 1,200 | py | Python | qichacha/crawler.py | johnson7788/webinfo-crawler | 42a1194b32d600a2c41c8eccab9afa1bcb61d053 | [
"MIT"
] | null | null | null | qichacha/crawler.py | johnson7788/webinfo-crawler | 42a1194b32d600a2c41c8eccab9afa1bcb61d053 | [
"MIT"
] | null | null | null | qichacha/crawler.py | johnson7788/webinfo-crawler | 42a1194b32d600a2c41c8eccab9afa1bcb61d053 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*-: coding: utf-8 -*-
"""
:author: lubosson
:date: 2019-04-15
:desc:
"""
import logging as log

from qichacha.client import QichachaClient
from qichacha.manager import QichachaManager
from db.model.model import Company
from db.mysql_connector import insert as save

# Qichacha client
qcc_client = QichachaClient()
manager = QichachaManager()


def start():
    keywords = globals().get('keywords')
    for keyword in keywords:
        raw_companies = qcc_client.search(keyword)
        cost_time = 2 * len(raw_companies) + 4
        log.info('Crawling [%s], this will take about %s seconds' % (keyword, cost_time))
        # Company object
        company = Company()
        for raw_company in raw_companies:
            company.keyword = keyword
            # Assemble the basic company information
            manager.assembly(company, raw_company)
            raw_company_detail = qcc_client.search_detail(raw_company.get('KeyNo'))
            # Add the detailed company information
            manager.assembly_detail(company, raw_company_detail)
            # Save to the database
            # save(company.__dict__)
            log.info(company)
            company.clear()
    log.info('completed')


def load_keys(keys: list):
    globals().setdefault('keywords', keys)
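
# Added usage sketch (not part of the original module): load the search
# keywords first, then start crawling, e.g.
#   load_keys(["some company name"])
#   start()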
| 22.641509 | 83 | 0.640833 | 137 | 1,200 | 5.416058 | 0.489051 | 0.067385 | 0.068733 | 0.061995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013333 | 0.25 | 1,200 | 52 | 84 | 23.076923 | 0.811111 | 0.121667 | 0 | 0 | 0 | 0 | 0.046647 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.208333 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f67013369c20b543aea358f0f79f4d5815f4539 | 961 | py | Python | sources/Entities/BackgroundScrolling.py | rsoultan/Pygame-shoot-them-up | 0ae41522253b7405e6d00a8c4094de7480846535 | [
"Apache-2.0"
] | null | null | null | sources/Entities/BackgroundScrolling.py | rsoultan/Pygame-shoot-them-up | 0ae41522253b7405e6d00a8c4094de7480846535 | [
"Apache-2.0"
] | null | null | null | sources/Entities/BackgroundScrolling.py | rsoultan/Pygame-shoot-them-up | 0ae41522253b7405e6d00a8c4094de7480846535 | [
"Apache-2.0"
] | null | null | null | import pygame
from sources.Entities.Entity import Entity
from sources.Settings import SETTINGS


class BackgroundScrolling(pygame.sprite.Sprite, Entity):
    def __init__(self):
        super().__init__()
        self.music = pygame.mixer.music.load("assets/sounds/game.ogg")
        pygame.mixer.music.play()
        self.image = pygame.image.load("assets/images/game_background.png").convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.x = 0
        self.rect.y = 0
        self.rel_x = self.rect.x % self.rect.width

    def event(self, event):
        pass

    def update(self, elapsed_time):
        self.rect.x -= 1
        self.rel_x = self.rect.x % self.rect.width
        if self.rect.x < -self.rect.width:
            self.rect.x = 0

    def draw(self, window):
        window.blit(self.image, (self.rel_x - self.rect.width, 0))
        if self.rel_x < SETTINGS['WIDTH']:
            window.blit(self.image, (self.rel_x, 0))
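
# Illustrative game loop (added sketch, not part of the original module).
# SETTINGS['HEIGHT'] is an assumed key; the original code only reads
# SETTINGS['WIDTH'], and the asset paths above must exist for this to run.
if __name__ == "__main__":
    pygame.init()
    window = pygame.display.set_mode((SETTINGS['WIDTH'], SETTINGS['HEIGHT']))
    clock = pygame.time.Clock()
    background = BackgroundScrolling()
    running = True
    while running:
        elapsed_time = clock.tick(60)  # cap at 60 FPS
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        background.update(elapsed_time)
        background.draw(window)
        pygame.display.flip()
    pygame.quit()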
| 32.033333 | 91 | 0.626431 | 136 | 961 | 4.301471 | 0.330882 | 0.164103 | 0.092308 | 0.095727 | 0.246154 | 0.232479 | 0.194872 | 0.102564 | 0.102564 | 0 | 0 | 0.008219 | 0.240375 | 961 | 29 | 92 | 33.137931 | 0.793151 | 0 | 0 | 0.166667 | 0 | 0 | 0.062435 | 0.057232 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.041667 | 0.125 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f6bbd07c06394980185670c03f5513e2f6231c0 | 2,573 | py | Python | src/tinkoff/client.py | tlgtaa/education-backend | 86f8af315f9cff2c1fd19406899d593fc0852124 | [
"MIT"
] | 1 | 2021-03-03T19:51:24.000Z | 2021-03-03T19:51:24.000Z | src/tinkoff/client.py | tlgtaa/education-backend | 86f8af315f9cff2c1fd19406899d593fc0852124 | [
"MIT"
] | null | null | null | src/tinkoff/client.py | tlgtaa/education-backend | 86f8af315f9cff2c1fd19406899d593fc0852124 | [
"MIT"
] | null | null | null | import requests
from collections import OrderedDict
from hashlib import sha256
from urllib.parse import urljoin

from django.conf import settings

from app.banking import Bank
from tinkoff.exceptions import TinkoffRequestException


class TinkoffBank(Bank):
    def get_initial_payment_url(self):
        return self.Init()['PaymentURL']

    def Init(self) -> dict:
        return self.call('Init', payload={
            'Amount': self.price,
            'OrderId': self.order.id,
            'CustomerKey': self.user.id,
            'SuccessURL': self.success_url,
            'FailURL': self.fail_url,
            'Receipt': self.get_receipt(),
            'NotificationURL': self.get_notification_url(),
        })

    def call(self, method: str, payload: dict) -> dict:
        """Query the Tinkoff API"""
        payload.update({'TerminalKey': settings.TINKOFF_TERMINAL_KEY})

        r = requests.post(f'https://securepay.tinkoff.ru/v2/{method}/', json={
            'Token': self._get_token(payload),
            **payload,
        })
        if r.status_code != 200:
            raise TinkoffRequestException(f'Incorrect HTTP status code for {method}: {r.status_code}')

        parsed = r.json()
        if not parsed['Success']:
            raise TinkoffRequestException(f'Non-success request for {method}: {parsed["ErrorCode"]}, {parsed["Message"]} ({parsed["Details"]})')

        return parsed

    def get_receipt(self):
        return {
            'Email': self.user.email,
            'Taxation': 'usn_income',
            'Items': self.get_items(),
        }

    def get_items(self):
        return [{
            'Name': self.order.item.name_receipt,
            'Price': self.price,
            'Quantity': 1,
            'Amount': self.price,
            'PaymentObject': 'service',
            'Tax': 'none',
        }]

    @staticmethod
    def _get_token(request: dict) -> str:
        """Get the request signature based on https://oplata.tinkoff.ru/landing/develop/documentation/request_sign"""
        _request = request.copy()
        for key_to_ignore in ['DATA', 'Receipt']:
            _request.pop(key_to_ignore, None)
        _request['Password'] = settings.TINKOFF_TERMINAL_PASSWORD
        sorted_request = OrderedDict(sorted(_request.items(), key=lambda key, *args: key))
        return sha256(''.join(str(value) for value in sorted_request.values()).encode()).hexdigest().upper()

    @staticmethod
    def get_notification_url():
        return urljoin(settings.ABSOLUTE_HOST, '/api/v2/banking/tinkoff-notifications/')
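
# Added note (not part of the original module): _get_token builds the request
# signature by sorting the payload by key, dropping the nested 'DATA' and
# 'Receipt' objects, appending the terminal password under 'Password',
# concatenating all values in key order, and SHA-256-hashing the result.
# Hypothetical values, for illustration only (requires configured settings):
#   TinkoffBank._get_token({'TerminalKey': 'k', 'Amount': 100, 'OrderId': 42})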
| 32.1625 | 143 | 0.609405 | 278 | 2,573 | 5.507194 | 0.435252 | 0.019595 | 0.019595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006299 | 0.259619 | 2,573 | 79 | 144 | 32.56962 | 0.797375 | 0.051302 | 0 | 0.103448 | 0 | 0.017241 | 0.176543 | 0.024691 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0.017241 | 0.12069 | 0.086207 | 0.37931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f6c0bb22a1598a62cc05707e29dda3aac3c16ed | 4,564 | py | Python | policy_sentry/command/initialize.py | cclauss/policy_sentry | 98e46f5c785b2845b2971e3c8c484f3e2355d756 | [
"MIT"
] | null | null | null | policy_sentry/command/initialize.py | cclauss/policy_sentry | 98e46f5c785b2845b2971e3c8c484f3e2355d756 | [
"MIT"
] | null | null | null | policy_sentry/command/initialize.py | cclauss/policy_sentry | 98e46f5c785b2845b2971e3c8c484f3e2355d756 | [
"MIT"
] | null | null | null | """
Create the Policy Sentry config folder (~/.policy_sentry/) and the contents within.
Create the SQLite database and fill it with the tables scraped from the AWS docs.
"""
import shutil

import click

from policy_sentry.configuration.access_level_overrides import create_default_overrides_file
from policy_sentry.configuration.analysis import create_default_report_config_file
from policy_sentry.configuration.config_directory import create_policy_sentry_config_directory, \
    create_audit_directory, create_policy_analysis_directory, create_html_docs_directory
from policy_sentry.querying.all import get_all_service_prefixes
from policy_sentry.scraping.awsdocs import update_html_docs_directory, get_list_of_service_prefixes_from_links_file, \
    create_service_links_mapping_file
from policy_sentry.shared.database import connect_db, create_database
from policy_sentry.shared.constants import HOME, CONFIG_DIRECTORY, HTML_DIRECTORY_PATH, LINKS_YML_FILE_LOCAL, \
    BUNDLED_DATABASE_FILE_PATH


@click.command(
    short_help='Create a local database to store AWS IAM information.'
)
@click.option(
    '--access-level-overrides-file',
    type=str,
    required=False,
    help='Path to the access level overrides file, used to override the access levels per action provided by the AWS docs'
)
@click.option(
    '--fetch',
    is_flag=True,
    required=False,
    default=False,
    help='Fetch the HTML docs directly from the AWS website. This is helpful if the docs '
         'in the Git repository are behind the live docs and you need to use the latest version of the docs right '
         'now.'
)
@click.option(
    '--build',
    is_flag=True,
    required=False,
    default=False,
    help='Build the SQLite database from the HTML files rather than copying the SQLite database file from '
         'the Python package. Defaults to false.'
)
def initialize(access_level_overrides_file, fetch, build):
    """
    Initialize the local database to store AWS IAM information, which can be used to generate IAM policies, and for
    querying the database.
    """
    if not access_level_overrides_file:
        overrides_file = HOME + CONFIG_DIRECTORY + 'access-level-overrides.yml'
    else:
        overrides_file = access_level_overrides_file

    # Create the config directory
    database_path = create_policy_sentry_config_directory()
    # Copy over the HTML docs, which will be used to build the database
    create_html_docs_directory()
    # Create the directory to download IAM policies to
    create_policy_analysis_directory()
    # Create the audit directory to host lists of permissions for analyze_iam_policy
    create_audit_directory()
    # Create the overrides file, which allows us to override the access levels
    # provided by the AWS documentation
    create_default_overrides_file()
    # Create the default reporting configuration file. This is used by
    # analyze_iam_policy
    create_default_report_config_file()

    if not build and not fetch:
        # Copy from the bundled database location to the destination path
        shutil.copy(BUNDLED_DATABASE_FILE_PATH, database_path)

    # Connect to the database at that path with SQLAlchemy
    db_session = connect_db(database_path, initialization=True)

    # --fetch: wget the AWS IAM Actions, Resources and Condition Keys pages and store them locally.
    # If --build and --fetch are both supplied, just do --fetch
    if fetch:
        # `wget` the HTML docs to the local directory
        update_html_docs_directory(HTML_DIRECTORY_PATH)
        # Update the links.yml file
        prefix_list = create_service_links_mapping_file(
            HTML_DIRECTORY_PATH, LINKS_YML_FILE_LOCAL)
        print(f"Services: {prefix_list}")

    # initialize --build
    if build or access_level_overrides_file or fetch:
        # Use the list of services that were listed in the links.yml file
        all_aws_services = get_list_of_service_prefixes_from_links_file(
            LINKS_YML_FILE_LOCAL)
        print(f"Services to build for: {LINKS_YML_FILE_LOCAL}")
        # Fill in the database with data on the AWS services
        create_database(db_session, all_aws_services, overrides_file)
        print("Created tables for all services!")

    # Query the database for all the services that are now in the database
    all_aws_service_prefixes = get_all_service_prefixes(db_session)
    total_count_of_services = str(len(all_aws_service_prefixes))
    print(f"{total_count_of_services} AWS services in the database. \nServices: {all_aws_service_prefixes}")
| 43.466667 | 118 | 0.758107 | 642 | 4,564 | 5.137072 | 0.249221 | 0.047301 | 0.048514 | 0.043663 | 0.213159 | 0.10279 | 0.10279 | 0.046089 | 0 | 0 | 0 | 0 | 0.18624 | 4,564 | 104 | 119 | 43.884615 | 0.887991 | 0.27213 | 0 | 0.151515 | 0 | 0.030303 | 0.233679 | 0.039353 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015152 | false | 0 | 0.136364 | 0 | 0.151515 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f6ca83e6f6ee2952775de43a42c712196dbdb99 | 4,128 | py | Python | tests/flake8_integration/test_formatter.py | s-weigand/flake8-nb | 39c6cf6158cc231c420ff783a550b09ee5f7e4c7 | [
"Apache-2.0"
] | 23 | 2019-12-05T06:02:43.000Z | 2022-03-11T18:17:19.000Z | tests/flake8_integration/test_formatter.py | s-weigand/flake8-nb | 39c6cf6158cc231c420ff783a550b09ee5f7e4c7 | [
"Apache-2.0"
] | 191 | 2019-10-04T06:22:14.000Z | 2022-03-29T04:02:28.000Z | tests/flake8_integration/test_formatter.py | s-weigand/flake8-nb | 39c6cf6158cc231c420ff783a550b09ee5f7e4c7 | [
"Apache-2.0"
] | 6 | 2020-06-13T13:35:15.000Z | 2021-11-28T19:50:12.000Z | import os
from optparse import Values
from typing import List

import pytest
from flake8.style_guide import Violation

from flake8_nb.flake8_integration.formatter import IpynbFormatter
from flake8_nb.flake8_integration.formatter import map_notebook_error
from flake8_nb.parsers.notebook_parsers import NotebookParser

TEST_NOTEBOOK_PATH = os.path.join("tests", "data", "notebooks", "notebook_with_flake8_tags.ipynb")


def get_test_intermediate_path(intermediate_names):
    return [
        filename
        for filename in intermediate_names
        if filename.endswith("notebook_with_flake8_tags.ipynb_parsed")
    ][0]


def get_mocked_option(notebook_cell_format: str, formatter="default_notebook") -> Values:
    return Values(
        {"output_file": "", "format": formatter, "notebook_cell_format": notebook_cell_format}
    )


def get_mocked_violation(filename: str, line_number: int) -> Violation:
    return Violation(
        filename=os.path.normpath(filename),
        line_number=line_number,
        physical_line=0,
        column_number=2,
        code="AB123",
        text="This is just for the coverage",
    )


@pytest.mark.parametrize(
    "line_number,cell_nr,expected_line_number",
    [
        (8, 1, 2),
        (15, 2, 2),
        (29, 4, 2),
        (30, 4, 3),
        (38, 5, 3),
    ],
)
@pytest.mark.parametrize(
    "notebook_cell_format,cell_format_str",
    (
        ("{nb_path}#In[{exec_count}]", "#In[{}]"),
        ("{nb_path}:code_cell#{exec_count}", ":code_cell#{}"),
    ),
)
def test_IpynbFormatter__map_notebook_error(
    notebook_parser: NotebookParser,
    notebook_cell_format: str,
    cell_format_str: str,
    line_number: int,
    cell_nr: int,
    expected_line_number: int,
):
    expected_filename = f"{TEST_NOTEBOOK_PATH}{cell_format_str.format(cell_nr)}"
    filename = get_test_intermediate_path(notebook_parser.intermediate_py_file_paths)
    mock_error = get_mocked_violation(filename, line_number)
    map_result = map_notebook_error(mock_error, notebook_cell_format)
    assert map_result is not None
    filename, input_cell_line_number = map_result
    assert input_cell_line_number == expected_line_number
    assert filename == expected_filename


@pytest.mark.parametrize(
    "format_str,file_path_list,expected_result_str",
    [
        (
            "default_notebook",
            [],
            "{expected_filename}:2:2: AB123 This is just for the coverage",
        ),
        (
            "%(path)s:%(row)d: %(text)s",
            [],
            "{expected_filename}:2: This is just for the coverage",
        ),
        (
            "default_notebook",
            ["tests", "data", "notebooks", "falsy_python_file.py"],
            "{expected_filename}:8:2: AB123 This is just for the coverage",
        ),
        (
            "default_notebook",
            [
                "tests",
                "data",
                "intermediate_py_files",
                "notebook_with_flake8_tags.ipynb_parsed",
            ],
            "{expected_filename}:8:2: AB123 This is just for the coverage",
        ),
    ],
)
@pytest.mark.parametrize(
    "notebook_cell_format,cell_format_str",
    (
        ("{nb_path}#In[{exec_count}]", "#In[1]"),
        ("{nb_path}:code_cell#{exec_count}", ":code_cell#1"),
    ),
)
def test_IpynbFormatter__format(
    notebook_cell_format: str,
    cell_format_str: str,
    notebook_parser: NotebookParser,
    file_path_list: List[str],
    format_str: str,
    expected_result_str: str,
):
    mocked_option = get_mocked_option(notebook_cell_format, format_str)
    formatter = IpynbFormatter(mocked_option)  # type: ignore
    if file_path_list:
        filename = expected_filename = os.path.join(*file_path_list)
    else:
        expected_filename = f"{TEST_NOTEBOOK_PATH}{cell_format_str}"
        filename = get_test_intermediate_path(notebook_parser.intermediate_py_file_paths)
    mock_error = get_mocked_violation(filename, 8)
    result = formatter.format(mock_error)
    expected_result = expected_result_str.format(expected_filename=expected_filename)
    assert result == expected_result
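
# Added note (not part of the original file): `notebook_parser` is taken by
# both tests as an argument without being parametrized, so it is presumably a
# pytest fixture defined elsewhere in the suite that parses the test notebook
# into the intermediate .py files these tests look up.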
| 31.51145 | 98 | 0.664244 | 494 | 4,128 | 5.188259 | 0.200405 | 0.058525 | 0.063207 | 0.025361 | 0.415139 | 0.404604 | 0.353102 | 0.318767 | 0.253999 | 0.200546 | 0 | 0.016614 | 0.227229 | 4,128 | 130 | 99 | 31.753846 | 0.786834 | 0.002907 | 0 | 0.273504 | 0 | 0 | 0.239912 | 0.142197 | 0 | 0 | 0 | 0 | 0.034188 | 1 | 0.042735 | false | 0 | 0.068376 | 0.025641 | 0.136752 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |