max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
pystagram/views.py | Parkyes90/pystagram | 0 | 6620151 | <filename>pystagram/views.py
from django.views.generic import View
from django.http import HttpResponse
from django.conf import settings
import os
# noinspection PyUnusedLocal,PyMethodMayBeStatic
class ReactAppView(View):
    """Serve the compiled React single-page app.

    GET returns the contents of ``client/build/index.html`` under the
    project root; any failure (missing build, bad settings) is reported
    back to the client as a 501 response carrying the error text.
    """

    def get(self, request):
        try:
            index_path = os.path.join(
                str(settings.ROOT_DIR), 'client', 'build', 'index.html')
            with open(index_path) as html_file:
                return HttpResponse(html_file.read())
        except Exception as err:
            # NOTE(review): 501 ("Not Implemented") is an odd status for a
            # missing build artifact — presumably intentional; confirm.
            return HttpResponse(str(err), status=501)
| <filename>pystagram/views.py
from django.views.generic import View
from django.http import HttpResponse
from django.conf import settings
import os
# noinspection PyUnusedLocal,PyMethodMayBeStatic
class ReactAppView(View):
    """Serve the compiled React single-page app.

    GET returns the contents of ``client/build/index.html`` under the
    project root; any failure (missing build, bad settings) is reported
    back to the client as a 501 response carrying the error text.
    """

    def get(self, request):
        try:
            index_path = os.path.join(
                str(settings.ROOT_DIR), 'client', 'build', 'index.html')
            with open(index_path) as html_file:
                return HttpResponse(html_file.read())
        except Exception as err:
            # NOTE(review): 501 ("Not Implemented") is an odd status for a
            # missing build artifact — presumably intentional; confirm.
            return HttpResponse(str(err), status=501)
| en | 0.199865 | # noinspection PyUnusedLocal,PyMethodMayBeStatic | 2.272074 | 2 |
csv_cti/blueprints/op/redis.py | Osmond1689/csv-cti | 0 | 6620152 | <gh_stars>0
from flask_redis import FlaskRedis
redis_client = FlaskRedis() | from flask_redis import FlaskRedis
redis_client = FlaskRedis() | none | 1 | 1.304134 | 1 |
face_detector(video).py | naralakamsani/Face-Detector | 0 | 6620153 | <reponame>naralakamsani/Face-Detector
import cv2

# --- Tracking state --------------------------------------------------------
# consistent_face: number of consecutive frames in which the detected face
# stayed close to its previous position; (x_old, y_old) is that position.
consistent_face = 0
x_old = 0
y_old = 0

# Haar-cascade frontal-face detector (XML file must sit next to this script).
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Capture from the default webcam (device 0).
video = cv2.VideoCapture(0)

while True:
    # Take the video input; `check` is False when no frame could be read.
    check, frame = video.read()
    if not check:
        # Camera disconnected or stream ended: stop cleanly instead of
        # crashing in cv2.cvtColor on a None frame.
        break

    # Convert to gray scale (Haar cascades operate on single-channel images).
    grey_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Identify faces in the current frame.
    faces = face_cascade.detectMultiScale(grey_frame, scaleFactor=1.1, minNeighbors=5)

    for x, y, w, h in faces:
        predicted_group = "50% confident"
        # A face is "consistent" when it stays within a 15-pixel window of
        # its position in the previous frame.
        if (x_old + 15 > x > x_old - 15) and (y_old + 15 > y > y_old - 15):
            consistent_face = consistent_face + 1
        else:
            consistent_face = 0
        if consistent_face > 5:
            predicted_group = "100% confident"
        x_old = x
        y_old = y
        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.putText(frame, predicted_group, (x, y - 5), cv2.FONT_ITALIC, 1, (0, 255, 0), 2)
        # NOTE: the original code sliced an unused `face_frame` region with
        # frame[y-50:y+h+50, x-50:x+w+50]; for faces near the top/left
        # border the negative indices silently wrap around. The slice was
        # never used, so it has been removed (along with the unused `time`
        # import).

    cv2.imshow("video", frame)

    # If key is q: exit.
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
import cv2

# --- Tracking state --------------------------------------------------------
# consistent_face: number of consecutive frames in which the detected face
# stayed close to its previous position; (x_old, y_old) is that position.
consistent_face = 0
x_old = 0
y_old = 0

# Haar-cascade frontal-face detector (XML file must sit next to this script).
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Capture from the default webcam (device 0).
video = cv2.VideoCapture(0)

while True:
    # Take the video input; `check` is False when no frame could be read.
    check, frame = video.read()
    if not check:
        # Camera disconnected or stream ended: stop cleanly instead of
        # crashing in cv2.cvtColor on a None frame.
        break

    # Convert to gray scale (Haar cascades operate on single-channel images).
    grey_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Identify faces in the current frame.
    faces = face_cascade.detectMultiScale(grey_frame, scaleFactor=1.1, minNeighbors=5)

    for x, y, w, h in faces:
        predicted_group = "50% confident"
        # A face is "consistent" when it stays within a 15-pixel window of
        # its position in the previous frame.
        if (x_old + 15 > x > x_old - 15) and (y_old + 15 > y > y_old - 15):
            consistent_face = consistent_face + 1
        else:
            consistent_face = 0
        if consistent_face > 5:
            predicted_group = "100% confident"
        x_old = x
        y_old = y
        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.putText(frame, predicted_group, (x, y - 5), cv2.FONT_ITALIC, 1, (0, 255, 0), 2)
        # NOTE: the original code sliced an unused `face_frame` region with
        # frame[y-50:y+h+50, x-50:x+w+50]; for faces near the top/left
        # border the negative indices silently wrap around. The slice was
        # never used, so it has been removed (along with the unused `time`
        # import).

    cv2.imshow("video", frame)

    # If key is q: exit.
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
algorithm/stringpattern/kmp.py | ruslanlvivsky/python-algorithm | 3 | 6620154 | # 접두사 접미사 사용
def make_table(pattern):
    """Build the KMP failure (prefix) table for *pattern*.

    table[i] is the length of the longest proper prefix of
    pattern[:i + 1] that is also a suffix of it.
    """
    table = [0] * len(pattern)
    j = 0
    for i in range(1, len(pattern)):
        # Fall back along the failure links until the characters match.
        while j > 0 and pattern[i] != pattern[j]:
            j = table[j - 1]
        if pattern[i] == pattern[j]:
            j += 1
        table[i] = j
    return table


def kmp(pattern, string):
    """Knuth-Morris-Pratt search: find *pattern* inside *string*.

    Prints each match's start index (preserving the original behavior)
    and also returns the list of start indices, overlapping matches
    included. Returning the list is a backward-compatible generalization:
    the original returned None, which no caller could use.

    An empty pattern yields no matches (the original raised IndexError).
    """
    matches = []
    if not pattern:
        return matches
    table = make_table(pattern)
    j = 0
    for i in range(len(string)):
        while j > 0 and string[i] != pattern[j]:
            j = table[j - 1]
        if string[i] == pattern[j]:
            if j == len(pattern) - 1:
                start = i - len(pattern) + 1
                print(start)
                matches.append(start)
                # Continue from the failure link so overlapping matches
                # are also reported.
                j = table[j]
            else:
                j += 1
    return matches


if __name__ == "__main__":
    # Demo run, guarded so importing this module has no side effects.
    kmp('abcabcabcadab', 'abdaabcabcabcbcabcdabcadfdsbcabce')
| # 접두사 접미사 사용
def make_table(pattern):
    """Build the KMP failure (prefix) table for *pattern*.

    table[i] is the length of the longest proper prefix of
    pattern[:i + 1] that is also a suffix of it.
    """
    table = [0] * len(pattern)
    j = 0
    for i in range(1, len(pattern)):
        # Fall back along the failure links until the characters match.
        while j > 0 and pattern[i] != pattern[j]:
            j = table[j - 1]
        if pattern[i] == pattern[j]:
            j += 1
        table[i] = j
    return table


def kmp(pattern, string):
    """Knuth-Morris-Pratt search: find *pattern* inside *string*.

    Prints each match's start index (preserving the original behavior)
    and also returns the list of start indices, overlapping matches
    included. Returning the list is a backward-compatible generalization:
    the original returned None, which no caller could use.

    An empty pattern yields no matches (the original raised IndexError).
    """
    matches = []
    if not pattern:
        return matches
    table = make_table(pattern)
    j = 0
    for i in range(len(string)):
        while j > 0 and string[i] != pattern[j]:
            j = table[j - 1]
        if string[i] == pattern[j]:
            if j == len(pattern) - 1:
                start = i - len(pattern) + 1
                print(start)
                matches.append(start)
                # Continue from the failure link so overlapping matches
                # are also reported.
                j = table[j]
            else:
                j += 1
    return matches


if __name__ == "__main__":
    # Demo run, guarded so importing this module has no side effects.
    kmp('abcabcabcadab', 'abdaabcabcabcbcabcdabcadfdsbcabce')
| ko | 1.00007 | # 접두사 접미사 사용 | 3.369235 | 3 |
ex6/logic.py | AbdManian/pythonclass-exercises | 0 | 6620155 | <filename>ex6/logic.py<gh_stars>0
import database
#database.load_from_file()
def get_one_time_password(account, static_password):
    """Stub implementation: echo the inputs in a placeholder message."""
    return f'Not implemented account="{account}" pass="{static_password}"'
def do_transfer(transfer_info):
    """Stub implementation: echo *transfer_info* in a placeholder message."""
    return f'Not implemented transfer_info={transfer_info}'
def get_account_list():
    """Stub implementation: return three fixed placeholder account records."""
    balances = ('xxxx', 'yyyy', 'zzzz')
    return [
        {'account': 'NotImplemented_{}'.format(idx), 'balance': bal}
        for idx, bal in enumerate(balances, start=1)
    ]
def get_account_info(account):
    """Stub implementation: return placeholder details for *account*."""
    return {
        'account': account,
        'balance': 'xyz',
        'static_pass': '<PASSWORD>',
        'dynamic_pass_expire': '<PASSWORD>',
    }
| <filename>ex6/logic.py<gh_stars>0
import database
#database.load_from_file()
def get_one_time_password(account, static_password):
    """Stub implementation: echo the inputs in a placeholder message."""
    return f'Not implemented account="{account}" pass="{static_password}"'
def do_transfer(transfer_info):
    """Stub implementation: echo *transfer_info* in a placeholder message."""
    return f'Not implemented transfer_info={transfer_info}'
def get_account_list():
    """Stub implementation: return three fixed placeholder account records."""
    balances = ('xxxx', 'yyyy', 'zzzz')
    return [
        {'account': 'NotImplemented_{}'.format(idx), 'balance': bal}
        for idx, bal in enumerate(balances, start=1)
    ]
def get_account_info(account):
    """Stub implementation: return placeholder details for *account*."""
    return {
        'account': account,
        'balance': 'xyz',
        'static_pass': '<PASSWORD>',
        'dynamic_pass_expire': '<PASSWORD>',
    }
| fa | 0.295696 | #database.load_from_file() | 2.738551 | 3 |
src/models/full_system.py | Orange-OpenSource/AIVC | 18 | 6620156 | # Software Name: AIVC
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: BSD 3-Clause "New"
#
# This software is distributed under the BSD-3-Clause license.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import os
import torch
from torch.nn import Module, ReplicationPad2d
# Custom modules
from func_util.console_display import print_log_msg, print_dic_content
from func_util.img_processing import get_y_u_v, save_yuv_separately, cast_before_png_saving
from func_util.nn_util import get_value, dic_zeros_like, push_dic_to_device
from func_util.cluster_mngt import COMPUTE_PARAM
from func_util.GOP_structure import get_name_frame_code, get_depth_gop,\
FRAME_B, FRAME_P, FRAME_I, GOP_STRUCT_DIC
from models.codec_net import CodecNet
from models.mode_net import ModeNet
from models.motion_compensation import MotionCompensation
from layers.ae.ae_layers import InputLayer, OutputLayer
from real_life.utils import GOP_HEADER_SUFFIX
from real_life.check_md5sum import write_md5sum
from real_life.header import write_gop_header
from real_life.cat_binary_files import cat_one_gop
class FullNet(Module):
    """End-to-end learned video codec.

    Combines three sub-networks: ModeNet (MOFNet) producing the pixel-wise
    mode weights alpha/beta and the two motion fields, a motion-compensation
    module warping the reference frames, and CodecNet coding the residual
    part selected by alpha. ``forward`` compresses a single frame;
    ``GOP_forward`` drives a whole GOP and can emit a real bitstream.
    """

    def __init__(self, model_param):
        """Build the codec from the ``model_param`` dictionary.

        NOTE(review): the nested ``codec_net_param`` / ``mode_net_param``
        dictionaries inside ``model_param`` are mutated in place below
        (shortcut channel counts, out_c) — callers sharing that dict see
        the changes.
        """
        super(FullNet, self).__init__()

        # ===== RETRIEVE PARAMETERS ===== #
        self.model_param = model_param
        self.name = self.model_param.get('net_name')
        self.current_lr = self.model_param.get('initial_lr')
        # Retrieve distortion metrics (<mse>, <l1> or <ms_ssim>)
        self.dist_loss = self.model_param.get('dist_loss')
        self.warping_mode = self.model_param.get('warping_mode')

        # ===== COMPUTE VARIOUS FLAGS ===== #
        # By default, all flags are False
        # Construct alternate training list
        self.training_mode_list = ['mode_net', 'codec_net']
        # ===== COMPUTE VARIOUS FLAGS ===== #

        # ===== OTHERS PARAMETERS ===== #
        # Prefix used for saving during training
        self.train_save_idx = 0
        # Used to save the number of epochs done
        self.nb_epoch_done = 0
        self.batch_cnt = 0
        # ===== OTHERS PARAMETERS ===== #

        # ===== SUB NETWORKS ===== #
        self.in_layer = InputLayer()
        self.out_layer = OutputLayer()

        # Hard-wire some quantities here
        # For CodecNet, if there is a shortcut transform for y and/or z, we
        # need to have 3 more features at the input (in_c_shortcut_y). This
        # results in 6 total features for input (in_c).
        if model_param.get('codec_net_param').get('out_c_shortcut_y') or\
                model_param.get('codec_net_param').get('out_c_shortcut_z'):
            model_param.get('codec_net_param')['in_c_shortcut_y'] = 3
            model_param.get('codec_net_param')['in_c'] = 6
        # Same things for mode_net
        if model_param.get('mode_net_param').get('out_c_shortcut_y') or\
                model_param.get('mode_net_param').get('out_c_shortcut_z'):
            model_param.get('mode_net_param')['in_c_shortcut_y'] = 3
            model_param.get('mode_net_param')['in_c'] = 6

        # Always 3, because we output an image
        model_param.get('codec_net_param')['out_c'] = 3
        # 3 = alpha + v_x + v_y
        model_param.get('mode_net_param')['out_c'] = 3

        self.codec_net = CodecNet(model_param.get('codec_net_param'))
        self.mode_net = ModeNet(model_param.get('mode_net_param'))
        self.motion_compensation = MotionCompensation(model_param.get('motion_comp_param'))
        # ===== SUB NETWORKS ===== #

        print_log_msg(
            'INFO', '__init__ FullNet', 'network_name', self.name
        )
        print_log_msg(
            'DEBUG', '__init__ FullNet', 'training', self.training
        )
        print_log_msg(
            'DEBUG', '__init__ FullNet', 'warping_mode', self.warping_mode
        )
        print_log_msg('DEBUG', '__init__ FullNet', 'state', 'done')

        # ===== PRINT MISC STUFF ===== #
        print_dic_content(self.model_param, dic_name='Codec Model parameters')
        print_log_msg('INFO', '__init__ Codec', 'Printing entire', ' network')
        if not(COMPUTE_PARAM.get('flag_quiet')):
            print(self)
        # After architecture declaration, print number of parameters
        print_dic_content(self.get_nb_param(), dic_name='Codec Nb. parameters')
        # ===== PRINT MISC STUFF ===== #

    def get_nb_param(self):
        """Return a dict mapping each named parameter to its element count.

        The extra key ``'total'`` holds the sum over all parameters.
        """
        nb_param_dic = {}
        accumulator = 0
        for name, param in self.named_parameters():
            nb_param_dic[name] = param.numel()
            accumulator += param.numel()
        nb_param_dic['total'] = accumulator
        return nb_param_dic

    def forward(self, param):
        """
        Compress one frame.

        Returns a dict with the reconstructed frame ``x_hat`` (YUV dict),
        the (detached) mode outputs ``alpha``/``beta``/``warping``/``code``
        and the four rate-estimation tensors of CodecNet and ModeNet.
        """
        DEFAULT_PARAM = {
            # YUV dictionnary to encode/decode
            'code_dic': None,
            # YUV dictionnary of the previous frame
            'prev_dic': None,
            # YUV dictionnary of the next frame
            'next_dic': None,
            # A tensor of dimension B, indicating the type of the frame for
            # each of the B examples which are either: FRAME_I, FRAME_P or
            # FRAME_B
            # NOTE(review): the code below compares frame_type with == /
            # != as a scalar, not per-example — confirm the tensor wording.
            'frame_type': None,
            # If not None: override the ModeNet y with external y
            'external_y_modenet': None,
            # If not None: override the CodecNet y with external y
            'external_y_codecnet': None,
            # For multi-rate
            'idx_rate': 0.,
            # If True, alpha is equal to 1 for the entire frame (everything goes
            # into the CodecNet and ignore skip mode)
            # ! Deprecated
            'flag_no_copy': False,
            # If True, alpha is equal to 0 for the entire frame (everything goes
            # into the Skip Mode and ignore CodecNet)
            # ! Deprecated
            'flag_no_coder': False,
            'generate_bitstream': False,
            # Path where the bistream is written
            'bitstream_path': '',
            # Set to true to generate more stuff, useful for debug
            'flag_bitstream_debug': False,
        }

        net_out = {}

        # ===== RETRIEVE INPUTS ===== #
        p = get_value('prev_dic', param, DEFAULT_PARAM)
        c = get_value('code_dic', param, DEFAULT_PARAM)
        n = get_value('next_dic', param, DEFAULT_PARAM)
        frame_type = get_value('frame_type', param, DEFAULT_PARAM)
        external_y_modenet = get_value('external_y_modenet', param, DEFAULT_PARAM)
        external_y_codecnet = get_value('external_y_codecnet', param, DEFAULT_PARAM)
        idx_rate = get_value('idx_rate', param, DEFAULT_PARAM)
        generate_bitstream = get_value('generate_bitstream', param, DEFAULT_PARAM)
        bitstream_path = get_value('bitstream_path', param, DEFAULT_PARAM)
        flag_bitstream_debug = get_value('flag_bitstream_debug', param, DEFAULT_PARAM)
        # ===== RETRIEVE INPUTS ===== #

        # ===== PRE-PROCESSING ===== #
        prev_ref = self.in_layer(p)
        next_ref = self.in_layer(n)
        code = self.in_layer(c)
        # ===== PRE-PROCESSING ===== #

        B, C, H, W = prev_ref.size()
        cur_device = prev_ref.device

        # ===== MODE NET ===== #
        # NOTE(review): this first mode_net_input is never consumed — the
        # I-frame branch skips ModeNet entirely and the else-branch rebuilds
        # the dict (without 'use_shortcut_vector'). Candidate for removal.
        mode_net_input = {
            'code': code,
            'prev': prev_ref,
            'next': next_ref,
            'external_y': external_y_modenet,
            'idx_rate': idx_rate,
            'frame_type': frame_type,
            'use_shortcut_vector': torch.ones(B, device=cur_device).bool(),
            'generate_bitstream': generate_bitstream,
            'bitstream_path': bitstream_path,
            'flag_bitstream_debug': flag_bitstream_debug,
        }

        # We always ignore useless modules that is:
        #   - I-frame: MOFNet and CodecNet shortcut
        #   - P-frame: MOFNet shortcut
        #   - B-frame: Nothing
        # I-frame: skip the entire MOFNet
        if frame_type == FRAME_I:
            # No rate because we didn't use MOFNet
            # Dummy net_out_mode tensor
            net_out_mode = {}
            net_out_mode['rate_y'] = torch.zeros(1, 1, 1, 1, device=cur_device)
            net_out_mode['rate_z'] = torch.zeros(1, 1, 1, 1, device=cur_device)
            net_out_mode['alpha'] = torch.ones_like(code, device=cur_device)
            net_out_mode['beta'] = torch.ones_like(code, device=cur_device)
            net_out_mode['v_prev'] = torch.zeros(B, 2, H, W, device=cur_device)
            net_out_mode['v_next'] = torch.zeros(B, 2, H, W, device=cur_device)
        else:
            mode_net_input = {
                'code': code,
                'prev': prev_ref,
                'next': next_ref,
                'external_y': external_y_modenet,
                'idx_rate': idx_rate,
                'frame_type': frame_type,
                'generate_bitstream': generate_bitstream,
                'bitstream_path': bitstream_path,
                'flag_bitstream_debug': flag_bitstream_debug,
            }
            net_out_mode = self.mode_net(mode_net_input)

        # Retrieve value from net_out
        alpha = net_out_mode.get('alpha')
        beta = net_out_mode.get('beta')
        v_prev = net_out_mode.get('v_prev')
        v_next = net_out_mode.get('v_next')

        # alpha is not used for I frame
        if frame_type == FRAME_I:
            alpha[:, :, :, :] = 1.
        # Beta is only relevant for B frame
        if frame_type != FRAME_B:
            beta[:, :, :, :] = 1.
        # ===== MODE NET ===== #

        # ===== INTER PRED ===== #
        motion_comp_input = {
            'prev': prev_ref,
            'next': next_ref,
            'v_prev': v_prev,
            'v_next': v_next,
            'beta': beta,
            'interpol_mode': self.warping_mode,
        }
        motion_comp_out = self.motion_compensation(motion_comp_input)
        warped_ref = motion_comp_out.get('x_warp')
        # Skip-mode contribution: where alpha is small, copy the warping.
        skip_part = warped_ref * (1 - alpha)
        # ===== INTER PRED ===== #

        # ===== CODEC NET ===== #
        in_codec_net = alpha * code
        in_prediction_codec_net = alpha * warped_ref
        codec_net_input = {
            'code': in_codec_net,
            'prediction': in_prediction_codec_net,
            'external_y': external_y_codecnet,
            'idx_rate': idx_rate,
            'use_shortcut_vector': frame_type != FRAME_I,  # Shortcut in CodecNet is useless for I-frame
            'frame_type': frame_type,
            'generate_bitstream': generate_bitstream,
            'bitstream_path': bitstream_path,
            'flag_bitstream_debug': flag_bitstream_debug,
        }
        net_out_codec = self.codec_net(codec_net_input)
        codec_part = net_out_codec.get('x_hat')
        # ===== CODEC NET ===== #

        result = codec_part + skip_part

        # ===== DOWNSCALING AND 420 STUFF ===== #
        x_hat = self.out_layer(result)
        # Downscaled version of u and v can be smaller than
        # their true size by one pixel
        # Difference in size should be of 0 or 1 pixel
        x_hat_y, x_hat_u, x_hat_v = get_y_u_v(x_hat)
        code_y, code_u, code_v = get_y_u_v(c)
        nb_pad_row = abs(code_u.size()[2] - x_hat_u.size()[2])
        nb_pad_col = abs(code_u.size()[3] - x_hat_u.size()[3])
        my_padding = ReplicationPad2d((0, nb_pad_col, 0, nb_pad_row))
        # Remove supplementary pixels if needed
        x_hat = {
            'y': x_hat_y,
            'u': my_padding(x_hat_u),
            'v': my_padding(x_hat_v),
        }

        if generate_bitstream:
            x_hat = cast_before_png_saving({'x': x_hat, 'data_type': 'yuv_dic'})
        # ===== DOWNSCALING AND 420 STUFF ===== #

        net_out['x_hat'] = x_hat
        # We don't use this in the loss, it's only here for logging purpose.
        # However as no optimizer goes through its gradient and reset it,
        # it keeps accumulating its computational graph.
        # Using detach() avoid this issue
        net_out['alpha'] = alpha.detach()
        net_out['beta'] = beta.detach()
        net_out['warping'] = warped_ref.detach()
        net_out['code'] = code.detach()
        net_out['codec_rate_y'] = net_out_codec.get('rate_y')
        net_out['codec_rate_z'] = net_out_codec.get('rate_z')
        net_out['mode_rate_y'] = net_out_mode.get('rate_y')
        net_out['mode_rate_z'] = net_out_mode.get('rate_z')

        return net_out

    def GOP_forward(self, param):
        """
        Compress a GOP.

        Codes the frames temporal-layer by temporal-layer (references are
        always the reconstructed — not raw — frames), optionally writing
        per-frame bitstreams plus a GOP header, and returns a dict
        {'frame_0': forward-output, 'frame_1': ...}.
        """
        DEFAULT_PARAM = {
            # The GOP structure defined as in func_util/GOP_structure.py
            'GOP_struct': None,
            # The uncompressed frames (i.e. the frames to code), defined as:
            #   frame_0: {'y': tensor, 'u': tensor, 'v': tensor}
            #   frame_1: {'y': tensor, 'u': tensor, 'v': tensor}
            'raw_frames': None,
            # For multi-rate, not used for now
            'idx_rate': 0.,
            # Index of the GOP in the video. Scalar in [0, N]
            'index_GOP_in_video': 0,
            # If true, we generate a bitstream at the end
            'generate_bitstream': False,
            # Path of the directory in which we output the bitstream
            'bitstream_dir': '',
            # Frame index in the video of the first frame (I) of the
            # GOP.
            'real_idx_first_frame': 0,
            # Set to true to generate more stuff, useful for debug
            'flag_bitstream_debug': False,
        }

        # ========== RETRIEVE INPUTS ========== #
        GOP_struct = get_value('GOP_struct', param, DEFAULT_PARAM)
        raw_frames = get_value('raw_frames', param, DEFAULT_PARAM)
        idx_rate = get_value('idx_rate', param, DEFAULT_PARAM)
        index_GOP_in_video = get_value('index_GOP_in_video', param, DEFAULT_PARAM)
        generate_bitstream = get_value('generate_bitstream', param, DEFAULT_PARAM)
        bitstream_dir = get_value('bitstream_dir', param, DEFAULT_PARAM)
        real_idx_first_frame = get_value('real_idx_first_frame', param, DEFAULT_PARAM)
        flag_bitstream_debug = get_value('flag_bitstream_debug', param, DEFAULT_PARAM)
        # ========== RETRIEVE INPUTS ========== #

        # Outputs, each dic are structured as:
        # net_out: {'frame_0': {all entries}, 'frame_1': all entries}...
        net_out = {}

        # Number of temporal layers in the GOP, i.e. number of forward pass
        # to be performed. Get depth gop return the biggest coding order.
        N = get_depth_gop(GOP_struct)

        # Loop on the temporal layer. We go until N + 1 because if we have
        # a depth of 2, we want to code the depth of 0, 1 & 2!
        # Inside a temporal layer, all frames are independent so we code
        # them in parallel.
        for i in range(N + 1):
            # For a gop4 I - B - B - B - P,
            #   i = 0 --> name_frame_code = ['frame_0']
            #   i = 1 --> name_frame_code = ['frame_4']
            #   i = 2 --> name_frame_code = ['frame_2']
            #   i = 3 --> name_frame_code = ['frame_1', 'frame_3']
            # Return a list of one single element, so we remove the list
            # NOTE(review): only index [0] of the layer is taken; layers
            # with several frames (frame_1/frame_3 above) would need a
            # loop here — confirm the GOP structures used guarantee one
            # frame per coding-order value.
            name_frame_code = get_name_frame_code(GOP_struct, i)[0]
            # YUV dictionnary of the frame to code
            code = raw_frames.get(name_frame_code)
            # Type of the frame to code
            type_frame = GOP_struct.get(name_frame_code).get('type')

            # Get the references. We have a future ref. Retrieve the compressed version!
            if type_frame == FRAME_B:
                next_ref = net_out.get(GOP_struct.get(name_frame_code).get('next_ref')).get('x_hat')
            else:
                next_ref = dic_zeros_like(code)

            # We have a previous frame. Retrieve the compressed version
            if type_frame != FRAME_I:
                prev_ref = net_out.get(GOP_struct.get(name_frame_code).get('prev_ref')).get('x_hat')
            else:
                prev_ref = dic_zeros_like(code)

            # Push frame to device before the forward pass
            my_device = COMPUTE_PARAM.get('device')
            code = push_dic_to_device(code, my_device)
            prev_ref = push_dic_to_device(prev_ref, my_device)
            next_ref = push_dic_to_device(next_ref, my_device)

            model_input = {
                'code_dic': code,
                'prev_dic': prev_ref,
                'next_dic': next_ref,
                'idx_rate': idx_rate,
                'frame_type': type_frame,
                'generate_bitstream': generate_bitstream,
                # Complete bitstream path is: bitstream_dir/<idx_frame>
                # where idx_frame = real_idx_first_frame + X (X is found in frame_X)
                'bitstream_path': bitstream_dir + str(real_idx_first_frame + int(name_frame_code.split('_')[-1])),
                'flag_bitstream_debug': flag_bitstream_debug,
            }

            cur_net_out = self.forward(model_input)

            # Push frame to cpu after the forward pass
            my_device = 'cpu'
            code = push_dic_to_device(code, my_device)
            prev_ref = push_dic_to_device(prev_ref, my_device)
            next_ref = push_dic_to_device(next_ref, my_device)
            torch.cuda.empty_cache()

            # Add the current frame dictionaries to the global ones
            net_out[name_frame_code] = cur_net_out

            # If we're generating a bitstream and if we're debugging this
            # bitstream, save the reconstructed PNGs (YUV) for the frame,
            # and compute the md5sum for the three PNGs.
            if generate_bitstream and flag_bitstream_debug:
                # /RealLife/debug/SequenceName/
                root_debug_path = '.' + '/'.join(bitstream_dir.split('/')[:-3]) + '/debug/' + bitstream_dir.split('/')[-2] + '/'
                # Real index of the frame in the video
                idx_frame = real_idx_first_frame + int(name_frame_code.split('_')[-1])
                # Save yuv as PNGs
                decoded_frame = cast_before_png_saving({
                    'x': net_out.get(name_frame_code).get('x_hat'), 'data_type': 'yuv_dic'
                })
                # print('Encoder decoded frame y: ' + str(decoded_frame.get('y').abs().sum()))
                save_yuv_separately(decoded_frame, root_debug_path + str(idx_frame))
                # Write the md5 in files and delete the PNGs
                for channel in ['y', 'u', 'v']:
                    write_md5sum({
                        'in_file': root_debug_path + str(idx_frame) + '_' + channel + '.png',
                        'out_file': root_debug_path + str(idx_frame) + '_' + channel + '.md5',
                    })
                    os.system('rm ' + root_debug_path + str(idx_frame) + '_' + channel + '.png')

        if generate_bitstream:
            # Write a header for this GOP
            # We need the GOP struct, and the x, y and z shape (we'll retrieve
            # this from the rate estimation tensor).
            for k in GOP_STRUCT_DIC:
                if GOP_struct == GOP_STRUCT_DIC.get(k):
                    GOP_struct_name = k
                    break
            write_gop_header({
                'header_path': bitstream_dir + str(index_GOP_in_video) + GOP_HEADER_SUFFIX,
                # Write only the last two dimensions (H and W)
                'data_dim': {
                    'x': raw_frames.get('frame_0').get('y').size()[2:],
                    'y': net_out.get('frame_0').get('codec_rate_y').size()[2:],
                    'z': net_out.get('frame_0').get('codec_rate_z').size()[2:],
                },
                'GOP_struct_name': GOP_struct_name,
                'idx_rate': idx_rate,
            })
            cat_one_gop({
                'bitstream_dir': bitstream_dir,
                'idx_gop': index_GOP_in_video,
            })

        return net_out
| # Software Name: AIVC
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: BSD 3-Clause "New"
#
# This software is distributed under the BSD-3-Clause license.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import os
import torch
from torch.nn import Module, ReplicationPad2d
# Custom modules
from func_util.console_display import print_log_msg, print_dic_content
from func_util.img_processing import get_y_u_v, save_yuv_separately, cast_before_png_saving
from func_util.nn_util import get_value, dic_zeros_like, push_dic_to_device
from func_util.cluster_mngt import COMPUTE_PARAM
from func_util.GOP_structure import get_name_frame_code, get_depth_gop,\
FRAME_B, FRAME_P, FRAME_I, GOP_STRUCT_DIC
from models.codec_net import CodecNet
from models.mode_net import ModeNet
from models.motion_compensation import MotionCompensation
from layers.ae.ae_layers import InputLayer, OutputLayer
from real_life.utils import GOP_HEADER_SUFFIX
from real_life.check_md5sum import write_md5sum
from real_life.header import write_gop_header
from real_life.cat_binary_files import cat_one_gop
class FullNet(Module):
def __init__(self, model_param):
super(FullNet, self).__init__()
# ===== RETRIEVE PARAMETERS ===== #
self.model_param = model_param
self.name = self.model_param.get('net_name')
self.current_lr = self.model_param.get('initial_lr')
# Retrieve distortion metrics (<mse>, <l1> or <ms_ssim>)
self.dist_loss = self.model_param.get('dist_loss')
self.warping_mode = self.model_param.get('warping_mode')
# ===== COMPUTE VARIOUS FLAGS ===== #
# By default, all flags are False
# Construct alternate training list
self.training_mode_list = ['mode_net', 'codec_net']
# ===== COMPUTE VARIOUS FLAGS ===== #
# ===== OTHERS PARAMETERS ===== #
# Prefix used for saving during training
self.train_save_idx = 0
# Used to save the number of epochs done
self.nb_epoch_done = 0
self.batch_cnt = 0
# ===== OTHERS PARAMETERS ===== #
# ===== SUB NETWORKS ===== #
self.in_layer = InputLayer()
self.out_layer = OutputLayer()
# Hard-wire some quantities here
# For CodecNet, if there is a shortcut transform for y and/or z, we
# need to have 3 more features at the input (in_c_shortcut_y). This
# results in 6 total features for input (in_c).
if model_param.get('codec_net_param').get('out_c_shortcut_y') or\
model_param.get('codec_net_param').get('out_c_shortcut_z'):
model_param.get('codec_net_param')['in_c_shortcut_y'] = 3
model_param.get('codec_net_param')['in_c'] = 6
# Same things for mode_net
if model_param.get('mode_net_param').get('out_c_shortcut_y') or\
model_param.get('mode_net_param').get('out_c_shortcut_z'):
model_param.get('mode_net_param')['in_c_shortcut_y'] = 3
model_param.get('mode_net_param')['in_c'] = 6
# Always 3, because we output an image
model_param.get('codec_net_param')['out_c'] = 3
# 3 = alpha + v_x + v_y
model_param.get('mode_net_param')['out_c'] = 3
self.codec_net = CodecNet(model_param.get('codec_net_param'))
self.mode_net = ModeNet(model_param.get('mode_net_param'))
self.motion_compensation = MotionCompensation(model_param.get('motion_comp_param'))
# ===== SUB NETWORKS ===== #
print_log_msg(
'INFO', '__init__ FullNet', 'network_name', self.name
)
print_log_msg(
'DEBUG', '__init__ FullNet', 'training', self.training
)
print_log_msg(
'DEBUG', '__init__ FullNet', 'warping_mode', self.warping_mode
)
print_log_msg('DEBUG', '__init__ FullNet', 'state', 'done')
# ===== PRINT MISC STUFF ===== #
print_dic_content(self.model_param, dic_name='Codec Model parameters')
print_log_msg('INFO', '__init__ Codec', 'Printing entire', ' network')
if not(COMPUTE_PARAM.get('flag_quiet')):
print(self)
# After architecture declaration, print number of parameters
print_dic_content(self.get_nb_param(), dic_name='Codec Nb. parameters')
# ===== PRINT MISC STUFF ===== #
def get_nb_param(self):
nb_param_dic = {}
accumulator = 0
for name, param in self.named_parameters():
nb_param_dic[name] = param.numel()
accumulator += param.numel()
nb_param_dic['total'] = accumulator
return nb_param_dic
def forward(self, param):
"""
Compress one frame.
"""
DEFAULT_PARAM = {
# YUV dictionnary to encode/decode
'code_dic': None,
# YUV dictionnary of the previous frame
'prev_dic': None,
# YUV dictionnary of the next frame
'next_dic': None,
# A tensor of dimension B, indicating the type of the frame for
# each of the B examples which are either: FRAME_I, FRAME_P or
# FRAME_B
'frame_type': None,
# If not None: override the ModeNet y with external y
'external_y_modenet': None,
# If not None: override the CodecNet y with external y
'external_y_codecnet': None,
# For multi-rate
'idx_rate': 0.,
# If True, alpha is equal to 1 for the entire frame (everything goes
# into the CodecNet and ignore skip mode)
# ! Deprecated
'flag_no_copy': False,
# If True, alpha is equal to 0 for the entire frame (everything goes
# into the Skip Mode and ignore CodecNet)
# ! Deprecated
'flag_no_coder': False,
'generate_bitstream': False,
# Path where the bistream is written
'bitstream_path': '',
# Set to true to generate more stuff, useful for debug
'flag_bitstream_debug': False,
}
net_out = {}
# ===== RETRIEVE INPUTS ===== #
p = get_value('prev_dic', param, DEFAULT_PARAM)
c = get_value('code_dic', param, DEFAULT_PARAM)
n = get_value('next_dic', param, DEFAULT_PARAM)
frame_type = get_value('frame_type', param, DEFAULT_PARAM)
external_y_modenet = get_value('external_y_modenet', param, DEFAULT_PARAM)
external_y_codecnet = get_value('external_y_codecnet', param, DEFAULT_PARAM)
idx_rate = get_value('idx_rate', param, DEFAULT_PARAM)
generate_bitstream = get_value('generate_bitstream', param, DEFAULT_PARAM)
bitstream_path = get_value('bitstream_path', param, DEFAULT_PARAM)
flag_bitstream_debug = get_value('flag_bitstream_debug', param, DEFAULT_PARAM)
# ===== RETRIEVE INPUTS ===== #
# ===== PRE-PROCESSING ===== #
prev_ref = self.in_layer(p)
next_ref = self.in_layer(n)
code = self.in_layer(c)
# ===== PRE-PROCESSING ===== #
B, C, H, W = prev_ref.size()
cur_device = prev_ref.device
# ===== MODE NET ===== #
mode_net_input = {
'code': code,
'prev': prev_ref,
'next': next_ref,
'external_y': external_y_modenet,
'idx_rate': idx_rate,
'frame_type': frame_type,
'use_shortcut_vector': torch.ones(B, device=cur_device).bool(),
'generate_bitstream': generate_bitstream,
'bitstream_path': bitstream_path,
'flag_bitstream_debug': flag_bitstream_debug,
}
# We always ignore useless modules that is:
# - I-frame: MOFNet and CodecNet shortcut
# - P-frame: MOFNet shortcut
# - B-frame: Nothing
# I-frame: skip the entire MOFNet
if frame_type == FRAME_I:
# No rate because we didn't use MOFNet
# Dummy net_out_mode tensor
net_out_mode = {}
net_out_mode['rate_y'] = torch.zeros(1, 1, 1, 1, device=cur_device)
net_out_mode['rate_z'] = torch.zeros(1, 1, 1, 1, device=cur_device)
net_out_mode['alpha'] = torch.ones_like(code, device=cur_device)
net_out_mode['beta'] = torch.ones_like(code, device=cur_device)
net_out_mode['v_prev'] = torch.zeros(B, 2, H, W, device=cur_device)
net_out_mode['v_next'] = torch.zeros(B, 2, H, W, device=cur_device)
else:
mode_net_input = {
'code': code,
'prev': prev_ref,
'next': next_ref,
'external_y': external_y_modenet,
'idx_rate': idx_rate,
'frame_type': frame_type,
'generate_bitstream': generate_bitstream,
'bitstream_path': bitstream_path,
'flag_bitstream_debug': flag_bitstream_debug,
}
net_out_mode = self.mode_net(mode_net_input)
# Retrieve value from net_out
alpha = net_out_mode.get('alpha')
beta = net_out_mode.get('beta')
v_prev = net_out_mode.get('v_prev')
v_next = net_out_mode.get('v_next')
# alpha is not used for I frame
if frame_type == FRAME_I:
alpha[:, :, :, :] = 1.
# Beta is only relevant for B frame
if frame_type != FRAME_B:
beta[:, :, :, :] = 1.
# ===== MODE NET ===== #
# ===== INTER PRED ===== #
motion_comp_input = {
'prev': prev_ref,
'next': next_ref,
'v_prev': v_prev,
'v_next': v_next,
'beta': beta,
'interpol_mode': self.warping_mode,
}
motion_comp_out = self.motion_compensation(motion_comp_input)
warped_ref = motion_comp_out.get('x_warp')
skip_part = warped_ref * (1 - alpha)
# ===== INTER PRED ===== #
# ===== CODEC NET ===== #
in_codec_net = alpha * code
in_prediction_codec_net = alpha * warped_ref
codec_net_input = {
'code': in_codec_net,
'prediction': in_prediction_codec_net,
'external_y': external_y_codecnet,
'idx_rate': idx_rate,
'use_shortcut_vector': frame_type != FRAME_I, # Shortcut in CodecNet is useless for I-frame
'frame_type': frame_type,
'generate_bitstream': generate_bitstream,
'bitstream_path': bitstream_path,
'flag_bitstream_debug': flag_bitstream_debug,
}
net_out_codec = self.codec_net(codec_net_input)
codec_part = net_out_codec.get('x_hat')
# ===== CODEC NET ===== #
result = codec_part + skip_part
# ===== DOWNSCALING AND 420 STUFF ===== #
x_hat = self.out_layer(result)
# Downscaled version of u and v can be smaller than
# their true size by one pixel
# Difference in size should be of 0 or 1 pixel
x_hat_y, x_hat_u, x_hat_v = get_y_u_v(x_hat)
code_y, code_u, code_v = get_y_u_v(c)
nb_pad_row = abs(code_u.size()[2] - x_hat_u.size()[2])
nb_pad_col = abs(code_u.size()[3] - x_hat_u.size()[3])
my_padding = ReplicationPad2d((0, nb_pad_col, 0, nb_pad_row))
# Remove supplementary pixels if needed
x_hat = {
'y': x_hat_y,
'u': my_padding(x_hat_u),
'v': my_padding(x_hat_v),
}
if generate_bitstream:
x_hat = cast_before_png_saving({'x': x_hat, 'data_type': 'yuv_dic'})
# ===== DOWNSCALING AND 420 STUFF ===== #
net_out['x_hat'] = x_hat
# We don't use this in the loss, it's only here for logging purpose.
# However as no optimizer goes through its gradient and reset it,
# it keeps accumulating its computational graph.
# Using detach() avoid this issue
net_out['alpha'] = alpha.detach()
net_out['beta'] = beta.detach()
net_out['warping'] = warped_ref.detach()
net_out['code'] = code.detach()
net_out['codec_rate_y'] = net_out_codec.get('rate_y')
net_out['codec_rate_z'] = net_out_codec.get('rate_z')
net_out['mode_rate_y'] = net_out_mode.get('rate_y')
net_out['mode_rate_z'] = net_out_mode.get('rate_z')
return net_out
def GOP_forward(self, param):
    """
    Compress a GOP.

    Codes the GOP one temporal layer at a time, in coding order: the
    I-frame first, then each frame whose references have already been
    reconstructed.  Each frame is compressed by self.forward(), and its
    reconstruction ('x_hat') is fed back as reference for later layers.

    Returns a dict mapping frame name ('frame_0', 'frame_1', ...) to the
    per-frame net_out dict produced by self.forward().
    """
    DEFAULT_PARAM = {
        # The GOP structure defined as in func_util/GOP_structure.py
        'GOP_struct': None,
        # The uncompressed frames (i.e. the frames to code), defined as:
        # frame_0: {'y': tensor, 'u': tensor, 'v': tensor}
        # frame_1: {'y': tensor, 'u': tensor, 'v': tensor}
        'raw_frames': None,
        # For multi-rate, not used for now
        'idx_rate': 0.,
        # Index of the GOP in the video. Scalar in [0, N]
        'index_GOP_in_video': 0,
        # If true, we generate a bitstream at the end
        'generate_bitstream': False,
        # Path of the directory in which we output the bitstream
        'bitstream_dir': '',
        # Frame index in the video of the first frame (I) of the
        # GOP.
        'real_idx_first_frame': 0,
        # Set to true to generate more stuff, useful for debug
        'flag_bitstream_debug': False,
    }
    # ========== RETRIEVE INPUTS ========== #
    GOP_struct = get_value('GOP_struct', param, DEFAULT_PARAM)
    raw_frames = get_value('raw_frames', param, DEFAULT_PARAM)
    idx_rate = get_value('idx_rate', param, DEFAULT_PARAM)
    index_GOP_in_video = get_value('index_GOP_in_video', param, DEFAULT_PARAM)
    generate_bitstream = get_value('generate_bitstream', param, DEFAULT_PARAM)
    bitstream_dir = get_value('bitstream_dir', param, DEFAULT_PARAM)
    real_idx_first_frame = get_value('real_idx_first_frame', param, DEFAULT_PARAM)
    flag_bitstream_debug = get_value('flag_bitstream_debug', param, DEFAULT_PARAM)
    # ========== RETRIEVE INPUTS ========== #
    # Outputs, each dic are structured as:
    # net_out: {'frame_0': {all entries}, 'frame_1': all entries}...
    net_out = {}
    # Number of temporal layers in the GOP, i.e. number of forward pass
    # to be performed. Get depth gop return the biggest coding order.
    N = get_depth_gop(GOP_struct)
    # Loop on the temporal layer. We go until N + 1 because if we have
    # a depth of 2, we want to code the depth of 0, 1 & 2!
    # Inside a temporal layer, all frames are independent so we code
    # them in parallel.
    for i in range(N + 1):
        # For a gop4 I - B - B - B - P,
        # i = 0 --> name_frame_code = ['frame_0']
        # i = 1 --> name_frame_code = ['frame_4']
        # i = 2 --> name_frame_code = ['frame_2']
        # i = 3 --> name_frame_code = ['frame_1', 'frame_3']
        # Return a list of one single element, so we remove the list
        # NOTE(review): only the first frame of the layer is taken here
        # ([0]); despite the comment above, frames of a layer are not
        # actually coded in parallel -- confirm that every supported
        # GOP structure has exactly one frame per coding order.
        name_frame_code = get_name_frame_code(GOP_struct, i)[0]
        # YUV dictionary of the frame to code
        code = raw_frames.get(name_frame_code)
        # Type of the frame to code
        type_frame = GOP_struct.get(name_frame_code).get('type')
        # Get the references. We have a future ref. Retrieve the compressed version!
        if type_frame == FRAME_B:
            next_ref = net_out.get(GOP_struct.get(name_frame_code).get('next_ref')).get('x_hat')
        else:
            next_ref = dic_zeros_like(code)
        # We have a previous frame. Retrieve the compressed version
        if type_frame != FRAME_I:
            prev_ref = net_out.get(GOP_struct.get(name_frame_code).get('prev_ref')).get('x_hat')
        else:
            prev_ref = dic_zeros_like(code)
        # Push frame to device before the forward pass
        my_device = COMPUTE_PARAM.get('device')
        code = push_dic_to_device(code, my_device)
        prev_ref = push_dic_to_device(prev_ref, my_device)
        next_ref = push_dic_to_device(next_ref, my_device)
        model_input = {
            'code_dic': code,
            'prev_dic': prev_ref,
            'next_dic': next_ref,
            'idx_rate': idx_rate,
            'frame_type': type_frame,
            'generate_bitstream': generate_bitstream,
            # Complete bitstream path is: bitstream_dir/<idx_frame>
            # where idx_frame = real_idx_first_frame + X (X is found in frame_X)
            'bitstream_path': bitstream_dir + str(real_idx_first_frame + int(name_frame_code.split('_')[-1])),
            'flag_bitstream_debug': flag_bitstream_debug,
        }
        cur_net_out = self.forward(model_input)
        # Push frame to cpu after the forward pass
        my_device = 'cpu'
        code = push_dic_to_device(code, my_device)
        prev_ref = push_dic_to_device(prev_ref, my_device)
        next_ref = push_dic_to_device(next_ref, my_device)
        torch.cuda.empty_cache()
        # Add the current frame dictionaries to the global ones
        net_out[name_frame_code] = cur_net_out
        # If we're generating a bitstream and if we're debugging this
        # bitstream, save the reconstructed PNGs (YUV) for the frame,
        # and compute the md5sum for the three PNGs.
        if generate_bitstream and flag_bitstream_debug:
            # /RealLife/debug/SequenceName/
            root_debug_path = '.' + '/'.join(bitstream_dir.split('/')[:-3]) + '/debug/' + bitstream_dir.split('/')[-2] + '/'
            # Real index of the frame in the video
            idx_frame = real_idx_first_frame + int(name_frame_code.split('_')[-1])
            # Save yuv as PNGs
            decoded_frame = cast_before_png_saving({
                'x': net_out.get(name_frame_code).get('x_hat'), 'data_type': 'yuv_dic'
            })
            # print('Encoder decoded frame y: ' + str(decoded_frame.get('y').abs().sum()))
            save_yuv_separately(decoded_frame, root_debug_path + str(idx_frame))
            # Write the md5 in files and delete the PNGs
            for channel in ['y', 'u', 'v']:
                write_md5sum({
                    'in_file': root_debug_path + str(idx_frame) + '_' + channel + '.png',
                    'out_file': root_debug_path + str(idx_frame) + '_' + channel + '.md5',
                })
                os.system('rm ' + root_debug_path + str(idx_frame) + '_' + channel + '.png')
    if generate_bitstream:
        # Write a header for this GOP
        # We need the GOP struct, and the x, y and z shape (we'll retrieve
        # this from the rate estimation tensor).
        for k in GOP_STRUCT_DIC:
            if GOP_struct == GOP_STRUCT_DIC.get(k):
                GOP_struct_name = k
                break
        write_gop_header({
            'header_path': bitstream_dir + str(index_GOP_in_video) + GOP_HEADER_SUFFIX,
            # Write only the last two dimensions (H and W)
            'data_dim': {
                'x': raw_frames.get('frame_0').get('y').size()[2:],
                'y': net_out.get('frame_0').get('codec_rate_y').size()[2:],
                'z': net_out.get('frame_0').get('codec_rate_z').size()[2:],
            },
            'GOP_struct_name': GOP_struct_name,
            'idx_rate': idx_rate,
        })
        cat_one_gop({
            'bitstream_dir': bitstream_dir,
            'idx_gop': index_GOP_in_video,
        })
    return net_out
| en | 0.741657 | # Software Name: AIVC # SPDX-FileCopyrightText: Copyright (c) 2021 Orange # SPDX-License-Identifier: BSD 3-Clause "New" # # This software is distributed under the BSD-3-Clause license. # # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # Custom modules # ===== RETRIEVE PARAMETERS ===== # # Retrieve distortion metrics (<mse>, <l1> or <ms_ssim>) # ===== COMPUTE VARIOUS FLAGS ===== # # By default, all flags are False # Construct alternate training list # ===== COMPUTE VARIOUS FLAGS ===== # # ===== OTHERS PARAMETERS ===== # # Prefix used for saving during training # Used to save the number of epochs done # ===== OTHERS PARAMETERS ===== # # ===== SUB NETWORKS ===== # # Hard-wire some quantities here # For CodecNet, if there is a shortcut transform for y and/or z, we # need to have 3 more features at the input (in_c_shortcut_y). This # results in 6 total features for input (in_c). # Same things for mode_net # Always 3, because we output an image # 3 = alpha + v_x + v_y # ===== SUB NETWORKS ===== # # ===== PRINT MISC STUFF ===== # # After architecture declaration, print number of parameters # ===== PRINT MISC STUFF ===== # Compress one frame. # YUV dictionnary to encode/decode # YUV dictionnary of the previous frame # YUV dictionnary of the next frame # A tensor of dimension B, indicating the type of the frame for # each of the B examples which are either: FRAME_I, FRAME_P or # FRAME_B # If not None: override the ModeNet y with external y # If not None: override the CodecNet y with external y # For multi-rate # If True, alpha is equal to 1 for the entire frame (everything goes # into the CodecNet and ignore skip mode) # ! Deprecated # If True, alpha is equal to 0 for the entire frame (everything goes # into the Skip Mode and ignore CodecNet) # ! 
Deprecated # Path where the bistream is written # Set to true to generate more stuff, useful for debug # ===== RETRIEVE INPUTS ===== # # ===== RETRIEVE INPUTS ===== # # ===== PRE-PROCESSING ===== # # ===== PRE-PROCESSING ===== # # ===== MODE NET ===== # # We always ignore useless modules that is: # - I-frame: MOFNet and CodecNet shortcut # - P-frame: MOFNet shortcut # - B-frame: Nothing # I-frame: skip the entire MOFNet # No rate because we didn't use MOFNet # Dummy net_out_mode tensor # Retrieve value from net_out # alpha is not used for I frame # Beta is only relevant for B frame # ===== MODE NET ===== # # ===== INTER PRED ===== # # ===== INTER PRED ===== # # ===== CODEC NET ===== # # Shortcut in CodecNet is useless for I-frame # ===== CODEC NET ===== # # ===== DOWNSCALING AND 420 STUFF ===== # # Downscaled version of u and v can be smaller than # their true size by one pixel # Difference in size should be of 0 or 1 pixel # Remove supplementary pixels if needed # ===== DOWNSCALING AND 420 STUFF ===== # # We don't use this in the loss, it's only here for logging purpose. # However as no optimizer goes through its gradient and reset it, # it keeps accumulating its computational graph. # Using detach() avoid this issue Compress a GOP. # The GOP structure defined as in func_util/GOP_structure.py # The uncompressed frames (i.e. the frames to code), defined as: # frame_0: {'y': tensor, 'u': tensor, 'v': tensor} # frame_1: {'y': tensor, 'u': tensor, 'v': tensor} # For multi-rate, not used for now # Index of the GOP in the video. Scalar in [0, N] # If true, we generate a bitstream at the end # Path of the directory in which we output the bitstream # Frame index in the video of the first frame (I) of the # GOP. # Set to true to generate more stuff, useful for debug # ========== RETRIEVE INPUTS ========== # # ========== RETRIEVE INPUTS ========== # # Outputs, each dic are structured as: # net_out: {'frame_0': {all entries}, 'frame_1': all entries}... 
# Number of temporal layers in the GOP, i.e. number of forward pass # to be performed. Get depth gop return the biggest coding order. # Loop on the temporal layer. We go until N + 1 because if we have # a depth of 2, we want to code the depth of 0, 1 & 2! # Inside a temporal layer, all frames are independent so we code # them in parallel. # For a gop4 I - B - B - B - P, # i = 0 --> name_frame_code = ['frame_0'] # i = 1 --> name_frame_code = ['frame_4'] # i = 2 --> name_frame_code = ['frame_2'] # i = 3 --> name_frame_code = ['frame_1', 'frame_3'] # Return a list of one single element, so we remove the list # YUV dictionnary of the frame to code # Type of the frame to code # Get the references. We have a future ref. Retrieve the compressed version! # We have a previous frame. Retrieve the compressed version # Push frame to device before the forward pass # Complete bitstream path is: bitstream_dir/<idx_frame> # where idx_frame = real_idx_first_frame + X (X is found in frame_X) # Push frame to cpu after the forward pass # Add the current frame dictionaries to the global ones # If we're generating a bitstream and if we're debugging this # bitstream, save the reconstructed PNGs (YUV) for the frame, # and compute the md5sum for the three PNGs. # /RealLife/debug/SequenceName/ # Real index of the frame in the video # Save yuv as PNGs # print('Encoder decoded frame y: ' + str(decoded_frame.get('y').abs().sum())) # Write the md5 in files and delete the PNGs # Write a header for this GOP # We need the GOP struct, and the x, y and z shape (we'll retrieve # this from the rate estimation tensor). # Write only the last two dimensions (H and W) | 1.804855 | 2 |
src/logger/migrations/0001_initial.py | btmc/mogura | 0 | 6620157 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the job, job_body, report and schedule models."""

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='job',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Queue / start / finish timestamps; ts_queue is set automatically on insert.
                ('ts_queue', models.DateTimeField(auto_now_add=True, null=True)),
                ('ts_start', models.DateTimeField(null=True)),
                ('ts_finish', models.DateTimeField(null=True)),
                # Tri-state: None (pending), True (success), False (failure) -- TODO confirm semantics.
                ('status', models.NullBooleanField()),
                ('error', models.TextField(null=True, blank=True)),
                ('ttr', models.IntegerField(null=True)),
                ('priority', models.BigIntegerField(null=True)),
                ('is_meta', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='job_body',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('body', models.TextField(null=True, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='report',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ts', models.DateTimeField(auto_now_add=True)),
                ('reset_cache', models.NullBooleanField()),
                ('is_cached', models.NullBooleanField()),
                ('rc_exists', models.NullBooleanField()),
                ('send_email', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='schedule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ts', models.DateTimeField(auto_now_add=True)),
                ('status', models.NullBooleanField()),
                ('error', models.TextField(null=True, blank=True)),
            ],
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='job',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ts_queue', models.DateTimeField(auto_now_add=True, null=True)),
('ts_start', models.DateTimeField(null=True)),
('ts_finish', models.DateTimeField(null=True)),
('status', models.NullBooleanField()),
('error', models.TextField(null=True, blank=True)),
('ttr', models.IntegerField(null=True)),
('priority', models.BigIntegerField(null=True)),
('is_meta', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='job_body',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('body', models.TextField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='report',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ts', models.DateTimeField(auto_now_add=True)),
('reset_cache', models.NullBooleanField()),
('is_cached', models.NullBooleanField()),
('rc_exists', models.NullBooleanField()),
('send_email', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='schedule',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ts', models.DateTimeField(auto_now_add=True)),
('status', models.NullBooleanField()),
('error', models.TextField(null=True, blank=True)),
],
),
]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.712492 | 2 |
tests/test_server.py | H--o-l/aiocouch | 0 | 6620158 | <reponame>H--o-l/aiocouch<filename>tests/test_server.py
import asyncio
import pytest
from aiocouch import CouchDB, Database
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
async def test_list_databases(couchdb: CouchDB) -> None:
    """The fixture database must not be listed before it is created."""
    dbs = await couchdb.keys()
    assert "aiocouch_test_fixture_database" not in dbs
async def test_list_database(couchdb: CouchDB, database: Database) -> None:
    """With the database fixture active, the server lists the database."""
    dbs = await couchdb.keys()
    assert "aiocouch_test_fixture_database" in dbs
async def test_create_delete_database(couchdb: CouchDB) -> None:
    """Creating then deleting a database is reflected in the server listing."""
    database = await couchdb.create("aiocouch_test_fixture_database2")

    dbs = await couchdb.keys()
    assert "aiocouch_test_fixture_database2" in dbs

    await database.delete()

    dbs = await couchdb.keys()
    assert "aiocouch_test_fixture_database2" not in dbs
async def test_create_for_existing(couchdb: CouchDB, database: Database) -> None:
    """create() on an already existing database raises PreconditionFailedError."""
    from aiocouch import PreconditionFailedError

    with pytest.raises(PreconditionFailedError):
        await couchdb.create(database.id)
async def test_create_for_existing_but_mismatching_params(
    couchdb: CouchDB, database: Database
) -> None:
    """create() with different options on an existing database also fails."""
    from aiocouch import PreconditionFailedError

    with pytest.raises(PreconditionFailedError):
        await couchdb.create(database.id, partitioned=True)
async def test_create_for_existing_ok_with_race(
    couchdb: CouchDB, database_id: str
) -> None:
    """Concurrent create(..., exists_ok=True) calls must all succeed."""
    try:
        # try to trigger a race-condition during the creation of the database
        await asyncio.gather(
            *[couchdb.create(database_id, exists_ok=True) for _ in range(5)]
        )
    finally:
        # for this specific test, we need to do a manual cleanup
        db = await couchdb.create(database_id, exists_ok=True)
        await db.delete()
async def test_create_for_existing_ok(couchdb: CouchDB, database: Database) -> None:
    """create(..., exists_ok=True) on an existing database succeeds silently."""
    await couchdb.create(database.id, exists_ok=True)
async def test_get_for_existing(couchdb: CouchDB, database: Database) -> None:
    """Indexing the client with an existing database id succeeds."""
    await couchdb[database.id]
async def test_get_for_non_existing(couchdb: CouchDB, database: Database) -> None:
    """Indexing the client with an unknown database id raises NotFoundError."""
    from aiocouch import NotFoundError

    with pytest.raises(NotFoundError):
        await couchdb[database.id + "not_existing"]
async def test_get_info(couchdb: CouchDB) -> None:
    """The server info endpoint is reachable."""
    await couchdb.info()
async def test_get_database_info(database: Database) -> None:
    """The database info endpoint is reachable."""
    await database.info()
| import asyncio
import pytest
from aiocouch import CouchDB, Database
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
async def test_list_databases(couchdb: CouchDB) -> None:
dbs = await couchdb.keys()
assert "aiocouch_test_fixture_database" not in dbs
async def test_list_database(couchdb: CouchDB, database: Database) -> None:
dbs = await couchdb.keys()
assert "aiocouch_test_fixture_database" in dbs
async def test_create_delete_database(couchdb: CouchDB) -> None:
database = await couchdb.create("aiocouch_test_fixture_database2")
dbs = await couchdb.keys()
assert "aiocouch_test_fixture_database2" in dbs
await database.delete()
dbs = await couchdb.keys()
assert "aiocouch_test_fixture_database2" not in dbs
async def test_create_for_existing(couchdb: CouchDB, database: Database) -> None:
from aiocouch import PreconditionFailedError
with pytest.raises(PreconditionFailedError):
await couchdb.create(database.id)
async def test_create_for_existing_but_mismatching_params(
couchdb: CouchDB, database: Database
) -> None:
from aiocouch import PreconditionFailedError
with pytest.raises(PreconditionFailedError):
await couchdb.create(database.id, partitioned=True)
async def test_create_for_existing_ok_with_race(
couchdb: CouchDB, database_id: str
) -> None:
try:
# try to trigger a race-condition during the creation of the database
await asyncio.gather(
*[couchdb.create(database_id, exists_ok=True) for _ in range(5)]
)
finally:
# for this specific test, we need to do a manual cleanup
db = await couchdb.create(database_id, exists_ok=True)
await db.delete()
async def test_create_for_existing_ok(couchdb: CouchDB, database: Database) -> None:
await couchdb.create(database.id, exists_ok=True)
async def test_get_for_existing(couchdb: CouchDB, database: Database) -> None:
await couchdb[database.id]
async def test_get_for_non_existing(couchdb: CouchDB, database: Database) -> None:
from aiocouch import NotFoundError
with pytest.raises(NotFoundError):
await couchdb[database.id + "not_existing"]
async def test_get_info(couchdb: CouchDB) -> None:
await couchdb.info()
async def test_get_database_info(database: Database) -> None:
await database.info() | en | 0.954602 | # All test coroutines will be treated as marked. # try to trigger a race-condition during the creation of the database # for this specific test, we need to do a manual cleanup | 2.123061 | 2 |
src/server/master/master_manager/deploy.py | dpaola2/djangy | 15 | 6620159 | #!/usr/bin/env python
from ConfigParser import RawConfigParser
from mako.lookup import TemplateLookup
from shared import *
def main():
    """Script entry point: verify the caller is trusted, then deploy the
    application named by the `application_name` keyword argument."""
    check_trusted_uid(sys.argv[0])
    deploy(**check_and_return_keyword_args(sys.argv, ['application_name']))
def deploy(application_name):
    """Build and roll out the latest pushed version of *application_name*.

    Creates a bundle from the application's git repository, allocates it
    on the worker hosts and reconfigures the proxy caches.  This runs
    inside the user's `git push`, so every failure mode is logged and
    turned into a human-readable message instead of a traceback.
    """
    print ''
    print ''
    print 'Welcome to Djangy!'
    print ''
    print 'Deploying project %s.' % application_name
    print ''
    try:
        bundle_version = create_latest_bundle_via_db(application_name)
        print 'Deploying to worker hosts...',
        call_worker_managers_allocate(application_name)
        call_proxycache_managers_configure(application_name)
        log_info_message("Successfully deployed application '%s'!" % application_name)
        print 'Done.'
        print ''
    except BundleAlreadyExistsException as e:
        # The pushed revision was already deployed: not an error, just inform.
        log_last_exception()
        print 'WARNING: ' + str(e)
        print 'Commit and push some changes to force redeployment.'
        print ''
    except ApplicationNotInDatabaseException as e:
        log_last_exception()
        print 'ERROR: ' + str(e)
        print ''
    except InvalidApplicationNameException as e:
        log_last_exception()
        print 'ERROR: ' + str(e)
        print ''
    except DjangoProjectNotFoundException as e:
        log_last_exception()
        print 'ERROR: No django project found in the git repository.'
        print ''
    except:
        # Deliberate catch-all: never leak a traceback to the end user;
        # the details go to the log via log_last_exception().
        log_last_exception()
        print 'Internal error, please contact <EMAIL>'
        print ''
def create_latest_bundle_via_db(application_name):
    """Create a bundle from the latest version of an application.  Fetches
    details like administrative email address and database credentials from
    the management database, then records the new bundle version back into
    the database.  Returns the new bundle version string."""
    check_application_name(application_name)
    # Extract application info from management database
    try:
        application_info = Application.get_by_name(application_name)
        user_info = application_info.account
        bundle_params = {
            'application_name': application_name,
            'admin_email' : user_info.email,
            'db_host' : application_info.db_host,
            'db_port' : application_info.db_port,
            'db_name' : application_info.db_name,
            'db_username' : application_info.db_username,
            'db_password' : application_info.db_password,
            'setup_uid' : application_info.setup_uid,
            'web_uid' : application_info.web_uid,
            'cron_uid' : application_info.cron_uid,
            'app_gid' : application_info.app_gid,
            'celery_procs' : application_info.celery_procs,
        }
        # Also need to query DB for which hosts to run on; and
        # resource allocations may be heterogenous across hosts
        # Sanity-check the uids/gid pulled from the database before use.
        check_setup_uid(bundle_params['setup_uid'])
        check_web_uid (bundle_params['web_uid' ])
        check_cron_uid (bundle_params['cron_uid' ])
        check_app_gid (bundle_params['app_gid' ])
    except Exception as e:
        # NOTE(review): any exception in the block above (not only a missing
        # row) is surfaced as ApplicationNotInDatabaseException -- confirm
        # this is intended.
        log_last_exception()
        print str(e)
        # Couldn't find application_name in the management database!
        raise ApplicationNotInDatabaseException(application_name)
    # Create the bundle.
    bundle_version = create_latest_bundle(**bundle_params)
    # Update latest bundle version in the database.
    application_info.bundle_version = bundle_version
    application_info.save()
    return bundle_version
def create_latest_bundle(application_name, admin_email, db_host, db_port, db_name, db_username, db_password, \
setup_uid, web_uid, cron_uid, app_gid, celery_procs):
"""Create a bundle from the latest version of an application. Requires
administrative email address and database credentials as arguments."""
# Put application code in <bundle path>/application
# and user-supplied config files in <bundle path>/config
print 'Cloning git repository...',
(bundle_version, bundle_name, bundle_application_path) = clone_repo_to_bundle(application_name)
print 'Done.'
print ''
bundle_path = os.path.join(BUNDLES_DIR, bundle_name)
recursive_chown_chmod(bundle_path, 0, app_gid, '0750')
# Find the Django project directory
django_project_path = find_django_project(os.path.join(bundle_path, 'application'))
django_project_module_name = os.path.basename(django_project_path)
# Rename the user's settings module to something that's unlikely to conflict
if os.path.isfile(os.path.join(django_project_path, 'settings', '__init__.py')):
user_settings_module_name = '__init__%s' % bundle_version
os.rename(os.path.join(django_project_path, 'settings', '__init__.py'), \
os.path.join(django_project_path, 'settings', user_settings_module_name + '.py'))
elif os.path.isfile(os.path.join(django_project_path, 'settings.py')):
user_settings_module_name = 'settings_%s' % bundle_version
os.rename(os.path.join(django_project_path, 'settings.py'), \
os.path.join(django_project_path, user_settings_module_name + '.py'))
# Create production settings.py file in <bundle path>/application/.../settings.py
# (code also exists in worker_manager.deploy)
print 'Creating production settings.py file...',
if os.path.isdir(os.path.join(django_project_path, 'settings')):
settings_path = os.path.join(django_project_path, 'settings', '__init__.py')
else:
settings_path = os.path.join(django_project_path, 'settings.py')
generate_config_file('generic_settings', settings_path,
user_settings_module_name = user_settings_module_name,
django_project_module_name = django_project_module_name,
db_host = db_host,
db_port = db_port,
db_name = db_name,
db_username = db_username,
db_password = <PASSWORD>,
bundle_name = bundle_name,
debug = False,
celery_procs = None,
application_name = application_name)
os.chown(settings_path, 0, app_gid)
os.chmod(settings_path, 0750)
print 'Done.'
print ''
# The create_virtualenv.py program calls setuid() to run as setup_uid
python_virtual_path = os.path.join(bundle_path, 'python-virtual')
os.mkdir(python_virtual_path, 0770)
os.chown(python_virtual_path, 0, app_gid)
os.chmod(python_virtual_path, 0770)
sys.stdout.flush()
run_external_program([PYTHON_BIN_PATH, os.path.join(MASTER_MANAGER_SRC_DIR, 'uid_application_setup/create_virtualenv.py'), \
'application_name', application_name, 'bundle_name', bundle_name, \
'setup_uid', str(setup_uid), 'app_gid', str(app_gid)], \
pass_stdout=True, cwd=bundle_application_path)
os.umask(0227)
# Save the bundle info used by worker_manager to generate config files
print 'Saving bundle info...',
django_admin_media_path = get_django_admin_media_path(bundle_path)
admin_media_prefix='/admin_media'
BundleInfo( \
django_project_path = django_project_path, \
django_admin_media_path = django_admin_media_path, \
admin_media_prefix = admin_media_prefix, \
admin_email = admin_email, \
setup_uid = setup_uid, \
web_uid = web_uid, \
cron_uid = cron_uid, \
app_gid = app_gid, \
user_settings_module_name = user_settings_module_name, \
db_host = db_host, \
db_port = db_port, \
db_name = db_name, \
db_username = db_username, \
db_password = db_password
).save_to_file(os.path.join(bundle_path, 'config', 'bundle_info.config'))
print 'Done.'
print ''
recursive_chown_chmod(bundle_path, 0, app_gid, '0750')
# TODO: don't chmod everything +x, only what needs it.
return bundle_version
### Also exists in worker_manager.deploy ###
def generate_config_file(__template_name__, __config_file_path__, **kwargs):
    """Generate a bundle config file from a template, supplying arguments
    from kwargs."""
    # Look up the mako template and instantiate it with the caller's kwargs.
    template = TemplateLookup(directories = [WORKER_TEMPLATE_DIR]).get_template(__template_name__)
    rendered = template.render(**kwargs)
    # Write the instantiated template out to the bundle.
    with open(__config_file_path__, 'w') as config_file:
        config_file.write(rendered)
def get_django_admin_media_path(bundle_path):
    """Return the admin media directory of the Django egg installed in the
    bundle's virtualenv.

    Parses the virtualenv's easy-install.pth to locate the Django egg.  If
    anything goes wrong (no virtualenv, no Django egg, unreadable file), a
    deliberately nonexistent path inside the bundle is returned so callers
    simply end up serving no admin media.
    """
    try:
        # NOTE(review): the virtualenv layout is hard-wired to python2.6 --
        # must stay in sync with the interpreter used by create_virtualenv.py.
        pth_path = os.path.join(bundle_path, 'python-virtual/lib/python2.6/site-packages/easy-install.pth')
        with open(pth_path) as f:
            contents = f.read()
        django_path = re.search(r'^(.*/Django-.*\.egg)$', contents, flags=re.MULTILINE).group(0)
        admin_media_path = os.path.join(django_path, 'django/contrib/admin/media')
        return admin_media_path
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still propagate.  Any parsing/IO failure falls back to a path
        # that is guaranteed not to exist.
        return os.path.join(bundle_path, 'directory_that_does_not_exist')
def clone_repo_to_bundle(application_name):
    """Try to clone an application's git repository and put the latest code
    into a new bundle.  Throws BundleAlreadyExistsException if a bundle
    directory already exists for the latest version in the repository.

    Returns (bundle_version, bundle_name, bundle_application_path).
    Note: bundle ownership/permissions must be adjusted by the caller.
    """
    # Create temporary directory in which to git clone
    master_repo_path = os.path.join(REPOS_DIR, application_name + '.git')
    temp_repo_path = tempfile.mkdtemp('.git', 'tmp-', BUNDLES_DIR)
    os.chown(temp_repo_path, GIT_UID, GIT_UID)
    os.chmod(temp_repo_path, 0700)
    # git clone and read current version of git repository; the helper
    # prints the checked-out commit hash on its first line of stdout.
    result = run_external_program([PYTHON_BIN_PATH, os.path.join(MASTER_MANAGER_SRC_DIR, 'uid_git/clone_repo.py'), master_repo_path, temp_repo_path])
    stdout = result['stdout_contents'].split('\n')
    # NOTE(review): str.split('\n') always returns at least one element,
    # so this branch is dead; the empty-output case is caught by
    # validate_git_repo_version('') below.
    if len(stdout) < 1:
        git_repo_version = ''
    else:
        git_repo_version = stdout[0]
    # Validate git_repo_version
    if result['exit_code'] != 0 or not validate_git_repo_version(git_repo_version):
        raise GitCloneException(application_name, temp_repo_path)
    # Compute bundle path
    bundle_version = BUNDLE_VERSION_PREFIX + git_repo_version
    bundle_name = application_name + '-' + bundle_version
    bundle_path = os.path.join(BUNDLES_DIR, bundle_name)
    # Check if bundle already exists
    if os.path.exists(bundle_path):
        shutil.rmtree(temp_repo_path)
        raise BundleAlreadyExistsException(bundle_name)
    # Make bundle directory
    bundle_config_path = os.path.join(bundle_path, 'config')
    os.makedirs(bundle_config_path)
    os.chmod(bundle_path, 0700)
    # Move checked-out repo to bundle
    bundle_application_path = get_bundle_application_path(application_name, temp_repo_path, bundle_path)
    os.makedirs(bundle_application_path)
    os.rename(temp_repo_path, bundle_application_path)
    # Copy the user-supplied configuration files to a deterministic location
    copy_normal_file(os.path.join(bundle_application_path, 'djangy.config'), os.path.join(bundle_config_path, 'djangy.config'))
    copy_normal_file(os.path.join(bundle_application_path, 'djangy.eggs' ), os.path.join(bundle_config_path, 'djangy.eggs' ))
    copy_normal_file(os.path.join(bundle_application_path, 'djangy.pip'  ), os.path.join(bundle_config_path, 'djangy.pip'  ))
    # Remove .git history which is not relevant in bundle
    shutil.rmtree(os.path.join(bundle_application_path, '.git'))
    # Note: bundle permissions must be adjusted by caller
    return (bundle_version, bundle_name, bundle_application_path)
def validate_git_repo_version(git_repo_version):
    """Return True iff the string is a full 40-hex-digit (lowercase) git SHA-1."""
    return re.match(r'^[0-9a-f]{40}$', git_repo_version) is not None
def get_bundle_application_path(application_name, repo_path, bundle_path):
    """Given the path to a copy of the code for an application and the path
    to the bundle in which it needs to be inserted, determine the path where
    the code needs to be moved to.

    Defaults to (bundle_path)/application/(application_name).  A
    user-supplied djangy.config file at the root of the repository may
    override the final component via:

        [application]
        rootdir=(some directory)

    The rootdir value is normalized against a hypothetical filesystem root
    first, so '..' tricks cannot escape (bundle_path)/application.
    """
    # Default: (bundle_path)/application/(application_name)
    bundle_application_path = os.path.join(bundle_path, 'application', application_name)
    # But if djangy.config file exists, look for:
    # [application]
    # rootdir=(some directory)
    djangy_config_path = os.path.join(repo_path, 'djangy.config')
    if is_normal_file(djangy_config_path):
        parser = RawConfigParser()
        parser.read(djangy_config_path)
        try:
            # Normalize the path relative to a hypothetical root directory,
            # then remove the leftmost / to make the path relative again.
            rootdir = os.path.normpath(os.path.join('/', parser.get('application', 'rootdir')))[1:]
            # Put the path inside the bundle's application directory;
            # normalizing will remove a rightmost / if rootdir == ''
            bundle_application_path = os.path.normpath(os.path.join(bundle_path, 'application', rootdir))
        except Exception:
            # Missing [application] section / rootdir option or a malformed
            # value: silently keep the default path.  Was a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.
            pass
    return bundle_application_path
def is_normal_file(path):
    """Return True iff *path* is a regular file and not a symbolic link."""
    if os.path.islink(path):
        return False
    return os.path.isfile(path)
def copy_normal_file(src_path, dest_path):
    """Copy src_path to dest_path only when src_path is a regular
    (non-symlink) file and dest_path does not already exist."""
    if not is_normal_file(src_path):
        return
    if os.path.exists(dest_path):
        return
    shutil.copyfile(src_path, dest_path)
# Run as a script: deploy the application named on the command line.
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
from ConfigParser import RawConfigParser
from mako.lookup import TemplateLookup
from shared import *
def main():
check_trusted_uid(sys.argv[0])
kwargs = check_and_return_keyword_args(sys.argv, ['application_name'])
deploy(**kwargs)
def deploy(application_name):
print ''
print ''
print 'Welcome to Djangy!'
print ''
print 'Deploying project %s.' % application_name
print ''
try:
bundle_version = create_latest_bundle_via_db(application_name)
print 'Deploying to worker hosts...',
call_worker_managers_allocate(application_name)
call_proxycache_managers_configure(application_name)
log_info_message("Successfully deployed application '%s'!" % application_name)
print 'Done.'
print ''
except BundleAlreadyExistsException as e:
log_last_exception()
print 'WARNING: ' + str(e)
print 'Commit and push some changes to force redeployment.'
print ''
except ApplicationNotInDatabaseException as e:
log_last_exception()
print 'ERROR: ' + str(e)
print ''
except InvalidApplicationNameException as e:
log_last_exception()
print 'ERROR: ' + str(e)
print ''
except DjangoProjectNotFoundException as e:
log_last_exception()
print 'ERROR: No django project found in the git repository.'
print ''
except:
log_last_exception()
print 'Internal error, please contact <EMAIL>'
print ''
def create_latest_bundle_via_db(application_name):
"""Create a bundle from the latest version of an application. Fetches
details like administrative email address and database credentials from
the management database."""
check_application_name(application_name)
# Extract application info from management database
try:
application_info = Application.get_by_name(application_name)
user_info = application_info.account
bundle_params = {
'application_name': application_name,
'admin_email' : user_info.email,
'db_host' : application_info.db_host,
'db_port' : application_info.db_port,
'db_name' : application_info.db_name,
'db_username' : application_info.db_username,
'db_password' : application_info.db_password,
'setup_uid' : application_info.setup_uid,
'web_uid' : application_info.web_uid,
'cron_uid' : application_info.cron_uid,
'app_gid' : application_info.app_gid,
'celery_procs' : application_info.celery_procs,
}
# Also need to query DB for which hosts to run on; and
# resource allocations may be heterogenous across hosts
check_setup_uid(bundle_params['setup_uid'])
check_web_uid (bundle_params['web_uid' ])
check_cron_uid (bundle_params['cron_uid' ])
check_app_gid (bundle_params['app_gid' ])
except Exception as e:
log_last_exception()
print str(e)
# Couldn't find application_name in the management database!
raise ApplicationNotInDatabaseException(application_name)
# Create the bundle.
bundle_version = create_latest_bundle(**bundle_params)
# Update latest bundle version in the database.
application_info.bundle_version = bundle_version
application_info.save()
return bundle_version
def create_latest_bundle(application_name, admin_email, db_host, db_port, db_name, db_username, db_password, \
setup_uid, web_uid, cron_uid, app_gid, celery_procs):
"""Create a bundle from the latest version of an application. Requires
administrative email address and database credentials as arguments."""
# Put application code in <bundle path>/application
# and user-supplied config files in <bundle path>/config
print 'Cloning git repository...',
(bundle_version, bundle_name, bundle_application_path) = clone_repo_to_bundle(application_name)
print 'Done.'
print ''
bundle_path = os.path.join(BUNDLES_DIR, bundle_name)
recursive_chown_chmod(bundle_path, 0, app_gid, '0750')
# Find the Django project directory
django_project_path = find_django_project(os.path.join(bundle_path, 'application'))
django_project_module_name = os.path.basename(django_project_path)
# Rename the user's settings module to something that's unlikely to conflict
if os.path.isfile(os.path.join(django_project_path, 'settings', '__init__.py')):
user_settings_module_name = '__init__%s' % bundle_version
os.rename(os.path.join(django_project_path, 'settings', '__init__.py'), \
os.path.join(django_project_path, 'settings', user_settings_module_name + '.py'))
elif os.path.isfile(os.path.join(django_project_path, 'settings.py')):
user_settings_module_name = 'settings_%s' % bundle_version
os.rename(os.path.join(django_project_path, 'settings.py'), \
os.path.join(django_project_path, user_settings_module_name + '.py'))
# Create production settings.py file in <bundle path>/application/.../settings.py
# (code also exists in worker_manager.deploy)
print 'Creating production settings.py file...',
if os.path.isdir(os.path.join(django_project_path, 'settings')):
settings_path = os.path.join(django_project_path, 'settings', '__init__.py')
else:
settings_path = os.path.join(django_project_path, 'settings.py')
generate_config_file('generic_settings', settings_path,
user_settings_module_name = user_settings_module_name,
django_project_module_name = django_project_module_name,
db_host = db_host,
db_port = db_port,
db_name = db_name,
db_username = db_username,
db_password = <PASSWORD>,
bundle_name = bundle_name,
debug = False,
celery_procs = None,
application_name = application_name)
os.chown(settings_path, 0, app_gid)
os.chmod(settings_path, 0750)
print 'Done.'
print ''
# The create_virtualenv.py program calls setuid() to run as setup_uid
python_virtual_path = os.path.join(bundle_path, 'python-virtual')
os.mkdir(python_virtual_path, 0770)
os.chown(python_virtual_path, 0, app_gid)
os.chmod(python_virtual_path, 0770)
sys.stdout.flush()
run_external_program([PYTHON_BIN_PATH, os.path.join(MASTER_MANAGER_SRC_DIR, 'uid_application_setup/create_virtualenv.py'), \
'application_name', application_name, 'bundle_name', bundle_name, \
'setup_uid', str(setup_uid), 'app_gid', str(app_gid)], \
pass_stdout=True, cwd=bundle_application_path)
os.umask(0227)
# Save the bundle info used by worker_manager to generate config files
print 'Saving bundle info...',
django_admin_media_path = get_django_admin_media_path(bundle_path)
admin_media_prefix='/admin_media'
BundleInfo( \
django_project_path = django_project_path, \
django_admin_media_path = django_admin_media_path, \
admin_media_prefix = admin_media_prefix, \
admin_email = admin_email, \
setup_uid = setup_uid, \
web_uid = web_uid, \
cron_uid = cron_uid, \
app_gid = app_gid, \
user_settings_module_name = user_settings_module_name, \
db_host = db_host, \
db_port = db_port, \
db_name = db_name, \
db_username = db_username, \
db_password = db_password
).save_to_file(os.path.join(bundle_path, 'config', 'bundle_info.config'))
print 'Done.'
print ''
recursive_chown_chmod(bundle_path, 0, app_gid, '0750')
# TODO: don't chmod everything +x, only what needs it.
return bundle_version
### Also exists in worker_manager.deploy ###
def generate_config_file(__template_name__, __config_file_path__, **kwargs):
"""Generate a bundle config file from a template, supplying arguments
from kwargs."""
# Load the template
lookup = TemplateLookup(directories = [WORKER_TEMPLATE_DIR])
template = lookup.get_template(__template_name__)
# Instantiate the template
instance = template.render(**kwargs)
# Write the instantiated template to the bundle
f = open(__config_file_path__, 'w')
f.write(instance)
f.close()
def get_django_admin_media_path(bundle_path):
try:
# Currently assumes python2.6
f = open(os.path.join(bundle_path, 'python-virtual/lib/python2.6/site-packages/easy-install.pth'))
contents = f.read()
f.close()
django_path = re.search('^(.*/Django-.*\.egg)$', contents, flags=re.MULTILINE).group(0)
admin_media_path = os.path.join(django_path, 'django/contrib/admin/media')
return admin_media_path
except:
return os.path.join(bundle_path, 'directory_that_does_not_exist')
def clone_repo_to_bundle(application_name):
"""Try to clone an application's git repository and put the latest code
into a new bundle. Throws BundleAlreadyExistsException if a bundle
directory already exists for the latest version in the repository."""
# Create temporary directory in which to git clone
master_repo_path = os.path.join(REPOS_DIR, application_name + '.git')
temp_repo_path = tempfile.mkdtemp('.git', 'tmp-', BUNDLES_DIR)
os.chown(temp_repo_path, GIT_UID, GIT_UID)
os.chmod(temp_repo_path, 0700)
# git clone and read current version of git repository
result = run_external_program([PYTHON_BIN_PATH, os.path.join(MASTER_MANAGER_SRC_DIR, 'uid_git/clone_repo.py'), master_repo_path, temp_repo_path])
stdout = result['stdout_contents'].split('\n')
if len(stdout) < 1:
git_repo_version = ''
else:
git_repo_version = stdout[0]
# Validate git_repo_version
if result['exit_code'] != 0 or not validate_git_repo_version(git_repo_version):
raise GitCloneException(application_name, temp_repo_path)
# Compute bundle path
bundle_version = BUNDLE_VERSION_PREFIX + git_repo_version
bundle_name = application_name + '-' + bundle_version
bundle_path = os.path.join(BUNDLES_DIR, bundle_name)
# Check if bundle already exists
if os.path.exists(bundle_path):
shutil.rmtree(temp_repo_path)
raise BundleAlreadyExistsException(bundle_name)
# Make bundle directory
bundle_config_path = os.path.join(bundle_path, 'config')
os.makedirs(bundle_config_path)
os.chmod(bundle_path, 0700)
# Move checked-out repo to bundle
bundle_application_path = get_bundle_application_path(application_name, temp_repo_path, bundle_path)
os.makedirs(bundle_application_path)
os.rename(temp_repo_path, bundle_application_path)
# Copy the user-supplied configuration files to a deterministic location
copy_normal_file(os.path.join(bundle_application_path, 'djangy.config'), os.path.join(bundle_config_path, 'djangy.config'))
copy_normal_file(os.path.join(bundle_application_path, 'djangy.eggs' ), os.path.join(bundle_config_path, 'djangy.eggs' ))
copy_normal_file(os.path.join(bundle_application_path, 'djangy.pip' ), os.path.join(bundle_config_path, 'djangy.pip' ))
# Remove .git history which is not relevant in bundle
shutil.rmtree(os.path.join(bundle_application_path, '.git'))
# Note: bundle permissions must be adjusted by caller
return (bundle_version, bundle_name, bundle_application_path)
def validate_git_repo_version(git_repo_version):
return (None != re.match('^[0-9a-f]{40}$', git_repo_version))
def get_bundle_application_path(application_name, repo_path, bundle_path):
"""Given the path to a copy of the code for an application and the path
to the bundle in which it needs to be inserted, determine the path where
the code needs to be moved to. The simple case is
(bundle_path)/application/(application_name), but if the user provides a
djangy.config file in the root of the repository, they can override
that, e.g., (bundle_path)/application/mydir"""
# Default: (bundle_path)/application/(application_name)
bundle_application_path = os.path.join(bundle_path, 'application', application_name)
# But if djangy.config file exists, look for:
# [application]
# rootdir=(some directory)
djangy_config_path = os.path.join(repo_path, 'djangy.config')
if is_normal_file(djangy_config_path):
parser = RawConfigParser()
parser.read(djangy_config_path)
try:
# Normalize the path relative to a hypothetical root directory,
# then remove the leftmost / to make the path relative again.
rootdir = os.path.normpath(os.path.join('/', parser.get('application', 'rootdir')))[1:]
# Put the path inside the bundle's application directory;
# normalizing will remove a rightmost / if rootdir == ''
bundle_application_path = os.path.normpath(os.path.join(bundle_path, 'application', rootdir))
except:
pass
return bundle_application_path
def is_normal_file(path):
return not os.path.islink(path) and os.path.isfile(path)
def copy_normal_file(src_path, dest_path):
if is_normal_file(src_path) and \
not os.path.exists(dest_path):
shutil.copyfile(src_path, dest_path)
if __name__ == '__main__':
main()
| en | 0.798296 | #!/usr/bin/env python Create a bundle from the latest version of an application. Fetches details like administrative email address and database credentials from the management database. # Extract application info from management database # Also need to query DB for which hosts to run on; and # resource allocations may be heterogenous across hosts # Couldn't find application_name in the management database! # Create the bundle. # Update latest bundle version in the database. Create a bundle from the latest version of an application. Requires administrative email address and database credentials as arguments. # Put application code in <bundle path>/application # and user-supplied config files in <bundle path>/config # Find the Django project directory # Rename the user's settings module to something that's unlikely to conflict # Create production settings.py file in <bundle path>/application/.../settings.py # (code also exists in worker_manager.deploy) # The create_virtualenv.py program calls setuid() to run as setup_uid # Save the bundle info used by worker_manager to generate config files # TODO: don't chmod everything +x, only what needs it. ### Also exists in worker_manager.deploy ### Generate a bundle config file from a template, supplying arguments from kwargs. # Load the template # Instantiate the template # Write the instantiated template to the bundle # Currently assumes python2.6 Try to clone an application's git repository and put the latest code into a new bundle. Throws BundleAlreadyExistsException if a bundle directory already exists for the latest version in the repository. 
# Create temporary directory in which to git clone # git clone and read current version of git repository # Validate git_repo_version # Compute bundle path # Check if bundle already exists # Make bundle directory # Move checked-out repo to bundle # Copy the user-supplied configuration files to a deterministic location # Remove .git history which is not relevant in bundle # Note: bundle permissions must be adjusted by caller Given the path to a copy of the code for an application and the path to the bundle in which it needs to be inserted, determine the path where the code needs to be moved to. The simple case is (bundle_path)/application/(application_name), but if the user provides a djangy.config file in the root of the repository, they can override that, e.g., (bundle_path)/application/mydir # Default: (bundle_path)/application/(application_name) # But if djangy.config file exists, look for: # [application] # rootdir=(some directory) # Normalize the path relative to a hypothetical root directory, # then remove the leftmost / to make the path relative again. # Put the path inside the bundle's application directory; # normalizing will remove a rightmost / if rootdir == '' | 2.203752 | 2 |
search/nlp/qu/intent_finder.py | octabytes/search | 0 | 6620160 | from textdistance import levenshtein
from .stop_words import stop_words
class IntentFinder:
def __init__(self, tokens, intent_list):
self.tokens = tokens
self.intent_list = intent_list
self.score = 0
def intents(self):
# Founded intent in query
query_intents = []
# Search intent from tokens
for intent in self.intent_list:
for index, token in enumerate(self.tokens):
if token in intent["words"]:
query_intent = intent.copy()
query_intent["value"] = token
query_intent["index"] = index
query_intents.append(query_intent)
# Calculate confidence
self.score += 5
# Those tokens which intents are not found
left_tokens = []
for index, token in enumerate(self.tokens):
found = False
for intent in query_intents:
if token == intent["value"]:
found = True
break
if not found:
t = {
"word": token,
"index": index
}
left_tokens.append(t)
# remove stop words from left tokens
cleaned_tokens = []
if left_tokens:
cleaned_tokens = [
t for t in left_tokens if t["word"] not in stop_words]
# If there are some tokens left then
# find those intents which are based on levenshtein distance
for token in cleaned_tokens:
intents = []
for intent in self.intent_list:
for word in intent["words"]:
if len(word) <= 3:
continue
max_distance = 1
if len(word) <= 5:
max_distance = 1
else:
max_distance = 2
d = levenshtein.distance(token["word"], word)
if d <= max_distance:
i = {}
i["intent"] = intent.copy()
i["distance"] = d
intents.append(i)
break
if intents:
min_distance_intent = min(intents, key=lambda i: i["distance"])
i = min_distance_intent["intent"]
i["value"] = token["word"]
i["index"] = token["index"]
query_intents.append(i)
# Calculate confidence
self.score += 5 - (min_distance_intent["distance"] + 1)
return query_intents
| from textdistance import levenshtein
from .stop_words import stop_words
class IntentFinder:
def __init__(self, tokens, intent_list):
self.tokens = tokens
self.intent_list = intent_list
self.score = 0
def intents(self):
# Founded intent in query
query_intents = []
# Search intent from tokens
for intent in self.intent_list:
for index, token in enumerate(self.tokens):
if token in intent["words"]:
query_intent = intent.copy()
query_intent["value"] = token
query_intent["index"] = index
query_intents.append(query_intent)
# Calculate confidence
self.score += 5
# Those tokens which intents are not found
left_tokens = []
for index, token in enumerate(self.tokens):
found = False
for intent in query_intents:
if token == intent["value"]:
found = True
break
if not found:
t = {
"word": token,
"index": index
}
left_tokens.append(t)
# remove stop words from left tokens
cleaned_tokens = []
if left_tokens:
cleaned_tokens = [
t for t in left_tokens if t["word"] not in stop_words]
# If there are some tokens left then
# find those intents which are based on levenshtein distance
for token in cleaned_tokens:
intents = []
for intent in self.intent_list:
for word in intent["words"]:
if len(word) <= 3:
continue
max_distance = 1
if len(word) <= 5:
max_distance = 1
else:
max_distance = 2
d = levenshtein.distance(token["word"], word)
if d <= max_distance:
i = {}
i["intent"] = intent.copy()
i["distance"] = d
intents.append(i)
break
if intents:
min_distance_intent = min(intents, key=lambda i: i["distance"])
i = min_distance_intent["intent"]
i["value"] = token["word"]
i["index"] = token["index"]
query_intents.append(i)
# Calculate confidence
self.score += 5 - (min_distance_intent["distance"] + 1)
return query_intents
| en | 0.868933 | # Founded intent in query # Search intent from tokens # Calculate confidence # Those tokens which intents are not found # remove stop words from left tokens # If there are some tokens left then # find those intents which are based on levenshtein distance # Calculate confidence | 3.014227 | 3 |
unifiedmanip/sim/world.py | wualbert/pytorch-a2c-ppo-acktr-gail | 0 | 6620161 | <reponame>wualbert/pytorch-a2c-ppo-acktr-gail
import sim.rigid_object_deprecated as object
import pybullet as p
import pybullet_data
class World:
"""
The World is the bookkeeping entity for a simulation. It contains objects
(currently 1) and agents. The objects are defined in rigid_object_deprecated.py.
The World helps sets up the pybullet environment and keeps track of stuff
when running simulation.
"""
def __init__(self, object_list=None, agent_list=None,
sim_time_step=1./240.,
gravity=(0., 0., -10.), load_floor=True,
floor_friction=0., **kwargs):
if object_list is None:
object_list = []
if agent_list is None:
agent_list = []
if sim_time_step != 1./240.:
p.setTimeStep(sim_time_step)
self.sim_time_step = sim_time_step
self.object_dict = dict()
for obj in object_list:
self.object_dict[obj.body_id] = obj
self.agent_list = agent_list
self.gravity = gravity
p.setGravity(*self.gravity)
self.floor_friction = floor_friction
if load_floor:
p.setAdditionalSearchPath(
pybullet_data.getDataPath()) # optionally
self.floor_id = p.loadURDF("plane.urdf")
p.changeDynamics(self.floor_id, -1,
lateralFriction=self.floor_friction)
# Take one simulation step
p.stepSimulation()
def step_simulation(self):
# Get the actions from all agents
# TODO(wualbert)
# Execute the actions
# TODO(wualbert)
# Take one simulation step
p.stepSimulation()
class BoxWorld(World):
"""
World with one 2x1x0.5 rectangular box
"""
def __init__(self, p_W_init=(0., 0., 0.25), q_W_init=(0., 0., 0., 1.),
agent_list=None, sim_time_step=1./240.,
gravity=(0., 0., -10.),
target_p_W = (3.,0.,0.25), target_q_W = (0.,0.,0.,1.)):
objects_list = [object.DefaultBoxObject(p_W_init, q_W_init)]
if target_p_W is not None and target_q_W is not None:
objects_list.append(object.BoxVisualization(target_p_W, target_q_W))
super().__init__(objects_list, agent_list, sim_time_step, gravity)
def step_simulation(self):
# The world currently relies on applyExternalForce for actions
for agnt in self.agent_list:
agnt.perform_action()
p.stepSimulation()
| import sim.rigid_object_deprecated as object
import pybullet as p
import pybullet_data
class World:
"""
The World is the bookkeeping entity for a simulation. It contains objects
(currently 1) and agents. The objects are defined in rigid_object_deprecated.py.
The World helps sets up the pybullet environment and keeps track of stuff
when running simulation.
"""
def __init__(self, object_list=None, agent_list=None,
sim_time_step=1./240.,
gravity=(0., 0., -10.), load_floor=True,
floor_friction=0., **kwargs):
if object_list is None:
object_list = []
if agent_list is None:
agent_list = []
if sim_time_step != 1./240.:
p.setTimeStep(sim_time_step)
self.sim_time_step = sim_time_step
self.object_dict = dict()
for obj in object_list:
self.object_dict[obj.body_id] = obj
self.agent_list = agent_list
self.gravity = gravity
p.setGravity(*self.gravity)
self.floor_friction = floor_friction
if load_floor:
p.setAdditionalSearchPath(
pybullet_data.getDataPath()) # optionally
self.floor_id = p.loadURDF("plane.urdf")
p.changeDynamics(self.floor_id, -1,
lateralFriction=self.floor_friction)
# Take one simulation step
p.stepSimulation()
def step_simulation(self):
# Get the actions from all agents
# TODO(wualbert)
# Execute the actions
# TODO(wualbert)
# Take one simulation step
p.stepSimulation()
class BoxWorld(World):
"""
World with one 2x1x0.5 rectangular box
"""
def __init__(self, p_W_init=(0., 0., 0.25), q_W_init=(0., 0., 0., 1.),
agent_list=None, sim_time_step=1./240.,
gravity=(0., 0., -10.),
target_p_W = (3.,0.,0.25), target_q_W = (0.,0.,0.,1.)):
objects_list = [object.DefaultBoxObject(p_W_init, q_W_init)]
if target_p_W is not None and target_q_W is not None:
objects_list.append(object.BoxVisualization(target_p_W, target_q_W))
super().__init__(objects_list, agent_list, sim_time_step, gravity)
def step_simulation(self):
# The world currently relies on applyExternalForce for actions
for agnt in self.agent_list:
agnt.perform_action()
p.stepSimulation() | en | 0.838069 | The World is the bookkeeping entity for a simulation. It contains objects (currently 1) and agents. The objects are defined in rigid_object_deprecated.py. The World helps sets up the pybullet environment and keeps track of stuff when running simulation. # optionally # Take one simulation step # Get the actions from all agents # TODO(wualbert) # Execute the actions # TODO(wualbert) # Take one simulation step World with one 2x1x0.5 rectangular box # The world currently relies on applyExternalForce for actions | 3.165055 | 3 |
jupyterhub_config.py | sparkingarthur/jupyterhub-localsqliteauthenticator | 26 | 6620162 | <reponame>sparkingarthur/jupyterhub-localsqliteauthenticator<filename>jupyterhub_config.py
c.JupyterHub.bind_url = 'http://:9002'
c.Authenticator.add_user_cmd = ['adduser', '--home', '/home/USERNAME']
c.LocalAuthenticator.create_system_users = True
c.Authenticator.delete_invalid_users = True
c.JupyterHub.authenticator_class = 'sqliteauthenticator.SQLiteAuthenticator'
c.Authenticator.admin_users = {'admin'}
from jupyterhub.spawner import LocalProcessSpawner
class MySpawner(LocalProcessSpawner):
def _notebook_dir_default(self):
return '/home/' + self.user.name
c.JupyterHub.spawner_class = MySpawner
| c.JupyterHub.bind_url = 'http://:9002'
c.Authenticator.add_user_cmd = ['adduser', '--home', '/home/USERNAME']
c.LocalAuthenticator.create_system_users = True
c.Authenticator.delete_invalid_users = True
c.JupyterHub.authenticator_class = 'sqliteauthenticator.SQLiteAuthenticator'
c.Authenticator.admin_users = {'admin'}
from jupyterhub.spawner import LocalProcessSpawner
class MySpawner(LocalProcessSpawner):
def _notebook_dir_default(self):
return '/home/' + self.user.name
c.JupyterHub.spawner_class = MySpawner | none | 1 | 1.889967 | 2 | |
sdno-link-monitor/mie/utils.py | openov2/sdno-monitoring | 0 | 6620163 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import sys
import time
import threading
import traceback
import socket
import random
import shlex
from functools import reduce
import subprocess
def vernumber(v):
'''Covert version string a.b.c to a number
a.b.c => 1.2.3 => a * 10000 * 10000 + b * 10000 + c
'''
count = v.count(".")
a, b, c = "0", "0", "0"
try:
if count == 3:
a, b, c = v.split(".")
elif count == 2:
a, b = v.split(".")
elif count == 1:
a = v
except:
print "BAD VERSION <%s>" % v
return int(a) * 10000 * 10000 + int(b) * 10000 + int(c)
def rands(seed, length):
return ''.join([seed[x]
for x in random.sample(xrange(0, len(seed)), length)])
def randobj(obj):
weight_all = sum([seg[1] for seg in obj])
randnum = random.random()
randnum = randnum * weight_all
w_start = 0.0
w_end = 1.0
index = 0
for seg in obj:
w_cur = float(seg[1])
w_end = w_start + w_cur
if w_start <= randnum <= w_end:
return obj[index][0]
w_start = w_end
index += 1
else:
def null():
return ""
return null
def hexdump(s):
return ":".join("{:02x}".format(ord(c)) for c in s)
def size_parse(size):
if not size:
return 0
size = size.strip()
if size[-1] in 'kK':
size = int(size[:-1]) * 1024
elif size[-1] in 'mM':
size = int(size[:-1]) * 1024 * 1024
elif size[-1] in 'gG':
size = int(size[:-1]) * 1024 * 1024 * 1024
elif size[-1] in 'tT':
size = int(size[:-1]) * 1024 * 1024 * 1024 * 1024
elif size[-1] in 'pP':
size = int(size[:-1]) * 1024 * 1024 * 1024 * 1024 * 1024
else:
size = int(size)
return size
### ###########################################################
# timer
#
def now_sec():
return time.time()
def now_msec():
return time.time() * 1000
def now_usec():
return time.time() * 1000 * 1000
def now_str():
return time.strftime("%Y-%m-%d %H:%M:%S")
def now_str_2():
return time.strftime("%Y%m%d%H%M%S")
def extname(s):
if not s:
return None
start = s.rfind(".") + 1
if start < 1 or start == len(s):
return None
return s[start:]
### ###########################################################
# System information
#
# directly use platform.xxx()
### ###########################################################
# setdebugable
#
# kill -USR1 <python_application.pid>
def setdebugable():
import signal
def handle_pdb(sig, frame):
import pdb
pdb.Pdb().set_trace(frame)
signal.signal(signal.SIGUSR1, handle_pdb)
### ###########################################################
# Quitmarker
#
def quitmarker(markerfile=None):
markerfile = markerfile or "/tmp/mie/quitmarker"
g = frame(-1).f_globals
g["quitmarker"] = open(markerfile, 'r')
### ###########################################################
# Jobs
#
class CmdJobs(object):
_jobid = 0
@classmethod
def jid(cls):
res = cls._jobid
cls._jobid += 1
if not res:
res = cls._jobid
cls._jobid += 1
return res
def __init__(self):
# jid
self.jobs = {}
def addjob(self):
job = self.jid()
pass
# 1. call a command to start a job
# 2. report a event to server
# >>> job_set/get/del
# >>> jobset
### #####################################################################
# calulate MD5 for a given file path
#
def md5_file(path):
md5 = hashlib.md5()
f = open(path_local, "rb")
while True:
data = f.read(4096)
if not data:
break
md5.update(data)
chkmd5 = md5.hexdigest()
f.close()
return chkmd5
### ###########################################################
# Get object by id
#
def objbyid(ID):
import ctypes
return ctypes.cast(ID, ctypes.py_object).value
### ###########################################################
# Process functions
#
def pidof(name):
return subprocess.check_output(['pidof', name])
def pkill_by_name(name):
pid = subprocess.check_output(['pidof', name])
if pid:
subprocess.check_output(['kill', pid])
pid = subprocess.check_output(['pidof', name])
if pid:
subprocess.check_output(['kill', '-9', pid])
pid = subprocess.check_output(['pidof', name])
if pid:
return False
return True
def pkill_by_pid(pid):
subprocess.call(['kill', pid])
subprocess.call(['kill', '-9', pid])
def pexec(name, args, force=False, nohup=True):
if force:
if not pkill_by_name(name):
return False
pid = subprocess.check_output(['pidof', name])
if pid:
return True
appname = os.path.basename(args[1])
if nohup:
args.insert("nohup")
subprocess.check_output(args)
pid = subprocess.check_output(['pidof', appname])
if pid:
return True
return False
def pgrep(*args):
cmd = "ps f | "
if not args:
return ""
for arg in args:
cmd += "grep %s | " % arg
cmd += "grep -v grep | head -n 1 | awk '{print $1}' | xargs kill"
print cmd
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
ps = subprocess.Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
return ps.communicate()[0]
def strbt():
stack = traceback.format_stack()
return "\r\n".join(stack[:-1])
from collections import defaultdict
def tree():
return defaultdict(tree)
class WBList():
'''White List and Black List'''
def __init__(self):
self.wlist = {}
self.blist = {}
def wset(self, name):
self.wlist[name] = re.compile("^%s$" % name)
def wdel(self, name=None):
if name:
del self.wlist[name]
else:
self.wlist.clear()
def whas(self, name=None):
pass
def wdmp(self, name=None):
pass
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import sys
import time
import threading
import traceback
import socket
import random
import shlex
from functools import reduce
import subprocess
def vernumber(v):
'''Covert version string a.b.c to a number
a.b.c => 1.2.3 => a * 10000 * 10000 + b * 10000 + c
'''
count = v.count(".")
a, b, c = "0", "0", "0"
try:
if count == 3:
a, b, c = v.split(".")
elif count == 2:
a, b = v.split(".")
elif count == 1:
a = v
except:
print "BAD VERSION <%s>" % v
return int(a) * 10000 * 10000 + int(b) * 10000 + int(c)
def rands(seed, length):
return ''.join([seed[x]
for x in random.sample(xrange(0, len(seed)), length)])
def randobj(obj):
weight_all = sum([seg[1] for seg in obj])
randnum = random.random()
randnum = randnum * weight_all
w_start = 0.0
w_end = 1.0
index = 0
for seg in obj:
w_cur = float(seg[1])
w_end = w_start + w_cur
if w_start <= randnum <= w_end:
return obj[index][0]
w_start = w_end
index += 1
else:
def null():
return ""
return null
def hexdump(s):
return ":".join("{:02x}".format(ord(c)) for c in s)
def size_parse(size):
if not size:
return 0
size = size.strip()
if size[-1] in 'kK':
size = int(size[:-1]) * 1024
elif size[-1] in 'mM':
size = int(size[:-1]) * 1024 * 1024
elif size[-1] in 'gG':
size = int(size[:-1]) * 1024 * 1024 * 1024
elif size[-1] in 'tT':
size = int(size[:-1]) * 1024 * 1024 * 1024 * 1024
elif size[-1] in 'pP':
size = int(size[:-1]) * 1024 * 1024 * 1024 * 1024 * 1024
else:
size = int(size)
return size
### ###########################################################
# timer
#
def now_sec():
return time.time()
def now_msec():
return time.time() * 1000
def now_usec():
return time.time() * 1000 * 1000
def now_str():
return time.strftime("%Y-%m-%d %H:%M:%S")
def now_str_2():
return time.strftime("%Y%m%d%H%M%S")
def extname(s):
if not s:
return None
start = s.rfind(".") + 1
if start < 1 or start == len(s):
return None
return s[start:]
### ###########################################################
# System information
#
# directly use platform.xxx()
### ###########################################################
# setdebugable
#
# kill -USR1 <python_application.pid>
def setdebugable():
import signal
def handle_pdb(sig, frame):
import pdb
pdb.Pdb().set_trace(frame)
signal.signal(signal.SIGUSR1, handle_pdb)
### ###########################################################
# Quitmarker
#
def quitmarker(markerfile=None):
markerfile = markerfile or "/tmp/mie/quitmarker"
g = frame(-1).f_globals
g["quitmarker"] = open(markerfile, 'r')
### ###########################################################
# Jobs
#
class CmdJobs(object):
_jobid = 0
@classmethod
def jid(cls):
res = cls._jobid
cls._jobid += 1
if not res:
res = cls._jobid
cls._jobid += 1
return res
def __init__(self):
# jid
self.jobs = {}
def addjob(self):
job = self.jid()
pass
# 1. call a command to start a job
# 2. report a event to server
# >>> job_set/get/del
# >>> jobset
### #####################################################################
# calulate MD5 for a given file path
#
def md5_file(path):
md5 = hashlib.md5()
f = open(path_local, "rb")
while True:
data = f.read(4096)
if not data:
break
md5.update(data)
chkmd5 = md5.hexdigest()
f.close()
return chkmd5
### ###########################################################
# Get object by id
#
def objbyid(ID):
import ctypes
return ctypes.cast(ID, ctypes.py_object).value
### ###########################################################
# Process functions
#
def pidof(name):
return subprocess.check_output(['pidof', name])
def pkill_by_name(name):
pid = subprocess.check_output(['pidof', name])
if pid:
subprocess.check_output(['kill', pid])
pid = subprocess.check_output(['pidof', name])
if pid:
subprocess.check_output(['kill', '-9', pid])
pid = subprocess.check_output(['pidof', name])
if pid:
return False
return True
def pkill_by_pid(pid):
subprocess.call(['kill', pid])
subprocess.call(['kill', '-9', pid])
def pexec(name, args, force=False, nohup=True):
if force:
if not pkill_by_name(name):
return False
pid = subprocess.check_output(['pidof', name])
if pid:
return True
appname = os.path.basename(args[1])
if nohup:
args.insert("nohup")
subprocess.check_output(args)
pid = subprocess.check_output(['pidof', appname])
if pid:
return True
return False
def pgrep(*args):
cmd = "ps f | "
if not args:
return ""
for arg in args:
cmd += "grep %s | " % arg
cmd += "grep -v grep | head -n 1 | awk '{print $1}' | xargs kill"
print cmd
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
ps = subprocess.Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
return ps.communicate()[0]
def strbt():
stack = traceback.format_stack()
return "\r\n".join(stack[:-1])
from collections import defaultdict
def tree():
return defaultdict(tree)
class WBList():
'''White List and Black List'''
def __init__(self):
self.wlist = {}
self.blist = {}
def wset(self, name):
self.wlist[name] = re.compile("^%s$" % name)
def wdel(self, name=None):
if name:
del self.wlist[name]
else:
self.wlist.clear()
def whas(self, name=None):
pass
def wdmp(self, name=None):
pass
| en | 0.301895 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2016-2017 China Telecommunication Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Covert version string a.b.c to a number a.b.c => 1.2.3 => a * 10000 * 10000 + b * 10000 + c ### ########################################################### # timer # ### ########################################################### # System information # # directly use platform.xxx() ### ########################################################### # setdebugable # # kill -USR1 <python_application.pid> ### ########################################################### # Quitmarker # ### ########################################################### # Jobs # # jid # 1. call a command to start a job # 2. report a event to server # >>> job_set/get/del # >>> jobset ### ##################################################################### # calulate MD5 for a given file path # ### ########################################################### # Get object by id # ### ########################################################### # Process functions # White List and Black List | 2.872727 | 3 |
pyforms_lite/settings.py | NikhilNarayana/pyforms-lite | 0 | 6620164 | <filename>pyforms_lite/settings.py
# !/usr/bin/python3
# -*- coding: utf-8 -*-
import logging, os, sys
PYFORMS_LOG_FILENAME = 'pyforms.log'
PYFORMS_LOG_HANDLER_FILE_LEVEL = logging.INFO
PYFORMS_LOG_HANDLER_CONSOLE_LEVEL = logging.INFO
PYFORMS_CONTROL_CODE_EDITOR_DEFAULT_FONT_SIZE = '12'
PYFORMS_CONTROL_EVENTS_GRAPH_DEFAULT_SCALE = 1
PYFORMS_QUALITY_TESTS_PATH = None
PYFORMS_STYLESHEET = None
PYFORMS_STYLESHEET_DARWIN = None
PYFORMS_STYLESHEET_LINUX = None
PYFORMS_STYLESHEET_WINDOWS = None
PYFORMS_CONTROLPLAYER_FONT = 9
# In a normal loading, there may be errors that show up which are not important.
# This happens because plugins_finder will search for classes on plugins which are not present because they are not needed.
# However, if plugin is not loaded at all, this will show all related errors.
# See pyforms.utils.plugins_finder.find_class()
PYFORMS_SILENT_PLUGINS_FINDER = True
| <filename>pyforms_lite/settings.py
# !/usr/bin/python3
# -*- coding: utf-8 -*-
import logging, os, sys
PYFORMS_LOG_FILENAME = 'pyforms.log'
PYFORMS_LOG_HANDLER_FILE_LEVEL = logging.INFO
PYFORMS_LOG_HANDLER_CONSOLE_LEVEL = logging.INFO
PYFORMS_CONTROL_CODE_EDITOR_DEFAULT_FONT_SIZE = '12'
PYFORMS_CONTROL_EVENTS_GRAPH_DEFAULT_SCALE = 1
PYFORMS_QUALITY_TESTS_PATH = None
PYFORMS_STYLESHEET = None
PYFORMS_STYLESHEET_DARWIN = None
PYFORMS_STYLESHEET_LINUX = None
PYFORMS_STYLESHEET_WINDOWS = None
PYFORMS_CONTROLPLAYER_FONT = 9
# In a normal loading, there may be errors that show up which are not important.
# This happens because plugins_finder will search for classes on plugins which are not present because they are not needed.
# However, if plugin is not loaded at all, this will show all related errors.
# See pyforms.utils.plugins_finder.find_class()
PYFORMS_SILENT_PLUGINS_FINDER = True
| en | 0.872158 | # !/usr/bin/python3 # -*- coding: utf-8 -*- # In a normal loading, there may be errors that show up which are not important. # This happens because plugins_finder will search for classes on plugins which are not present because they are not needed. # However, if plugin is not loaded at all, this will show all related errors. # See pyforms.utils.plugins_finder.find_class() | 1.678112 | 2 |
Chapter4_Packages/1_Packaging_2/main.py | franneck94/UdemyPythonProEng | 2 | 6620165 | <gh_stars>1-10
from my_package.utils.printing import print_hello_world
from my_package.utils.printing import print_name
def main() -> None:
print_hello_world()
print_name("Jan")
if __name__ == "__main__":
main()
| from my_package.utils.printing import print_hello_world
from my_package.utils.printing import print_name
def main() -> None:
print_hello_world()
print_name("Jan")
if __name__ == "__main__":
main() | none | 1 | 2.003047 | 2 | |
acondbs/blueprint/__init__.py | simonsobs/acondbs | 0 | 6620166 | <gh_stars>0
import textwrap
import json
import traceback
from flask import Blueprint, current_app
from flask_graphql import GraphQLView
from .. import auth, schema, ops
from .graphql_ide import GRAPHIQL_NEWER, GRAPHQL_PLAYGROUND
##__________________________________________________________________||
from flask import request
def format_to_str(data_dict):
format_item = textwrap.dedent(
"""
- {key}:
{value}
"""
).lstrip()
return "\n".join(
[
format_item.format(
key=k,
value=textwrap.indent(str(v), " " * 4).rstrip(),
)
for k, v in data_dict.items()
]
)
class GraphQLView(GraphQLView):
def dispatch_request(self):
res = super().dispatch_request()
# return res
if isinstance(res, str):
# e.g, GraphiQL
return res
try:
self._log_response(res)
except BaseException:
traceback.print_exc()
finally:
return res
def _log_response(self, res):
if res.status_code == 200:
return
level = "ERROR"
try:
msg = self._compose_message(res)
except BaseException:
msg = traceback.format_exc()
# print(msg)
# print()
ops.create_log(level=level, message=msg)
ops.commit()
def _compose_message(self, res):
content = {
"Request": self._format_request_to_str(),
"Response": self._format_response_to_str(res),
}
msg = format_to_str(content)
return msg
def _format_request_to_str(self):
content = {
"Header": str(request.headers),
"Data": format_to_str(self.parse_body()),
}
msg = format_to_str(content)
# print(msg)
return msg
def _format_response_to_str(self, response):
content = {
"Status": str(response.status),
"Data": textwrap.indent(
json.dumps(response.get_json(), indent=2),
" " * 4,
),
}
msg = format_to_str(content)
return msg
class GraphQLViewW(GraphQLView):
"""A wrapper of GraphQLView.
Used to determine arguments to GraphQLView.as_view() for each view
based on on the configuration and request.
The usual usage of GraphQLView in add_url_rule() as in the document
(https://github.com/graphql-python/flask-graphql/tree/v2.0.1#usage) is
as follows
bp.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
The arguments (schema, graphiql) need to have already been
determined when the module is imported. In this app, they have not
because they depend on the configuration and request.
The __init__() will be called in each view from
https://github.com/pallets/flask/blob/1.1.2/src/flask/views.py#L88
The arguments are determined in In __init__() and given to the
base class GraphQLView.
"""
def __init__(self, **kwargs):
kwargs.update(
{
"schema": _select_schema(),
"graphiql": current_app.config.get("ACONDBS_GRAPHIQL", False),
"graphiql_template": _select_graphiql_template(),
}
)
super().__init__(**kwargs)
##__________________________________________________________________||
bp = Blueprint("graphql", __name__)
bp.add_url_rule("/graphql", view_func=GraphQLViewW.as_view("graphql"))
def init_app(app):
app.register_blueprint(bp)
##__________________________________________________________________||
def _select_schema():
if auth.is_admin():
return schema.schema_admin
elif auth.is_signed_in():
return schema.schema_private
else:
return schema.schema_public
def _select_graphiql_template():
template_no = current_app.config.get("ACONDBS_GRAPHIQL_TEMPLATE_NO", None)
if template_no == 1:
return GRAPHIQL_NEWER
elif template_no == 2:
return GRAPHQL_PLAYGROUND
else:
return None
##__________________________________________________________________||
| import textwrap
import json
import traceback
from flask import Blueprint, current_app
from flask_graphql import GraphQLView
from .. import auth, schema, ops
from .graphql_ide import GRAPHIQL_NEWER, GRAPHQL_PLAYGROUND
##__________________________________________________________________||
from flask import request
def format_to_str(data_dict):
format_item = textwrap.dedent(
"""
- {key}:
{value}
"""
).lstrip()
return "\n".join(
[
format_item.format(
key=k,
value=textwrap.indent(str(v), " " * 4).rstrip(),
)
for k, v in data_dict.items()
]
)
class GraphQLView(GraphQLView):
def dispatch_request(self):
res = super().dispatch_request()
# return res
if isinstance(res, str):
# e.g, GraphiQL
return res
try:
self._log_response(res)
except BaseException:
traceback.print_exc()
finally:
return res
def _log_response(self, res):
if res.status_code == 200:
return
level = "ERROR"
try:
msg = self._compose_message(res)
except BaseException:
msg = traceback.format_exc()
# print(msg)
# print()
ops.create_log(level=level, message=msg)
ops.commit()
def _compose_message(self, res):
content = {
"Request": self._format_request_to_str(),
"Response": self._format_response_to_str(res),
}
msg = format_to_str(content)
return msg
def _format_request_to_str(self):
content = {
"Header": str(request.headers),
"Data": format_to_str(self.parse_body()),
}
msg = format_to_str(content)
# print(msg)
return msg
def _format_response_to_str(self, response):
content = {
"Status": str(response.status),
"Data": textwrap.indent(
json.dumps(response.get_json(), indent=2),
" " * 4,
),
}
msg = format_to_str(content)
return msg
class GraphQLViewW(GraphQLView):
"""A wrapper of GraphQLView.
Used to determine arguments to GraphQLView.as_view() for each view
based on on the configuration and request.
The usual usage of GraphQLView in add_url_rule() as in the document
(https://github.com/graphql-python/flask-graphql/tree/v2.0.1#usage) is
as follows
bp.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
The arguments (schema, graphiql) need to have already been
determined when the module is imported. In this app, they have not
because they depend on the configuration and request.
The __init__() will be called in each view from
https://github.com/pallets/flask/blob/1.1.2/src/flask/views.py#L88
The arguments are determined in In __init__() and given to the
base class GraphQLView.
"""
def __init__(self, **kwargs):
kwargs.update(
{
"schema": _select_schema(),
"graphiql": current_app.config.get("ACONDBS_GRAPHIQL", False),
"graphiql_template": _select_graphiql_template(),
}
)
super().__init__(**kwargs)
##__________________________________________________________________||
bp = Blueprint("graphql", __name__)
bp.add_url_rule("/graphql", view_func=GraphQLViewW.as_view("graphql"))
def init_app(app):
app.register_blueprint(bp)
##__________________________________________________________________||
def _select_schema():
if auth.is_admin():
return schema.schema_admin
elif auth.is_signed_in():
return schema.schema_private
else:
return schema.schema_public
def _select_graphiql_template():
template_no = current_app.config.get("ACONDBS_GRAPHIQL_TEMPLATE_NO", None)
if template_no == 1:
return GRAPHIQL_NEWER
elif template_no == 2:
return GRAPHQL_PLAYGROUND
else:
return None
##__________________________________________________________________|| | en | 0.68074 | ##__________________________________________________________________|| - {key}: {value} # return res # e.g, GraphiQL # print(msg) # print() # print(msg) A wrapper of GraphQLView. Used to determine arguments to GraphQLView.as_view() for each view based on on the configuration and request. The usual usage of GraphQLView in add_url_rule() as in the document (https://github.com/graphql-python/flask-graphql/tree/v2.0.1#usage) is as follows bp.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True)) The arguments (schema, graphiql) need to have already been determined when the module is imported. In this app, they have not because they depend on the configuration and request. The __init__() will be called in each view from https://github.com/pallets/flask/blob/1.1.2/src/flask/views.py#L88 The arguments are determined in In __init__() and given to the base class GraphQLView. ##__________________________________________________________________|| ##__________________________________________________________________|| ##__________________________________________________________________|| | 2.331124 | 2 |
ImportAirfoil.py | bobm123/ImportAirfoil | 2 | 6620167 | <reponame>bobm123/ImportAirfoil
# Author-
# Description-Import Airfoil Points
import adsk.core
import adsk.fusion
import traceback
import os
from math import sqrt, sin, cos, atan2
# Globals
_app = None
_ui = None
_sketch = None
# keep event handlers referenced for the duration of the command
_handlers = []
# current set of airfoil points
_airfoil_data = [] # TODO: pass values in attributes
_airfoil_name = ""
_user_filename = ""
# Command inputs
_AirfoilFilename = adsk.core.TextBoxCommandInput.cast(None)
_LePointSelect = adsk.core.SelectionCommandInput.cast(None)
_TePointSelect = adsk.core.SelectionCommandInput.cast(None)
_statusMsg = adsk.core.TextBoxCommandInput.cast(None)
# Event handler that reacts to when the command is destroyed. This terminates the script.
class IaCommandDestroyHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
# When the command is done, terminate the script
# This will release all globals which will remove all event handlers
# adsk.terminate()
pass
except:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
# Event handler for the inputChanged event.
class IaCommandInputChangedHandler(adsk.core.InputChangedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.InputChangedEventArgs.cast(args)
changedInput = eventArgs.input
global _airfoil_data, _airfoil_name, _user_filename
# Determine what changed from changedInput.id and act on it
if changedInput.id == "AirfoilFilename_id":
filename = get_user_file()
# Try, if not read, invalidate input
if filename:
fn = os.path.split(filename)[-1]
with open(filename, "r") as f:
_airfoil_name, _airfoil_data = read_profile(f)
_user_filename = filename
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
# Event handler for the validateInputs event.
class IaCommandValidateInputsHandler(adsk.core.ValidateInputsEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.ValidateInputsEventArgs.cast(args)
global _statusMsg
_statusMsg.text = ""
if not _airfoil_data:
_statusMsg.text = "Select an airfoil file"
eventArgs.areInputsValid = False
else:
_statusMsg.text = "Imported: {}, {} points".format(
_airfoil_name, len(_airfoil_data)
)
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
# Event handler for the execute event.
class IaCommandExecuteHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.CommandEventArgs.cast(args)
unitsMgr = _app.activeProduct.unitsManager
if not _airfoil_data:
_ui.messageBox("Load airfoil table")
return
# Run the actual command code here
le_point = _LePointSelect.selection(0).entity.geometry.asArray()
te_point = _TePointSelect.selection(0).entity.geometry.asArray()
draw_airfoil(_sketch, _airfoil_data, le_point, te_point)
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
# Event handler that reacts when the command definition is executed which
# results in the command being created and this event being fired.
class IaCommandCreatedHandler(adsk.core.CommandCreatedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
# Get the command that was created.
cmd = adsk.core.Command.cast(args.command)
# Verify that a sketch is active.
global _sketch
if _app.activeEditObject.objectType == adsk.fusion.Sketch.classType():
_sketch = _app.activeEditObject
else:
_ui.messageBox("A sketch must be active for this command.")
return ()
# Connect to the variable the command will provide inputs for
global _AirfoilFilename, _statusMsg
global _LePointSelect, _TePointSelect
# Connect to additional command created events
onDestroy = IaCommandDestroyHandler()
cmd.destroy.add(onDestroy)
_handlers.append(onDestroy)
# Connect to the input changed event.
onInputChanged = IaCommandInputChangedHandler()
cmd.inputChanged.add(onInputChanged)
_handlers.append(onInputChanged)
# Connect to the validate inputs event
onValidateInputs = IaCommandValidateInputsHandler()
cmd.validateInputs.add(onValidateInputs)
_handlers.append(onValidateInputs)
# Connect to the execute event
onExecute = IaCommandExecuteHandler()
cmd.execute.add(onExecute)
_handlers.append(onExecute)
# Get the CommandInputs collection associated with the command.
inputs = cmd.commandInputs
# Create bool value input with button style that can be clicked.
_AirfoilFilename = inputs.addBoolValueInput(
"AirfoilFilename_id", "Select File", False, "resources/filebutton", True
)
# Create the Selection inputs for leading and trailing edge points
_LePointSelect = inputs.addSelectionInput(
"LePoint_id", "LE Point", "Leading edge location"
)
_LePointSelect.addSelectionFilter("SketchPoints")
_LePointSelect.setSelectionLimits(1, 1)
_TePointSelect = inputs.addSelectionInput(
"TePoint_id", "TE Point", "Trailing edge location"
)
_TePointSelect.addSelectionFilter("SketchPoints")
_TePointSelect.setSelectionLimits(1, 1)
# Add a status message box at bottom
_statusMsg = inputs.addTextBoxCommandInput("StatusMsg_id", "", "", 2, True)
_statusMsg.isFullWidth = True
except:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
def get_user_file():
"""Get user's file selection using system open file dialog"""
# Set up the file dialog.
fileDlg = _ui.createFileDialog()
fileDlg.isMultiSelectEnabled = False
fileDlg.title = "Open"
fileDlg.filter = "*.txt; *.dat"
dlgResult = fileDlg.showOpen()
if dlgResult == adsk.core.DialogResults.DialogOK:
user_file = fileDlg.filenames[0]
return user_file
else:
return None
def read_profile(infile):
"""
Reads contents of an airfoil definition file such as the
ones found here:
http://m-selig.ae.illinois.edu/ads/coord_database.html
Many have the airfoil's name followed by 2 values
indicating number of points for upper and lower surface,
then a list of upper surface points and finally the lower
surface points.
"""
# Skips airfoil name
name = infile.readline().strip()
# Read the points, then skip any blank lines
raw = [[float(c) for c in line.split()] for line in infile]
raw = [(p[0], p[1]) for p in raw if len(p) == 2]
# The first pair may be the length of the upper and lower data
len_upper = int(raw[0][0])
len_lower = int(raw[0][1])
if len_upper > 1 or len_lower > 1:
raw = raw[1:]
coordinates = raw[len_upper - 1 :: -1]
coordinates.extend(raw[len_upper + 1 :]) # skip the repeated (0,0)
else:
coordinates = raw
return name, coordinates
def mat_mult(t, points):
"""
Multiplies the 3x3 transform matrix with a list of points
All this happens in 'homogeneous coordinates' so the points
are assumed to be lie the z=1 plane, as (x,y,1). However,
the output is placed back on the z=0 plane so it can be
plotted on the sketch plane.
"""
p_out = []
for p in points:
px = p[0] * t[0][0] + p[1] * t[0][1] + t[0][2]
py = p[0] * t[1][0] + p[1] * t[1][1] + t[1][2]
p_out.append([px, py, 0.0])
return p_out
def transform_coordinates(points, le, te):
"""
Rotates and translates a set of points by applying
this transform matrix:
C*cos(A) -C*sin(A) LEx
C*sin(A) C*cos(A) LEy
0 0 1
Where C is the chord or length of line segment LE to TE
and A is its angle referenced to the X-axis. See wiki
article on "homogeneous coordinates" for details.
"""
c = sqrt((te[0] - le[0]) ** 2 + (te[1] - le[1]) ** 2)
a = atan2(te[1] - le[1], te[0] - le[0])
t = []
t.append([c * cos(a), -c * sin(a), le[0]])
t.append([c * sin(a), c * cos(a), le[1]])
# No need to actually append the last row
# t.append([0, 0, 1])
return mat_mult(t, points)
def draw_airfoil(sketch, verticies, le_point, te_point):
"""
Plot the airfoil coordinates so the lie between the
leading edge and trailing edge points. Result is a close polygon
"""
# Transform the points so they lie between the LE and TE points
trans_verts = transform_coordinates(verticies, le_point, te_point)
# TODO: generalize drawing a polygon from list of 2D points
lines = sketch.sketchCurves.sketchLines
# Start a first point
p_start = adsk.core.Point3D.create(trans_verts[0][0], trans_verts[0][1], 0)
p0 = p_start
for p in trans_verts[1:]:
new_line = lines.addByTwoPoints(p0, adsk.core.Point3D.create(p[0], p[1], 0))
p0 = new_line.endSketchPoint
# Close it by connecting p_end back to P_start
new_line = lines.addByTwoPoints(p0, p_start)
return
def remove_toolbar_icon(ui, button_id):
# Get panel the control is in.
addInsPanel = ui.allToolbarPanels.itemById("SolidScriptsAddinsPanel")
# Get and delete the button control.
buttonControl = addInsPanel.controls.itemById(button_id)
if buttonControl:
buttonControl.deleteMe()
else:
ui.messageBox("Could not find button control {}".format(button_id))
# Delete the button definition.
buttonExample = ui.commandDefinitions.itemById(button_id)
if buttonExample:
buttonExample.deleteMe()
else:
ui.messageBox("Could not find button definition {}".format(button_id))
def run(context):
try:
global _app, _ui
_app = adsk.core.Application.get()
_ui = _app.userInterface
# Get the existing command definition or create it if it doesn't already exist.
cmdDef = _ui.commandDefinitions.itemById("IaButton_id")
if not cmdDef:
cmdDef = _ui.commandDefinitions.addButtonDefinition(
"IaButton_id",
"Import Airfoil Data",
"Import airfoil coordinates from a file.",
".//resources//command_icons",
)
#for panel in ui.allToolbarPanels:
# print(panel.id)
# Connect to the command created event.
buttonExampleCreated = IaCommandCreatedHandler()
cmdDef.commandCreated.add(buttonExampleCreated)
_handlers.append(buttonExampleCreated)
# Get the ADD-INS panel in the model workspace.
addInsPanel = _ui.allToolbarPanels.itemById("SolidScriptsAddinsPanel")
# Add the button to the bottom.
buttonControl = addInsPanel.controls.addCommand(cmdDef)
# Make the button available in the panel.
buttonControl.isPromotedByDefault = True
buttonControl.isPromoted = True
# Prevent this module from being terminated when the script
# returns, we might be waiting for event handlers to fire.
adsk.autoTerminate(False)
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
def stop(context):
try:
remove_toolbar_icon(_ui, "IaButton_id")
# _ui.messageBox('Stop addin')
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
| # Author-
# Description-Import Airfoil Points
import adsk.core
import adsk.fusion
import traceback
import os
from math import sqrt, sin, cos, atan2
# Globals
_app = None
_ui = None
_sketch = None
# keep event handlers referenced for the duration of the command
_handlers = []
# current set of airfoil points
_airfoil_data = [] # TODO: pass values in attributes
_airfoil_name = ""
_user_filename = ""
# Command inputs
_AirfoilFilename = adsk.core.TextBoxCommandInput.cast(None)
_LePointSelect = adsk.core.SelectionCommandInput.cast(None)
_TePointSelect = adsk.core.SelectionCommandInput.cast(None)
_statusMsg = adsk.core.TextBoxCommandInput.cast(None)
# Event handler that reacts to when the command is destroyed. This terminates the script.
class IaCommandDestroyHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
# When the command is done, terminate the script
# This will release all globals which will remove all event handlers
# adsk.terminate()
pass
except:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
# Event handler for the inputChanged event.
class IaCommandInputChangedHandler(adsk.core.InputChangedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.InputChangedEventArgs.cast(args)
changedInput = eventArgs.input
global _airfoil_data, _airfoil_name, _user_filename
# Determine what changed from changedInput.id and act on it
if changedInput.id == "AirfoilFilename_id":
filename = get_user_file()
# Try, if not read, invalidate input
if filename:
fn = os.path.split(filename)[-1]
with open(filename, "r") as f:
_airfoil_name, _airfoil_data = read_profile(f)
_user_filename = filename
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
# Event handler for the validateInputs event.
class IaCommandValidateInputsHandler(adsk.core.ValidateInputsEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.ValidateInputsEventArgs.cast(args)
global _statusMsg
_statusMsg.text = ""
if not _airfoil_data:
_statusMsg.text = "Select an airfoil file"
eventArgs.areInputsValid = False
else:
_statusMsg.text = "Imported: {}, {} points".format(
_airfoil_name, len(_airfoil_data)
)
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
# Event handler for the execute event.
class IaCommandExecuteHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.CommandEventArgs.cast(args)
unitsMgr = _app.activeProduct.unitsManager
if not _airfoil_data:
_ui.messageBox("Load airfoil table")
return
# Run the actual command code here
le_point = _LePointSelect.selection(0).entity.geometry.asArray()
te_point = _TePointSelect.selection(0).entity.geometry.asArray()
draw_airfoil(_sketch, _airfoil_data, le_point, te_point)
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
# Event handler that reacts when the command definition is executed which
# results in the command being created and this event being fired.
class IaCommandCreatedHandler(adsk.core.CommandCreatedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
# Get the command that was created.
cmd = adsk.core.Command.cast(args.command)
# Verify that a sketch is active.
global _sketch
if _app.activeEditObject.objectType == adsk.fusion.Sketch.classType():
_sketch = _app.activeEditObject
else:
_ui.messageBox("A sketch must be active for this command.")
return ()
# Connect to the variable the command will provide inputs for
global _AirfoilFilename, _statusMsg
global _LePointSelect, _TePointSelect
# Connect to additional command created events
onDestroy = IaCommandDestroyHandler()
cmd.destroy.add(onDestroy)
_handlers.append(onDestroy)
# Connect to the input changed event.
onInputChanged = IaCommandInputChangedHandler()
cmd.inputChanged.add(onInputChanged)
_handlers.append(onInputChanged)
# Connect to the validate inputs event
onValidateInputs = IaCommandValidateInputsHandler()
cmd.validateInputs.add(onValidateInputs)
_handlers.append(onValidateInputs)
# Connect to the execute event
onExecute = IaCommandExecuteHandler()
cmd.execute.add(onExecute)
_handlers.append(onExecute)
# Get the CommandInputs collection associated with the command.
inputs = cmd.commandInputs
# Create bool value input with button style that can be clicked.
_AirfoilFilename = inputs.addBoolValueInput(
"AirfoilFilename_id", "Select File", False, "resources/filebutton", True
)
# Create the Selection inputs for leading and trailing edge points
_LePointSelect = inputs.addSelectionInput(
"LePoint_id", "LE Point", "Leading edge location"
)
_LePointSelect.addSelectionFilter("SketchPoints")
_LePointSelect.setSelectionLimits(1, 1)
_TePointSelect = inputs.addSelectionInput(
"TePoint_id", "TE Point", "Trailing edge location"
)
_TePointSelect.addSelectionFilter("SketchPoints")
_TePointSelect.setSelectionLimits(1, 1)
# Add a status message box at bottom
_statusMsg = inputs.addTextBoxCommandInput("StatusMsg_id", "", "", 2, True)
_statusMsg.isFullWidth = True
except:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
def get_user_file():
"""Get user's file selection using system open file dialog"""
# Set up the file dialog.
fileDlg = _ui.createFileDialog()
fileDlg.isMultiSelectEnabled = False
fileDlg.title = "Open"
fileDlg.filter = "*.txt; *.dat"
dlgResult = fileDlg.showOpen()
if dlgResult == adsk.core.DialogResults.DialogOK:
user_file = fileDlg.filenames[0]
return user_file
else:
return None
def read_profile(infile):
"""
Reads contents of an airfoil definition file such as the
ones found here:
http://m-selig.ae.illinois.edu/ads/coord_database.html
Many have the airfoil's name followed by 2 values
indicating number of points for upper and lower surface,
then a list of upper surface points and finally the lower
surface points.
"""
# Skips airfoil name
name = infile.readline().strip()
# Read the points, then skip any blank lines
raw = [[float(c) for c in line.split()] for line in infile]
raw = [(p[0], p[1]) for p in raw if len(p) == 2]
# The first pair may be the length of the upper and lower data
len_upper = int(raw[0][0])
len_lower = int(raw[0][1])
if len_upper > 1 or len_lower > 1:
raw = raw[1:]
coordinates = raw[len_upper - 1 :: -1]
coordinates.extend(raw[len_upper + 1 :]) # skip the repeated (0,0)
else:
coordinates = raw
return name, coordinates
def mat_mult(t, points):
"""
Multiplies the 3x3 transform matrix with a list of points
All this happens in 'homogeneous coordinates' so the points
are assumed to be lie the z=1 plane, as (x,y,1). However,
the output is placed back on the z=0 plane so it can be
plotted on the sketch plane.
"""
p_out = []
for p in points:
px = p[0] * t[0][0] + p[1] * t[0][1] + t[0][2]
py = p[0] * t[1][0] + p[1] * t[1][1] + t[1][2]
p_out.append([px, py, 0.0])
return p_out
def transform_coordinates(points, le, te):
"""
Rotates and translates a set of points by applying
this transform matrix:
C*cos(A) -C*sin(A) LEx
C*sin(A) C*cos(A) LEy
0 0 1
Where C is the chord or length of line segment LE to TE
and A is its angle referenced to the X-axis. See wiki
article on "homogeneous coordinates" for details.
"""
c = sqrt((te[0] - le[0]) ** 2 + (te[1] - le[1]) ** 2)
a = atan2(te[1] - le[1], te[0] - le[0])
t = []
t.append([c * cos(a), -c * sin(a), le[0]])
t.append([c * sin(a), c * cos(a), le[1]])
# No need to actually append the last row
# t.append([0, 0, 1])
return mat_mult(t, points)
def draw_airfoil(sketch, verticies, le_point, te_point):
"""
Plot the airfoil coordinates so the lie between the
leading edge and trailing edge points. Result is a close polygon
"""
# Transform the points so they lie between the LE and TE points
trans_verts = transform_coordinates(verticies, le_point, te_point)
# TODO: generalize drawing a polygon from list of 2D points
lines = sketch.sketchCurves.sketchLines
# Start a first point
p_start = adsk.core.Point3D.create(trans_verts[0][0], trans_verts[0][1], 0)
p0 = p_start
for p in trans_verts[1:]:
new_line = lines.addByTwoPoints(p0, adsk.core.Point3D.create(p[0], p[1], 0))
p0 = new_line.endSketchPoint
# Close it by connecting p_end back to P_start
new_line = lines.addByTwoPoints(p0, p_start)
return
def remove_toolbar_icon(ui, button_id):
# Get panel the control is in.
addInsPanel = ui.allToolbarPanels.itemById("SolidScriptsAddinsPanel")
# Get and delete the button control.
buttonControl = addInsPanel.controls.itemById(button_id)
if buttonControl:
buttonControl.deleteMe()
else:
ui.messageBox("Could not find button control {}".format(button_id))
# Delete the button definition.
buttonExample = ui.commandDefinitions.itemById(button_id)
if buttonExample:
buttonExample.deleteMe()
else:
ui.messageBox("Could not find button definition {}".format(button_id))
def run(context):
try:
global _app, _ui
_app = adsk.core.Application.get()
_ui = _app.userInterface
# Get the existing command definition or create it if it doesn't already exist.
cmdDef = _ui.commandDefinitions.itemById("IaButton_id")
if not cmdDef:
cmdDef = _ui.commandDefinitions.addButtonDefinition(
"IaButton_id",
"Import Airfoil Data",
"Import airfoil coordinates from a file.",
".//resources//command_icons",
)
#for panel in ui.allToolbarPanels:
# print(panel.id)
# Connect to the command created event.
buttonExampleCreated = IaCommandCreatedHandler()
cmdDef.commandCreated.add(buttonExampleCreated)
_handlers.append(buttonExampleCreated)
# Get the ADD-INS panel in the model workspace.
addInsPanel = _ui.allToolbarPanels.itemById("SolidScriptsAddinsPanel")
# Add the button to the bottom.
buttonControl = addInsPanel.controls.addCommand(cmdDef)
# Make the button available in the panel.
buttonControl.isPromotedByDefault = True
buttonControl.isPromoted = True
# Prevent this module from being terminated when the script
# returns, we might be waiting for event handlers to fire.
adsk.autoTerminate(False)
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
def stop(context):
try:
remove_toolbar_icon(_ui, "IaButton_id")
# _ui.messageBox('Stop addin')
except:
if _ui:
_ui.messageBox("Failed:\n{}".format(traceback.format_exc())) | en | 0.849337 | # Author- # Description-Import Airfoil Points # Globals # keep event handlers referenced for the duration of the command # current set of airfoil points # TODO: pass values in attributes # Command inputs # Event handler that reacts to when the command is destroyed. This terminates the script. # When the command is done, terminate the script # This will release all globals which will remove all event handlers # adsk.terminate() # Event handler for the inputChanged event. # Determine what changed from changedInput.id and act on it # Try, if not read, invalidate input # Event handler for the validateInputs event. # Event handler for the execute event. # Run the actual command code here # Event handler that reacts when the command definition is executed which # results in the command being created and this event being fired. # Get the command that was created. # Verify that a sketch is active. # Connect to the variable the command will provide inputs for # Connect to additional command created events # Connect to the input changed event. # Connect to the validate inputs event # Connect to the execute event # Get the CommandInputs collection associated with the command. # Create bool value input with button style that can be clicked. # Create the Selection inputs for leading and trailing edge points # Add a status message box at bottom Get user's file selection using system open file dialog # Set up the file dialog. Reads contents of an airfoil definition file such as the ones found here: http://m-selig.ae.illinois.edu/ads/coord_database.html Many have the airfoil's name followed by 2 values indicating number of points for upper and lower surface, then a list of upper surface points and finally the lower surface points. 
# Skips airfoil name # Read the points, then skip any blank lines # The first pair may be the length of the upper and lower data # skip the repeated (0,0) Multiplies the 3x3 transform matrix with a list of points All this happens in 'homogeneous coordinates' so the points are assumed to be lie the z=1 plane, as (x,y,1). However, the output is placed back on the z=0 plane so it can be plotted on the sketch plane. Rotates and translates a set of points by applying this transform matrix: C*cos(A) -C*sin(A) LEx C*sin(A) C*cos(A) LEy 0 0 1 Where C is the chord or length of line segment LE to TE and A is its angle referenced to the X-axis. See wiki article on "homogeneous coordinates" for details. # No need to actually append the last row # t.append([0, 0, 1]) Plot the airfoil coordinates so the lie between the leading edge and trailing edge points. Result is a close polygon # Transform the points so they lie between the LE and TE points # TODO: generalize drawing a polygon from list of 2D points # Start a first point # Close it by connecting p_end back to P_start # Get panel the control is in. # Get and delete the button control. # Delete the button definition. # Get the existing command definition or create it if it doesn't already exist. #for panel in ui.allToolbarPanels: # print(panel.id) # Connect to the command created event. # Get the ADD-INS panel in the model workspace. # Add the button to the bottom. # Make the button available in the panel. # Prevent this module from being terminated when the script # returns, we might be waiting for event handlers to fire. # _ui.messageBox('Stop addin') | 2.23168 | 2 |
utils/merge_people.py | GreenBankObservatory/nrqz_admin | 1 | 6620168 | from django.db import transaction
from django.db.models import Q
from django.contrib.postgres.search import TrigramSimilarity
from django_super_deduper.merge import MergedModelInstance
from django_super_deduper.models import MergeInfo
from cases.models import Person, Case, PreliminaryCase
THRESHOLD_DEFAULT = 0.9
CONCRETE_PERSON_FIELDS = (
"name",
"phone",
"fax",
"email",
"street",
"city",
"county",
"state",
"zipcode",
"comments",
"data_source",
)
CONTACT_VALUES = ["contact"]
APPLICANT_VALUES = ["applicant"]
def _find_similar_people(name, email="", people=None, threshold=THRESHOLD_DEFAULT):
if people is None:
people = Person.objects.all()
return (
people
# Annotate each item with its similarity ranking with the current name
.annotate(
name_similarity=TrigramSimilarity("name", name),
email_similarity=TrigramSimilarity("email", email),
)
# And filter out anything below the given threshold
.filter(
# Names must be above similarity threshold
Q(name_similarity__gt=threshold)
# Emails must be either above the similarity threshold,
# OR null. We don't want to exclude matches simply because they're
# missing an email -- these are actually _easier_ to merge!
& (Q(email_similarity__gt=threshold) | Q(email=""))
)
)
def find_similar_people(person, threshold=THRESHOLD_DEFAULT, people=None):
similar_people = _find_similar_people(
person.name, person.email, threshold=threshold, people=people
).exclude(id=person.id)
return similar_people
@transaction.atomic
def _handle_cross_references(
model_class, from_field, to_field, threshold=THRESHOLD_DEFAULT
):
""""Expand" all references of from_field to to_field with proper FKs
For example, if we have some PreliminaryCases where the `contact` field is set to a
Person named "applicant", this will:
* Set each of these cases' contact to the value of its applicant
* Delete the old person... maybe????
"""
cases = (
model_class.objects.annotate(
name_similarity=TrigramSimilarity(f"{from_field}__name", to_field)
)
# And filter out anything below the given threshold
.filter(
# Names must be above similarity threshold
Q(name_similarity__gt=threshold)
)
)
for case in cases.all():
from_field_value = getattr(case, from_field)
to_field_value = getattr(case, to_field)
setattr(case, from_field, to_field_value)
print(f"Set {case} '{from_field}' to '{to_field}': '{to_field_value!r}'")
case.save()
deletions = from_field_value.delete()
if deletions[0] != 1:
raise ValueError(
f"Unexpected number of deletions encountered when attempting to delete {from_field_value!r}: {deletions}\n"
"There should only be one deletion! Check logic in _handle_cross_references"
)
@transaction.atomic
def handle_cross_references(threshold=THRESHOLD_DEFAULT):
# Handle PreliminaryCases where the contact references the applicant
_handle_cross_references(
PreliminaryCase, "contact", "applicant", threshold=threshold
)
# Handle PreliminaryCases where the applicant references the contact
_handle_cross_references(
PreliminaryCase, "applicant", "contact", threshold=threshold
)
# Handle Cases where the contact references the applicant
_handle_cross_references(Case, "applicant", "contact", threshold=threshold)
# Handle Cases where the applicant references the contact
_handle_cross_references(Case, "contact", "applicant", threshold=threshold)
def merge_people(person_to_keep, people_to_merge):
(
person,
alias_field_values_summary,
alias_field_values,
) = MergedModelInstance.create_with_change_tracking(
person_to_keep,
people_to_merge,
# This deletes the merged instances
keep_old=False,
)
existing_merge_infos = []
# Avoid breaking serialization by replacing MIA instances with their ID
for item in alias_field_values:
if "model_import_attempt" in item:
item["model_import_attempt"] = item["model_import_attempt"].id
if "merge_info" in item:
# Add to list, and remove from item itself (will be merged later)
existing_merge_infos.append(item.pop("merge_info"))
# Filter out fields that we have not whitelisted
alias_field_values_summary = {
k: v
for k, v in alias_field_values_summary.items()
if k in CONCRETE_PERSON_FIELDS
}
if alias_field_values_summary or alias_field_values:
merge_info = MergeInfo.objects.create(
alias_field_values_summary=alias_field_values_summary,
alias_field_values=alias_field_values,
num_instances_merged=len(people_to_merge) + 1,
)
# Merge together
if existing_merge_infos:
merge_info = MergedModelInstance.create(merge_info, existing_merge_infos)
print("!!!", merge_info)
person.merge_info = merge_info
person.save()
return person_to_keep
| from django.db import transaction
from django.db.models import Q
from django.contrib.postgres.search import TrigramSimilarity
from django_super_deduper.merge import MergedModelInstance
from django_super_deduper.models import MergeInfo
from cases.models import Person, Case, PreliminaryCase
THRESHOLD_DEFAULT = 0.9
CONCRETE_PERSON_FIELDS = (
"name",
"phone",
"fax",
"email",
"street",
"city",
"county",
"state",
"zipcode",
"comments",
"data_source",
)
CONTACT_VALUES = ["contact"]
APPLICANT_VALUES = ["applicant"]
def _find_similar_people(name, email="", people=None, threshold=THRESHOLD_DEFAULT):
if people is None:
people = Person.objects.all()
return (
people
# Annotate each item with its similarity ranking with the current name
.annotate(
name_similarity=TrigramSimilarity("name", name),
email_similarity=TrigramSimilarity("email", email),
)
# And filter out anything below the given threshold
.filter(
# Names must be above similarity threshold
Q(name_similarity__gt=threshold)
# Emails must be either above the similarity threshold,
# OR null. We don't want to exclude matches simply because they're
# missing an email -- these are actually _easier_ to merge!
& (Q(email_similarity__gt=threshold) | Q(email=""))
)
)
def find_similar_people(person, threshold=THRESHOLD_DEFAULT, people=None):
similar_people = _find_similar_people(
person.name, person.email, threshold=threshold, people=people
).exclude(id=person.id)
return similar_people
@transaction.atomic
def _handle_cross_references(
model_class, from_field, to_field, threshold=THRESHOLD_DEFAULT
):
""""Expand" all references of from_field to to_field with proper FKs
For example, if we have some PreliminaryCases where the `contact` field is set to a
Person named "applicant", this will:
* Set each of these cases' contact to the value of its applicant
* Delete the old person... maybe????
"""
cases = (
model_class.objects.annotate(
name_similarity=TrigramSimilarity(f"{from_field}__name", to_field)
)
# And filter out anything below the given threshold
.filter(
# Names must be above similarity threshold
Q(name_similarity__gt=threshold)
)
)
for case in cases.all():
from_field_value = getattr(case, from_field)
to_field_value = getattr(case, to_field)
setattr(case, from_field, to_field_value)
print(f"Set {case} '{from_field}' to '{to_field}': '{to_field_value!r}'")
case.save()
deletions = from_field_value.delete()
if deletions[0] != 1:
raise ValueError(
f"Unexpected number of deletions encountered when attempting to delete {from_field_value!r}: {deletions}\n"
"There should only be one deletion! Check logic in _handle_cross_references"
)
@transaction.atomic
def handle_cross_references(threshold=THRESHOLD_DEFAULT):
# Handle PreliminaryCases where the contact references the applicant
_handle_cross_references(
PreliminaryCase, "contact", "applicant", threshold=threshold
)
# Handle PreliminaryCases where the applicant references the contact
_handle_cross_references(
PreliminaryCase, "applicant", "contact", threshold=threshold
)
# Handle Cases where the contact references the applicant
_handle_cross_references(Case, "applicant", "contact", threshold=threshold)
# Handle Cases where the applicant references the contact
_handle_cross_references(Case, "contact", "applicant", threshold=threshold)
def merge_people(person_to_keep, people_to_merge):
(
person,
alias_field_values_summary,
alias_field_values,
) = MergedModelInstance.create_with_change_tracking(
person_to_keep,
people_to_merge,
# This deletes the merged instances
keep_old=False,
)
existing_merge_infos = []
# Avoid breaking serialization by replacing MIA instances with their ID
for item in alias_field_values:
if "model_import_attempt" in item:
item["model_import_attempt"] = item["model_import_attempt"].id
if "merge_info" in item:
# Add to list, and remove from item itself (will be merged later)
existing_merge_infos.append(item.pop("merge_info"))
# Filter out fields that we have not whitelisted
alias_field_values_summary = {
k: v
for k, v in alias_field_values_summary.items()
if k in CONCRETE_PERSON_FIELDS
}
if alias_field_values_summary or alias_field_values:
merge_info = MergeInfo.objects.create(
alias_field_values_summary=alias_field_values_summary,
alias_field_values=alias_field_values,
num_instances_merged=len(people_to_merge) + 1,
)
# Merge together
if existing_merge_infos:
merge_info = MergedModelInstance.create(merge_info, existing_merge_infos)
print("!!!", merge_info)
person.merge_info = merge_info
person.save()
return person_to_keep
| en | 0.861115 | # Annotate each item with its similarity ranking with the current name # And filter out anything below the given threshold # Names must be above similarity threshold # Emails must be either above the similarity threshold, # OR null. We don't want to exclude matches simply because they're # missing an email -- these are actually _easier_ to merge! "Expand" all references of from_field to to_field with proper FKs For example, if we have some PreliminaryCases where the `contact` field is set to a Person named "applicant", this will: * Set each of these cases' contact to the value of its applicant * Delete the old person... maybe???? # And filter out anything below the given threshold # Names must be above similarity threshold # Handle PreliminaryCases where the contact references the applicant # Handle PreliminaryCases where the applicant references the contact # Handle Cases where the contact references the applicant # Handle Cases where the applicant references the contact # This deletes the merged instances # Avoid breaking serialization by replacing MIA instances with their ID # Add to list, and remove from item itself (will be merged later) # Filter out fields that we have not whitelisted # Merge together | 2.544376 | 3 |
tests/test_basic.py | rastrea2r/rastrea2r-server | 2 | 6620169 | <reponame>rastrea2r/rastrea2r-server
import os
import rastrea2r_server
from rastrea2r_server import app, auth, config, db, user
import unittest
from base64 import b64encode
class BasicTestCase(unittest.TestCase):
""" Basic test cases """
TEST_USER = "testuser"
TEST_PWD = "<PASSWORD>"
def setUp(self):
rastrea2r_server.app.config["TESTING"] = True
app.config["DEBUG"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(
app.root_path, "testdb"
)
self.app = app.test_client()
db.drop_all()
db.create_all()
user.add_user(self.TEST_USER, self.TEST_PWD)
# executed after each test
def tearDown(self):
pass
def test_basic(self):
""" check True is True """
self.assertTrue(True)
def test_version(self):
""" check rastrea2r_server exposes a version attribute """
self.assertTrue(hasattr(rastrea2r_server, "__version__"))
self.assertIsInstance(rastrea2r_server.__version__, str)
def test_unauthenticate_api(self):
response = self.app.get("/test")
self.assertIn(b"Hello World", response.data)
self.assertEqual(response.status_code, 200)
def test_valid_root_route(self):
response = self.app.get("/", follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertIn(b"api server: Authentication required for use", response.data)
def test_invalid_route(self):
response = self.app.get("/aaa", follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertIn(b"Not Found:", response.data)
def test_missing_authentication(self):
response = self.app.get("/rastrea2r/api/v1.0/echo", follow_redirects=True)
self.assertEqual(response.status_code, 401)
self.assertIn(b"Authentication Failed:", response.data)
def test_basic_authentication(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.get(
"/rastrea2r/api/v1.0/echo?message=test", headers=headers
)
self.assertIn(b"test", response.data)
self.assertEqual(response.status_code, 200)
def test_invalid_authentication(self):
headers = {
"Authorization": "Basic %s" % b64encode(b"invalid:invalid").decode("ascii")
}
response = self.app.get(
"/rastrea2r/api/v1.0/echo?message=test", headers=headers
)
self.assertIn(b"Authentication Failed", response.data)
self.assertEqual(response.status_code, 401)
def test_method_not_allowed(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.delete(
"/rastrea2r/api/v1.0/echo?message=test", headers=headers
)
self.assertIn(b"Method Not Allowed", response.data)
self.assertEqual(response.status_code, 405)
def test_missing_attribute(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.get("/rastrea2r/api/v1.0/echo", headers=headers)
self.assertIn(b"Must provide message attribute via GET", response.data)
self.assertEqual(response.status_code, 200)
def test_valid_info(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.get("/rastrea2r/api/v1.0/info", headers=headers)
self.assertIn(b"Flask API Data", response.data)
self.assertEqual(response.status_code, 200)
def test_valid_key_info(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.get(
"/rastrea2r/api/v1.0/info?key=remoteaddr", headers=headers
)
self.assertIn(b"GET", response.data)
self.assertEqual(response.status_code, 200)
if __name__ == "__main__":
unittest.main()
| import os
import rastrea2r_server
from rastrea2r_server import app, auth, config, db, user
import unittest
from base64 import b64encode
class BasicTestCase(unittest.TestCase):
""" Basic test cases """
TEST_USER = "testuser"
TEST_PWD = "<PASSWORD>"
def setUp(self):
rastrea2r_server.app.config["TESTING"] = True
app.config["DEBUG"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(
app.root_path, "testdb"
)
self.app = app.test_client()
db.drop_all()
db.create_all()
user.add_user(self.TEST_USER, self.TEST_PWD)
# executed after each test
def tearDown(self):
pass
def test_basic(self):
""" check True is True """
self.assertTrue(True)
def test_version(self):
""" check rastrea2r_server exposes a version attribute """
self.assertTrue(hasattr(rastrea2r_server, "__version__"))
self.assertIsInstance(rastrea2r_server.__version__, str)
def test_unauthenticate_api(self):
response = self.app.get("/test")
self.assertIn(b"Hello World", response.data)
self.assertEqual(response.status_code, 200)
def test_valid_root_route(self):
response = self.app.get("/", follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertIn(b"api server: Authentication required for use", response.data)
def test_invalid_route(self):
response = self.app.get("/aaa", follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertIn(b"Not Found:", response.data)
def test_missing_authentication(self):
response = self.app.get("/rastrea2r/api/v1.0/echo", follow_redirects=True)
self.assertEqual(response.status_code, 401)
self.assertIn(b"Authentication Failed:", response.data)
def test_basic_authentication(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.get(
"/rastrea2r/api/v1.0/echo?message=test", headers=headers
)
self.assertIn(b"test", response.data)
self.assertEqual(response.status_code, 200)
def test_invalid_authentication(self):
headers = {
"Authorization": "Basic %s" % b64encode(b"invalid:invalid").decode("ascii")
}
response = self.app.get(
"/rastrea2r/api/v1.0/echo?message=test", headers=headers
)
self.assertIn(b"Authentication Failed", response.data)
self.assertEqual(response.status_code, 401)
def test_method_not_allowed(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.delete(
"/rastrea2r/api/v1.0/echo?message=test", headers=headers
)
self.assertIn(b"Method Not Allowed", response.data)
self.assertEqual(response.status_code, 405)
def test_missing_attribute(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.get("/rastrea2r/api/v1.0/echo", headers=headers)
self.assertIn(b"Must provide message attribute via GET", response.data)
self.assertEqual(response.status_code, 200)
def test_valid_info(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.get("/rastrea2r/api/v1.0/info", headers=headers)
self.assertIn(b"Flask API Data", response.data)
self.assertEqual(response.status_code, 200)
def test_valid_key_info(self):
headers = {
"Authorization": "Basic %s"
% b64encode(b"testuser:testpasswd").decode("ascii")
}
response = self.app.get(
"/rastrea2r/api/v1.0/info?key=remoteaddr", headers=headers
)
self.assertIn(b"GET", response.data)
self.assertEqual(response.status_code, 200)
if __name__ == "__main__":
unittest.main() | en | 0.760584 | Basic test cases # executed after each test check True is True check rastrea2r_server exposes a version attribute | 2.720511 | 3 |
tests/test_market.py | hentr/cryptocom-exchange | 0 | 6620170 | import pytest
import cryptocom.exchange as cro
@pytest.mark.asyncio
async def test_get_pairs(exchange: cro.Exchange):
pairs = await exchange.get_pairs()
keys = [
'quote_currency', 'base_currency',
'price_decimals', 'quantity_decimals'
]
for pair_keys in pairs.values():
assert sorted(keys) == sorted(pair_keys)
for pair in pairs:
assert pair in cro.Pair
@pytest.mark.asyncio
async def test_get_tickers(exchange: cro.Exchange):
tickers = await exchange.get_tickers()
keys = sorted(['<KEY>
for data in tickers.values():
assert keys == sorted(data)
sorted(p.value for p in tickers) == sorted(p.value for p in cro.Pair)
ticker = await exchange.get_tickers(cro.Pair.BTC_USDT)
assert keys == sorted(ticker)
@pytest.mark.asyncio
async def test_get_trades(exchange: cro.Exchange):
trades = await exchange.get_trades(cro.Pair.CRO_USDT)
keys = sorted(['<KEY>'])
for trade in trades:
assert sorted(trade) == keys
@pytest.mark.asyncio
async def test_get_price(exchange: cro.Exchange):
price = await exchange.get_price(cro.Pair.CRO_USDT)
assert price > 0
@pytest.mark.asyncio
async def test_get_orderbook(exchange: cro.Exchange):
data = await exchange.get_orderbook(cro.Pair.CRO_USDT, depth=50)
asks = data['asks']
bids = data['bids']
# price, quantity, number of orders
assert asks and bids
assert len(asks[0]) == 3
assert len(bids[0]) == 3
@pytest.mark.asyncio
async def test_listen_candles(exchange: cro.Exchange):
candles = []
pairs = (cro.Pair.CRO_USDC, cro.Pair.USDC_USDT, cro.Pair.BTC_USDT)
count = 0
default_count = 300
async for candle in exchange.listen_candles(cro.Period.MINS, *pairs):
candles.append(candle)
count += 1
if count == len(pairs) * default_count:
break
for pair in pairs:
assert len([
c for c in candles if c.pair == pair
]) == default_count
@pytest.mark.asyncio
async def test_listen_trades(exchange: cro.Exchange):
trades = []
count = 0
pairs = [cro.Pair.CRO_USDT, cro.Pair.BTC_USDT]
pairs_seen = set()
async for trade in exchange.listen_trades(*pairs):
trades.append(trade)
pairs_seen.add(trade.pair)
if count > 100:
break
count += 1
assert len(pairs_seen) == len(pairs)
@pytest.mark.asyncio
async def test_listen_orderbook(exchange: cro.Exchange):
pairs = [cro.Pair.CRO_USDT, cro.Pair.BTC_USDT]
orderbooks = []
depth = 50
async for orderbook in exchange.listen_orderbook(*pairs, depth=depth):
orderbooks.append(orderbook)
if set(pairs) == set(o.pair for o in orderbooks):
break
for book in orderbooks:
assert book.buys and book.sells
assert book.sells[0].price > book.buys[0].price
assert book.spread > 0
assert len(book.sells) == len(book.buys) == depth
| import pytest
import cryptocom.exchange as cro
@pytest.mark.asyncio
async def test_get_pairs(exchange: cro.Exchange):
pairs = await exchange.get_pairs()
keys = [
'quote_currency', 'base_currency',
'price_decimals', 'quantity_decimals'
]
for pair_keys in pairs.values():
assert sorted(keys) == sorted(pair_keys)
for pair in pairs:
assert pair in cro.Pair
@pytest.mark.asyncio
async def test_get_tickers(exchange: cro.Exchange):
tickers = await exchange.get_tickers()
keys = sorted(['<KEY>
for data in tickers.values():
assert keys == sorted(data)
sorted(p.value for p in tickers) == sorted(p.value for p in cro.Pair)
ticker = await exchange.get_tickers(cro.Pair.BTC_USDT)
assert keys == sorted(ticker)
@pytest.mark.asyncio
async def test_get_trades(exchange: cro.Exchange):
trades = await exchange.get_trades(cro.Pair.CRO_USDT)
keys = sorted(['<KEY>'])
for trade in trades:
assert sorted(trade) == keys
@pytest.mark.asyncio
async def test_get_price(exchange: cro.Exchange):
price = await exchange.get_price(cro.Pair.CRO_USDT)
assert price > 0
@pytest.mark.asyncio
async def test_get_orderbook(exchange: cro.Exchange):
data = await exchange.get_orderbook(cro.Pair.CRO_USDT, depth=50)
asks = data['asks']
bids = data['bids']
# price, quantity, number of orders
assert asks and bids
assert len(asks[0]) == 3
assert len(bids[0]) == 3
@pytest.mark.asyncio
async def test_listen_candles(exchange: cro.Exchange):
candles = []
pairs = (cro.Pair.CRO_USDC, cro.Pair.USDC_USDT, cro.Pair.BTC_USDT)
count = 0
default_count = 300
async for candle in exchange.listen_candles(cro.Period.MINS, *pairs):
candles.append(candle)
count += 1
if count == len(pairs) * default_count:
break
for pair in pairs:
assert len([
c for c in candles if c.pair == pair
]) == default_count
@pytest.mark.asyncio
async def test_listen_trades(exchange: cro.Exchange):
trades = []
count = 0
pairs = [cro.Pair.CRO_USDT, cro.Pair.BTC_USDT]
pairs_seen = set()
async for trade in exchange.listen_trades(*pairs):
trades.append(trade)
pairs_seen.add(trade.pair)
if count > 100:
break
count += 1
assert len(pairs_seen) == len(pairs)
@pytest.mark.asyncio
async def test_listen_orderbook(exchange: cro.Exchange):
pairs = [cro.Pair.CRO_USDT, cro.Pair.BTC_USDT]
orderbooks = []
depth = 50
async for orderbook in exchange.listen_orderbook(*pairs, depth=depth):
orderbooks.append(orderbook)
if set(pairs) == set(o.pair for o in orderbooks):
break
for book in orderbooks:
assert book.buys and book.sells
assert book.sells[0].price > book.buys[0].price
assert book.spread > 0
assert len(book.sells) == len(book.buys) == depth
| en | 0.805229 | # price, quantity, number of orders | 2.317004 | 2 |
LogisticRegression/raw_logistic_regression.py | hf136/models | 1 | 6620171 | <reponame>hf136/models
# coding=utf-8
from __future__ import print_function
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import utils
__author__ = 'wuyueqiu'
X, y = utils.gen_data()
# 定义参数 w 和 b
theta = np.random.rand(2)
bias = 0
learning_rate = 1
for epoch in range(1000):
# 定义模型,前向计算
z = X.dot(theta) + bias
pred_y = 1 / (1 + np.exp(-z))
# loss
loss = - (y * np.log(pred_y) + (1 - y) * np.log(1 - pred_y)).mean()
print('epoch {}, loss {}'.format(epoch, loss))
# 计算梯度(求导)
grad_theta = (pred_y - y).T.dot(X) / y.size
grad_bias = (pred_y - y).sum() / y.size
# 更新参数
theta -= learning_rate * grad_theta
bias -= learning_rate * grad_bias
print('theta:\n', theta)
print('bias:\n', bias)
z = X.dot(theta) + bias
pred_y = 1 / (1 + np.exp(-z))
X1 = X[pred_y >= 0.5]
X2 = X[pred_y < 0.5]
plt.plot(X1[:, 0], X1[:, 1], 'bo')
plt.plot(X2[:, 0], X2[:, 1], 'rx')
x = np.arange(1, 3, 0.1)
y = -(theta[0] * x + bias) / theta[1]
plt.plot(x, y)
plt.show()
| # coding=utf-8
from __future__ import print_function
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import utils
__author__ = 'wuyueqiu'
X, y = utils.gen_data()
# 定义参数 w 和 b
theta = np.random.rand(2)
bias = 0
learning_rate = 1
for epoch in range(1000):
# 定义模型,前向计算
z = X.dot(theta) + bias
pred_y = 1 / (1 + np.exp(-z))
# loss
loss = - (y * np.log(pred_y) + (1 - y) * np.log(1 - pred_y)).mean()
print('epoch {}, loss {}'.format(epoch, loss))
# 计算梯度(求导)
grad_theta = (pred_y - y).T.dot(X) / y.size
grad_bias = (pred_y - y).sum() / y.size
# 更新参数
theta -= learning_rate * grad_theta
bias -= learning_rate * grad_bias
print('theta:\n', theta)
print('bias:\n', bias)
z = X.dot(theta) + bias
pred_y = 1 / (1 + np.exp(-z))
X1 = X[pred_y >= 0.5]
X2 = X[pred_y < 0.5]
plt.plot(X1[:, 0], X1[:, 1], 'bo')
plt.plot(X2[:, 0], X2[:, 1], 'rx')
x = np.arange(1, 3, 0.1)
y = -(theta[0] * x + bias) / theta[1]
plt.plot(x, y)
plt.show() | zh | 0.735917 | # coding=utf-8 # 定义参数 w 和 b # 定义模型,前向计算 # loss # 计算梯度(求导) # 更新参数 | 3.166563 | 3 |
tests/test_linear.py | srm-mic/nocode.ai | 7 | 6620172 | <reponame>srm-mic/nocode.ai
from src.build_model import build_model, SkeletonModel
from src.parser import parse_yaml
import torch
network = parse_yaml("demo/demo_linear.yaml")
sm = SkeletonModel(network)
sm.show_nodes()
sm.load_weights("test_linear.pth")
x = torch.ones((3, 1))
ans = sm(x.T)
"""
fix the tests. it works. verified manually.
print(ans[0].detach() == torch.tensor(0.7022))
assert ans == torch.tensor([[0.7022]]), "Fails on demo_linear.yaml"
""" | from src.build_model import build_model, SkeletonModel
from src.parser import parse_yaml
import torch
network = parse_yaml("demo/demo_linear.yaml")
sm = SkeletonModel(network)
sm.show_nodes()
sm.load_weights("test_linear.pth")
x = torch.ones((3, 1))
ans = sm(x.T)
"""
fix the tests. it works. verified manually.
print(ans[0].detach() == torch.tensor(0.7022))
assert ans == torch.tensor([[0.7022]]), "Fails on demo_linear.yaml"
""" | en | 0.719593 | fix the tests. it works. verified manually. print(ans[0].detach() == torch.tensor(0.7022)) assert ans == torch.tensor([[0.7022]]), "Fails on demo_linear.yaml" | 2.268046 | 2 |
imgbeddings/__init__.py | minimaxir/imgbeddings | 42 | 6620173 | <reponame>minimaxir/imgbeddings
from .imgbeddings import imgbeddings # noqa
| from .imgbeddings import imgbeddings # noqa | none | 1 | 1.008234 | 1 | |
attention/weights2nii.py | rgbayrak/multi-task-physio | 0 | 6620174 | import nibabel as nib
import os
import numpy as np
import matplotlib.pyplot as plt
path_atlas = '/data/gm-atlases/Schaefer/Schaefer2018_400Parcels_17Networks_order_FSLMNI152_2mm.nii.gz'
# path_atlas = '/data/wm-atlases/pandora_2mm/thresholded/TractSeg-th0.95-2mm_HCP.nii.gz'
# path_atlas = '/data/AAN_brainstem_2mm/AAN_MNI152_2mm.nii.gz'
# path_atlas = '/data/gm-atlases/Tian2020MSA/7T_2mm/Tian_Subcortex_S1_7T_2mm.nii.gz'
atlas = nib.load(path_atlas)
atlas_img = atlas.get_fdata()
atlas_id = 'schaefer'
d4 = False
# exclude = [4, 12]
exclude = []
path_labels = 'info.csv'
with open(path_labels, 'r') as f:
content = f.readlines()
hr = []
rv = []
ids = []
roi_labels = []
id = 0
for i in range(len(content)):
_, label, atlas_name, rv_val, hr_val, val = content[i].strip().split(',')
if i != 0 and atlas_name == atlas_id:
# hr.append(float(hr_val))
# rv.append(float(rv_val))
ids.append(int(id))
roi_labels.append(label)
id += 1
# rv_copy = np.zeros(atlas_img.shape)
hr_copy = np.zeros(atlas_img.shape)
if not d4:
for id in ids:
# rv_copy[atlas_img == (id+1)] = rv[id]
# hr_copy[atlas_img == (id+1)] = hr[id]
hr_copy[atlas_img == (id+1)] = s_att3[0][id]
print(s_att3[0][id])
else:
hr = np.array(hr)
# rv = np.array(rv)
#####TRACTSEG or AAN#####
# rv_copy = atlas_img * rv[None, None, None, ...]
hr_copy = atlas_img * hr[None, None, None, ...]
for ex in exclude:
# rv_copy = np.delete(rv_copy, ex, 3)
hr_copy = np.delete(hr_copy, ex, 3)
# rv_copy = np.max(rv_copy, axis=3)
hr_copy = np.max(hr_copy, axis=3)
# save new img
# rv_img = nib.Nifti1Image(rv_copy, atlas.affine, atlas.header)
# rv_path = '/home/bayrakrg/neurdy/pycharm/multi-task-physio/single-roi-models/{}_rv_mean_pearson.nii.gz'.format(atlas_id)
# nib.save(rv_img, rv_path)
hr_img = nib.Nifti1Image(hr_copy, atlas.affine, atlas.header)
hr_path = '/home/bayrakrg/neurdy/pycharm/multi-task-physio/IPMI2021/{}_rv_mean_pearson.nii.gz'.format(atlas_id)
nib.save(hr_img, hr_path)
| import nibabel as nib
import os
import numpy as np
import matplotlib.pyplot as plt
path_atlas = '/data/gm-atlases/Schaefer/Schaefer2018_400Parcels_17Networks_order_FSLMNI152_2mm.nii.gz'
# path_atlas = '/data/wm-atlases/pandora_2mm/thresholded/TractSeg-th0.95-2mm_HCP.nii.gz'
# path_atlas = '/data/AAN_brainstem_2mm/AAN_MNI152_2mm.nii.gz'
# path_atlas = '/data/gm-atlases/Tian2020MSA/7T_2mm/Tian_Subcortex_S1_7T_2mm.nii.gz'
atlas = nib.load(path_atlas)
atlas_img = atlas.get_fdata()
atlas_id = 'schaefer'
d4 = False
# exclude = [4, 12]
exclude = []
path_labels = 'info.csv'
with open(path_labels, 'r') as f:
content = f.readlines()
hr = []
rv = []
ids = []
roi_labels = []
id = 0
for i in range(len(content)):
_, label, atlas_name, rv_val, hr_val, val = content[i].strip().split(',')
if i != 0 and atlas_name == atlas_id:
# hr.append(float(hr_val))
# rv.append(float(rv_val))
ids.append(int(id))
roi_labels.append(label)
id += 1
# rv_copy = np.zeros(atlas_img.shape)
hr_copy = np.zeros(atlas_img.shape)
if not d4:
for id in ids:
# rv_copy[atlas_img == (id+1)] = rv[id]
# hr_copy[atlas_img == (id+1)] = hr[id]
hr_copy[atlas_img == (id+1)] = s_att3[0][id]
print(s_att3[0][id])
else:
hr = np.array(hr)
# rv = np.array(rv)
#####TRACTSEG or AAN#####
# rv_copy = atlas_img * rv[None, None, None, ...]
hr_copy = atlas_img * hr[None, None, None, ...]
for ex in exclude:
# rv_copy = np.delete(rv_copy, ex, 3)
hr_copy = np.delete(hr_copy, ex, 3)
# rv_copy = np.max(rv_copy, axis=3)
hr_copy = np.max(hr_copy, axis=3)
# save new img
# rv_img = nib.Nifti1Image(rv_copy, atlas.affine, atlas.header)
# rv_path = '/home/bayrakrg/neurdy/pycharm/multi-task-physio/single-roi-models/{}_rv_mean_pearson.nii.gz'.format(atlas_id)
# nib.save(rv_img, rv_path)
hr_img = nib.Nifti1Image(hr_copy, atlas.affine, atlas.header)
hr_path = '/home/bayrakrg/neurdy/pycharm/multi-task-physio/IPMI2021/{}_rv_mean_pearson.nii.gz'.format(atlas_id)
nib.save(hr_img, hr_path)
| en | 0.411126 | # path_atlas = '/data/wm-atlases/pandora_2mm/thresholded/TractSeg-th0.95-2mm_HCP.nii.gz' # path_atlas = '/data/AAN_brainstem_2mm/AAN_MNI152_2mm.nii.gz' # path_atlas = '/data/gm-atlases/Tian2020MSA/7T_2mm/Tian_Subcortex_S1_7T_2mm.nii.gz' # exclude = [4, 12] # hr.append(float(hr_val)) # rv.append(float(rv_val)) # rv_copy = np.zeros(atlas_img.shape) # rv_copy[atlas_img == (id+1)] = rv[id] # hr_copy[atlas_img == (id+1)] = hr[id] # rv = np.array(rv) #####TRACTSEG or AAN##### # rv_copy = atlas_img * rv[None, None, None, ...] # rv_copy = np.delete(rv_copy, ex, 3) # rv_copy = np.max(rv_copy, axis=3) # save new img # rv_img = nib.Nifti1Image(rv_copy, atlas.affine, atlas.header) # rv_path = '/home/bayrakrg/neurdy/pycharm/multi-task-physio/single-roi-models/{}_rv_mean_pearson.nii.gz'.format(atlas_id) # nib.save(rv_img, rv_path) | 2.106129 | 2 |
python3/aliennumbers/aliennumbers.py | luthercss/kattis | 0 | 6620175 | <reponame>luthercss/kattis<gh_stars>0
def base_to_int(code, lang):
l = len(lang)
s = 0
n = 0
m = {lang[i]: i for i in range(l)}
for i in reversed(code):
s += m[i] * l ** n
n += 1
return s
def int_to_base(num, lang):
lst = []
l = len(lang)
m = {i: lang[i] for i in range(l)}
while num > 0:
lst = [m[num % l]] + lst
num //= l
return .join(lst)
for j in range(int(input())):
print('Case #{0}: {1}'.format(j+1, (lambda s: int_to_base(base_to_int(s[0], s[1]), s[2]))(input().split())))
| def base_to_int(code, lang):
l = len(lang)
s = 0
n = 0
m = {lang[i]: i for i in range(l)}
for i in reversed(code):
s += m[i] * l ** n
n += 1
return s
def int_to_base(num, lang):
lst = []
l = len(lang)
m = {i: lang[i] for i in range(l)}
while num > 0:
lst = [m[num % l]] + lst
num //= l
return .join(lst)
for j in range(int(input())):
print('Case #{0}: {1}'.format(j+1, (lambda s: int_to_base(base_to_int(s[0], s[1]), s[2]))(input().split()))) | en | 0.111727 | #{0}: {1}'.format(j+1, (lambda s: int_to_base(base_to_int(s[0], s[1]), s[2]))(input().split()))) | 3.430577 | 3 |
cryptography/key_pad_decode/__init__.py | JASTYN/pythonmaster | 3 | 6620176 | def decode(string):
d = {1: 9, 2: 8, 3: 7, 4: 6, 5: 0, 9: 1, 8: 2, 7: 3, 6: 4, 0: 5}
m = [str(d.get(int(i))) for i in string]
return "".join(m)
| def decode(string):
d = {1: 9, 2: 8, 3: 7, 4: 6, 5: 0, 9: 1, 8: 2, 7: 3, 6: 4, 0: 5}
m = [str(d.get(int(i))) for i in string]
return "".join(m)
| none | 1 | 3.18635 | 3 | |
vest/aggregations/lyapunov.py | vcerqueira/vest-python | 5 | 6620177 | import numpy as np
import nolds
def mle(x: np.ndarray) -> float:
""" Maximum Lyapunov Exponent
:param x: 1-d numeric vector
:return: numeric scalar
"""
k = int(np.sqrt(len(x)))
try:
out = nolds.lyap_r(data=x,
emb_dim=k,
trajectory_len=k,
min_neighbors=k)
except (ValueError, np.linalg.LinAlgError, AssertionError) as e:
out = np.nan
return out
| import numpy as np
import nolds
def mle(x: np.ndarray) -> float:
""" Maximum Lyapunov Exponent
:param x: 1-d numeric vector
:return: numeric scalar
"""
k = int(np.sqrt(len(x)))
try:
out = nolds.lyap_r(data=x,
emb_dim=k,
trajectory_len=k,
min_neighbors=k)
except (ValueError, np.linalg.LinAlgError, AssertionError) as e:
out = np.nan
return out
| en | 0.267414 | Maximum Lyapunov Exponent :param x: 1-d numeric vector :return: numeric scalar | 2.51585 | 3 |
pomodoro-start/main.py | ankitbharti1994/Python | 0 | 6620178 | from tkinter import *
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 2
SHORT_BREAK_MIN = 1
LONG_BREAK_MIN = 4
reps = 0
Timer = None
# ---------------------------- TIMER RESET ------------------------------- #
def start_button_clicked():
start_timer()
def reset_button_clicked():
reset_timer()
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
global reps, is_reset
reps += 1
start_button.config(state='disabled')
work_sec = WORK_MIN * 60
short_break_sec = SHORT_BREAK_MIN * 60
long_break_sec = LONG_BREAK_MIN * 60
if reps % 8 == 0:
# if its 8th rep, then take long break
status_label.config(text='Break', foreground=RED)
window.attributes('-topmost', 1)
count_down(long_break_sec)
elif reps % 2 == 0:
# if its 2nd/4th/6th rep, then take short break of 5 minutes
status_label.config(text='Break', foreground=PINK)
window.attributes('-topmost', 1)
count_down(short_break_sec)
else:
# if its 1st/3rd/5th/7th rep, then work for 25 minutes
status_label.config(text='Work', foreground=GREEN)
window.attributes('-topmost', 0)
count_down(work_sec)
def reset_timer():
global reps
reps = 0
window.after_cancel(Timer)
canvas.itemconfig(canvas_text, text='00:00')
status_label.config(text='Status', foreground=GREEN)
checkmark.config(text='')
start_button.config(state='active')
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(initial_value):
minute = math.floor(initial_value / 60)
seconds = initial_value % 60
if minute < 10:
minute = f'0{minute}'
if seconds < 10:
seconds = f'0{seconds}'
canvas.itemconfig(canvas_text, text='{0}:{1}'.format(minute, seconds))
if initial_value > 0:
global Timer
Timer = window.after(1000, count_down, initial_value - 1)
else:
start_timer()
if reps % 2 == 0:
checks = ''
work_session = math.floor(reps / 2)
for _ in range(work_session):
checks += '✔️️'
checkmark.config(text=checks)
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title('Pomodoro')
window.config(padx=100, pady=50, bg=YELLOW)
# Label
status_label = Label(text='Status', foreground=GREEN, bg=YELLOW, font=(FONT_NAME, 30))
status_label.grid(row=0, column=1)
# Canvas
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato_image = PhotoImage(file='tomato.png')
canvas.create_image(100, 112, image=tomato_image)
canvas_text = canvas.create_text(100, 130, text="00:00", fill='white', font=(FONT_NAME, 35, 'bold'))
canvas.grid(row=1, column=1)
# button
start_button = Button(text='Start', bg=YELLOW, highlightthickness=0, command=start_button_clicked)
start_button.grid(row=2, column=0)
# reset button
reset_button = Button(text='Reset', bg=YELLOW, highlightthickness=0, command=reset_button_clicked)
reset_button.grid(row=2, column=2)
# checkmark
checkmark = Label(foreground=GREEN, bg=YELLOW)
checkmark.grid(row=3, column=1)
window.mainloop()
| from tkinter import *
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 2
SHORT_BREAK_MIN = 1
LONG_BREAK_MIN = 4
reps = 0
Timer = None
# ---------------------------- TIMER RESET ------------------------------- #
def start_button_clicked():
start_timer()
def reset_button_clicked():
reset_timer()
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
global reps, is_reset
reps += 1
start_button.config(state='disabled')
work_sec = WORK_MIN * 60
short_break_sec = SHORT_BREAK_MIN * 60
long_break_sec = LONG_BREAK_MIN * 60
if reps % 8 == 0:
# if its 8th rep, then take long break
status_label.config(text='Break', foreground=RED)
window.attributes('-topmost', 1)
count_down(long_break_sec)
elif reps % 2 == 0:
# if its 2nd/4th/6th rep, then take short break of 5 minutes
status_label.config(text='Break', foreground=PINK)
window.attributes('-topmost', 1)
count_down(short_break_sec)
else:
# if its 1st/3rd/5th/7th rep, then work for 25 minutes
status_label.config(text='Work', foreground=GREEN)
window.attributes('-topmost', 0)
count_down(work_sec)
def reset_timer():
global reps
reps = 0
window.after_cancel(Timer)
canvas.itemconfig(canvas_text, text='00:00')
status_label.config(text='Status', foreground=GREEN)
checkmark.config(text='')
start_button.config(state='active')
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(initial_value):
minute = math.floor(initial_value / 60)
seconds = initial_value % 60
if minute < 10:
minute = f'0{minute}'
if seconds < 10:
seconds = f'0{seconds}'
canvas.itemconfig(canvas_text, text='{0}:{1}'.format(minute, seconds))
if initial_value > 0:
global Timer
Timer = window.after(1000, count_down, initial_value - 1)
else:
start_timer()
if reps % 2 == 0:
checks = ''
work_session = math.floor(reps / 2)
for _ in range(work_session):
checks += '✔️️'
checkmark.config(text=checks)
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title('Pomodoro')
window.config(padx=100, pady=50, bg=YELLOW)
# Label
status_label = Label(text='Status', foreground=GREEN, bg=YELLOW, font=(FONT_NAME, 30))
status_label.grid(row=0, column=1)
# Canvas
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato_image = PhotoImage(file='tomato.png')
canvas.create_image(100, 112, image=tomato_image)
canvas_text = canvas.create_text(100, 130, text="00:00", fill='white', font=(FONT_NAME, 35, 'bold'))
canvas.grid(row=1, column=1)
# button
start_button = Button(text='Start', bg=YELLOW, highlightthickness=0, command=start_button_clicked)
start_button.grid(row=2, column=0)
# reset button
reset_button = Button(text='Reset', bg=YELLOW, highlightthickness=0, command=reset_button_clicked)
reset_button.grid(row=2, column=2)
# checkmark
checkmark = Label(foreground=GREEN, bg=YELLOW)
checkmark.grid(row=3, column=1)
window.mainloop()
| en | 0.439562 | # ---------------------------- CONSTANTS ------------------------------- # # ---------------------------- TIMER RESET ------------------------------- # # ---------------------------- TIMER MECHANISM ------------------------------- # # if its 8th rep, then take long break # if its 2nd/4th/6th rep, then take short break of 5 minutes # if its 1st/3rd/5th/7th rep, then work for 25 minutes # ---------------------------- COUNTDOWN MECHANISM ------------------------------- # # ---------------------------- UI SETUP ------------------------------- # # Label # Canvas # button # reset button # checkmark | 3.281641 | 3 |
08/08.py | manuelemacchia/advent-of-code-2019 | 1 | 6620179 | <reponame>manuelemacchia/advent-of-code-2019<filename>08/08.py
with open('input.txt', 'r') as f:
data = f.read()
width = 25
height = 6
# Part One
layers = [data[i:i+width*height] for i in range(0, len(data)-1, width*height)]
zeroes_count = [layer.count('0') for layer in layers]
layer_id = zeroes_count.index(min(zeroes_count))
print(layers[layer_id].count('1') * layers[layer_id].count('2'))
# Part Two
black = '0'
white = '1'
transparent = '2'
def calc_pixel(layer):
for pixel in layer:
if pixel == transparent:
continue
return pixel
pixel_layers = [[layer[i] for layer in layers] for i in range(len(layers[0]))]
image = [calc_pixel(pixel_layer) for pixel_layer in pixel_layers]
for i, c in enumerate(image):
if c == black:
print(' ', end='')
elif c == white:
print('#', end='')
if (i+1) % width == 0:
print() | with open('input.txt', 'r') as f:
data = f.read()
width = 25
height = 6
# Part One
layers = [data[i:i+width*height] for i in range(0, len(data)-1, width*height)]
zeroes_count = [layer.count('0') for layer in layers]
layer_id = zeroes_count.index(min(zeroes_count))
print(layers[layer_id].count('1') * layers[layer_id].count('2'))
# Part Two
black = '0'
white = '1'
transparent = '2'
def calc_pixel(layer):
for pixel in layer:
if pixel == transparent:
continue
return pixel
pixel_layers = [[layer[i] for layer in layers] for i in range(len(layers[0]))]
image = [calc_pixel(pixel_layer) for pixel_layer in pixel_layers]
for i, c in enumerate(image):
if c == black:
print(' ', end='')
elif c == white:
print('#', end='')
if (i+1) % width == 0:
print() | en | 0.63833 | # Part One # Part Two | 3.17838 | 3 |
openwater/template.py | flowmatters/openwater | 1 | 6620180 | <filename>openwater/template.py
import os
import sys
import h5py
from subprocess import Popen, PIPE
from queue import Queue, Empty # python 3.x
from threading import Thread
from time import sleep
import numpy as np
import pandas as pd
import networkx as nx
from functools import reduce
from . import nodes as node_types
from .timing import init_timer, report_time, close_timer
from itertools import chain
from .array_params import get_parameter_locations
from .nodes import create_indexed_parameter_table
from .file import _tabulate_model_scalars_from_file
import logging
logger = logging.getLogger(__name__)
# Non blocking IO solution from http://stackoverflow.com/a/4896288
ON_POSIX = 'posix' in sys.builtin_module_names
TAG_PROCESS='_process'
TAG_MODEL='_model'
TAG_RUN_INDEX='_run_idx'
TAG_GENERATION='_generation'
META_TAGS=[TAG_PROCESS,TAG_MODEL,TAG_RUN_INDEX,TAG_GENERATION]
DEFAULT_TIMESTEPS=365
LINK_TABLE_COLUMNS = ['%s_%s'%(n,c) for n in ['src','dest'] for c in ['generation','model','node','gen_node','var']]
def connections_match(o, i):
    """True when exported output flux `o` can be wired to exported input flux `i`.

    Each argument is a (node, flux name, alias, tags) tuple: the aliases must
    be equal and the combined (flux + node) tag sets must not conflict.
    """
    out_node, _, out_alias, out_tags = o
    in_node, _, in_alias, in_tags = i
    if out_alias != in_alias:
        return False
    # Merge flux-level tags with node tags; node tags take precedence
    merged_out = dict(out_tags)
    merged_out.update(out_node.tags)
    merged_in = dict(in_tags)
    merged_in.update(in_node.tags)
    return tags_match(merged_out, merged_in)
def tags_match(o_tags, i_tags):
    """True when the two tag dicts agree on every key they share,
    ignoring the bookkeeping tags '_process' and '_model'."""
    shared_keys = set(o_tags) & set(i_tags)
    for key in shared_keys:
        if key in ('_process', '_model'):
            continue
        if o_tags[key] != i_tags[key]:
            return False
    return True
class OWTemplate(object):
    """A reusable template describing a set of Openwater model nodes and links.

    Templates can be nested (see nest()) and later flattened into a single
    template, and can expose named input/output fluxes so that nested
    templates can be wired together automatically.
    """
    def __init__(self,lbl=''):
        # lbl: optional label carried onto the result when flattening
        self.label=lbl
        self.nodes = []    # OWNode instances added via add_node
        self.links = []    # OWLink instances added via add_link
        self.nested = []   # child OWTemplate instances added via nest()
        self.inputs = []   # exported inputs: (node, flux name, alias, tags) tuples
        self.outputs = []  # exported outputs: (node, flux name, alias, tags) tuples
    def define_input(self,node,name=None,alias=None,**kwargs):
        """Expose an input flux of `node` as an input of this template.

        name: the node's input flux name (required)
        alias: public name for the flux (defaults to `name`)
        kwargs: tags used when matching this input against other templates' outputs
        Raises InvalidFluxException when name is missing or not a valid input.
        """
        if name is None:
            # Would be nice to have a short hand to define every input of this
            # node as an input to the graph (and similarly every output of a node
            # as an output of the graph
            # But the node currently stores the model name)
            raise InvalidFluxException(node,'(no name provided)','input')
        if not node.has_input(name):
            raise InvalidFluxException(node,name,'input')
        if alias is None:
            alias = name
        self.inputs.append((node,name,alias,kwargs))
    def define_output(self,node,name=None,alias=None,**kwargs):
        """Expose an output flux of `node` as an output of this template.

        See define_input for parameter meanings.
        """
        if not node.has_output(name):
            raise InvalidFluxException(node,name,'output')
        if alias is None:
            alias = name
        self.outputs.append((node,name,alias,kwargs))
    def _has_flux(self,alias,fluxes):
        # True if any exported flux tuple in `fluxes` uses the given alias
        for fl in fluxes:
            if fl[2]==alias:
                return True
        return False
    def has_input(self,alias):
        """True if this template exports an input flux under `alias`."""
        return self._has_flux(alias,self.inputs)
    def add_node(self,model_type=None,name=None,process=None,**tags):
        """Create an OWNode of `model_type`, record it and return it.

        process: convenience shortcut for setting the _process tag
        tags: arbitrary tags recorded on the node (used for matching/naming)
        """
        # if hasattr(node_or_name,'model_type'):
        #     self.nodes.append(node_or_name)
        # else:
        if process and not TAG_PROCESS in tags:
            tags[TAG_PROCESS]=process
        new_node = OWNode(model_type,name,**tags)
        self.nodes.append(new_node)
        return new_node
    def add_link(self,link):
        """Add an existing OWLink to the template."""
        self.links.append(link)
    def add_conditional_link(self,from_node,from_output,to_node,possibly_inputs,model):
        """Link from_output to the first name in `possibly_inputs` that the
        destination model actually accepts. Returns True if a link was made."""
        for pi in possibly_inputs:
            if pi in model.description['Inputs']:
                self.add_link(OWLink(from_node,from_output,to_node,pi))
                return True
        return False
    def match_labelled_flux(self,fluxes,flux_name,flux_tags,exclude_tags):
        """Find an exported flux matching `flux_name` and the given tags.

        fluxes: self.inputs or self.outputs
        flux_tags: tag keys that must all be present (and agree) on the candidate
        exclude_tags: tag *keys* that disqualify a candidate when present
        Returns (node, flux name) or (None, None) when nothing matches.
        """
        required_tags = set(flux_tags.keys())
        for node,name,alias,stored_tags in fluxes:
            if flux_name != alias:
                continue
            # Combine flux-level tags with node tags; node tags take precedence
            stored_tags = dict(**stored_tags)
            stored_tags.update(**node.tags)
            if len(required_tags.difference(stored_tags.keys())):
                continue
            skip = False
            for et in exclude_tags:
                if et in stored_tags:
                    skip = True
                    break
            if skip: continue
            if tags_match(flux_tags,stored_tags):
                return node, name
        return None,None
    def make_link(self,output_name,input_name,
                  from_node=None,from_tags={},from_exclude_tags=[],
                  to_node=None,to_tags={},to_exclude_tags=[]):
        """Create (but do not add) an OWLink between two exported fluxes.

        When from_node/to_node are not given, the endpoints are resolved by
        matching output_name/input_name (plus tags) against the template's
        exported outputs/inputs. Raises Exception when no match is found.
        NOTE(review): the mutable default arguments ({} and []) are only read
        here, so they are harmless, but None defaults would be safer.
        """
        link_txt = f'{output_name}({from_node or str(from_tags)}) -> {input_name}({to_node or str(to_tags)})'
        if from_node is None:
            from_node, new_output_name = self.match_labelled_flux(
                self.outputs,output_name,from_tags,from_exclude_tags)
            if from_node is None or new_output_name is None:
                n_outputs = len(self.outputs)
                raise Exception('%s: No matching output for %s, with tags %s. Have %d outputs'%(link_txt,new_output_name or output_name,str(from_tags),n_outputs))
            output_name = new_output_name
        if to_node is None:
            to_node, new_input_name = self.match_labelled_flux(
                self.inputs,input_name,to_tags,to_exclude_tags)
            if to_node is None or new_input_name is None:
                raise Exception('%s: No matching input for %s, with tags %s'%(link_txt,new_input_name or input_name,str(to_tags)))
            input_name = new_input_name
        return OWLink(from_node,output_name,to_node,input_name)
    def flatten(self):
        '''
        Generate a single, flat template containing all nested
        templates, instantiating links between nested templates based
        on input and output descriptions.
        When instantiating links, the order of the nested templates matters,
        with links only instantiated from outputs of earlier nested templates to
        inputs of later nested templates.
        '''
        result = OWTemplate(self.label)
        result.nodes += self.nodes
        result.links += self.links
        result.inputs += self.inputs
        result.outputs += self.outputs
        # Recursively flatten children, preserving their declared order
        flattened = [t.flatten() for t in self.nested]
        available_outputs = []
        used_outputs = []
        for child in flattened:
            result.nodes += child.nodes
            result.links += child.links
            for child_input in child.inputs:
                input_linked = False
                for previous_output in available_outputs:
                    if connections_match(previous_output,child_input):
                        # print('linking',previous_output,child_input)
                        result.add_link(OWLink(previous_output[0],previous_output[1],child_input[0],child_input[1]))
                        used_outputs.append(previous_output)
                        input_linked = True
                if not input_linked:
                    # Unmatched child inputs become inputs of the flat template
                    result.inputs.append(child_input)
            # Only expose this child's outputs to *later* children
            available_outputs += child.outputs
        #unused_outputs = set(available_outputs).difference(set(used_outputs))
        unused_outputs = [o for o in available_outputs if not o in used_outputs]
        result.outputs+= list(unused_outputs)
        return result
    def nest(self,other):
        '''
        Add all nodes and links from other to this template,
        connecting all outputs from this template to inputs in other AND
        '''
        self.nested.append(other)
    def instantiate(self,**instance_tags):
        """Return a copy of this template with `instance_tags` applied to every
        node. Links are recreated against the copied nodes."""
        res = OWTemplate()
        node_map = {}
        for n in self.nodes:
            new_node = res.add_node(n.model_type,**n.tags,**instance_tags)
            node_map[n.name] = new_node
        for l in self.links:
            new_from = node_map[l.from_node.name]
            new_to = node_map[l.to_node.name]
            new_link = res.add_link(OWLink(new_from,l.from_output,new_to,l.to_input))
        return res
class OWNode(object):
    """A single model instance in a template: a model type plus descriptive
    tags, with a name derived from the tags when none is supplied."""
    def __init__(self, model_type, name=None, **tags):
        self.model_type = model_type
        if hasattr(model_type, 'name'):
            # Already a resolved model-type object
            self.model_name = model_type.name
            self.model_type = model_type
        else:
            # A bare model name: resolve it against the discovered node types
            self.model_name = model_type
            import openwater.nodes as node_types
            from openwater.discovery import discover
            discover()
            self.model_type = getattr(node_types, self.model_name)
        self.tags = tags
        self.tags[TAG_MODEL] = self.model_name
        self.name = name if name else self.make_name()
    def make_name(self):
        """Build a default node name by joining tag values: well-known tags
        first, then any remaining non-underscore tags in sorted order."""
        ordered_keys = ['catchment', 'model', TAG_PROCESS, 'constituent', 'hru', 'lu']
        for key in sorted(self.tags.keys()):
            if key.startswith('_'):
                continue
            if key not in ordered_keys:
                ordered_keys.append(key)
        parts = [str(self.tags.get(key, None)) for key in ordered_keys if key in self.tags]
        return '-'.join(parts)
    def __str__(self):
        return '%s (%s)' % (self.name, self.model_name)
    def has_output(self, name):
        """True if the model type declares `name` as an output flux."""
        return name in self.model_type.description['Outputs']
    def has_input(self, name):
        """True if the model type declares `name` as an input flux."""
        return name in self.model_type.description['Inputs']
class OWLink(object):
    """A directed connection from one node's output flux to another node's
    input flux. Both endpoints are validated against the model descriptions."""
    def __init__(self, from_node, from_output, to_node, to_input):
        # All four endpoint components are mandatory
        for component in (from_node, from_output, to_node, to_input):
            assert component is not None
        if not from_node.has_output(from_output):
            raise InvalidFluxException(from_node, from_output, 'output')
        if not to_node.has_input(to_input):
            raise InvalidFluxException(to_node, to_input, 'input')
        self.from_node = from_node
        self.from_output = from_output
        self.to_node = to_node
        self.to_input = to_input
# class OWSystem(object):
# def __init__(self):
# self.nodes = []
# self.links = []
# def add_node(self,name,)
def template_to_graph(g:nx.DiGraph,tpl:OWTemplate,allow_duplicates=False,**tags) -> nx.DiGraph:
    """Instantiate a template (with extra tags) and merge it into a graph.

    Parameters
    ----------
    g: nx.DiGraph
        graph to add to; when falsy, a new DiGraph is created
    tpl: OWTemplate
        template whose nodes and links are added
    allow_duplicates:
        when False (default), raise on a link that repeats an existing
        (source flux, destination flux) pair between the same two nodes
    tags:
        extra tags applied to every node when instantiating the template

    Returns
    -------
    nx.DiGraph
        the graph passed in as g, or the newly created one

    Raises
    ------
    Exception
        on a duplicate link when allow_duplicates is False
    """
    if not g:
        g = nx.DiGraph()
    instantiated = tpl.instantiate(**tags)
    node_lookup = {}
    for node in instantiated.nodes:
        label = str(node)
        g.add_node(label, **node.tags)
        node_lookup[label] = node
    for link in instantiated.links:
        key = (str(node_lookup[str(link.from_node)]), str(node_lookup[str(link.to_node)]))
        if key not in g.edges:
            # First link between this pair of nodes: start the src/dest lists
            g.add_edge(key[0], key[1], src=[link.from_output], dest=[link.to_input])
            continue
        edge_data = g.edges[key]
        is_duplicate = (link.from_output in edge_data['src']) and (link.to_input in edge_data['dest'])
        if is_duplicate and not allow_duplicates:
            raise Exception(f'Duplicate link along {key}, between {link.from_output} and {link.to_input}')
        edge_data['src'].append(link.from_output)
        edge_data['dest'].append(link.to_input)
    return g
def node_matches(n, **kwargs):
    """True when the attribute dict `n` contains every key=value pair in kwargs."""
    return all(key in n and n[key] == expected for key, expected in kwargs.items())
def match_nodes(g, **kwargs):
    """Names of nodes in graph g whose attributes contain every key=value pair given."""
    return [name for name in g.nodes if node_matches(g.nodes[name], **kwargs)]
def model_type(n):
    """Extract the model name from a node label of the form 'name (Model)'."""
    label = str(n)
    model_part = label.split('(')[1]
    return model_part[:-1]
def group_run_order(g):
    """Compute ancestor sets and group node names by (model type, ancestor count).

    Returns a tuple of:
    - ancestors_by_node: node -> set of ancestor nodes
    - by_node_type_gen: model type -> {ancestor count -> [nodes]}
    - node_gen: node -> ancestor count (its 'generation')
    """
    ancestors_by_node = {}
    by_node_type_gen = {}
    node_gen = {}
    for node in list(g.nodes):
        node_ancestors = nx.ancestors(g, node)
        ancestors_by_node[node] = node_ancestors
        generation = len(node_ancestors)
        node_gen[node] = generation
        mt = model_type(node)
        by_node_type_gen.setdefault(mt, {}).setdefault(generation, []).append(node)
    return ancestors_by_node, by_node_type_gen, node_gen
def assign_stages(order, node_gen, by_node_type_gen):
    """Place each (model type, generation) group of nodes into the stage
    numbered by its generation, visiting groups in topological order."""
    done = {}
    stages = []
    group_index = 0
    for node in order:
        if node in done:
            continue
        generation = node_gen[node]
        group = by_node_type_gen[model_type(node)][generation]
        # Grow the stage list so index `generation` exists
        while len(stages) <= generation:
            stages.append([])
        stages[generation] += group
        for member in group:
            done[member] = group_index
        group_index += 1
    return stages
def map_stages(stages):
    """Invert a list of stages into a node -> stage-index mapping."""
    return {node: stage_idx for stage_idx, stage in enumerate(stages) for node in stage}
THRESHOLD_N_JOBS = 500

def find_first_small_stage(stages):
    """Index of the first stage holding fewer than THRESHOLD_N_JOBS jobs, or -1 if none."""
    candidates = (idx for idx, stage in enumerate(stages) if len(stage) < THRESHOLD_N_JOBS)
    return next(candidates, -1)
def flatten(l_of_l):
    """Concatenate a sequence of iterables into a single list."""
    flat = []
    for sub in l_of_l:
        flat.extend(sub)
    return flat
descendants_by_node = {}
imm_descendants_by_node = {}

def descendants_cached(g, n):
    """All (transitive) descendants of n in g, memoised in a module-level cache."""
    try:
        return descendants_by_node[n]
    except KeyError:
        found = nx.descendants(g, n)
        descendants_by_node[n] = found
        return found

def immediate_descendants_cached(g, n):
    """Direct successors of n in g, memoised in a module-level cache."""
    try:
        return imm_descendants_by_node[n]
    except KeyError:
        found = set(g.successors(n))
        imm_descendants_by_node[n] = found
        return found
def bring_forward(g,stages):
    """Compact the schedule by pulling jobs into earlier stages.

    For each stage (starting at the first 'small' stage), any job in the
    *next* stage that is not a descendant of a job in the current stage can
    safely run one stage earlier, so it is moved forward. Empty stages are
    dropped before returning. Mutates `stages` in place and returns the
    filtered list.
    NOTE(review): when every stage is large, find_first_small_stage returns
    -1 and the loop starts at index -1 (the last stage) - confirm this edge
    case is intended.
    """
    init_timer('bring_forward')
    first_small_stage = find_first_small_stage(stages)
    for i in range(first_small_stage,len(stages)-1):
        si = stages[i]
        if not len(si):
            continue
        # Union of all descendants of the jobs currently in stage i
        all_descendents = set(flatten([descendants_cached(g,n) for n in si]))
        si1 = stages[i+1]
        # Jobs in stage i+1 with no dependency on stage i can move up
        candidates = [n for n in si1 if not n in all_descendents]
        if len(candidates):
            stages[i] += candidates
            stages[i+1] = list(set(stages[i+1]) - set(candidates)) # [n for n in stages[i+1] if not n in candidates] # set difference?
    stages = [s for s in stages if len(s)]
    close_timer()
    return stages
def latest_possible(g, n, n_stages, node_stages):
    """Latest stage node n could occupy while still running before all of its
    immediate descendants.

    Parameters:
    g: the model graph
    n: the node being considered for deferral
    n_stages: total number of stages in the current schedule
    node_stages: mapping of node -> current stage index

    Returns n's current stage when a descendant already occupies the very
    next stage (n cannot move later), otherwise one stage before the
    earliest descendant (or n_stages - 1 when no descendant constrains n).

    Improvements over the original: dead commented-out code and a debug
    print removed; the opaque Exception('Boo') replaced with an informative
    message (exception type unchanged).
    """
    current = node_stages[n]
    next_stage = current + 1
    descendants = immediate_descendants_cached(g, n)
    lowest = n_stages
    for d in descendants:
        descendent_stage = node_stages[d]
        if descendent_stage >= lowest:
            continue
        if descendent_stage == next_stage:
            # A descendant sits in the very next stage: n is already as late as possible
            return current
        lowest = descendent_stage
        if not lowest:
            # A descendant in stage 0 means n (its ancestor) has no valid stage
            raise Exception(
                'Invalid schedule: node %s (stage %d) has descendant %s in stage 0' % (n, current, d))
    return lowest - 1
#shifts = 0
# def push_back_orig(g,stages):
# first_small_stage = find_first_small_stage(stages)
# # visited = {}
# global shifts
# node_stages = map_stages(stages)
# count = 0
# nodes_downstream = 0
# for i in range(len(stages)-1,-1,-1):
# stage_nodes = stages[i]
# nodes_downstream += len(stage_nodes)
# print(i)
# for n in stage_nodes:
# # if (n in visited) and visited[n]==i:
# # # Node visited as an ancestor and not moved, so no reason to look further at ancestors
# # continue
# ancestors = ancestors_by_node[n]
# for a in ancestors:
# current_stage = node_stages[a]
# # visited[a]=current_stage
# if current_stage == (i-1):
# continue # Already as late as possible
# new_stage = latest_possible(g,a,len(stages),node_stages)
# if new_stage==current_stage:
# continue
# shifts += 1
# stages[new_stage].append(a)
# stages[current_stage].remove(a)
# node_stages[a] = new_stage
# #print(i,n,a,current_stage,new_stage)
# #count += 1
# #assert(count<10)
# stages = [s for s in stages if len(s)]
# return stages
# @profile
def push_back_ss(g,stages):
    """Defer jobs to the latest possible stage ('push back', single sweep).

    Walks stages from last to first; for each job's ancestors, asks
    latest_possible() for the latest stage the ancestor can occupy without
    running after its own descendants, and records the move. Moves are
    applied lazily via to_add/to_remove when each stage index is visited,
    while node_stages is kept in sync immediately. Empty stages are dropped
    on return. Mutates `stages` in place and returns the filtered list.
    Relies on the module-level ancestors_by_node populated by
    compute_simulation_order().
    """
    init_timer('push_back_ss')
    # Pending moves, keyed by stage index, applied when that stage is visited
    to_remove = {n:[] for n in range(len(stages))}
    to_add = {n:[] for n in range(len(stages))}
    # first_small_stage = find_first_small_stage(stages)
    visited = {}
    # init_timer('map node stages')
    node_stages = map_stages(stages)
    # close_timer()
    count = 0
    nodes_downstream = 0
    # global shifts
    for i in range(len(stages)-1,-1,-1):
        # init_timer('stage %d'%i)
        # Apply any pending additions/removals for this stage
        stage_nodes = list(set(stages[i]).union(set(to_add[i])) - set(to_remove[i]))
        stages[i] = stage_nodes
        nodes_downstream += len(stage_nodes)
        for n in stage_nodes:
            already_visited = n in visited
            if already_visited and visited[n]==i:
                # Node last visited as an ancestor and not moved, so no reason to look further at ancestors
                continue
            ancestors = ancestors_by_node[n]
            for a in ancestors:
                current_stage = node_stages[a]
                visited[a]=current_stage
                if current_stage == (i-1):
                    continue # Already as late as possible
                new_stage = latest_possible(g,a,len(stages),node_stages)
                if new_stage==current_stage:
                    continue
                # shifts += 1
                to_add[new_stage].append(a)
                to_remove[current_stage].append(a)
                #stages[new_stage].append(a)
                #stages[current_stage].remove(a)
                node_stages[a] = new_stage
                #print(i,n,a,current_stage,new_stage)
                #count += 1
                #assert(count<10)
        # close_timer()
    stages = [s for s in stages if len(s)]
    close_timer()
    return stages
def compute_simulation_order(graph):
    """Compute a staged (parallelisable) run order for the model graph.

    Builds an initial staging from topological order (batching same-type,
    same-depth nodes), then alternately applies bring_forward and
    push_back_ss until the number of stages stops shrinking. Each returned
    stage is a list of node names that can run concurrently.

    Side effects: resets/sets the module globals descendants_by_node and
    ancestors_by_node used by the staging helpers.
    """
    init_timer('compute_simulation_order')
    init_timer('Get basic order')
    global descendants_by_node
    descendants_by_node = {}
    g = graph
    sequential_order = list(nx.topological_sort(g))
    global ancestors_by_node #, node_ancestry_df, node_descendent_df
    ancestors_by_node,by_node_type_gen,node_gen = group_run_order(g)
    stages = assign_stages(sequential_order,node_gen,by_node_type_gen)
    stages = [s for s in stages if len(s)]
    # report_time('create node ancestry dataframe for %d nodes'%len(ancestors_by_node))
    # node_ancestry_df = pd.DataFrame(data=False,index=list(g.nodes),columns=list(g.nodes))
    # for k,ancestors in ancestors_by_node.items():
    #     node_ancestry_df[k][ancestors] = True
    # node_descendent_df = node_ancestry_df.transpose()
    report_time('Grouping model stages/generations')
    n_stages = len(stages)
    new_n_stages = 0
    iteration = 1
    # Iterate until a full forward+backward pass no longer reduces the stage count
    while new_n_stages<n_stages:
        init_timer('Iteration %d'%iteration)
        n_stages = len(stages)
        stages = bring_forward(g,stages)
        stages = push_back_ss(g,stages)
        new_n_stages = len(stages)
        iteration += 1
        close_timer()
    close_timer()
    close_timer()
    return stages
def tag_set(nodes):
    """Return the union of tag/key names across a collection of node dicts.

    Returns an empty set for an empty collection. (The previous
    reduce-based implementation raised TypeError when nodes was empty,
    because reduce has no initial value for an empty sequence.)
    """
    return set().union(*(set(n.keys()) for n in nodes))
def proc_model(node):
    """Key combining a node's process and model tags, e.g. 'runoff/GR4J'."""
    return f"{node[TAG_PROCESS]}/{node[TAG_MODEL]}"
def match_model_name(node_name):
    # NOTE(review): relies on a free global `g` (a graph) that is not assigned
    # at module level in this file - this only works when such a global exists
    # in the calling context. TODO confirm intended usage.
    return g.nodes[node_name][TAG_MODEL]
    # return np.string_(re.match(re.compile('.*\(([\w\d]+)\)'),node_name)[1])
def sort_nodes(nodes):
    """Order a group of node names deterministically.

    Ordering is currently lexicographic by name; ultimately this may be
    driven by node tags instead.
    """
    ordered = sorted(nodes)
    return ordered
class ModelGraph(object):
    """An Openwater model network backed by a networkx DiGraph.

    On initialisation, computes the parallel simulation order (stages of
    independent model runs), tags each node with its generation and run
    index, and derives the tag dimensions used to lay out the HDF5 model
    file written by write_model().
    """
    def __init__(self,graph,initialise=True,time_period=None):
        self._graph = graph
        self._parameteriser = None  # optional parameteriser applied while writing model groups
        self._last_write = None     # destination of the most recent write_model() call
        self.time_period = time_period
        if initialise:
            self.initialise()
    def initialise(self):
        """Compute the simulation order and derive tag/dimension metadata."""
        init_timer('Compute simulation order')
        self.order = compute_simulation_order(self._graph)
        report_time('Tag nodes')
        # Record each node's stage index as its generation tag
        for i,gen in enumerate(self.order):
            for node in gen:
                self._graph.nodes[node][TAG_GENERATION]=i
        self.sequence = flatten(self.order)
        nodes = self._graph.nodes
        self.model_names = list({n[TAG_MODEL] for n in self._graph.nodes.values()} )
        # Group node names and attribute dicts by their process/model combination
        proc_models = {proc_model(nodes[n]) for n in nodes}
        node_names_by_process_and_model = {pm:[n for n in nodes if proc_model(nodes[n])==pm] for pm in proc_models}
        nodes_by_process_and_model = {pm:[nodes[n] for n in proc_nodes] for pm,proc_nodes in node_names_by_process_and_model.items()}
        tags_by_process_and_model = {p:list(tag_set(nodes)-set(META_TAGS)) for p,nodes in nodes_by_process_and_model.items()}
        self.all_tags = set().union(*tags_by_process_and_model.values())
        # All distinct values observed for each (non-meta) tag, sorted for stable indexing
        self.distinct_values = {t:sorted(set([nodes[n][t] for n in nodes if t in nodes[n]])) for t in self.all_tags}
        report_time('Assign runtime metadata')
        for pm in proc_models:
            node = nodes_by_process_and_model[pm][0]
            self.assign_run_indices(node[TAG_PROCESS],node[TAG_MODEL])
        close_timer()
    def assign_run_indices(self,proc,model_type):
        '''
        Assign run indices to each model run within a given process, p (eg 'rainfall runoff')

        Indices increase stage by stage (and by sorted node name within a
        stage), so runs belonging to one stage occupy a contiguous range.
        '''
        i = 0
        nodes = self._graph.nodes
        for gen in self.order:
            relevant_gen = [n for n in gen if nodes[n][TAG_MODEL]==model_type] # and nodes[n][TAG_PROCESS]==proc] ### BAD ASSUMPTION!
            relevant_gen = sort_nodes(relevant_gen)
            for n in relevant_gen:
                node = nodes[n]
                node[TAG_RUN_INDEX] = i
                i += 1
    def nodes_matching(self,model,**tags):
        """Return {node name: attribute dict} for nodes of the given model type
        whose attributes include all the supplied tag values."""
        if hasattr(model,'name'):
            model = model.name
        if not '_model' in tags:
            tags['_model'] = model
        def tags_match(node):
            # Every requested tag must be present on the node with an equal value
            for k,v in tags.items():
                if not k in node or node[k] != v:
                    return False
            return True
        return {n:self._graph.nodes[n] for n in self._graph.nodes if tags_match(self._graph.nodes[n])}
    def write_model(self,f,timesteps=DEFAULT_TIMESTEPS):
        """Write this model network to an Openwater HDF5 model file.

        f may be a filename or an open h5py.File-like object; the file is
        only closed here when this method opened it.
        """
        init_timer('Write model file')
        init_timer('Write meta and dimensions')
        close = False
        if hasattr(f,'upper'):
            # A string was passed - treat it as a filename
            h5f = h5py.File(f,'w')
            close = True
        else:
            h5f = f
        try:
            self._write_meta(h5f)
            report_time('Write model groups')
            self._write_model_groups(h5f,timesteps)
            report_time('Write links')
            self._write_links(h5f)
            report_time('Write dimensions')
            self._write_dimensions(h5f)
        finally:
            if close: h5f.close()
        self._last_write=f
        close_timer()
        close_timer()
    def run(self,time_period,model_fn=None,results_fn=None,**kwargs):
        '''
        Write the model (if model_fn given) and run ow-sim over it.

        kwargs: Arguments and fflags to pass directly to ow-sim, including:

        * overwrite (boolean): Overwrite existing output file if it exists
        * verbose (boolean): Show verbose logging during simulation
        '''
        if model_fn:
            self.write_model(model_fn,len(time_period))
            model_fn = self._last_write
        if not model_fn:
            raise Exception('model_fn not provided and model not previously saved')
        return _run(time_period,model_fn,results_fn,**kwargs)
    def _flux_number(self,node_name,flux_type,flux_name):
        """Index of a named flux within the node's model description.

        flux_type is 'input'/'output' (case-insensitive; singular or plural).
        """
        node = self._graph.nodes[node_name]
        model_type = node[TAG_MODEL]
        desc = getattr(node_types,model_type).description
        flux_type = flux_type.capitalize()
        if not flux_type.endswith('s'):
            flux_type += 's'
        if not flux_name in desc[flux_type]:
            raise Exception('Unknown %s flux %s on %s'%(flux_type[:-1],flux_name,model_type))
            #return -1
        return desc[flux_type].index(flux_name)
    def _write_meta(self,h5f):
        """Write the META group: model names and (optionally) the time period."""
        meta = h5f.create_group('META')
        meta.create_dataset('models',
            data=[np.string_(n) for n in self.model_names],
            dtype='S%d'%max([len(mn) for mn in self.model_names]))
        if self.time_period is not None:
            dates = np.array([ts.isoformat() for ts in self.time_period],dtype=h5py.special_dtype(vlen=str))
            meta.create_dataset('timeperiod',data=dates)
    def _write_dimensions(self,f):
        """Write the DIMENSIONS group: the distinct values of every tag."""
        dimensions = f.create_group('DIMENSIONS')
        for t in self.all_tags:
            vals = self.distinct_values[t]
            if hasattr(vals[0],'__len__'):
                # String-valued dimension - store as fixed-width byte strings
                vals = [np.string_(v) for v in vals]
            dimensions.create_dataset(t,data=vals)
    def _map_process(self,node_set):
        '''
        For a given model (eg 'GR4J'), organise all model runs
        by the parameterisation dimensions (eg catchment x hru) and assign indices
        '''
        nodes = self._graph.nodes
        def dim_tags(node_name):
            node = nodes[node_name]
            keys = node.keys()
            return set(keys) - set(META_TAGS)
        dimsets = {frozenset(dim_tags(n)) for n in node_set}
        all_dims = set(chain.from_iterable(dimsets))
        # assert len(dimsets)==1 # don't support one process having different dimensions
        # Should at least support attributes (tags that only ever have one value)
        dimensions = all_dims # list(dimsets)[0]
        # dim_values = {d:sorted({nodes[n][d] for n in node_set}) for d in dimensions}
        if len(dimsets) > 1:
            # Nodes disagree on their tag sets - fill gaps with per-dimension dummy values
            print('Populating nodes with dummy dimension values')
            for dim in dimensions:
                added_dummy = False
                dummy_val = f'dummy-{dim}'
                for node in node_set:
                    if not dim in nodes[node]:
                        nodes[node][dim] = dummy_val
                    if not added_dummy and (dummy_val not in self.distinct_values[dim]):
                        self.distinct_values[dim].append(dummy_val)
                        added_dummy = True
        dim_values = {d:sorted({nodes[n][d] for n in node_set}) for d in dimensions}
        # Tags with a single value across multiple nodes become attributes, not dimensions
        attributes = {d:vals[0] for d,vals in dim_values.items() if (len(vals)==1) and (len(node_set)>1)}
        dimension_values = {d:vals for d,vals in dim_values.items() if (len(vals)>1) or (len(node_set)==1)}
        dimensions = [d for d in dimensions if not d in attributes]
        if not len(dimensions):
            print(attributes)
            print(len(node_set))
            # NOTE(review): raising a string is a TypeError in Python 3 -
            # presumably intended to be raise Exception('No dimensions')
            raise 'No dimensions'
        if len([d for d in dimensions if len(dimension_values[d])==0]):
            print('Dimension(s) with 0 length:',dimension_values)
            raise Exception('Dimension(s) with 0 length')
        # dims = tags_by_process[p]
        # dimensions = [distinct_values[d] for d in dims]
        shape = tuple([len(self.distinct_values[d]) for d in dimensions])
        model_instances = np.ones(shape=shape,dtype=np.uint32) * -1
        for node_name in node_set:
            node = nodes[node_name]
            loc = tuple([self.distinct_values[d].index(node[d]) for d in dimensions])
            if len(loc) < len(shape):
                print(loc,node)
            model_instances[loc] = node[TAG_RUN_INDEX]
        return dimension_values, attributes,model_instances
    def _write_model_groups(self,f,n_timesteps):
        """Write a MODELS/<name> group per model type: the run-index map,
        per-generation batch boundaries, and default-initialised states and
        parameters (optionally filled by self._parameteriser)."""
        models_grp = f.create_group('MODELS')
        nodes = self._graph.nodes
        models = {nodes[n][TAG_MODEL] for n in nodes}
        self.model_batches = {}
        for mx,m in enumerate(models):
            model_msg ='Writing model %s (%d/%d)'%(m,mx+1,len(models))
            init_timer(model_msg)
            print(model_msg)
            model_grp = models_grp.create_group(m)
            model_nodes = [n for n in nodes if nodes[n][TAG_MODEL]==m]
            processes_for_model = {nodes[n][TAG_PROCESS] for n in model_nodes}
            # assert(len(processes_for_model)==1) # not necessary?
            dims,attributes,instances = self._map_process(model_nodes)
            ds = model_grp.create_dataset('map',dtype=instances.dtype,data=instances,fillvalue=-1)
            # write out model index
            ds.attrs['PROCESSES']=[np.string_(s) for s in list(processes_for_model)]
            ds.attrs['DIMS']=[np.string_(d) for d in dims]
            for attr,val in attributes.items():
                ds.attrs[attr]=val
            # Cumulative run counts per generation - used to batch runs by stage
            self.model_batches[m] = np.cumsum([len([n for n in gen if nodes[n][TAG_MODEL]==m]) for gen in self.order])
            model_meta = getattr(node_types,m)
            if hasattr(model_meta,'description'):
                desc = model_meta.description
                n_states = len(desc['States']) # Compute, based on parameters...
                n_params = len(desc['Parameters'])
                n_inputs = len(desc['Inputs'])
            else:
                # Fall back to placeholder sizes when no description is available
                print('No description for %s'%m)
                desc = None
                n_states = 3
                n_params = 4
                n_inputs = 2
            # batch_counts = [len(mc.get(m,[])) for mc in model_counts_by_generation]
            model_grp.create_dataset('batches',shape=(len(self.order),),dtype=np.uint32,data=self.model_batches[m],fillvalue=-1)
            n_cells = len(model_nodes) # instances.size
            # Init states....
            model_grp.create_dataset('states',shape=(n_cells,n_states),dtype=np.float64,fillvalue=0)
            model_grp.create_dataset('parameters',shape=(n_params,n_cells),dtype=np.float64,fillvalue=0)
            # model_grp.create_dataset('inputs',shape=(n_cells,n_inputs,n_timesteps),dtype=np.float64,fillvalue=0)
            if (self._parameteriser is not None) and (desc is not None):
                node_dict = {n:nodes[n] for n in model_nodes}
                nodes_df = pd.DataFrame([nodes[n] for n in model_nodes])
                for k,v in attributes.items():
                    nodes_df[k] = v
                full_dims = dict(**dims,**attributes)
                init_timer('Parameterisation')
                self._parameteriser.parameterise(model_meta,model_grp,instances,full_dims,node_dict,nodes_df)
                close_timer()
            close_timer()
    def gen_index(self,node):
        """Convert a node's global run index to its index within its generation."""
        global_idx = node['_run_idx']
        model_name = node['_model']
        gen = node['_generation']
        if gen:
            start_of_gen = self.model_batches[model_name][gen-1]
        else:
            start_of_gen = 0
        return global_idx - start_of_gen
    def link_table(self):
        """Build the link table (one row per flux connection) as a DataFrame,
        sorted by source then destination generation/model/node."""
        model_lookup = dict([(m,i) for i,m in enumerate(self.model_names)])
        link_table = []
        for l_from,l_to in self._graph.edges:
            link_data = self._graph.edges[(l_from,l_to)]
            # Each edge may carry several parallel fluxes (src[i] -> dest[i])
            for src_var,dest_var in zip(link_data['src'],link_data['dest']):
                link = {}
                f_node = self._graph.nodes[l_from]
                t_node = self._graph.nodes[l_to]
                link['src_generation'] = f_node['_generation']
                link['src_model'] = model_lookup[f_node['_model']]
                link['src_node'] = f_node['_run_idx']
                link['src_gen_node'] = self.gen_index(f_node)
                link['src_var'] = self._flux_number(l_from,'output',src_var)
                link['dest_generation'] = t_node['_generation']
                link['dest_model'] = model_lookup[t_node['_model']]
                link['dest_node'] = t_node['_run_idx']
                link['dest_gen_node'] = self.gen_index(t_node)
                link['dest_var'] = self._flux_number(l_to,'input',dest_var)
                link_table.append(link)
        link_table = pd.DataFrame(link_table)
        col_order = LINK_TABLE_COLUMNS
        link_table = link_table[col_order]
        sort_order = ['src_generation','src_model','src_gen_node','dest_generation','dest_model','dest_gen_node']
        return link_table.sort_values(sort_order)
    def _write_links(self,f):
        """Write the link table as the uint32 LINKS dataset."""
        table = np.array(self.link_table())
        f.create_dataset('LINKS',dtype=np.uint32,data=table)
def dim_val(v):
    """Decode a bytes dimension label to str; pass any other value through."""
    return v.decode() if hasattr(v, 'decode') else v
class ModelFile(object):
    """Read/update access to an existing Openwater HDF5 model file.

    Loads dimensions, links and model metadata on open; provides tag-based
    queries over model runs, parameter/state tables, and a write() that
    re-applies a parameteriser to the stored model groups.
    """
    def __init__(self,fn):
        self.filename = fn
        import h5py
        self._h5f = h5py.File(self.filename,'r')
        # tag name -> list of distinct values (decoded from bytes where needed)
        self._dimensions = {k:[dim_val(d) for d in self._h5f['DIMENSIONS'][k][...]] for k in self._h5f['DIMENSIONS']}
        # print(self._dimensions)
        self._links = pd.DataFrame(self._h5f['LINKS'][...],columns=LINK_TABLE_COLUMNS)
        self._models = self._h5f['META']['models'][...]
        if 'timeperiod' in self._h5f['META']:
            timesteps = [d for d in self._h5f['META']['timeperiod'][...]]
            if isinstance(timesteps[0],bytes):
                timesteps = [d.decode() for d in timesteps]
            self.time_period = pd.DatetimeIndex([pd.Timestamp.fromisoformat(d) for d in timesteps])
        self._parameteriser = None
    def _matches(self,model,**tags):
        """True if the model's run map has any run at the given tag values."""
        model_dims = [d.decode() for d in self._h5f['MODELS'][model]['map'].attrs['DIMS']]
        # print(model_dims)
        lookup = {}
        for tag,value in tags.items():
            if not tag in model_dims:
                return False
            if not value in self._dimensions[tag]:
                return False
            lookup[tag] = self._dimensions[tag].index(value)
        # Fix requested tags to their index; slice over all other dimensions
        idx = [lookup.get(d,slice(None,None)) for d in model_dims]
        # print(model,list(zip(model_dims,idx)))
        # NOTE(review): unset map cells are written as uint32 -1 (4294967295),
        # which also satisfies > 0 - this assumes the sliced region is
        # meaningfully populated. TODO confirm.
        return np.any(self._h5f['MODELS'][model]['map'][tuple(idx)] > 0)
    def models_matching(self,**tags):
        """Names of models with at least one run matching the given tag values."""
        result = []
        for k in self._h5f['MODELS']:
            # print(k)
            if self._matches(k,**tags):
                result.append(k)
        return result
    def _map_model_dims(self,model):
        """Tabulate a model's run map: {dim name: values..., '_run_idx': run indices}."""
        model_map = self._h5f['MODELS'][model]['map'][...]
        m_dims = [dim_val(d) for d in self._h5f['MODELS'][model]['map'].attrs['DIMS']]
        dims = {d:self._h5f['DIMENSIONS'][d][...] for d in m_dims}
        dim_indices = list(zip(*np.where(model_map>=0)))#np.logical_not(np.isnan(model_map)))))
        def translate_dims(tpl):
            # Map positional indices back to the dimension's labelled values
            return [dim_val(dims[d][ix]) for d,ix in zip(m_dims,tpl)]
        dim_columns = [translate_dims(di)+[model_map[di]] for ix,di in enumerate(dim_indices) if model_map[di]>=0]
        return {d:[di[i] for di in dim_columns] for i,d in enumerate(m_dims+['_run_idx'])}
    def _raw_parameters(self,model,**tags):
        """Parameter table with dimension-tuple index and numeric parameter columns."""
        vals = self._h5f['MODELS'][model]['parameters'][...]
        model_map = self._map_model_dims(model)
        df = pd.DataFrame(model_map)
        dim_cols = set(df.columns) - {'_run_idx'}
        df = df.set_index(list(dim_cols))
        # Reorder parameter rows to follow the dimension-ordered run indices
        param_df = pd.DataFrame(vals).transpose().reindex(index=df['_run_idx'])
        result = param_df.set_index(df.index)
        return result
    def parameters(self,model,**tags):
        """Named parameter table for a model, optionally filtered by tags."""
        return _tabulate_model_scalars_from_file(self._h5f,
                                                 model,
                                                 self._map_model_dims(model),
                                                 'parameters',
                                                 **tags)
    def initial_states(self,model,**tags):
        """Named initial-state table for a model, optionally filtered by tags."""
        return _tabulate_model_scalars_from_file(self._h5f,
                                                 model,
                                                 self._map_model_dims(model),
                                                 'states',
                                                 **tags)
    def indexed_parameters(self,model,**tags):
        """Parameter table indexed by parameter name, filtered by the given tags."""
        raw = self._raw_parameters(model,**tags)
        desc = getattr(node_types,model).description
        indexed = create_indexed_parameter_table(desc,raw)
        index_names = indexed.index.names
        indexed = indexed.reset_index()
        for k,v in tags.items():
            indexed = indexed[indexed[k]==v]
        indexed = indexed.set_index(index_names)
        return indexed
    def nodes_matching(self,model,**tags):
        """DataFrame of a model's runs (dimension values + _run_idx), filtered by tags."""
        if hasattr(model,'name'):
            model = model.name
        nodes = pd.DataFrame(self._map_model_dims(model))
        for tag,tag_val in tags.items():
            nodes = nodes[nodes[tag]==tag_val]
        return nodes
    def link_table(self):
        """
        Return the table of links between model nodes as a Data Frame.
        """
        linkages = pd.DataFrame(self._h5f['LINKS'][...],columns=LINK_TABLE_COLUMNS)
        all_models = np.array([m.decode() for m in list(self._h5f['META']['models'][...])])
        descriptions = {mod:getattr(node_types,mod).description for mod in all_models}
        # Translate the stored numeric model/variable indices back to names
        linkages.src_model = all_models[linkages.src_model]
        linkages.dest_model = all_models[linkages.dest_model]
        linkages.src_var = [descriptions[m]['Outputs'][v] for m,v in zip(linkages.src_model,linkages.src_var)]
        linkages.dest_var = [descriptions[m]['Inputs'][v] for m,v in zip(linkages.dest_model,linkages.dest_var)]
        return linkages
    def links_between(self,dest_mod=None,dest_var=None,src_mod=None,src_var=None,src_tags={},dest_tags={},annotate=True):
        """
        Identify the links between particular model graph nodes.

        Optionally (and by default), label src node and destination nodes by tags

        All parameters are optional. By default returns all links, with all tags.

        Parameters
        ----------
        dest_mod : string
            Destination model type (eg EmcDwc) and only show links to this model type
        dest_var : string
            Destination variable (eg inflow) and only show links to this variable
        src_mod: string
            Source model type (eg EmcDwc) and only show links from this model type
        src_var: string
            Source variable (eg outflow) and only show links from this variable
        src_tags: dict
            Only show links from graph nodes with all these tags
        dest_tags: dict
            Only show links to graph nodes with all these tags
        annotate: boolean
            Add source node and destination node tags as columns to the data frame
        """
        linkages = self.link_table()
        if dest_mod:
            linkages = linkages[linkages.dest_model==dest_mod]
            nodes = self.nodes_matching(dest_mod,**dest_tags)
            linkages = linkages[linkages.dest_node.isin(nodes._run_idx)]
        if dest_var:
            linkages = linkages[linkages.dest_var==dest_var]
        if src_mod:
            linkages = linkages[linkages.src_model==src_mod]
            nodes = self.nodes_matching(src_mod,**src_tags)
            linkages = linkages[linkages.src_node.isin(nodes._run_idx)]
        if src_var:
            linkages = linkages[linkages.src_var==src_var]
        if annotate:
            # Join each link row to its node's dimension values (src_* / dest_* columns)
            model_maps = {m:pd.DataFrame(self._map_model_dims(m)) for m in set(linkages.src_model).union(linkages.dest_model)}
            def annotate_tbl(prefix):
                tag_names = set([c for m in set(linkages[f'{prefix}_model']) for c in model_maps[m].columns])-{'_run_idx'}
                for tag_name in tag_names:
                    col = f'{prefix}_{tag_name}'
                    rows = [model_maps[m][model_maps[m]._run_idx==n] for m,n in zip(linkages[f'{prefix}_model'],linkages[f'{prefix}_node'])]
                    linkages[col] = [row[tag_name].iloc[0] if tag_name in row else '-' for row in rows]
            annotate_tbl('src')
            annotate_tbl('dest')
        return linkages
    def close(self):
        """Close the underlying HDF5 file."""
        self._h5f.close()
        self._h5f = None
    def write(self,clear_inputs=False):
        """Reopen the file read/write, re-apply self._parameteriser to every
        model group, then reopen the file read-only."""
        try:
            self.close()
            import h5py
            self._h5f = h5py.File(self.filename,'r+')
            if self._parameteriser is None:
                print('Nothing to do')
                return
            models_grp = self._h5f['MODELS']
            models = list(models_grp.keys())
            for m in models:
                print('Parameterising %s'%str(m))
                model_grp = models_grp[m]
                instances = model_grp['map'][...]
                dims = [dim_val(d) for d in model_grp['map'].attrs['DIMS']]
                dim_map = self._map_model_dims(m)
                # Synthesise stand-in node names - the original graph is not stored
                nodes = ['%s-%d'%(m,ix) for ix in range(len(dim_map[dims[0]]))]
                # dims,attributes,instances = self._map_process(model_nodes)
                model_meta = getattr(node_types,m)
                # for k,v in attributes.items():
                #     nodes_df[k] = v
                # full_dims = dict(**dims,**attributes)
                node_dict = {n:{d:vals[ix] for d,vals in dim_map.items()} for ix,n in enumerate(nodes)}
                nodes_df = pd.DataFrame({'node':nodes})
                for d, vals in dim_map.items():
                    nodes_df[d] = vals
                if clear_inputs and 'inputs' in model_grp:
                    del model_grp['inputs']
                # initialise parameters and states if they don't exist!
                self._parameteriser.parameterise(model_meta,model_grp,instances,dim_map,node_dict,nodes_df)
        finally:
            # Always restore a read-only handle, even if parameterisation failed
            self.close()
            self._h5f = h5py.File(self.filename,'r')
    def run(self,time_period,results_fn=None,**kwargs):
        '''
        kwargs: Arguments and fflags to pass directly to ow-sim, including:

        * overwrite (boolean): Overwrite existing output file if it exists
        * verbose (boolean): Show verbose logging during simulation
        '''
        return _run(time_period,self.filename,results_fn,**kwargs)
def _run(time_period,model_fn=None,results_fn=None,**kwargs):
    '''
    Run ow-sim over a model file, streaming its stdout/stderr to this
    process's stdout, and return an OpenwaterResults over the output file.

    kwargs: Arguments and fflags to pass directly to ow-sim, including:

    * overwrite (boolean): Overwrite existing output file if it exists
    * verbose (boolean): Show verbose logging during simulation
    '''
    from openwater.discovery import _exe_path
    from openwater.results import OpenwaterResults
    if not results_fn:
        # Default the results file to <model>_outputs.<ext>
        base,ext = os.path.splitext(model_fn)
        results_fn = '%s_outputs%s'%(base,ext)
        print('INFO: No output filename provided. Writing to %s'%results_fn)
    cmd_line = [_exe_path('sim')]
    for k,v in kwargs.items():
        cmd_line += ow_sim_flag_text(k,v)
    cmd_line.append(model_fn),
    cmd_line.append(results_fn)
    # "%s %s %s %s"%(_exe_path('sim'),flags,model_fn,results_fn)
    logger.debug('Running with command line: %s',cmd_line)
    proc = Popen(cmd_line,stdout=PIPE,stderr=PIPE,bufsize=1, close_fds=ON_POSIX)
    # Pump stdout/stderr on background threads so the child never blocks on full pipes
    std_out_queue,std_out_thread = configure_non_blocking_io(proc,'stdout')
    std_err_queue,std_err_thread = configure_non_blocking_io(proc,'stderr')
    err = []
    out = []
    finished = False
    while not finished:
        if proc.poll() is not None:
            # Child has exited - drain remaining output once more, then stop
            finished = True
        end_stream=False
        while not end_stream:
            try:
                line = std_err_queue.get_nowait().decode('utf-8')
                err.append(line)
                print('ERROR %s'%(line,))
                sys.stdout.flush()
            except Empty:
                end_stream = True
        end_stream = False
        while not end_stream:
            try:
                line = std_out_queue.get_nowait().decode('utf-8')
                out.append(line)
                print(line)
                sys.stdout.flush()
            except Empty:
                end_stream = True
        sleep(0.05)
    assert proc.returncode==0
    return OpenwaterResults(model_fn,results_fn,time_period)
def run_simulation(model,output='model_outputs.h5',overwrite=False):
    """Run ow-sim synchronously via the shell; returns os.system's status."""
    import openwater.discovery
    executable = '%s/ow-sim'%openwater.discovery.OW_BIN
    parts = [executable]
    if overwrite:
        parts.append('-overwrite')
    parts.append(model)
    parts.append(output)
    return os.system(' '.join(parts))
def _enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def configure_non_blocking_io(proc,stream):
    """Start a daemon thread pumping proc.<stream> into a Queue.

    Returns (queue, thread) so callers can poll output without blocking.
    """
    queue = Queue()
    pump = Thread(target=_enqueue_output, args=(getattr(proc, stream), queue))
    pump.daemon = True
    pump.start()
    return queue, pump
def ow_sim_flag_text(k,v):
    """Convert a keyword argument into ow-sim command-line token(s).

    Underscores in the name become hyphens and a leading '-' is added.

    * v is False: the flag is omitted entirely (returns []).
    * v is True: the flag is emitted with no value.
    * dicts become comma-joined 'key:value' pairs; other non-string
      sequences are comma-joined.
    * anything else is stringified as the flag's value.

    Uses identity checks (`is True` / `is False`) rather than equality, so
    the numeric values 0 and 1 are passed through as flag values instead of
    being silently treated as booleans (0 == False previously dropped the
    flag altogether).
    """
    k = k.replace('_','-')
    k = '-%s'%k
    if v is False:
        return []
    if v is True:
        return [k]
    if hasattr(v,'__len__') and not isinstance(v,str):
        if hasattr(v,'items'):
            v = ','.join([f'{v_key}:{v_val}' for v_key,v_val in v.items()])
        else:
            v = ','.join(v)
    return [k,str(v)]
class InvalidFluxException(Exception):
    """Raised when a named input/output flux does not exist on a node."""
    def __init__(self, node, flux_name, flux_type):
        message = f'Invalid flux: Node ({node}) has no {flux_type} named {flux_name}'
        super().__init__(message)
        self.node = node
        self.flux_name = flux_name
        self.flux_type = flux_type
| <filename>openwater/template.py
import os
import sys
import h5py
from subprocess import Popen, PIPE
from queue import Queue, Empty # python 3.x
from threading import Thread
from time import sleep
import numpy as np
import pandas as pd
import networkx as nx
from functools import reduce
from . import nodes as node_types
from .timing import init_timer, report_time, close_timer
from itertools import chain
from .array_params import get_parameter_locations
from .nodes import create_indexed_parameter_table
from .file import _tabulate_model_scalars_from_file
import logging
logger = logging.getLogger(__name__)
# Non blocking IO solution from http://stackoverflow.com/a/4896288
ON_POSIX = 'posix' in sys.builtin_module_names
# Reserved node-tag names carrying structural metadata (not user dimensions)
TAG_PROCESS='_process'
TAG_MODEL='_model'
TAG_RUN_INDEX='_run_idx'
TAG_GENERATION='_generation'
META_TAGS=[TAG_PROCESS,TAG_MODEL,TAG_RUN_INDEX,TAG_GENERATION]
# Default number of timesteps when writing a model file without a time period
DEFAULT_TIMESTEPS=365
# LINKS table columns: src/dest x (generation, model, node, gen_node, var)
LINK_TABLE_COLUMNS = ['%s_%s'%(n,c) for n in ['src','dest'] for c in ['generation','model','node','gen_node','var']]
def connections_match(o, i):
    """True when an output flux descriptor can feed an input flux descriptor.

    Each descriptor is a tuple (node, flux name, alias, tags). The aliases
    must be equal, and the merged tag sets (flux tags overridden by node
    tags) must agree on every common key.
    """
    out_node, _, out_alias, out_tags = o
    in_node, _, in_alias, in_tags = i
    if out_alias != in_alias:
        return False
    merged_out = {**out_tags, **out_node.tags}
    merged_in = {**in_tags, **in_node.tags}
    return tags_match(merged_out, merged_in)
def tags_match(o_tags, i_tags):
    """True when two tag dicts agree on every shared key.

    The structural meta-tags '_process' and '_model' are ignored in the
    comparison.
    """
    shared_keys = set(o_tags.keys()) & set(i_tags.keys())
    return all(
        o_tags[key] == i_tags[key]
        for key in shared_keys
        if key not in ('_process', '_model')
    )
class OWTemplate(object):
    """A reusable template of model nodes and links.

    Templates expose named input/output fluxes, can nest other templates,
    and can be instantiated with additional tags to stamp out copies of
    the structure.
    """
    def __init__(self,lbl=''):
        self.label=lbl
        self.nodes = []
        self.links = []
        self.nested = []   # child templates added via nest()
        self.inputs = []   # (node, flux name, alias, tags) tuples exposed as inputs
        self.outputs = []  # (node, flux name, alias, tags) tuples exposed as outputs
    def define_input(self,node,name=None,alias=None,**kwargs):
        """Expose an input flux of node as a named input of this template."""
        if name is None:
            # Would be nice to have a short hand to define every input of this
            # node as an input to the graph (and similarly every output of a node
            # as an output of the graph
            # But the node currently stores the model name)
            raise InvalidFluxException(node,'(no name provided)','input')
        if not node.has_input(name):
            raise InvalidFluxException(node,name,'input')
        if alias is None:
            alias = name
        self.inputs.append((node,name,alias,kwargs))
    def define_output(self,node,name=None,alias=None,**kwargs):
        """Expose an output flux of node as a named output of this template."""
        if not node.has_output(name):
            raise InvalidFluxException(node,name,'output')
        if alias is None:
            alias = name
        self.outputs.append((node,name,alias,kwargs))
    def _has_flux(self,alias,fluxes):
        # Flux entries are (node, name, alias, tags); match on the alias field
        for fl in fluxes:
            if fl[2]==alias:
                return True
        return False
    def has_input(self,alias):
        """True if the template exposes an input with the given alias."""
        return self._has_flux(alias,self.inputs)
    def add_node(self,model_type=None,name=None,process=None,**tags):
        """Create an OWNode in this template and return it."""
        # if hasattr(node_or_name,'model_type'):
        #     self.nodes.append(node_or_name)
        # else:
        if process and not TAG_PROCESS in tags:
            tags[TAG_PROCESS]=process
        new_node = OWNode(model_type,name,**tags)
        self.nodes.append(new_node)
        return new_node
    def add_link(self,link):
        """Record an OWLink between two nodes of this template."""
        self.links.append(link)
    def add_conditional_link(self,from_node,from_output,to_node,possibly_inputs,model):
        """Link from_output to the first name in possibly_inputs accepted by
        the destination model. Returns True if a link was created."""
        for pi in possibly_inputs:
            if pi in model.description['Inputs']:
                self.add_link(OWLink(from_node,from_output,to_node,pi))
                return True
        return False
    def match_labelled_flux(self,fluxes,flux_name,flux_tags,exclude_tags):
        """Find a flux descriptor by alias and tags.

        Returns (node, flux name) for the first entry in fluxes whose alias
        equals flux_name, whose tags contain every key of flux_tags with a
        matching value and none of exclude_tags; (None, None) otherwise.
        """
        required_tags = set(flux_tags.keys())
        for node,name,alias,stored_tags in fluxes:
            if flux_name != alias:
                continue
            stored_tags = dict(**stored_tags)
            stored_tags.update(**node.tags)
            if len(required_tags.difference(stored_tags.keys())):
                continue
            skip = False
            for et in exclude_tags:
                if et in stored_tags:
                    skip = True
                    break
            if skip: continue
            if tags_match(flux_tags,stored_tags):
                return node, name
        return None,None
    def make_link(self,output_name,input_name,
                  from_node=None,from_tags={},from_exclude_tags=[],
                  to_node=None,to_tags={},to_exclude_tags=[]):
        """Construct (but do not add) an OWLink, resolving either endpoint by
        the template's output/input aliases and tags when a node is not given."""
        link_txt = f'{output_name}({from_node or str(from_tags)}) -> {input_name}({to_node or str(to_tags)})'
        if from_node is None:
            from_node, new_output_name = self.match_labelled_flux(
                self.outputs,output_name,from_tags,from_exclude_tags)
            if from_node is None or new_output_name is None:
                n_outputs = len(self.outputs)
                raise Exception('%s: No matching output for %s, with tags %s. Have %d outputs'%(link_txt,new_output_name or output_name,str(from_tags),n_outputs))
            output_name = new_output_name
        if to_node is None:
            to_node, new_input_name = self.match_labelled_flux(
                self.inputs,input_name,to_tags,to_exclude_tags)
            if to_node is None or new_input_name is None:
                raise Exception('%s: No matching input for %s, with tags %s'%(link_txt,new_input_name or input_name,str(to_tags)))
            input_name = new_input_name
        return OWLink(from_node,output_name,to_node,input_name)
    def flatten(self):
        '''
        Generate a single, flat template containing all nested
        templates, instantiating links between nested templates based
        on input and output descriptions.

        When instantiating links, the order of the nested templates matters,
        with links only instantiated from outputs of earlier nested templates to
        inputs of later nested templates.
        '''
        result = OWTemplate(self.label)
        result.nodes += self.nodes
        result.links += self.links
        result.inputs += self.inputs
        result.outputs += self.outputs
        flattened = [t.flatten() for t in self.nested]
        available_outputs = []
        used_outputs = []
        for child in flattened:
            result.nodes += child.nodes
            result.links += child.links
            for child_input in child.inputs:
                input_linked = False
                # Try to satisfy the child's input from an earlier child's output
                for previous_output in available_outputs:
                    if connections_match(previous_output,child_input):
                        # print('linking',previous_output,child_input)
                        result.add_link(OWLink(previous_output[0],previous_output[1],child_input[0],child_input[1]))
                        used_outputs.append(previous_output)
                        input_linked = True
                if not input_linked:
                    # Unsatisfied inputs bubble up as inputs of the flattened template
                    result.inputs.append(child_input)
            available_outputs += child.outputs
        #unused_outputs = set(available_outputs).difference(set(used_outputs))
        unused_outputs = [o for o in available_outputs if not o in used_outputs]
        result.outputs+= list(unused_outputs)
        return result
    def nest(self,other):
        '''
        Register another template as a child of this one. The actual
        connection of outputs to inputs happens later, in flatten().
        '''
        self.nested.append(other)
    def instantiate(self,**instance_tags):
        """Return a copy of this template with instance_tags added to every node.

        Only nodes and links are copied; nested templates and declared
        inputs/outputs are not carried over.
        """
        res = OWTemplate()
        node_map = {}
        for n in self.nodes:
            new_node = res.add_node(n.model_type,**n.tags,**instance_tags)
            node_map[n.name] = new_node
        for l in self.links:
            new_from = node_map[l.from_node.name]
            new_to = node_map[l.to_node.name]
            new_link = res.add_link(OWLink(new_from,l.from_output,new_to,l.to_input))  # NOTE(review): add_link returns None; new_link is unused
        return res
class OWNode(object):
    """A single model run in a template: a model type plus identifying tags."""
    def __init__(self,model_type,name=None,**tags):
        self.model_type = model_type
        if hasattr(model_type,'name'):
            # A model description object was passed directly
            self.model_name = model_type.name
            self.model_type = model_type
        else:
            # A model name string was passed - resolve it to a description
            self.model_name = model_type
            import openwater.nodes as node_types
            from openwater.discovery import discover
            discover()
            self.model_type = getattr(node_types,self.model_name)
        self.tags = tags
        self.tags[TAG_MODEL] = self.model_name
        if name:
            self.name = name
        else:
            self.name = self.make_name()
    def make_name(self):
        """Derive a node name by joining its tag values in a conventional order."""
        # Well-known tags come first, then any other non-meta tags alphabetically
        std_names = ['catchment','model',TAG_PROCESS,'constituent','hru','lu']
        for k in sorted(self.tags.keys()):
            if k.startswith('_'):continue
            if not k in std_names:
                std_names.append(k)
        return '-'.join([str(self.tags.get(k,None)) for k in std_names if k in self.tags])
    def __str__(self):
        # Format is 'name (ModelName)' - model_type() parses this back out
        return '%s (%s)'%(self.name,self.model_name)
    def has_output(self,name):
        """True if the node's model declares an output flux with this name."""
        return name in self.model_type.description['Outputs']
    def has_input(self,name):
        """True if the node's model declares an input flux with this name."""
        return name in self.model_type.description['Inputs']
class OWLink(object):
    """A directed flux connection: from_node.from_output feeds to_node.to_input."""
    def __init__(self, from_node, from_output, to_node, to_input):
        # All four endpoints are mandatory
        for required in (from_node, from_output, to_node, to_input):
            assert required is not None
        if not from_node.has_output(from_output):
            raise InvalidFluxException(from_node, from_output, 'output')
        if not to_node.has_input(to_input):
            raise InvalidFluxException(to_node, to_input, 'input')
        self.from_node = from_node
        self.from_output = from_output
        self.to_node = to_node
        self.to_input = to_input
# class OWSystem(object):
# def __init__(self):
# self.nodes = []
# self.links = []
# def add_node(self,name,)
def template_to_graph(g:nx.DiGraph,tpl:OWTemplate,allow_duplicates=False,**tags) -> nx.DiGraph:
    """
    Add all the nodes and links in an Openwater Template to a graph

    Parameters
    ----------
    g: nx.DiGraph
        an existing graph object. If None, a new graph object will be created
    tpl: OWTemplate
        Openwater Template to add to the graph
    allow_duplicates:
        Whether to allow duplicate links (ie between the same two nodes and variables) or whether to throw an exception
        Defaults to False (ie raise exception on duplicate)
    tags:
        Additional tags to assign to all nodes in the template when adding to the graph

    Returns
    -------
    nx.DiGraph
        The graph object passed in as g, or the new graph object created

    Raises
    ------
    Exception
        when duplicate link encountered (unless allow_duplicates=True)
    """
    # NOTE: `not g` is also True for an *empty* graph passed in, which gets
    # replaced - callers should use the returned graph, not the argument.
    if not g:
        g = nx.DiGraph()
    nodes = {}
    nw = tpl.instantiate(**tags)
    for n in nw.nodes:
        g.add_node(str(n),**n.tags)
        nodes[str(n)] = n
    for l in nw.links:
        key = (str(nodes[str(l.from_node)]),str(nodes[str(l.to_node)]))
        if key in g.edges:
            existing = g.edges[key]
            # A duplicate is the same (source variable, destination variable)
            # *pair* on this edge. The previous check tested membership of the
            # two lists independently, which falsely rejected distinct links
            # that merely reused a source or destination variable already
            # present on the edge.
            if not allow_duplicates and \
               (l.from_output,l.to_input) in zip(existing['src'],existing['dest']):
                raise Exception(f'Duplicate link along {key}, between {l.from_output} and {l.to_input}')
            existing['src'].append(l.from_output)
            existing['dest'].append(l.to_input)
            continue
        g.add_edge(key[0],key[1],
                   src=[l.from_output],dest=[l.to_input])
    return g
def node_matches(n, **kwargs):
    """True when node dict n contains every given tag with the given value."""
    return all(key in n and n[key] == value for key, value in kwargs.items())
def match_nodes(g, **kwargs):
    """Names of graph nodes whose attributes contain all the given tag values."""
    matching = []
    for node_name, attributes in g.nodes.items():
        if node_matches(attributes, **kwargs):
            matching.append(node_name)
    return matching
def model_type(n):
    """Extract the model name from a node's string form, e.g. 'x (GR4J)' -> 'GR4J'."""
    after_paren = str(n).split('(')[1]
    return after_paren[:-1]
def group_run_order(g):
    """Group every node in g by model type and ancestor count.

    Returns a triple:
    - ancestors_by_node: node name -> set of all ancestor node names
    - by_node_type_gen: model type -> {ancestor count -> [node names]}
    - node_gen: node name -> ancestor count
    """
    ancestors_by_node = {}
    by_node_type_gen = {}
    node_gen = {}
    for node in list(g.nodes):
        node_ancestors = nx.ancestors(g, node)
        ancestors_by_node[node] = node_ancestors
        depth = len(node_ancestors)
        node_gen[node] = depth
        groups_for_type = by_node_type_gen.setdefault(model_type(node), {})
        groups_for_type.setdefault(depth, []).append(node)
    return ancestors_by_node, by_node_type_gen, node_gen
def assign_stages(order, node_gen, by_node_type_gen):
    """Build initial stage lists by batching same-type, same-depth nodes.

    Walks nodes in topological order; each unvisited node pulls in every
    node of the same model type with the same ancestor count, placing the
    whole batch at the stage index equal to that ancestor count.
    """
    done = {}
    stages = []
    batch_index = 0
    for node in order:
        if node in done:
            continue
        depth = node_gen[node]
        batch = by_node_type_gen[model_type(node)][depth]
        # Grow the stage list until index `depth` exists
        stages.extend([] for _ in range(depth + 1 - len(stages)))
        stages[depth] += batch
        done.update((member, batch_index) for member in batch)
        batch_index += 1
    return stages
def map_stages(stages):
    """Invert a list of stages (lists of node names) into {node: stage_index}."""
    return {node: index
            for index, stage in enumerate(stages)
            for node in stage}
# Stages with fewer than this many jobs are considered 'small'; only from the
# first small stage onward do bring_forward/push_back try to rebalance stages.
THRESHOLD_N_JOBS=500
def find_first_small_stage(stages):
    """Index of the first stage holding fewer than THRESHOLD_N_JOBS nodes, or -1 if none."""
    for index, stage in enumerate(stages):
        if len(stage) < THRESHOLD_N_JOBS:
            return index
    return -1
def flatten(l_of_l):
    """Concatenate a list of lists into a single flat list."""
    result = []
    for sublist in l_of_l:
        result.extend(sublist)
    return result
# Module-level memoisation caches for graph traversals, filled lazily by
# descendants_cached / immediate_descendants_cached and reset at the start of
# compute_simulation_order.
descendants_by_node={}
imm_descendants_by_node={}
# cache_queries = 0
# cache_misses = 0
def descendants_cached(g, n):
    """Memoised wrapper around nx.descendants, backed by the module-level
    descendants_by_node cache."""
    cached = descendants_by_node.get(n)
    if cached is None:
        cached = nx.descendants(g, n)
        descendants_by_node[n] = cached
    return cached
def immediate_descendants_cached(g, n):
    """Memoised set of direct successors of ``n``, backed by the module-level
    imm_descendants_by_node cache."""
    cached = imm_descendants_by_node.get(n)
    if cached is None:
        cached = set(g.successors(n))
        imm_descendants_by_node[n] = cached
    return cached
def bring_forward(g, stages):
    """Move nodes from stage i+1 into stage i when they do not depend on
    anything in stage i, then drop any stages left empty.

    Only stages from the first 'small' stage onward are considered.
    Mutates ``stages`` in place and returns the compacted list.
    """
    init_timer('bring_forward')
    first_small_stage = find_first_small_stage(stages)
    for i in range(first_small_stage, len(stages)-1):
        si = stages[i]
        if not len(si):
            continue
        # Anything downstream of a node in stage i cannot be pulled into stage i
        all_descendents = set(flatten([descendants_cached(g, n) for n in si]))
        si1 = stages[i+1]
        candidates = [n for n in si1 if not n in all_descendents]
        if len(candidates):
            stages[i] += candidates
            stages[i+1] = list(set(stages[i+1]) - set(candidates))  # [n for n in stages[i+1] if not n in candidates] # set difference?
    stages = [s for s in stages if len(s)]
    close_timer()
    return stages
def latest_possible(g, n, n_stages, node_stages):
    """Return the latest stage that node ``n`` could be scheduled in without
    being pushed past any of its immediate descendants.

    Parameters
    ----------
    g : graph whose successors define immediate descendants
    n : node name
    n_stages : total number of stages (upper bound for the search)
    node_stages : {node: current stage index}

    Returns the earliest descendant stage minus one; returns the node's
    current stage immediately if a descendant sits in the very next stage
    (no later slot is possible).

    Raises
    ------
    Exception
        if a descendant is found at stage 0 (``n`` could not precede it).
    """
    current = node_stages[n]
    next_stage = current+1
    descendants = immediate_descendants_cached(g, n)
    lowest = n_stages
    for d in descendants:
        descendent_stage = node_stages[d]
        if descendent_stage >= lowest:
            continue
        if descendent_stage == next_stage:
            # A descendant is already in the next stage: cannot move later at all
            return current
        lowest = descendent_stage
        if not lowest:
            # A descendant at stage 0 means n has nowhere valid to go
            raise Exception('Node %s (stage %d of %d) has descendant %s at stage 0' % (n, current, n_stages, d))
    return lowest-1
#shifts = 0
# def push_back_orig(g,stages):
# first_small_stage = find_first_small_stage(stages)
# # visited = {}
# global shifts
# node_stages = map_stages(stages)
# count = 0
# nodes_downstream = 0
# for i in range(len(stages)-1,-1,-1):
# stage_nodes = stages[i]
# nodes_downstream += len(stage_nodes)
# print(i)
# for n in stage_nodes:
# # if (n in visited) and visited[n]==i:
# # # Node visited as an ancestor and not moved, so no reason to look further at ancestors
# # continue
# ancestors = ancestors_by_node[n]
# for a in ancestors:
# current_stage = node_stages[a]
# # visited[a]=current_stage
# if current_stage == (i-1):
# continue # Already as late as possible
# new_stage = latest_possible(g,a,len(stages),node_stages)
# if new_stage==current_stage:
# continue
# shifts += 1
# stages[new_stage].append(a)
# stages[current_stage].remove(a)
# node_stages[a] = new_stage
# #print(i,n,a,current_stage,new_stage)
# #count += 1
# #assert(count<10)
# stages = [s for s in stages if len(s)]
# return stages
# @profile
def push_back_ss(g, stages):
    """Delay ancestors to the latest stage allowed by their descendants.

    Works backwards through the stages, moving each ancestor of a stage's
    nodes to ``latest_possible`` via deferred to_add/to_remove sets (applied
    when the loop reaches the affected stage). Mutates ``stages`` in place
    and returns the list with empty stages removed.
    """
    init_timer('push_back_ss')
    # Deferred moves, applied when the backward sweep reaches each stage
    to_remove = {n: [] for n in range(len(stages))}
    to_add = {n: [] for n in range(len(stages))}
    # first_small_stage = find_first_small_stage(stages)
    visited = {}
    # init_timer('map node stages')
    node_stages = map_stages(stages)
    # close_timer()
    count = 0             # NOTE: diagnostic counters, currently unused
    nodes_downstream = 0
    # global shifts
    for i in range(len(stages)-1, -1, -1):
        # init_timer('stage %d'%i)
        # Apply moves deferred for this stage before processing it
        stage_nodes = list(set(stages[i]).union(set(to_add[i])) - set(to_remove[i]))
        stages[i] = stage_nodes
        nodes_downstream += len(stage_nodes)
        for n in stage_nodes:
            already_visited = n in visited
            if already_visited and visited[n]==i:
                # Node last visited as an ancestor and not moved, so no reason to look further at ancestors
                continue
            ancestors = ancestors_by_node[n]
            for a in ancestors:
                current_stage = node_stages[a]
                visited[a]=current_stage
                if current_stage == (i-1):
                    continue # Already as late as possible
                new_stage = latest_possible(g, a, len(stages), node_stages)
                if new_stage==current_stage:
                    continue
                # shifts += 1
                to_add[new_stage].append(a)
                to_remove[current_stage].append(a)
                #stages[new_stage].append(a)
                #stages[current_stage].remove(a)
                node_stages[a] = new_stage
                #print(i,n,a,current_stage,new_stage)
                #count += 1
                #assert(count<10)
        # close_timer()
    stages = [s for s in stages if len(s)]
    close_timer()
    return stages
def compute_simulation_order(graph):
    """Partition ``graph`` into an ordered list of stages (generations) of
    nodes that can run in parallel.

    Starts from a topological sort grouped by model type and ancestor count,
    then iteratively compacts stages with bring_forward/push_back_ss until
    the number of stages stops shrinking. Resets the module-level traversal
    caches as a side effect.
    """
    init_timer('compute_simulation_order')
    init_timer('Get basic order')
    global descendants_by_node
    descendants_by_node = {}
    g = graph
    sequential_order = list(nx.topological_sort(g))
    global ancestors_by_node  #, node_ancestry_df, node_descendent_df
    ancestors_by_node, by_node_type_gen, node_gen = group_run_order(g)
    stages = assign_stages(sequential_order, node_gen, by_node_type_gen)
    stages = [s for s in stages if len(s)]
    # report_time('create node ancestry dataframe for %d nodes'%len(ancestors_by_node))
    # node_ancestry_df = pd.DataFrame(data=False,index=list(g.nodes),columns=list(g.nodes))
    # for k,ancestors in ancestors_by_node.items():
    #     node_ancestry_df[k][ancestors] = True
    # node_descendent_df = node_ancestry_df.transpose()
    report_time('Grouping model stages/generations')
    n_stages = len(stages)
    new_n_stages = 0
    iteration = 1
    # Keep compacting while each pass still reduces the stage count
    while new_n_stages < n_stages:
        init_timer('Iteration %d'%iteration)
        n_stages = len(stages)
        stages = bring_forward(g, stages)
        stages = push_back_ss(g, stages)
        new_n_stages = len(stages)
        iteration += 1
        close_timer()
    close_timer()
    close_timer()
    return stages
def tag_set(nodes):
    """Union of all tag keys present across a collection of node mappings."""
    key_sets = [set(n.keys()) for n in nodes]
    return reduce(lambda acc, keys: acc.union(keys), key_sets)
def proc_model(node):
    """Grouping key '<process>/<model>' for a node's tag dictionary."""
    process = node[TAG_PROCESS]
    model = node[TAG_MODEL]
    return '/'.join([str(process), str(model)])
def match_model_name(node_name):
    # NOTE(review): reads the module-level graph `g` (set inside
    # compute_simulation_order), not a parameter — presumably only valid
    # after an order computation; confirm intended scope.
    return g.nodes[node_name][TAG_MODEL]
    # return np.string_(re.match(re.compile('.*\(([\w\d]+)\)'),node_name)[1])
def sort_nodes(nodes):
    """Sort a group of nodes by relevant criteria.

    Currently just lexical order by name - but ultimately by tags in some way!
    """
    ordered = list(nodes)
    ordered.sort()
    return ordered
class ModelGraph(object):
    """An Openwater model graph: a DiGraph of tagged model nodes, organised
    into parallel 'generations' and serialisable to the HDF5 model file
    format consumed by ow-sim."""

    def __init__(self, graph, initialise=True, time_period=None):
        # graph: nx.DiGraph whose nodes carry tag dicts (_model, _process, ...)
        self._graph = graph
        self._parameteriser = None   # optional object with .parameterise(...)
        self._last_write = None      # destination of the most recent write_model()
        self.time_period = time_period
        if initialise:
            self.initialise()

    def initialise(self):
        """Compute the simulation order and derive run metadata: generation
        tags, run indices per model, tag dimensions and their distinct values."""
        init_timer('Compute simulation order')
        self.order = compute_simulation_order(self._graph)
        report_time('Tag nodes')
        # Record each node's generation (parallel batch index) as a tag
        for i, gen in enumerate(self.order):
            for node in gen:
                self._graph.nodes[node][TAG_GENERATION] = i
        self.sequence = flatten(self.order)
        nodes = self._graph.nodes
        self.model_names = list({n[TAG_MODEL] for n in self._graph.nodes.values()})
        # Group nodes by '<process>/<model>' to derive tag dimensions per model
        proc_models = {proc_model(nodes[n]) for n in nodes}
        node_names_by_process_and_model = {pm: [n for n in nodes if proc_model(nodes[n])==pm] for pm in proc_models}
        nodes_by_process_and_model = {pm: [nodes[n] for n in proc_nodes] for pm, proc_nodes in node_names_by_process_and_model.items()}
        tags_by_process_and_model = {p: list(tag_set(nodes)-set(META_TAGS)) for p, nodes in nodes_by_process_and_model.items()}
        self.all_tags = set().union(*tags_by_process_and_model.values())
        # Sorted distinct values per tag; used as HDF5 dimension axes later
        self.distinct_values = {t: sorted(set([nodes[n][t] for n in nodes if t in nodes[n]])) for t in self.all_tags}
        report_time('Assign runtime metadata')
        for pm in proc_models:
            node = nodes_by_process_and_model[pm][0]
            self.assign_run_indices(node[TAG_PROCESS], node[TAG_MODEL])
        close_timer()

    def assign_run_indices(self, proc, model_type):
        '''
        Assign run indices to each model run within a given process, p (eg 'rainfall runoff')
        '''
        i = 0
        nodes = self._graph.nodes
        for gen in self.order:
            relevant_gen = [n for n in gen if nodes[n][TAG_MODEL]==model_type] # and nodes[n][TAG_PROCESS]==proc] ### BAD ASSUMPTION!
            relevant_gen = sort_nodes(relevant_gen)
            for n in relevant_gen:
                node = nodes[n]
                node[TAG_RUN_INDEX] = i
                i += 1

    def nodes_matching(self, model, **tags):
        """Return {name: tag dict} of graph nodes of the given model type
        whose tags all match ``tags``."""
        if hasattr(model, 'name'):
            model = model.name
        if not '_model' in tags:
            tags['_model'] = model
        def tags_match(node):
            for k, v in tags.items():
                if not k in node or node[k] != v:
                    return False
            return True
        return {n: self._graph.nodes[n] for n in self._graph.nodes if tags_match(self._graph.nodes[n])}

    def write_model(self, f, timesteps=DEFAULT_TIMESTEPS):
        """Write the full model description (meta, per-model groups, links,
        dimensions) to ``f`` — a filename or an open h5py file object."""
        init_timer('Write model file')
        init_timer('Write meta and dimensions')
        close = False
        if hasattr(f, 'upper'):
            # f is a string: open (and later close) the file ourselves
            h5f = h5py.File(f, 'w')
            close = True
        else:
            h5f = f
        try:
            self._write_meta(h5f)
            report_time('Write model groups')
            self._write_model_groups(h5f, timesteps)
            report_time('Write links')
            self._write_links(h5f)
            report_time('Write dimensions')
            self._write_dimensions(h5f)
        finally:
            if close: h5f.close()
        self._last_write = f
        close_timer()
        close_timer()

    def run(self, time_period, model_fn=None, results_fn=None, **kwargs):
        '''
        Write the model (if model_fn given) and run ow-sim over it.

        kwargs: Arguments and flags to pass directly to ow-sim, including:

        * overwrite (boolean): Overwrite existing output file if it exists
        * verbose (boolean): Show verbose logging during simulation
        '''
        if model_fn:
            self.write_model(model_fn, len(time_period))
        model_fn = self._last_write
        if not model_fn:
            raise Exception('model_fn not provided and model not previously saved')
        return _run(time_period, model_fn, results_fn, **kwargs)

    def _flux_number(self, node_name, flux_type, flux_name):
        """Index of ``flux_name`` within the node's model description for
        ``flux_type`` ('input'/'output', singular or plural, any case)."""
        node = self._graph.nodes[node_name]
        model_type = node[TAG_MODEL]
        desc = getattr(node_types, model_type).description
        flux_type = flux_type.capitalize()
        if not flux_type.endswith('s'):
            flux_type += 's'
        if not flux_name in desc[flux_type]:
            raise Exception('Unknown %s flux %s on %s'%(flux_type[:-1], flux_name, model_type))
            #return -1
        return desc[flux_type].index(flux_name)

    def _write_meta(self, h5f):
        # META group: model-type names and (optionally) the ISO timestamps
        # of the run period
        meta = h5f.create_group('META')
        meta.create_dataset('models',
            data=[np.string_(n) for n in self.model_names],
            dtype='S%d'%max([len(mn) for mn in self.model_names]))
        if self.time_period is not None:
            dates = np.array([ts.isoformat() for ts in self.time_period], dtype=h5py.special_dtype(vlen=str))
            meta.create_dataset('timeperiod', data=dates)

    def _write_dimensions(self, f):
        # One dataset per tag listing its distinct values (strings as bytes)
        dimensions = f.create_group('DIMENSIONS')
        for t in self.all_tags:
            vals = self.distinct_values[t]
            if hasattr(vals[0], '__len__'):
                vals = [np.string_(v) for v in vals]
            dimensions.create_dataset(t, data=vals)

    def _map_process(self, node_set):
        '''
        For a given model (eg 'GR4J'), organise all model runs
        by the parameterisation dimensions (eg catchment x hru) and assign indices.

        Returns (dimension_values, attributes, model_instances) where
        model_instances is an index array over the dimension axes holding
        each node's run index.
        '''
        nodes = self._graph.nodes
        def dim_tags(node_name):
            node = nodes[node_name]
            keys = node.keys()
            return set(keys) - set(META_TAGS)
        dimsets = {frozenset(dim_tags(n)) for n in node_set}
        all_dims = set(chain.from_iterable(dimsets))
        # assert len(dimsets)==1 # don't support one process having different dimensions
        # Should at least support attributes (tags that only ever have one value)
        dimensions = all_dims # list(dimsets)[0]
        # dim_values = {d:sorted({nodes[n][d] for n in node_set}) for d in dimensions}
        if len(dimsets) > 1:
            # Nodes disagree on which tags they carry: fill gaps with dummies
            print('Populating nodes with dummy dimension values')
            for dim in dimensions:
                added_dummy = False
                dummy_val = f'dummy-{dim}'
                for node in node_set:
                    if not dim in nodes[node]:
                        nodes[node][dim] = dummy_val
                        if not added_dummy and (dummy_val not in self.distinct_values[dim]):
                            self.distinct_values[dim].append(dummy_val)
                            added_dummy = True
        dim_values = {d: sorted({nodes[n][d] for n in node_set}) for d in dimensions}
        # Tags with a single value across >1 nodes become scalar attributes
        attributes = {d: vals[0] for d, vals in dim_values.items() if (len(vals)==1) and (len(node_set)>1)}
        dimension_values = {d: vals for d, vals in dim_values.items() if (len(vals)>1) or (len(node_set)==1)}
        dimensions = [d for d in dimensions if not d in attributes]
        if not len(dimensions):
            print(attributes)
            print(len(node_set))
            # NOTE(review): raising a str is a TypeError in Python 3 — should be
            # raise Exception('No dimensions')
            raise 'No dimensions'
        if len([d for d in dimensions if len(dimension_values[d])==0]):
            print('Dimension(s) with 0 length:', dimension_values)
            raise Exception('Dimension(s) with 0 length')
        # dims = tags_by_process[p]
        # dimensions = [distinct_values[d] for d in dims]
        shape = tuple([len(self.distinct_values[d]) for d in dimensions])
        # uint32 array initialised to 'no run' (-1 wraps to max uint32)
        model_instances = np.ones(shape=shape, dtype=np.uint32) * -1
        for node_name in node_set:
            node = nodes[node_name]
            loc = tuple([self.distinct_values[d].index(node[d]) for d in dimensions])
            if len(loc) < len(shape):
                print(loc, node)
            model_instances[loc] = node[TAG_RUN_INDEX]
        return dimension_values, attributes, model_instances

    def _write_model_groups(self, f, n_timesteps):
        """Write one group per model type under /MODELS: the run-index 'map',
        per-generation 'batches', zeroed 'states'/'parameters' datasets, and
        (if a parameteriser is attached) populated parameters."""
        models_grp = f.create_group('MODELS')
        nodes = self._graph.nodes
        models = {nodes[n][TAG_MODEL] for n in nodes}
        self.model_batches = {}
        for mx, m in enumerate(models):
            model_msg = 'Writing model %s (%d/%d)'%(m, mx+1, len(models))
            init_timer(model_msg)
            print(model_msg)
            model_grp = models_grp.create_group(m)
            model_nodes = [n for n in nodes if nodes[n][TAG_MODEL]==m]
            processes_for_model = {nodes[n][TAG_PROCESS] for n in model_nodes}
            # assert(len(processes_for_model)==1) # not necessary?
            dims, attributes, instances = self._map_process(model_nodes)
            ds = model_grp.create_dataset('map', dtype=instances.dtype, data=instances, fillvalue=-1)
            # write out model index
            ds.attrs['PROCESSES'] = [np.string_(s) for s in list(processes_for_model)]
            ds.attrs['DIMS'] = [np.string_(d) for d in dims]
            for attr, val in attributes.items():
                ds.attrs[attr] = val
            # Cumulative node count per generation for this model type
            self.model_batches[m] = np.cumsum([len([n for n in gen if nodes[n][TAG_MODEL]==m]) for gen in self.order])
            model_meta = getattr(node_types, m)
            if hasattr(model_meta, 'description'):
                desc = model_meta.description
                n_states = len(desc['States']) # Compute, based on parameters...
                n_params = len(desc['Parameters'])
                n_inputs = len(desc['Inputs'])
            else:
                # Fallback sizes when the model has no description metadata
                print('No description for %s'%m)
                desc = None
                n_states = 3
                n_params = 4
                n_inputs = 2
            # batch_counts = [len(mc.get(m,[])) for mc in model_counts_by_generation]
            model_grp.create_dataset('batches', shape=(len(self.order),), dtype=np.uint32, data=self.model_batches[m], fillvalue=-1)
            n_cells = len(model_nodes) # instances.size
            # Init states....
            model_grp.create_dataset('states', shape=(n_cells, n_states), dtype=np.float64, fillvalue=0)
            model_grp.create_dataset('parameters', shape=(n_params, n_cells), dtype=np.float64, fillvalue=0)
            # model_grp.create_dataset('inputs',shape=(n_cells,n_inputs,n_timesteps),dtype=np.float64,fillvalue=0)
            if (self._parameteriser is not None) and (desc is not None):
                node_dict = {n: nodes[n] for n in model_nodes}
                nodes_df = pd.DataFrame([nodes[n] for n in model_nodes])
                for k, v in attributes.items():
                    nodes_df[k] = v
                full_dims = dict(**dims, **attributes)
                init_timer('Parameterisation')
                self._parameteriser.parameterise(model_meta, model_grp, instances, full_dims, node_dict, nodes_df)
                close_timer()
            close_timer()

    def gen_index(self, node):
        """Offset of a node's run index within its generation's batch for its
        model type (model_batches holds cumulative counts per generation)."""
        global_idx = node['_run_idx']
        model_name = node['_model']
        gen = node['_generation']
        if gen:
            start_of_gen = self.model_batches[model_name][gen-1]
        else:
            start_of_gen = 0
        return global_idx - start_of_gen

    def link_table(self):
        """Build the numeric link table (one row per flux connection) as a
        DataFrame with LINK_TABLE_COLUMNS, sorted by source then destination."""
        model_lookup = dict([(m, i) for i, m in enumerate(self.model_names)])
        link_table = []
        for l_from, l_to in self._graph.edges:
            link_data = self._graph.edges[(l_from, l_to)]
            # One row per (source output, destination input) pair on this edge
            for src_var, dest_var in zip(link_data['src'], link_data['dest']):
                link = {}
                f_node = self._graph.nodes[l_from]
                t_node = self._graph.nodes[l_to]
                link['src_generation'] = f_node['_generation']
                link['src_model'] = model_lookup[f_node['_model']]
                link['src_node'] = f_node['_run_idx']
                link['src_gen_node'] = self.gen_index(f_node)
                link['src_var'] = self._flux_number(l_from, 'output', src_var)
                link['dest_generation'] = t_node['_generation']
                link['dest_model'] = model_lookup[t_node['_model']]
                link['dest_node'] = t_node['_run_idx']
                link['dest_gen_node'] = self.gen_index(t_node)
                link['dest_var'] = self._flux_number(l_to, 'input', dest_var)
                link_table.append(link)
        link_table = pd.DataFrame(link_table)
        col_order = LINK_TABLE_COLUMNS
        link_table = link_table[col_order]
        sort_order = ['src_generation', 'src_model', 'src_gen_node', 'dest_generation', 'dest_model', 'dest_gen_node']
        return link_table.sort_values(sort_order)

    def _write_links(self, f):
        # LINKS dataset: the link table as a uint32 matrix
        table = np.array(self.link_table())
        f.create_dataset('LINKS', dtype=np.uint32, data=table)
def dim_val(v):
    """Decode byte-string dimension values to str; return all other values unchanged."""
    return v.decode() if hasattr(v, 'decode') else v
class ModelFile(object):
    """Read (and selectively re-parameterise) an existing Openwater HDF5
    model file, without needing the original ModelGraph."""

    def __init__(self, fn):
        self.filename = fn
        import h5py
        self._h5f = h5py.File(self.filename, 'r')
        # {tag: [decoded distinct values]} from /DIMENSIONS
        self._dimensions = {k: [dim_val(d) for d in self._h5f['DIMENSIONS'][k][...]] for k in self._h5f['DIMENSIONS']}
        # print(self._dimensions)
        self._links = pd.DataFrame(self._h5f['LINKS'][...], columns=LINK_TABLE_COLUMNS)
        self._models = self._h5f['META']['models'][...]
        if 'timeperiod' in self._h5f['META']:
            timesteps = [d for d in self._h5f['META']['timeperiod'][...]]
            if isinstance(timesteps[0], bytes):
                timesteps = [d.decode() for d in timesteps]
            self.time_period = pd.DatetimeIndex([pd.Timestamp.fromisoformat(d) for d in timesteps])
        self._parameteriser = None

    def _matches(self, model, **tags):
        """True if the model's run map has any run whose tag values match
        all of ``tags`` (False if a tag isn't a dimension of this model)."""
        model_dims = [d.decode() for d in self._h5f['MODELS'][model]['map'].attrs['DIMS']]
        # print(model_dims)
        lookup = {}
        for tag, value in tags.items():
            if not tag in model_dims:
                return False
            if not value in self._dimensions[tag]:
                return False
            lookup[tag] = self._dimensions[tag].index(value)
        idx = [lookup.get(d, slice(None, None)) for d in model_dims]
        # print(model,list(zip(model_dims,idx)))
        return np.any(self._h5f['MODELS'][model]['map'][tuple(idx)] > 0)

    def models_matching(self, **tags):
        """Names of model types that have at least one run matching ``tags``."""
        result = []
        for k in self._h5f['MODELS']:
            # print(k)
            if self._matches(k, **tags):
                result.append(k)
        return result

    def _map_model_dims(self, model):
        """Flatten a model's run 'map' into columns: one list per dimension
        tag plus '_run_idx', aligned across the model's runs."""
        model_map = self._h5f['MODELS'][model]['map'][...]
        m_dims = [dim_val(d) for d in self._h5f['MODELS'][model]['map'].attrs['DIMS']]
        dims = {d: self._h5f['DIMENSIONS'][d][...] for d in m_dims}
        dim_indices = list(zip(*np.where(model_map>=0)))#np.logical_not(np.isnan(model_map)))))
        def translate_dims(tpl):
            return [dim_val(dims[d][ix]) for d, ix in zip(m_dims, tpl)]
        dim_columns = [translate_dims(di)+[model_map[di]] for ix, di in enumerate(dim_indices) if model_map[di]>=0]
        return {d: [di[i] for di in dim_columns] for i, d in enumerate(m_dims+['_run_idx'])}

    def _raw_parameters(self, model, **tags):
        # Parameter table (one row per run) indexed by the model's tag dimensions
        vals = self._h5f['MODELS'][model]['parameters'][...]
        model_map = self._map_model_dims(model)
        df = pd.DataFrame(model_map)
        dim_cols = set(df.columns) - {'_run_idx'}
        df = df.set_index(list(dim_cols))
        param_df = pd.DataFrame(vals).transpose().reindex(index=df['_run_idx'])
        result = param_df.set_index(df.index)
        return result

    def parameters(self, model, **tags):
        """Tabulate the model's parameter values, optionally filtered by tags."""
        return _tabulate_model_scalars_from_file(self._h5f,
                                                 model,
                                                 self._map_model_dims(model),
                                                 'parameters',
                                                 **tags)

    def initial_states(self, model, **tags):
        """Tabulate the model's initial state values, optionally filtered by tags."""
        return _tabulate_model_scalars_from_file(self._h5f,
                                                 model,
                                                 self._map_model_dims(model),
                                                 'states',
                                                 **tags)

    def indexed_parameters(self, model, **tags):
        """Parameter table with named parameter columns (from the model
        description), filtered by ``tags``."""
        raw = self._raw_parameters(model, **tags)
        desc = getattr(node_types, model).description
        indexed = create_indexed_parameter_table(desc, raw)
        index_names = indexed.index.names
        indexed = indexed.reset_index()
        for k, v in tags.items():
            indexed = indexed[indexed[k]==v]
        indexed = indexed.set_index(index_names)
        return indexed

    def nodes_matching(self, model, **tags):
        """DataFrame of the model's runs (tag columns + _run_idx) whose tag
        values match all of ``tags``."""
        if hasattr(model, 'name'):
            model = model.name
        nodes = pd.DataFrame(self._map_model_dims(model))
        for tag, tag_val in tags.items():
            nodes = nodes[nodes[tag]==tag_val]
        return nodes

    def link_table(self):
        """
        Return the table of links between model nodes as a Data Frame,
        with model and variable indices translated back to names.
        """
        linkages = pd.DataFrame(self._h5f['LINKS'][...], columns=LINK_TABLE_COLUMNS)
        all_models = np.array([m.decode() for m in list(self._h5f['META']['models'][...])])
        descriptions = {mod: getattr(node_types, mod).description for mod in all_models}
        linkages.src_model = all_models[linkages.src_model]
        linkages.dest_model = all_models[linkages.dest_model]
        linkages.src_var = [descriptions[m]['Outputs'][v] for m, v in zip(linkages.src_model, linkages.src_var)]
        linkages.dest_var = [descriptions[m]['Inputs'][v] for m, v in zip(linkages.dest_model, linkages.dest_var)]
        return linkages

    def links_between(self, dest_mod=None, dest_var=None, src_mod=None, src_var=None, src_tags={}, dest_tags={}, annotate=True):
        """
        Identify the links between particular model graph nodes.

        Optionally (and by default), label src node and destination nodes by tags

        All parameters are optional. By default returns all links, with all tags.

        Parameters
        ----------
        dest_mod : string
            Destination model type (eg EmcDwc) and only show links to this model type
        dest_var : string
            Destination variable (eg inflow) and only show links to this variable
        src_mod: string
            Source model type (eg EmcDwc) and only show links from this model type
        src_var: string
            Source variable (eg outflow) and only show links from this variable
        src_tags: dict
            Only show links from graph nodes with all these tags
        dest_tags: dict
            Only show links to graph nodes with all these tags
        annotate: boolean
            Add source node and destination node tags as columns to the data frame
        """
        linkages = self.link_table()
        if dest_mod:
            linkages = linkages[linkages.dest_model==dest_mod]
            nodes = self.nodes_matching(dest_mod, **dest_tags)
            linkages = linkages[linkages.dest_node.isin(nodes._run_idx)]
        if dest_var:
            linkages = linkages[linkages.dest_var==dest_var]
        if src_mod:
            linkages = linkages[linkages.src_model==src_mod]
            nodes = self.nodes_matching(src_mod, **src_tags)
            linkages = linkages[linkages.src_node.isin(nodes._run_idx)]
        if src_var:
            linkages = linkages[linkages.src_var==src_var]
        if annotate:
            # Look up each endpoint's tag values from its model's run map
            model_maps = {m: pd.DataFrame(self._map_model_dims(m)) for m in set(linkages.src_model).union(linkages.dest_model)}
            def annotate_tbl(prefix):
                tag_names = set([c for m in set(linkages[f'{prefix}_model']) for c in model_maps[m].columns])-{'_run_idx'}
                for tag_name in tag_names:
                    col = f'{prefix}_{tag_name}'
                    rows = [model_maps[m][model_maps[m]._run_idx==n] for m, n in zip(linkages[f'{prefix}_model'], linkages[f'{prefix}_node'])]
                    linkages[col] = [row[tag_name].iloc[0] if tag_name in row else '-' for row in rows]
            annotate_tbl('src')
            annotate_tbl('dest')
        return linkages

    def close(self):
        self._h5f.close()
        self._h5f = None

    def write(self, clear_inputs=False):
        """Re-open the file read/write and apply the attached parameteriser
        to every model group, then re-open the file read-only."""
        try:
            self.close()
            import h5py
            self._h5f = h5py.File(self.filename, 'r+')
            if self._parameteriser is None:
                print('Nothing to do')
                return
            models_grp = self._h5f['MODELS']
            models = list(models_grp.keys())
            for m in models:
                print('Parameterising %s'%str(m))
                model_grp = models_grp[m]
                instances = model_grp['map'][...]
                dims = [dim_val(d) for d in model_grp['map'].attrs['DIMS']]
                dim_map = self._map_model_dims(m)
                # Synthesised node names (no graph available here)
                nodes = ['%s-%d'%(m, ix) for ix in range(len(dim_map[dims[0]]))]
                # dims,attributes,instances = self._map_process(model_nodes)
                model_meta = getattr(node_types, m)
                # for k,v in attributes.items():
                #     nodes_df[k] = v
                # full_dims = dict(**dims,**attributes)
                node_dict = {n: {d: vals[ix] for d, vals in dim_map.items()} for ix, n in enumerate(nodes)}
                nodes_df = pd.DataFrame({'node': nodes})
                for d, vals in dim_map.items():
                    nodes_df[d] = vals
                if clear_inputs and 'inputs' in model_grp:
                    del model_grp['inputs']
                # initialise parameters and states if they don't exist!
                self._parameteriser.parameterise(model_meta, model_grp, instances, dim_map, node_dict, nodes_df)
        finally:
            self.close()
            self._h5f = h5py.File(self.filename, 'r')

    def run(self, time_period, results_fn=None, **kwargs):
        '''
        Run ow-sim over this model file.

        kwargs: Arguments and flags to pass directly to ow-sim, including:

        * overwrite (boolean): Overwrite existing output file if it exists
        * verbose (boolean): Show verbose logging during simulation
        '''
        return _run(time_period, self.filename, results_fn, **kwargs)
def _run(time_period, model_fn=None, results_fn=None, **kwargs):
    '''
    Launch ow-sim over model_fn, streaming its stdout/stderr, and return an
    OpenwaterResults for the produced output file.

    kwargs: Arguments and flags to pass directly to ow-sim, including:

    * overwrite (boolean): Overwrite existing output file if it exists
    * verbose (boolean): Show verbose logging during simulation
    '''
    from openwater.discovery import _exe_path
    from openwater.results import OpenwaterResults
    if not results_fn:
        # Default output filename: '<model>_outputs<ext>' beside the model file
        base, ext = os.path.splitext(model_fn)
        results_fn = '%s_outputs%s'%(base, ext)
        print('INFO: No output filename provided. Writing to %s'%results_fn)
    cmd_line = [_exe_path('sim')]
    for k, v in kwargs.items():
        cmd_line += ow_sim_flag_text(k, v)
    cmd_line.append(model_fn),  # NOTE(review): stray trailing comma (harmless tuple expression)
    cmd_line.append(results_fn)
    # "%s %s %s %s"%(_exe_path('sim'),flags,model_fn,results_fn)
    logger.debug('Running with command line: %s', cmd_line)
    proc = Popen(cmd_line, stdout=PIPE, stderr=PIPE, bufsize=1, close_fds=ON_POSIX)
    # Drain stdout/stderr on daemon threads so the child never blocks on full pipes
    std_out_queue, std_out_thread = configure_non_blocking_io(proc, 'stdout')
    std_err_queue, std_err_thread = configure_non_blocking_io(proc, 'stderr')
    err = []
    out = []
    finished = False
    while not finished:
        if proc.poll() is not None:
            # Process exited: do one final drain of both queues before leaving
            finished = True
        end_stream = False
        while not end_stream:
            try:
                line = std_err_queue.get_nowait().decode('utf-8')
                err.append(line)
                print('ERROR %s'%(line,))
                sys.stdout.flush()
            except Empty:
                end_stream = True
        end_stream = False
        while not end_stream:
            try:
                line = std_out_queue.get_nowait().decode('utf-8')
                out.append(line)
                print(line)
                sys.stdout.flush()
            except Empty:
                end_stream = True
        sleep(0.05)
    assert proc.returncode==0
    return OpenwaterResults(model_fn, results_fn, time_period)
def run_simulation(model, output='model_outputs.h5', overwrite=False):
    """Shell out to ow-sim for ``model``, writing results to ``output``.

    Returns the os.system exit status. Prefer _run() for streamed output.
    """
    import openwater.discovery
    parts = ['%s/ow-sim'%openwater.discovery.OW_BIN]
    if overwrite:
        parts.append('-overwrite')
    parts.append(model)
    parts.append(output)
    return os.system(' '.join(parts))
def _enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def configure_non_blocking_io(proc, stream):
    """Start a daemon thread draining ``proc.<stream>`` into a Queue.

    Returns (queue, thread) so the caller can poll output without blocking.
    """
    line_queue = Queue()
    reader = Thread(target=_enqueue_output, args=(getattr(proc, stream), line_queue))
    reader.daemon = True
    reader.start()
    return line_queue, reader
def ow_sim_flag_text(k, v):
    """Render one keyword argument as ow-sim command-line token(s).

    Underscores in the key become dashes. Booleans become presence/absence
    flags; dicts become 'k:v' comma lists; other sequences become comma
    lists; everything else is stringified.
    """
    flag = '-' + k.replace('_', '-')
    if v == False:
        return []
    if v == True:
        return [flag]
    if hasattr(v, '__len__') and not isinstance(v, str):
        if hasattr(v, 'items'):
            rendered = ','.join('%s:%s'%(key, val) for key, val in v.items())
        else:
            rendered = ','.join(v)
        return [flag, rendered]
    return [flag, str(v)]
class InvalidFluxException(Exception):
    """Raised when a link references a flux name a node's model does not define."""

    def __init__(self, node, flux_name, flux_type):
        message = f'Invalid flux: Node ({node}) has no {flux_type} named {flux_name}'
        super().__init__(message)
        # Retain the offending details for programmatic handling
        self.node = node
        self.flux_type = flux_type
        self.flux_name = flux_name
| en | 0.645457 | # python 3.x # Non blocking IO solution from http://stackoverflow.com/a/4896288 # print('aliases do not match') # print('common key (%s) does not match (%s vs %s)'%(ck,o_node.tags[ck],i_node.tags[ck])) # Would be nice to have a short hand to define every input of this # node as an input to the graph (and similarly every output of a node # as an output of the graph # But the node currently stores the model name) # if hasattr(node_or_name,'model_type'): # self.nodes.append(node_or_name) # else: Generate a single, flat template containing all nested templates, instantiating links between nested templates based on input and output descriptions. When instantiating links, the order of the nested templates matters, with links only instantiated from outputs of earlier nested templates to inputs of later nested templates. # print('linking',previous_output,child_input) #unused_outputs = set(available_outputs).difference(set(used_outputs)) Add all nodes and links from other to this template, connecting all outputs from this template to inputs in other AND # class OWSystem(object): # def __init__(self): # self.nodes = [] # self.links = [] # def add_node(self,name,) Add all the nodes and links in an Openwater Template to a graph Parameters ---------- g: nx.DiGraph an existing graph object. 
If None, a new graph object will be created tpl: OWTemplate Openwater Template to add to the graph allow_duplicates: Whether to allow duplicate links (ie between the same two nodes and variables) or whether to throw an exception Defaults to False (ie raise exception on duplicate) tags: Additional tags to assign to all nodes in the template when adding to the graph Returns ------- nx.DiGraph The graph object passed in as g, or the new graph object created Raises ------ Exception when duplicate link encountered (unless allow_duplicates=True) # cache_queries = 0 # cache_misses = 0 # global cache_queries, cache_misses # cache_queries += 1 # cache_misses += 1 # return list(node_descendent_df[node_descendent_df[n]].index) # cache_misses += 1 # [n for n in stages[i+1] if not n in candidates] # set difference? # descendent_stages = np.array([node_stages[d] for d in descendants]) # earliest_descendent = descendent_stages.min() # return earliest_descendent - 1 # if descendent_stage == 0: # print(n,d,n_stages,current,descendent_stage,lowest) # if descendent_stage < lowest: # lowest = descendent_stage #shifts = 0 # def push_back_orig(g,stages): # first_small_stage = find_first_small_stage(stages) # # visited = {} # global shifts # node_stages = map_stages(stages) # count = 0 # nodes_downstream = 0 # for i in range(len(stages)-1,-1,-1): # stage_nodes = stages[i] # nodes_downstream += len(stage_nodes) # print(i) # for n in stage_nodes: # # if (n in visited) and visited[n]==i: # # # Node visited as an ancestor and not moved, so no reason to look further at ancestors # # continue # ancestors = ancestors_by_node[n] # for a in ancestors: # current_stage = node_stages[a] # # visited[a]=current_stage # if current_stage == (i-1): # continue # Already as late as possible # new_stage = latest_possible(g,a,len(stages),node_stages) # if new_stage==current_stage: # continue # shifts += 1 # stages[new_stage].append(a) # stages[current_stage].remove(a) # node_stages[a] = new_stage # 
#print(i,n,a,current_stage,new_stage) # #count += 1 # #assert(count<10) # stages = [s for s in stages if len(s)] # return stages # @profile # first_small_stage = find_first_small_stage(stages) # init_timer('map node stages') # close_timer() # global shifts # init_timer('stage %d'%i) # Node last visited as an ancestor and not moved, so no reason to look further at ancestors # Already as late as possible # shifts += 1 #stages[new_stage].append(a) #stages[current_stage].remove(a) #print(i,n,a,current_stage,new_stage) #count += 1 #assert(count<10) # close_timer() #, node_ancestry_df, node_descendent_df # report_time('create node ancestry dataframe for %d nodes'%len(ancestors_by_node)) # node_ancestry_df = pd.DataFrame(data=False,index=list(g.nodes),columns=list(g.nodes)) # for k,ancestors in ancestors_by_node.items(): # node_ancestry_df[k][ancestors] = True # node_descendent_df = node_ancestry_df.transpose() # return np.string_(re.match(re.compile('.*\(([\w\d]+)\)'),node_name)[1]) Sort a group of nodes by relevant criteria (Currently just name - but ultimately by tags in some way!) Assign run indices to each model run within a given process, p (eg 'rainfall runoff') # and nodes[n][TAG_PROCESS]==proc] ### BAD ASSUMPTION! kwargs: Arguments and fflags to pass directly to ow-sim, including: * overwrite (boolean): Overwrite existing output file if it exists * verbose (boolean): Show verbose logging during simulation #return -1 For a given model (eg 'GR4J'), organise all model runs by the parameterisation dimensions (eg catchment x hru) and assign indices # assert len(dimsets)==1 # don't support one process having different dimensions # Should at least support attributes (tags that only ever have one value) # list(dimsets)[0] # dim_values = {d:sorted({nodes[n][d] for n in node_set}) for d in dimensions} # dims = tags_by_process[p] # dimensions = [distinct_values[d] for d in dims] # assert(len(processes_for_model)==1) # not necessary? 
# write out model index # Compute, based on parameters... # batch_counts = [len(mc.get(m,[])) for mc in model_counts_by_generation] # instances.size # Init states.... # model_grp.create_dataset('inputs',shape=(n_cells,n_inputs,n_timesteps),dtype=np.float64,fillvalue=0) # print(self._dimensions) # print(model_dims) # print(model,list(zip(model_dims,idx))) # print(k) #np.logical_not(np.isnan(model_map))))) Return the table of links between model nodes as a Data Frame. Identify the links between particular model graph nodes. Optionally (and by default), label src node and destination nodes by tags All parameters are optional. By default returns all links, with all tags. Parameters ---------- dest_mod : string Destination model type (eg EmcDwc) and only show links to this model type dest_var : string Destination variable (eg inflow) and only show links to this variable src_mod: string Source model type (eg EmcDwc) and only show links from this model type src_var: string Source variable (eg outflow) and only show links from this variable src_tags: dict Only show links from graph nodes with all these tags dest_tags: dict Only show links to graph nodes with all these tags annotate: boolean Add source node and destination node tags as columns to the data frame # dims,attributes,instances = self._map_process(model_nodes) # for k,v in attributes.items(): # nodes_df[k] = v # full_dims = dict(**dims,**attributes) # initialise parameters and states if they don't exist! kwargs: Arguments and fflags to pass directly to ow-sim, including: * overwrite (boolean): Overwrite existing output file if it exists * verbose (boolean): Show verbose logging during simulation kwargs: Arguments and fflags to pass directly to ow-sim, including: * overwrite (boolean): Overwrite existing output file if it exists * verbose (boolean): Show verbose logging during simulation # "%s %s %s %s"%(_exe_path('sim'),flags,model_fn,results_fn) | 1.882855 | 2 |
utils.py | giuscri/thesis | 0 | 6620181 | <gh_stars>0
from binascii import hexlify
import os
import json
import pickle
def dump_pickle_to_file(obj, path):
dirname, basename = os.path.split(path)
os.makedirs(dirname, exist_ok=True)
with open(path, "wb") as f:
pickle.dump(obj, f)
def dump_json_to_file(obj, path):
dirname, basename = os.path.split(path)
os.makedirs(dirname, exist_ok=True)
with open(path, "w") as f:
json.dump(obj, f)
def load_pickle_from_file(path):
with open(path, "rb") as f:
return pickle.load(f)
def load_json_from_file(path):
with open(path, "r") as f:
return json.load(f)
| from binascii import hexlify
import os
import json
import pickle
def dump_pickle_to_file(obj, path):
dirname, basename = os.path.split(path)
os.makedirs(dirname, exist_ok=True)
with open(path, "wb") as f:
pickle.dump(obj, f)
def dump_json_to_file(obj, path):
dirname, basename = os.path.split(path)
os.makedirs(dirname, exist_ok=True)
with open(path, "w") as f:
json.dump(obj, f)
def load_pickle_from_file(path):
with open(path, "rb") as f:
return pickle.load(f)
def load_json_from_file(path):
with open(path, "r") as f:
return json.load(f) | none | 1 | 3.184002 | 3 | |
pyGMs/varset_py.py | ihler/pyGM | 7 | 6620182 | """
Pure python implementation of variables ("id" and state size) and variable sets (sorted lists of variables
"""
import numpy as np
from sortedcontainers import SortedSet as sset;
from functools import reduce
class Var(object):
" ""A basic discrete random variable; a pair, (label,#states) "" "
label = []
states = 0
def __init__(self, label, states):
self.label = label
self.states = states
def __repr__(self):
return "Var ({},{})".format(self.label,self.states)
def __str__(self):
return str(self.label)
def __lt__(self,that):
return self.label < int(that)
def __le__(self,that):
return self.label <= int(that)
def __gt__(self,that):
return self.label > int(that)
def __ge__(self,that):
return self.label >= int(that)
def __eq__(self,that): # Note tests only for equality of variable label, not states
return self.label == int(that)
def __ne__(self,that):
return not self.__eq__(that)
def __hash__(self):
return hash(self.label)
def __int__(self):
return self.label
def __index__(self):
return self.label
class VarSet(sset):
" ""Container for (sorted) set of variables; the arguments to a factor "" "
# TODO: switch to np.array1D pair (ids, states) (int/uint,uint)?
# using __get__ to return Var types
# use np.union1d, in1d, etc to manipulate
def dims(self):
return tuple(v.states for v in self) if len(self) else (1,)
def nvar(self): # also size?
return len(self)
def nrStates(self):
return reduce( lambda s,v: s*v.states, self, 1); # TODO: faster? slower?
def nrStatesDouble(self):
return reduce( lambda s,v: s*v.states, self, 1.0);
def __repr__(self):
return "{"+','.join(map(str,self))+'}'
def __str__(self):
return "{"+','.join(map(str,self))+'}'
def ind2sub(self,idx):
return np.unravel_index(idx,self.dims())
#return np.unravel_index(idx,self.dims(),order=orderMethod)
def sub2ind(self,sub):
return np.ravel_multi_index(sub,self.dims())
def __hash__(self):
return hash(tuple(v.label for v in self))
@property
def labels(self):
return [v.label for v in self]
def expand_dims(self, *iterables):
return tuple( tuple(map(lambda x:x.states if x in that else 1, self)) for that in iterables);
#dA = tuple(map(lambda x:x.states if x in A.v else 1 ,vall));
#dB = tuple(map(lambda x:x.states if x in B.v else 1 ,vall));
#return np.ravel_multi_index(sub,self.dims(),order=orderMethod)
# todo: needs set equality comparison? (inherited from sset?)
| """
Pure python implementation of variables ("id" and state size) and variable sets (sorted lists of variables
"""
import numpy as np
from sortedcontainers import SortedSet as sset;
from functools import reduce
class Var(object):
" ""A basic discrete random variable; a pair, (label,#states) "" "
label = []
states = 0
def __init__(self, label, states):
self.label = label
self.states = states
def __repr__(self):
return "Var ({},{})".format(self.label,self.states)
def __str__(self):
return str(self.label)
def __lt__(self,that):
return self.label < int(that)
def __le__(self,that):
return self.label <= int(that)
def __gt__(self,that):
return self.label > int(that)
def __ge__(self,that):
return self.label >= int(that)
def __eq__(self,that): # Note tests only for equality of variable label, not states
return self.label == int(that)
def __ne__(self,that):
return not self.__eq__(that)
def __hash__(self):
return hash(self.label)
def __int__(self):
return self.label
def __index__(self):
return self.label
class VarSet(sset):
" ""Container for (sorted) set of variables; the arguments to a factor "" "
# TODO: switch to np.array1D pair (ids, states) (int/uint,uint)?
# using __get__ to return Var types
# use np.union1d, in1d, etc to manipulate
def dims(self):
return tuple(v.states for v in self) if len(self) else (1,)
def nvar(self): # also size?
return len(self)
def nrStates(self):
return reduce( lambda s,v: s*v.states, self, 1); # TODO: faster? slower?
def nrStatesDouble(self):
return reduce( lambda s,v: s*v.states, self, 1.0);
def __repr__(self):
return "{"+','.join(map(str,self))+'}'
def __str__(self):
return "{"+','.join(map(str,self))+'}'
def ind2sub(self,idx):
return np.unravel_index(idx,self.dims())
#return np.unravel_index(idx,self.dims(),order=orderMethod)
def sub2ind(self,sub):
return np.ravel_multi_index(sub,self.dims())
def __hash__(self):
return hash(tuple(v.label for v in self))
@property
def labels(self):
return [v.label for v in self]
def expand_dims(self, *iterables):
return tuple( tuple(map(lambda x:x.states if x in that else 1, self)) for that in iterables);
#dA = tuple(map(lambda x:x.states if x in A.v else 1 ,vall));
#dB = tuple(map(lambda x:x.states if x in B.v else 1 ,vall));
#return np.ravel_multi_index(sub,self.dims(),order=orderMethod)
# todo: needs set equality comparison? (inherited from sset?)
| en | 0.533897 | Pure python implementation of variables ("id" and state size) and variable sets (sorted lists of variables #states) "" " # Note tests only for equality of variable label, not states # TODO: switch to np.array1D pair (ids, states) (int/uint,uint)? # using __get__ to return Var types # use np.union1d, in1d, etc to manipulate # also size? # TODO: faster? slower? #return np.unravel_index(idx,self.dims(),order=orderMethod) #dA = tuple(map(lambda x:x.states if x in A.v else 1 ,vall)); #dB = tuple(map(lambda x:x.states if x in B.v else 1 ,vall)); #return np.ravel_multi_index(sub,self.dims(),order=orderMethod) # todo: needs set equality comparison? (inherited from sset?) | 3.322497 | 3 |
selenium_tests/__init__.py | Wassaf-Shahzad/micromasters | 32 | 6620183 | <reponame>Wassaf-Shahzad/micromasters<gh_stars>10-100
# pylint: disable=missing-docstring,invalid-name
default_app_config = 'selenium_tests.apps.SeleniumTestsConfig'
| # pylint: disable=missing-docstring,invalid-name
default_app_config = 'selenium_tests.apps.SeleniumTestsConfig' | en | 0.53368 | # pylint: disable=missing-docstring,invalid-name | 1.096652 | 1 |
code/pybricks-test01/main.py | pitdagosti/ARNEIS | 10 | 6620184 | from pybricks.parameters import Color, Port
from pybricks.pupdevices import ColorDistanceSensor, Motor
from pybricks.tools import wait
# Say hello :)
print("Hello, Pybricks!")
motor = Motor(Port.A)
# dial = Motor(Port.A)
sensor = ColorDistanceSensor(Port.B)
# Sensor light will be set to Color.GREEN when measuring distance
sensor.light.off()
# First, we'll move the dial to zero.
# dial.run_target(500, 0, Stop.COAST)
while True:
# Set the speed based on sensor distance
dist = sensor.distance()
sensor.light.on(Color.RED)
speed = dist * 5
if abs(speed) < 100:
speed = 0
print("dist=", dist, ", speed=", speed)
# speed = dial.angle() * 3
# Run motor at desired speed
motor.run(speed)
# Wait briefly, then repeat
wait(100)
# EOF
| from pybricks.parameters import Color, Port
from pybricks.pupdevices import ColorDistanceSensor, Motor
from pybricks.tools import wait
# Say hello :)
print("Hello, Pybricks!")
motor = Motor(Port.A)
# dial = Motor(Port.A)
sensor = ColorDistanceSensor(Port.B)
# Sensor light will be set to Color.GREEN when measuring distance
sensor.light.off()
# First, we'll move the dial to zero.
# dial.run_target(500, 0, Stop.COAST)
while True:
# Set the speed based on sensor distance
dist = sensor.distance()
sensor.light.on(Color.RED)
speed = dist * 5
if abs(speed) < 100:
speed = 0
print("dist=", dist, ", speed=", speed)
# speed = dial.angle() * 3
# Run motor at desired speed
motor.run(speed)
# Wait briefly, then repeat
wait(100)
# EOF
| en | 0.757643 | # Say hello :) # dial = Motor(Port.A) # Sensor light will be set to Color.GREEN when measuring distance # First, we'll move the dial to zero. # dial.run_target(500, 0, Stop.COAST) # Set the speed based on sensor distance # speed = dial.angle() * 3 # Run motor at desired speed # Wait briefly, then repeat # EOF | 3.441856 | 3 |
scripts/update-version.py | makamekm/script.elementum.jackett | 1 | 6620185 | <reponame>makamekm/script.elementum.jackett<filename>scripts/update-version.py
#!/usr/bin/env python3
import sys
from os import path, pardir
from xml.dom.minidom import parse
root = path.abspath(path.join(path.dirname(path.abspath(__file__)), pardir))
pretty_print = lambda d: '\n'.join([line for line in d.toprettyxml(indent=' ' * 2).split('\n') if line.strip()])
if __name__ == '__main__':
if len(sys.argv) != 2:
app = sys.argv[0]
print("usage: {} version".format(app))
print("")
print("example:")
print("\t{} 0.1.2".format(app))
sys.exit(1)
version = sys.argv[1]
if version[0:1] == 'v':
version = version[1:]
doc = parse(path.join(root, 'addon.xml'))
doc.getElementsByTagName("addon")[0].setAttribute('version', version)
print(pretty_print(doc))
| #!/usr/bin/env python3
import sys
from os import path, pardir
from xml.dom.minidom import parse
root = path.abspath(path.join(path.dirname(path.abspath(__file__)), pardir))
pretty_print = lambda d: '\n'.join([line for line in d.toprettyxml(indent=' ' * 2).split('\n') if line.strip()])
if __name__ == '__main__':
if len(sys.argv) != 2:
app = sys.argv[0]
print("usage: {} version".format(app))
print("")
print("example:")
print("\t{} 0.1.2".format(app))
sys.exit(1)
version = sys.argv[1]
if version[0:1] == 'v':
version = version[1:]
doc = parse(path.join(root, 'addon.xml'))
doc.getElementsByTagName("addon")[0].setAttribute('version', version)
print(pretty_print(doc)) | fr | 0.221828 | #!/usr/bin/env python3 | 2.510178 | 3 |
Linux-Operation0605/app/core/templatetags/tags.py | zhouli121018/nodejsgm | 0 | 6620186 | <filename>Linux-Operation0605/app/core/templatetags/tags.py<gh_stars>0
# coding=utf-8
import datetime
from django import template
from app.core.models import Domain
from app.utils.domain_session import get_domainid_bysession
register = template.Library()
@register.filter
def int2datetime(t):
try:
return datetime.datetime.fromtimestamp(float(t)).strftime("%Y-%m-%d %H:%M:%S") if t else '-'
except:
return t
@register.filter
def float2percent(t):
return '%.2f' % t if isinstance(t, float) else '-'
@register.filter
def list_sum(list, key):
return sum([l.get(key, 0) for l in list])
@register.filter
def preview_check(filname):
# allow_suffix = ( 'jpg', 'jpeg', 'png', 'gif', 'bmp', 'tif', 'tiff', 'xbm', 'xpm',
# 'doc', 'docx', 'dot', 'dotx',
# 'ppt', 'pptx', 'pps', 'ppsx', 'pot', 'potx',
# 'xls', 'xlsx', 'xlt', 'xltx'
# )
allow_suffix = ( 'jpg', 'jpeg', 'png', 'gif', 'bmp')
suffix = filname.split('.')[-1]
suffix = suffix.lower()
return suffix in allow_suffix
@register.filter
def smooth_timedelta(timedeltaobj):
"""Convert a datetime.timedelta object into Days, Hours, Minutes, Seconds."""
secs = timedeltaobj.total_seconds()
timetot = ""
if secs > 86400: # 60sec * 60min * 24hrs
days = secs // 86400
timetot += "{} 天".format(int(days))
secs = secs - days*86400
if secs > 3600:
hrs = secs // 3600
timetot += " {} 小时".format(int(hrs))
secs = secs - hrs*3600
if secs > 60:
mins = secs // 60
timetot += " {} 分钟".format(int(mins))
secs = secs - mins*60
if secs > 0:
timetot += " {} 秒".format(int(secs))
return timetot
@register.inclusion_tag('switch_domain.html')
def switch_domain(request):
domain_list = Domain.objects.filter(disabled='-1')
domain_id = get_domainid_bysession(request)
return {
'domain_list': domain_list,
'domain_id': domain_id
}
| <filename>Linux-Operation0605/app/core/templatetags/tags.py<gh_stars>0
# coding=utf-8
import datetime
from django import template
from app.core.models import Domain
from app.utils.domain_session import get_domainid_bysession
register = template.Library()
@register.filter
def int2datetime(t):
try:
return datetime.datetime.fromtimestamp(float(t)).strftime("%Y-%m-%d %H:%M:%S") if t else '-'
except:
return t
@register.filter
def float2percent(t):
return '%.2f' % t if isinstance(t, float) else '-'
@register.filter
def list_sum(list, key):
return sum([l.get(key, 0) for l in list])
@register.filter
def preview_check(filname):
# allow_suffix = ( 'jpg', 'jpeg', 'png', 'gif', 'bmp', 'tif', 'tiff', 'xbm', 'xpm',
# 'doc', 'docx', 'dot', 'dotx',
# 'ppt', 'pptx', 'pps', 'ppsx', 'pot', 'potx',
# 'xls', 'xlsx', 'xlt', 'xltx'
# )
allow_suffix = ( 'jpg', 'jpeg', 'png', 'gif', 'bmp')
suffix = filname.split('.')[-1]
suffix = suffix.lower()
return suffix in allow_suffix
@register.filter
def smooth_timedelta(timedeltaobj):
"""Convert a datetime.timedelta object into Days, Hours, Minutes, Seconds."""
secs = timedeltaobj.total_seconds()
timetot = ""
if secs > 86400: # 60sec * 60min * 24hrs
days = secs // 86400
timetot += "{} 天".format(int(days))
secs = secs - days*86400
if secs > 3600:
hrs = secs // 3600
timetot += " {} 小时".format(int(hrs))
secs = secs - hrs*3600
if secs > 60:
mins = secs // 60
timetot += " {} 分钟".format(int(mins))
secs = secs - mins*60
if secs > 0:
timetot += " {} 秒".format(int(secs))
return timetot
@register.inclusion_tag('switch_domain.html')
def switch_domain(request):
domain_list = Domain.objects.filter(disabled='-1')
domain_id = get_domainid_bysession(request)
return {
'domain_list': domain_list,
'domain_id': domain_id
}
| en | 0.160748 | # coding=utf-8 # allow_suffix = ( 'jpg', 'jpeg', 'png', 'gif', 'bmp', 'tif', 'tiff', 'xbm', 'xpm', # 'doc', 'docx', 'dot', 'dotx', # 'ppt', 'pptx', 'pps', 'ppsx', 'pot', 'potx', # 'xls', 'xlsx', 'xlt', 'xltx' # ) Convert a datetime.timedelta object into Days, Hours, Minutes, Seconds. # 60sec * 60min * 24hrs | 2.096068 | 2 |
OLD/REQ.py | sarisabban/btc | 1 | 6620187 | #!/usr/bin/python3
import urllib.request, time , json , os
#Inputs
buy_price = 0.00000723
profit_percent = 25
#Selling Target
Sell = ((buy_price * profit_percent) / 100) + buy_price
Sell = f'{Sell:.9f}'
print('For ' + str(profit_percent) + '% profit sell at:\t' + Sell)
#Live Update
while True:
web = urllib.request.urlopen('https://www.binance.com/api/v1/ticker/allPrices')
for line in web:
line = line.decode()
line = json.loads(line)
#Coin Price
price = line[81]['price']
#Calculations
Gain_percent = round((((float(price) - buy_price) / buy_price) * 100) , 2)
#Output
print('REQ/BTC = ' + price , '\tGain/Loss = ' + str(Gain_percent) + '%' , end = '\r')
if Gain_percent >= profit_percent:
os.system('( speaker-test -t sine -f 1000 )& pid=$! ; sleep 0.1s ; kill -9 $pid')
else:
continue
time.sleep(1)
| #!/usr/bin/python3
import urllib.request, time , json , os
#Inputs
buy_price = 0.00000723
profit_percent = 25
#Selling Target
Sell = ((buy_price * profit_percent) / 100) + buy_price
Sell = f'{Sell:.9f}'
print('For ' + str(profit_percent) + '% profit sell at:\t' + Sell)
#Live Update
while True:
web = urllib.request.urlopen('https://www.binance.com/api/v1/ticker/allPrices')
for line in web:
line = line.decode()
line = json.loads(line)
#Coin Price
price = line[81]['price']
#Calculations
Gain_percent = round((((float(price) - buy_price) / buy_price) * 100) , 2)
#Output
print('REQ/BTC = ' + price , '\tGain/Loss = ' + str(Gain_percent) + '%' , end = '\r')
if Gain_percent >= profit_percent:
os.system('( speaker-test -t sine -f 1000 )& pid=$! ; sleep 0.1s ; kill -9 $pid')
else:
continue
time.sleep(1)
| en | 0.502171 | #!/usr/bin/python3 #Inputs #Selling Target #Live Update #Coin Price #Calculations #Output | 2.931093 | 3 |
src/yabc/app/__main__.py | robertkarl/yabc | 14 | 6620188 | import yabc
app = yabc.app.create_app()
app.run()
| import yabc
app = yabc.app.create_app()
app.run()
| none | 1 | 1.243544 | 1 | |
dographviz.py | judge2020/crossover-viz | 0 | 6620189 | <filename>dographviz.py
import json
from main import extract_data
import graphviz
already_pulled = []
already_done_combos = []
def getsafe(txt):
return ''.join([i if ord(i) < 128 else ' ' for i in txt])
if __name__ == '__main__':
print("parsing...")
out = extract_data('CrossoverWiki.xml')
print("parsed")
dot = graphviz.Digraph(comment='Fortnite multiverse')
def dopulls(title):
if title in already_pulled:
return
print(f"pulling {title}")
already_pulled.append(title)
_wants = list(filter(lambda x: x['title'] == title, out))
if len(_wants) != 1:
# some titles don't have a dedicated page and thus no child relationships, so we don't pull any more via them
return
want = _wants[0]
for link in want['links']:
slug = '_X_'.join(sorted([link['with'], want['title']]))
if slug in already_done_combos:
continue
already_done_combos.append(slug)
dot.edge(getsafe(link['with']), getsafe(want['title']))
dopulls(link['with'])
dopulls('Fortnite')
with open("Output.txt", "w") as text_file:
text_file.write(dot.source)
| <filename>dographviz.py
import json
from main import extract_data
import graphviz
already_pulled = []
already_done_combos = []
def getsafe(txt):
return ''.join([i if ord(i) < 128 else ' ' for i in txt])
if __name__ == '__main__':
print("parsing...")
out = extract_data('CrossoverWiki.xml')
print("parsed")
dot = graphviz.Digraph(comment='Fortnite multiverse')
def dopulls(title):
if title in already_pulled:
return
print(f"pulling {title}")
already_pulled.append(title)
_wants = list(filter(lambda x: x['title'] == title, out))
if len(_wants) != 1:
# some titles don't have a dedicated page and thus no child relationships, so we don't pull any more via them
return
want = _wants[0]
for link in want['links']:
slug = '_X_'.join(sorted([link['with'], want['title']]))
if slug in already_done_combos:
continue
already_done_combos.append(slug)
dot.edge(getsafe(link['with']), getsafe(want['title']))
dopulls(link['with'])
dopulls('Fortnite')
with open("Output.txt", "w") as text_file:
text_file.write(dot.source)
| en | 0.975698 | # some titles don't have a dedicated page and thus no child relationships, so we don't pull any more via them | 2.80637 | 3 |
comaf/apps/workspace/admin.py | nanchenchen/lsst-new-showMAF | 0 | 6620190 | from django.contrib import admin
import comaf.apps.workspace.models as workplace
# Register your models here.
admin.site.register(workplace.SpaceView)
admin.site.register(workplace.MemberSpace)
admin.site.register(workplace.WorkSpace) | from django.contrib import admin
import comaf.apps.workspace.models as workplace
# Register your models here.
admin.site.register(workplace.SpaceView)
admin.site.register(workplace.MemberSpace)
admin.site.register(workplace.WorkSpace) | en | 0.968259 | # Register your models here. | 1.305324 | 1 |
foobartory/config.py | NSpehler/foobartory | 0 | 6620191 | <reponame>NSpehler/foobartory
ROBOT_MIN = 2
ROBOT_MAX = 30
ROBOT_COST_FOOS = 6
ROBOT_COST_EUROS = 3
ROBOT_CHANGE_ACTIVITY_TIME = 5
FOO_MINING_TIME = 1
BAR_MINING_TIME_MIN = 0.5
BAR_MINING_TIME_MAX = 2
FOOBAR_ASSEMBLY_TIME = 2
FOOBAR_ASSEMBLY_SUCCESS_RATE = 0.6
FOOBAR_SELL_MAX = 5
FOOBAR_SELL_TIME = 10
FOOBAR_SELL_PRICE = 1
DURATION_MODIFIER = 0.01 | ROBOT_MIN = 2
ROBOT_MAX = 30
ROBOT_COST_FOOS = 6
ROBOT_COST_EUROS = 3
ROBOT_CHANGE_ACTIVITY_TIME = 5
FOO_MINING_TIME = 1
BAR_MINING_TIME_MIN = 0.5
BAR_MINING_TIME_MAX = 2
FOOBAR_ASSEMBLY_TIME = 2
FOOBAR_ASSEMBLY_SUCCESS_RATE = 0.6
FOOBAR_SELL_MAX = 5
FOOBAR_SELL_TIME = 10
FOOBAR_SELL_PRICE = 1
DURATION_MODIFIER = 0.01 | none | 1 | 1.134412 | 1 | |
model-builder/skr_objc_builder/objc_enum.py | DaYeSquad/worktilerwdemo | 5 | 6620192 | <filename>model-builder/skr_objc_builder/objc_enum.py
from skrutil import string_utils
_CPP_BR = '\n\n'
_OBJC_SPACE = ' '
class ObjcEnum:
"""Represents Objective-C++ enum.
"""
def __init__(self, enum_class_name):
self.enum_class_name = enum_class_name
self.int_alias_tuple_list = []
def append(self, int_value, alias):
self.int_alias_tuple_list.append((int_value, alias))
def generate_objc_enum(self, class_name, config):
objc_enum = ''
objc_enum += 'typedef NS_ENUM(NSUInteger, {2}{0}{1}) {{\n'\
.format(class_name, self.enum_class_name, config.objc_prefix)
for int_alias_tuple in self.int_alias_tuple_list:
objc_enum += _OBJC_SPACE + '{4}{2}{3}{0} = {1},\n'\
.format(string_utils.cpp_enum_class_name_to_objc_enum_class_name(int_alias_tuple[1]),
int_alias_tuple[0],
class_name,
self.enum_class_name,
config.objc_prefix)
objc_enum += '};\n'
return objc_enum
| <filename>model-builder/skr_objc_builder/objc_enum.py
from skrutil import string_utils
_CPP_BR = '\n\n'
_OBJC_SPACE = ' '
class ObjcEnum:
"""Represents Objective-C++ enum.
"""
def __init__(self, enum_class_name):
self.enum_class_name = enum_class_name
self.int_alias_tuple_list = []
def append(self, int_value, alias):
self.int_alias_tuple_list.append((int_value, alias))
def generate_objc_enum(self, class_name, config):
objc_enum = ''
objc_enum += 'typedef NS_ENUM(NSUInteger, {2}{0}{1}) {{\n'\
.format(class_name, self.enum_class_name, config.objc_prefix)
for int_alias_tuple in self.int_alias_tuple_list:
objc_enum += _OBJC_SPACE + '{4}{2}{3}{0} = {1},\n'\
.format(string_utils.cpp_enum_class_name_to_objc_enum_class_name(int_alias_tuple[1]),
int_alias_tuple[0],
class_name,
self.enum_class_name,
config.objc_prefix)
objc_enum += '};\n'
return objc_enum
| en | 0.321884 | Represents Objective-C++ enum. | 2.602188 | 3 |
backend/venv/Lib/site-packages/gitweb.py | analurandis/Tur | 0 | 6620193 | '''
Module provides WSGI-based methods for handling HTTP Get and Post requests that
are specific only to git-http-backend's Smart HTTP protocol.
Copyright (c) 2011 <NAME> <<EMAIL>>
This file is part of GitWeb Project.
GitWeb Project is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2.1 of the License, or
(at your option) any later version.
GitWeb Project is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GitWeb Project. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
import socket
import logging
import subprocess
import subprocessio
from webob import Request, Response, exc
log = logging.getLogger(__name__)
class FileWrapper(object):
def __init__(self, fd, content_length):
self.fd = fd
self.content_length = content_length
self.remain = content_length
def read(self, size):
if size <= self.remain:
try:
data = self.fd.read(size)
except socket.error:
raise IOError(self)
self.remain -= size
elif self.remain:
data = self.fd.read(self.remain)
self.remain = 0
else:
data = None
return data
def __repr__(self):
return '<FileWrapper %s len: %s, read: %s>' % (
self.fd, self.content_length, self.content_length - self.keep)
class GitRepository(object):
git_folder_signature = set(['config', 'head', 'info', 'objects', 'refs'])
commands = ['git-upload-pack', 'git-receive-pack']
def __init__(self, content_path):
files = set([f.lower() for f in os.listdir(content_path)])
assert self.git_folder_signature.intersection(files) == self.git_folder_signature, content_path
self.content_path = content_path
self.valid_accepts = ['application/x-%s-result' % c for c in self.commands]
def inforefs(self, request, environ):
"""WSGI Response producer for HTTP GET Git Smart HTTP /info/refs request."""
git_command = request.GET['service']
if git_command not in self.commands:
return exc.HTTPMethodNotAllowed()
# note to self:
# please, resist the urge to add '\n' to git capture and increment line count by 1.
# The code in Git client not only does NOT need '\n', but actually blows up
# if you sprinkle "flush" (0000) as "0001\n".
# It reads binary, per number of bytes specified.
# if you do add '\n' as part of data, count it.
smart_server_advert = '# service=%s' % git_command
try:
out = subprocessio.SubprocessIOChunker(
r'git %s --stateless-rpc --advertise-refs "%s"' % (git_command[4:], self.content_path),
starting_values = [ str(hex(len(smart_server_advert)+4)[2:].rjust(4,'0') + smart_server_advert + '0000') ]
)
except EnvironmentError, e:
raise exc.HTTPExpectationFailed()
resp = Response()
resp.content_type = 'application/x-%s-advertisement' % str(git_command)
resp.app_iter = out
return resp
def backend(self, request, environ):
"""
WSGI Response producer for HTTP POST Git Smart HTTP requests.
Reads commands and data from HTTP POST's body.
returns an iterator obj with contents of git command's response to stdout
"""
git_command = request.path_info.strip('/')
if git_command not in self.commands:
return exc.HTTPMethodNotAllowed()
if 'CONTENT_LENGTH' in environ:
inputstream = FileWrapper(environ['wsgi.input'], request.content_length)
else:
inputstream = environ['wsgi.input']
try:
out = subprocessio.SubprocessIOChunker(
r'git %s --stateless-rpc "%s"' % (git_command[4:], self.content_path),
inputstream = inputstream
)
except EnvironmentError, e:
raise exc.HTTPExpectationFailed()
if git_command in [u'git-receive-pack']:
# updating refs manually after each push. Needed for pre-1.7.0.4 git clients using regular HTTP mode.
subprocess.call(u'git --git-dir "%s" update-server-info' % self.content_path, shell=True)
resp = Response()
resp.content_type = 'application/x-%s-result' % git_command.encode('utf8')
resp.app_iter = out
return resp
def __call__(self, environ, start_response):
request = Request(environ)
if request.path_info.startswith('/info/refs'):
app = self.inforefs
elif [a for a in self.valid_accepts if a in request.accept]:
app = self.backend
try:
resp = app(request, environ)
except exc.HTTPException, e:
resp = e
log.exception(e)
except Exception, e:
log.exception(e)
resp = exc.HTTPInternalServerError()
start_response(resp.status, resp.headers.items())
return resp.app_iter
class GitDirectory(object):
repository_app = GitRepository
def __init__(self, content_path, auto_create=True, **kwargs):
if not os.path.isdir(content_path):
if auto_create:
os.makedirs(content_path)
else:
raise OSError(content_path)
self.content_path = content_path
self.auto_create = auto_create
if 'pre_clone_hook' in kwargs:
self.pre_clone_hook = kwargs['pre_clone_hook']
if 'post_clone_hook' in kwargs:
self.pre_clone_hook = kwargs['post_clone_hook']
def pre_clone_hook(self, content_path, request):
pass
def post_clone_hook(self, content_path, request):
pass
def __call__(self, environ, start_response):
request = Request(environ)
repo_name = request.path_info_pop()
if not repo_name.endswith('.git'):
return exc.HTTPNotFound()(environ, start_response)
content_path = os.path.realpath(os.path.join(self.content_path, repo_name))
if self.content_path not in content_path:
return exc.HTTPForbidden()(environ, start_response)
try:
app = GitRepository(content_path)
except (AssertionError, OSError):
if os.path.isdir(content_path):
app = self.repository_app(content_path)
else:
if self.auto_create and 'application/x-git-receive-pack-result' in request.accept:
try:
self.pre_clone_hook(content_path, request)
subprocess.call(u'git init --quiet --bare "%s"' % content_path, shell=True)
self.post_clone_hook(content_path, request)
except exc.HTTPException, e:
return e(environ, start_response)
app = self.repository_app(content_path)
else:
return exc.HTTPNotFound()(environ, start_response)
return app(environ, start_response)
def make_app(global_config, content_path='', **local_config):
if 'content_path' in global_config:
content_path = global_config['content_path']
return GitRepository(content_path)
def make_dir_app(global_config, content_path='', auto_create=None, **local_config):
if 'content_path' in global_config:
content_path = global_config['content_path']
return GitDirectory(content_path, auto_create=auto_create)
| '''
Module provides WSGI-based methods for handling HTTP Get and Post requests that
are specific only to git-http-backend's Smart HTTP protocol.
Copyright (c) 2011 <NAME> <<EMAIL>>
This file is part of GitWeb Project.
GitWeb Project is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2.1 of the License, or
(at your option) any later version.
GitWeb Project is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GitWeb Project. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
import socket
import logging
import subprocess
import subprocessio
from webob import Request, Response, exc
log = logging.getLogger(__name__)
class FileWrapper(object):
def __init__(self, fd, content_length):
self.fd = fd
self.content_length = content_length
self.remain = content_length
def read(self, size):
if size <= self.remain:
try:
data = self.fd.read(size)
except socket.error:
raise IOError(self)
self.remain -= size
elif self.remain:
data = self.fd.read(self.remain)
self.remain = 0
else:
data = None
return data
def __repr__(self):
return '<FileWrapper %s len: %s, read: %s>' % (
self.fd, self.content_length, self.content_length - self.keep)
class GitRepository(object):
git_folder_signature = set(['config', 'head', 'info', 'objects', 'refs'])
commands = ['git-upload-pack', 'git-receive-pack']
def __init__(self, content_path):
files = set([f.lower() for f in os.listdir(content_path)])
assert self.git_folder_signature.intersection(files) == self.git_folder_signature, content_path
self.content_path = content_path
self.valid_accepts = ['application/x-%s-result' % c for c in self.commands]
def inforefs(self, request, environ):
"""WSGI Response producer for HTTP GET Git Smart HTTP /info/refs request."""
git_command = request.GET['service']
if git_command not in self.commands:
return exc.HTTPMethodNotAllowed()
# note to self:
# please, resist the urge to add '\n' to git capture and increment line count by 1.
# The code in Git client not only does NOT need '\n', but actually blows up
# if you sprinkle "flush" (0000) as "0001\n".
# It reads binary, per number of bytes specified.
# if you do add '\n' as part of data, count it.
smart_server_advert = '# service=%s' % git_command
try:
out = subprocessio.SubprocessIOChunker(
r'git %s --stateless-rpc --advertise-refs "%s"' % (git_command[4:], self.content_path),
starting_values = [ str(hex(len(smart_server_advert)+4)[2:].rjust(4,'0') + smart_server_advert + '0000') ]
)
except EnvironmentError, e:
raise exc.HTTPExpectationFailed()
resp = Response()
resp.content_type = 'application/x-%s-advertisement' % str(git_command)
resp.app_iter = out
return resp
def backend(self, request, environ):
"""
WSGI Response producer for HTTP POST Git Smart HTTP requests.
Reads commands and data from HTTP POST's body.
returns an iterator obj with contents of git command's response to stdout
"""
git_command = request.path_info.strip('/')
if git_command not in self.commands:
return exc.HTTPMethodNotAllowed()
if 'CONTENT_LENGTH' in environ:
inputstream = FileWrapper(environ['wsgi.input'], request.content_length)
else:
inputstream = environ['wsgi.input']
try:
out = subprocessio.SubprocessIOChunker(
r'git %s --stateless-rpc "%s"' % (git_command[4:], self.content_path),
inputstream = inputstream
)
except EnvironmentError, e:
raise exc.HTTPExpectationFailed()
if git_command in [u'git-receive-pack']:
# updating refs manually after each push. Needed for pre-1.7.0.4 git clients using regular HTTP mode.
subprocess.call(u'git --git-dir "%s" update-server-info' % self.content_path, shell=True)
resp = Response()
resp.content_type = 'application/x-%s-result' % git_command.encode('utf8')
resp.app_iter = out
return resp
def __call__(self, environ, start_response):
request = Request(environ)
if request.path_info.startswith('/info/refs'):
app = self.inforefs
elif [a for a in self.valid_accepts if a in request.accept]:
app = self.backend
try:
resp = app(request, environ)
except exc.HTTPException, e:
resp = e
log.exception(e)
except Exception, e:
log.exception(e)
resp = exc.HTTPInternalServerError()
start_response(resp.status, resp.headers.items())
return resp.app_iter
class GitDirectory(object):
    """WSGI dispatcher serving a directory of bare git repositories.

    Maps ``/<name>.git/...`` to a per-repository WSGI app and, when
    ``auto_create`` is enabled, creates missing repositories on a
    smart-HTTP push.
    """

    repository_app = GitRepository

    def __init__(self, content_path, auto_create=True, **kwargs):
        if not os.path.isdir(content_path):
            if auto_create:
                os.makedirs(content_path)
            else:
                raise OSError(content_path)
        self.content_path = content_path
        self.auto_create = auto_create
        if 'pre_clone_hook' in kwargs:
            self.pre_clone_hook = kwargs['pre_clone_hook']
        if 'post_clone_hook' in kwargs:
            # Fix: this used to assign to self.pre_clone_hook (copy-paste bug),
            # silently clobbering the pre-hook and dropping the post-hook.
            self.post_clone_hook = kwargs['post_clone_hook']

    def pre_clone_hook(self, content_path, request):
        """Run before auto-creating a repository; raise an exc.HTTPException
        to veto creation. No-op by default."""
        pass

    def post_clone_hook(self, content_path, request):
        """Run after auto-creating a repository. No-op by default."""
        pass

    def __call__(self, environ, start_response):
        request = Request(environ)
        repo_name = request.path_info_pop()
        if not repo_name.endswith('.git'):
            return exc.HTTPNotFound()(environ, start_response)
        root = os.path.realpath(self.content_path)
        content_path = os.path.realpath(os.path.join(root, repo_name))
        # Confine the resolved repository path strictly under the root
        # directory. The previous substring test ("root in path") also
        # accepted sibling paths such as '<root>-evil/x.git'.
        if not content_path.startswith(root + os.sep):
            return exc.HTTPForbidden()(environ, start_response)
        try:
            app = GitRepository(content_path)
        except (AssertionError, OSError):
            if os.path.isdir(content_path):
                app = self.repository_app(content_path)
            elif self.auto_create and 'application/x-git-receive-pack-result' in request.accept:
                try:
                    self.pre_clone_hook(content_path, request)
                    # Argument-list form avoids the shell entirely, so a
                    # crafted repository name cannot inject shell syntax.
                    subprocess.call(['git', 'init', '--quiet', '--bare', content_path])
                    self.post_clone_hook(content_path, request)
                except exc.HTTPException as e:
                    return e(environ, start_response)
                app = self.repository_app(content_path)
            else:
                return exc.HTTPNotFound()(environ, start_response)
        return app(environ, start_response)
def make_app(global_config, content_path='', **local_config):
    """Paste app factory: build a single-repository GitRepository WSGI app.

    A 'content_path' entry in the global config takes precedence over the
    local keyword argument.
    """
    content_path = global_config.get('content_path', content_path)
    return GitRepository(content_path)
def make_dir_app(global_config, content_path='', auto_create=None, **local_config):
    """Paste app factory: build a multi-repository GitDirectory WSGI app.

    A 'content_path' entry in the global config takes precedence over the
    local keyword argument.
    """
    content_path = global_config.get('content_path', content_path)
    return GitDirectory(content_path, auto_create=auto_create)
| en | 0.860396 | Module provides WSGI-based methods for handling HTTP Get and Post requests that are specific only to git-http-backend's Smart HTTP protocol. Copyright (c) 2011 <NAME> <<EMAIL>> This file is part of GitWeb Project. GitWeb Project is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 2.1 of the License, or (at your option) any later version. GitWeb Project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with GitWeb Project. If not, see <http://www.gnu.org/licenses/>. WSGI Response producer for HTTP GET Git Smart HTTP /info/refs request. # note to self: # please, resist the urge to add '\n' to git capture and increment line count by 1. # The code in Git client not only does NOT need '\n', but actually blows up # if you sprinkle "flush" (0000) as "0001\n". # It reads binary, per number of bytes specified. # if you do add '\n' as part of data, count it. WSGI Response producer for HTTP POST Git Smart HTTP requests. Reads commands and data from HTTP POST's body. returns an iterator obj with contents of git command's response to stdout # updating refs manually after each push. Needed for pre-1.7.0.4 git clients using regular HTTP mode. | 2.263673 | 2 |
variables.py | justinXD/AI_CETI | 0 | 6620194 | <gh_stars>0
#declaramos variables de tipo string y de tipo numero entero
mensaje = "Hola mundo!!"
numero1 = 1000
numero2 = 1022
#sumamos los numeros
resultado = numero1 + numero2
#imprimimos el mensaje con un andalogo de los templates de JS
print(f'{mensaje}, año {resultado}')
#podemos imprimir usando también solo print(variable) | #declaramos variables de tipo string y de tipo numero entero
mensaje = "Hola mundo!!"
numero1 = 1000
numero2 = 1022
#sumamos los numeros
resultado = numero1 + numero2
#imprimimos el mensaje con un andalogo de los templates de JS
print(f'{mensaje}, año {resultado}')
#podemos imprimir usando también solo print(variable) | es | 0.933108 | #declaramos variables de tipo string y de tipo numero entero #sumamos los numeros #imprimimos el mensaje con un andalogo de los templates de JS #podemos imprimir usando también solo print(variable) | 3.516623 | 4 |
arduinocontroller.py | elpaso/arduinocontroller | 0 | 6620195 | <filename>arduinocontroller.py
# -*- coding: utf-8 -*-
from osv import fields, osv
# Add 1 because crappy OE doesn't distinguish empty from 0
# (i.e. these are pyfirmata's pin-mode constants shifted by one so that a
# stored value of 0 means "unset"; subtract 1 again before handing a mode
# to pyfirmata -- see _setup_board).
PINMODE_INPUT = 0+1
PINMODE_OUTPUT = 1+1
PINMODE_ANALOG = 2 +1
PINMODE_PWM = 3+1
PINMODE_SERVO = 4+1
# Inclusive (min, max) value accepted for each pin mode; used by in_range()
# to clamp/validate values before they are written to the board.
PIN_RANGE = {
    PINMODE_INPUT: (0, 1023),  # 10-bit analog reading
    PINMODE_OUTPUT: (0, 1),    # digital low/high
    PINMODE_ANALOG: (0, 1),
    PINMODE_PWM: (0, 255),     # 8-bit duty cycle
    PINMODE_SERVO: (0, 180),   # servo angle in degrees
}
DEBUG = True

if not DEBUG:
    def dbg(msg):
        """Debug logging disabled: silently discard *msg*."""
        pass
else:
    import logging
    logger = logging.getLogger(__name__)

    def dbg(msg):
        """Forward *msg* to the module logger at INFO level."""
        logger.info(msg)
class arduinocontroller_board(osv.osv):
    """Arduino board configuration plus live pin I/O via pyfirmata.

    Each record describes one board (serial device path + per-pin mode and
    value). Writing a record while 'online' pushes output-pin values to the
    physical board and reads input/analog pins back into the record.
    """

    # Open serial connections / background reader iterators, keyed by device
    # path and shared across records so each device is opened only once.
    device_store = {}
    device_iterator_store = {}

    digital_pindir_values = [(PINMODE_INPUT, 'Input'), (PINMODE_OUTPUT, 'Output')]
    pwm_pindir_values = [(PINMODE_INPUT, 'Input'), (PINMODE_OUTPUT, 'Output'), (PINMODE_PWM, 'PWM Output'), (PINMODE_SERVO, 'Servo Output')]

    _name = "arduinocontroller.board"
    _description = "Arduino board"
    _rec_name = 'device'

    # Pins supporting PWM/servo output on an Arduino Uno.
    _PWM_PINS = (3, 5, 6, 9, 10, 11)

    _columns = {
        'device': fields.char('Device', size=64, required=True),
        'model': fields.selection([('uno', 'Arduino uno')], 'Model', default='uno', required=True),
        'note' : fields.text('Notes'),
        'online': fields.boolean('Online'),
    }
    # Digital pins 2-13: a direction selector and an integer value per pin
    # (built in a loop instead of 24 hand-written, typo-prone entries).
    for _pin in range(2, 14):
        if _pin in _PWM_PINS:
            _columns['pind%ddir' % _pin] = fields.selection(pwm_pindir_values, 'Digital Pin %d direction (PWM)' % _pin)
        else:
            _columns['pind%ddir' % _pin] = fields.selection(digital_pindir_values, 'Digital Pin %d direction' % _pin)
        _columns['pind%dvalue' % _pin] = fields.integer('Digital Pin %d value' % _pin)
    # Analog pins 0-5: an active flag and a float value per pin.
    for _pin in range(0, 6):
        _columns['pina%dactive' % _pin] = fields.boolean('Analog Pin %d active' % _pin)
        _columns['pina%dvalue' % _pin] = fields.float('Analog Pin %d value' % _pin)
    del _pin

    _defaults = {
        'device': '/dev/ttyACM0',
        'model': 'uno',
    }

    def default_get(self, cr, uid, fields_list, context=None):
        """Inject per-pin defaults into the context: digital pins default to
        Output mode with value 0, analog pins to inactive."""
        if context is None:
            context = {}
        dbg('Default get called')
        # Digital pins
        for i in range(2, 14):
            context['default_pind%ddir' % i] = context.get('pind%ddir' % i, PINMODE_OUTPUT)
            context['default_pind%dvalue' % i] = context.get('pind%dvalue' % i, 0)
        # Analog pins
        for i in range(0, 6):
            context['default_pina%dactive' % i] = context.get('pina%dactive' % i, False)
        return super(arduinocontroller_board, self).default_get(cr, uid, fields_list, context)

    # Let's support multiple configurations for the same device!
    #_sql_constraints = [('unique_model_device', 'unique(model, device)', 'Model and device must be unique')]

    def in_range(self, pin, mode, value, fail_silently=True):
        """Clamp *value* into the legal range for pin *mode*.

        Input pins pass through unchecked. For out-of-range values: raise an
        osv error when ``fail_silently`` is False, otherwise return the
        nearest bound.
        """
        if mode == PINMODE_INPUT:
            return value
        bottom, top = PIN_RANGE[mode]
        res = bottom <= value <= top
        if not res and not fail_silently:
            raise osv.except_osv('Value is out of range', 'Please check that value for pin %s is in range %d-%d.' % (pin, bottom, top))
        if res:
            return value
        if value < bottom:
            return bottom
        return top

    def onchange_pin(self, cr, uid, ids, pin, mode, value):
        """On-change handler: pop an error dialog for out-of-range values."""
        self.in_range(pin, int(mode), value, False)
        return {'value': {pin: value}}

    def onchange_online(self, cr, uid, ids, online, device):
        """On-change handler for the 'online' flag: check that pyfirmata is
        importable and that the board answers, warning the user otherwise."""
        v = {}
        if ids and online:
            try:
                from pyfirmata import Arduino, util
            except ImportError:
                return {'warning' : {'title' : 'Attention!', 'message' : 'Pyfirmata is not installed, arduino operations are disabled. You can install pyfirmata from hg clone ssh://hg@bitbucket.org/tino/pyfirmata'}}
            board = self._get_board(device)
            if not board:
                return {'warning' : {'title' : 'Attention!', 'message' : 'Cannot communicate with Arduino, please check your connections and settings on device %s.' % device}}
        return {'value':v}

    def _setup_board(self, board, device, **kwargs):
        """Push pin modes/values from *kwargs* to the board and read inputs.

        @param board: open pyfirmata Arduino instance
        @param device: serial device path (key for the iterator cache)
        @return: dict of field values read back from the board
        """
        from pyfirmata import util
        # Start (once per device) the background iterator that drains the
        # serial buffer; without it pyfirmata pin reads never update.
        if device not in self.device_iterator_store:
            self.device_iterator_store[device] = util.Iterator(board)
            self.device_iterator_store[device].start()
        v = {}
        # Digital pins
        for i in range(2, 14):
            if 'pind%ddir' % i in kwargs and 'pind%dvalue' % i in kwargs:
                pinmode = int(kwargs['pind%ddir' % i])
                pinvalue = self.in_range(i, pinmode, kwargs['pind%dvalue' % i])
                board.digital[i].mode = pinmode - 1  # undo the +1 offset (see PINMODE_* above)
                dbg('DIGITAL %d Setting mode to : %d' % (i, pinmode))
                v['pind%dvalue' % i] = pinvalue
                if pinmode == PINMODE_INPUT:
                    v['pind%dvalue' % i] = board.digital[i].read()
                    dbg('DIGITAL %d reads %s' % (i, v['pind%dvalue' % i]))
                elif pinmode == PINMODE_PWM:
                    dbg('DIGITAL PWM %d writes %s' % (i, pinvalue))
                    board.digital[i].write(pinvalue / 255.0)  # pyfirmata PWM expects 0.0-1.0
                elif pinmode == PINMODE_SERVO:
                    dbg('DIGITAL SERVO %d writes %s' % (i, pinvalue))
                    board.digital[i].write(pinvalue)
                elif pinmode == PINMODE_OUTPUT:
                    dbg('DIGITAL OUTPUT %d writes %s' % (i, pinvalue))
                    board.digital[i].write(pinvalue)
        # Analog pins (read-only for now)
        for i in range(0, 6):
            if kwargs.get('pina%dactive' % i):
                board.analog[i].mode = PINMODE_INPUT - 1  # undo the +1 offset
                v['pina%dvalue' % i] = board.analog[i].read()
                dbg('ANALOG %d reads %s' % (i, v['pina%dvalue' % i]))
        return v

    def _get_board(self, device):
        """Return the cached pyfirmata connection for *device*, opening it on
        first use; return False when the serial port cannot be opened."""
        from pyfirmata import Arduino, util
        try:
            return self.device_store[device]
        except KeyError:
            try:
                board = Arduino(device)
                self.device_store[device] = board
                return board
            except util.serial.SerialException:
                return False

    def write(self, cr, uid, ids, vals, context=None):
        """
        Update record(s) in {ids} with {vals}; when the record is online,
        first sync the pin settings with the physical board and merge the
        values read back into {vals}.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: list of record ids to update
        @param vals: dict of new values to be set
        @param context: context arguments, like lang, time zone
        @return: True on success
        """
        from pyfirmata import util
        record = self.browse(cr, uid, ids[0], context=context)
        try:
            if record.online:
                # Effective pin settings = stored record values overridden by
                # the incoming vals. Fix: the old code used getattr() on the
                # vals *dict*, which never finds dict keys and only worked
                # because of a later parms.update(vals); dict.get() is what
                # was intended.
                parms = dict([(k, vals.get(k, getattr(record, k, None))) for k in self._columns if k.startswith('pin')])
                board = self._get_board(record.device)
                if board:
                    vals.update(self._setup_board(board, record.device, **parms))
        except util.serial.SerialException:
            raise osv.except_osv('Device is set online but cannot communicate with Arduino', 'Please check your connections and settings on device %s.' % record.device)
        return super(arduinocontroller_board, self).write(cr, uid, ids, vals, context=context)

    def refresh_board(self, cr, uid, ids, context=None):
        """ Re-read values from the board """
        # NOTE(review): this only verifies connectivity; the actual re-read
        # happens through write()/on-change -- confirm this is intended.
        record = self.browse(cr, uid, ids[0], context=context)
        if not record.online:
            raise osv.except_osv('Device is offline','Device is offline, check the online flag.')
        board = self._get_board(record.device)
        if not board:
            raise osv.except_osv('Cannot communicate with Arduino', 'Please check your connections and settings on device %s.' % record.device)
        return True

# Register the model with the OpenERP ORM.
arduinocontroller_board()
| <filename>arduinocontroller.py
# -*- coding: utf-8 -*-
from osv import fields, osv
# Add 1 because crappy OE doesn't distinguish empty from 0
PINMODE_INPUT = 0+1
PINMODE_OUTPUT = 1+1
PINMODE_ANALOG = 2 +1
PINMODE_PWM = 3+1
PINMODE_SERVO = 4+1
PIN_RANGE = {
PINMODE_INPUT: (0, 1023),
PINMODE_OUTPUT: (0, 1),
PINMODE_ANALOG: (0, 1),
PINMODE_PWM: (0, 255),
PINMODE_SERVO: (0, 180),
}
DEBUG=True
if DEBUG:
import logging
logger = logging.getLogger(__name__)
def dbg(msg):
logger.info(msg)
else:
def dbg(msg):
pass
class arduinocontroller_board(osv.osv):
""" Arduino board with configuration """
# store device connections
device_store = {}
device_iterator_store = {}
digital_pindir_values = [(PINMODE_INPUT, 'Input'), (PINMODE_OUTPUT, 'Output')]
pwm_pindir_values = [(PINMODE_INPUT, 'Input'), (PINMODE_OUTPUT, 'Output'), (PINMODE_PWM, 'PWM Output'), (PINMODE_SERVO, 'Servo Output')]
_name = "arduinocontroller.board"
_description = "Arduino board"
_rec_name = 'device'
_columns = {
'device': fields.char('Device', size=64, required=True),
'model': fields.selection([('uno', 'Arduino uno')], 'Model', default='uno', required=True),
'note' : fields.text('Notes'),
'online': fields.boolean('Online'),
# Digital
'pind2dir': fields.selection(digital_pindir_values, 'Digital Pin 2 direction'),
'pind3dir': fields.selection(pwm_pindir_values, 'Digital Pin 3 direction (PWM)'),
'pind4dir': fields.selection(digital_pindir_values, 'Digital Pin 4 direction'),
'pind5dir': fields.selection(pwm_pindir_values, 'Digital Pin 5 direction (PWM)'),
'pind6dir': fields.selection(pwm_pindir_values, 'Digital Pin 6 direction (PWM)'),
'pind7dir': fields.selection(digital_pindir_values, 'Digital Pin 7 direction'),
'pind8dir': fields.selection(digital_pindir_values, 'Digital Pin 8 direction'),
'pind9dir': fields.selection(pwm_pindir_values, 'Digital Pin 9 direction (PWM)'),
'pind10dir': fields.selection(pwm_pindir_values, 'Digital Pin 10 direction (PWM)'),
'pind11dir': fields.selection(pwm_pindir_values, 'Digital Pin 11 direction (PWM)'),
'pind12dir': fields.selection(digital_pindir_values, 'Digital Pin 12 direction'),
'pind13dir': fields.selection(digital_pindir_values, 'Digital Pin 13 direction'),
'pind2value': fields.integer('Digital Pin 2 value'),
'pind3value': fields.integer('Digital Pin 3 value'),
'pind4value': fields.integer('Digital Pin 4 value'),
'pind5value': fields.integer('Digital Pin 5 value'),
'pind6value': fields.integer('Digital Pin 6 value'),
'pind7value': fields.integer('Digital Pin 7 value'),
'pind8value': fields.integer('Digital Pin 8 value'),
'pind9value': fields.integer('Digital Pin 9 value'),
'pind10value': fields.integer('Digital Pin 10 value'),
'pind11value': fields.integer('Digital Pin 11 value'),
'pind12value': fields.integer('Digital Pin 12 value'),
'pind13value': fields.integer('Digital Pin 13 value'),
# Analog
'pina0active': fields.boolean('Analog Pin 0 active'),
'pina1active': fields.boolean('Analog Pin 1 active'),
'pina2active': fields.boolean('Analog Pin 2 active'),
'pina3active': fields.boolean('Analog Pin 3 active'),
'pina4active': fields.boolean('Analog Pin 4 active'),
'pina5active': fields.boolean('Analog Pin 5 active'),
'pina0value': fields.float('Analog Pin 0 value'),
'pina1value': fields.float('Analog Pin 1 value'),
'pina2value': fields.float('Analog Pin 2 value'),
'pina3value': fields.float('Analog Pin 3 value'),
'pina4value': fields.float('Analog Pin 4 value'),
'pina5value': fields.float('Analog Pin 5 value'),
}
_defaults = {
'device': '/dev/ttyACM0',
'model': 'uno',
}
def default_get(self, cr, uid, fields_list, context=None):
"""
Set defaults
"""
if context is None:
context = {}
dbg('Default get called')
# Digital pins
for i in range(2, 14):
context['default_pind%ddir' % i] = context.get('pind%ddir' % i, PINMODE_OUTPUT)
context['default_pind%dvalue' % i] = context.get('pind%dvalue' % i, 0)
# Analog pins
for i in range(0, 6):
context['default_pina%dactive' % i] = context.get('pina%dactive' % i, False)
v = super(arduinocontroller_board, self).default_get(cr, uid, fields_list, context)
return v
# Let's support multiple configurations for the same device!
#_sql_constraints = [('unique_model_device', 'unique(model, device)', 'Model and device must be unique')]
def in_range(self, pin, mode, value, fail_silently=True):
""" Constraints the var in range """
if mode == PINMODE_INPUT:
return value
bottom , top = PIN_RANGE[mode]
res = bottom <= value <= top
if not res and not fail_silently:
raise osv.except_osv('Value is out of range', 'Please check that value for pin %s is in range %d-%d.' % (pin, bottom, top))
if res:
return value
if value < bottom:
return bottom
return top
def onchange_pin(self, cr, uid, ids, pin, mode, value):
""" Check range """
self.in_range(pin, int(mode), value, False)
return {'value': {pin: value}}
def onchange_online(self, cr, uid, ids, online, device):
""" Connect to the device and report status """
v={}
if ids and online:
try:
from pyfirmata import Arduino, util
except ImportError:
return {'warning' : {'title' : 'Attention!', 'message' : 'Pyfirmata is not installed, arduino operations are disabled. You can install pyfirmata from hg clone ssh://hg@bitbucket.org/tino/pyfirmata'}}
board = self._get_board(device)
if not board:
return {'warning' : {'title' : 'Attention!', 'message' : 'Cannot communicate with Arduino, please check your connections and settings on device %s.' % device}}
return {'value':v}
def _setup_board(self, board, device, **kwargs):
"""
Set the board up and read/write values from the board
@return values
"""
from pyfirmata import Arduino, util
try:
self.device_iterator_store[device]
except:
self.device_iterator_store[device] = util.Iterator(board)
self.device_iterator_store[device].start()
v = {}
# Digital pins
for i in range(2, 14):
if 'pind%ddir' % i in kwargs and 'pind%dvalue' % i in kwargs:
try:
pinmode = int(kwargs['pind%ddir' % i])
pinvalue = kwargs['pind%dvalue' % i]
pinvalue = self.in_range(i, pinmode, pinvalue)
board.digital[i].mode = pinmode-1 # less 1: crappy OE
dbg('DIGITAL %d Setting mode to : %d' % (i, pinmode))
v['pind%dvalue' % i] = pinvalue
if pinmode == PINMODE_INPUT:
v['pind%dvalue' % i] = board.digital[i].read()
dbg('DIGITAL %d reads %s' % (i, v['pind%dvalue' % i]))
elif pinmode == PINMODE_PWM:
dbg('DIGITAL PWM %d writes %s' % (i, pinvalue))
board.digital[i].write(pinvalue/255.0) # 0-1
elif pinmode == PINMODE_SERVO:
dbg('DIGITAL SERVO %d writes %s' % (i, pinvalue))
board.digital[i].write(pinvalue)
elif pinmode == PINMODE_OUTPUT:
dbg('DIGITAL OUTPUT %d writes %s' % (i, pinvalue))
board.digital[i].write(pinvalue)
except:
raise
# Analog pins
# TODO: writing
for i in range(0, 6):
if 'pina%dactive' % i in kwargs and kwargs['pina%dactive' % i]:
board.analog[i].mode = PINMODE_INPUT-1 # less 1: crappy OE
try:
#board.analog[i].enable_reporting()
v['pina%dvalue' % i] = board.analog[i].read()
dbg('ANALOG %d reads %s' % (i, v['pina%dvalue' % i]))
except:
# TODO: better error handling
raise
else:
# TODO: something or delete the branch
pass
return v
def _get_board(self, device):
"""
Returns device connection, creates one if necessary
@returns boolean true if board is online
"""
from pyfirmata import Arduino, util
try:
return self.device_store[device]
except KeyError:
try:
board = Arduino(device)
self.device_store[device] = board
return board
except util.serial.SerialException:
return False
def write(self, cr, uid, ids, vals, context=None):
"""
Update record(s) exist in {ids}, with new value provided in {vals}
@param cr: A database cursor
@param user: ID of the user currently logged in
@param ids: list of record ids to update
@param vals: dict of new values to be set
@param context: context arguments, like lang, time zone
@return: Returns True on success, False otherwise
"""
from pyfirmata import util
record = self.browse(cr, uid, ids[0], context=context)
try:
if record.online:
# Merge values into vals
parms = dict([(k, getattr(vals, k, getattr(record, k, None))) for k in self._columns if k.startswith('pin')])
board = self._get_board(record.device)
if board:
parms.update(vals)
vals.update(self._setup_board(board, record.device, **parms))
except util.serial.SerialException:
raise osv.except_osv('Device is set online but cannot communicate with Arduino', 'Please check your connections and settings on device %s.' % record.device)
res = super(arduinocontroller_board, self).write(cr, uid, ids, vals, context=context)
return res
def refresh_board(self, cr, uid, ids, context=None):
""" Re-read values from the board """
record = self.browse(cr, uid, ids[0], context=context)
if not record.online:
raise osv.except_osv('Device is offline','Device is offline, check the online flag.')
board = self._get_board(record.device)
if not board:
raise osv.except_osv('Cannot communicate with Arduino', 'Please check your connections and settings on device %s.' % record.device)
return True
arduinocontroller_board()
| en | 0.680239 | # -*- coding: utf-8 -*- # Add 1 because crappy OE doesn't distinguish empty from 0 Arduino board with configuration # store device connections # Digital # Analog Set defaults # Digital pins # Analog pins # Let's support multiple configurations for the same device! #_sql_constraints = [('unique_model_device', 'unique(model, device)', 'Model and device must be unique')] Constraints the var in range Check range Connect to the device and report status Set the board up and read/write values from the board @return values # Digital pins # less 1: crappy OE # 0-1 # Analog pins # TODO: writing # less 1: crappy OE #board.analog[i].enable_reporting() # TODO: better error handling # TODO: something or delete the branch Returns device connection, creates one if necessary @returns boolean true if board is online Update record(s) exist in {ids}, with new value provided in {vals} @param cr: A database cursor @param user: ID of the user currently logged in @param ids: list of record ids to update @param vals: dict of new values to be set @param context: context arguments, like lang, time zone @return: Returns True on success, False otherwise # Merge values into vals Re-read values from the board | 2.976466 | 3 |
fairseq/fed_utils.py | zjumml/multilingual-kd-pytorch | 77 | 6620196 | import glob
import hashlib
import os
import torch
from tqdm import tqdm
from fairseq import utils, distributed_utils
import numpy as np
import ujson as json
from fairseq.data.indexed_dataset import IndexedDatasetBuilder, IndexedCachedDataset
FED_VERSION_FN = 'fed_version.v3.idx'
def dist2topk(out_dist, k):
    """Keep only the k most probable entries of a distribution, renormalised.

    Returns ``(indices, probs)``, both flattened to shape ``(B*T, k)``;
    ``probs`` sums to 1 along the last dimension.
    """
    probs, idx = torch.topk(out_dist, k, dim=-1)
    idx = idx.reshape(-1, k)        # (B*T, k)
    probs = probs.reshape(-1, k)    # (B*T, k)
    probs = probs / probs.sum(dim=1, keepdim=True)
    return idx, probs
def output2topk(output, k):
    """Top-k raw output values and their indices, flattened to ``(B*T, k)``.

    Unlike :func:`dist2topk` the values are returned as-is (no
    renormalisation). Returns ``(indices, values)``.
    """
    values, idx = torch.topk(output, k, dim=-1)
    return idx.reshape(-1, k), values.reshape(-1, k)
def get_sample_key(ids):
    """Stable MD5 key for a collection of sample ids.

    Order-insensitive (ids are sorted before hashing) and memoised on the
    function object, so repeated calls with the same ids are cheap.
    """
    cache = getattr(get_sample_key, 'sample_key_cache', None)
    if cache is None:
        cache = get_sample_key.sample_key_cache = {}
    ids_str = ','.join(str(i) for i in sorted(ids))
    try:
        return cache[ids_str]
    except KeyError:
        digest = hashlib.md5(ids_str.encode()).hexdigest()
        cache[ids_str] = digest
        return digest
class TeacherOutputDatasetBuilder(IndexedDatasetBuilder):
    """IndexedDatasetBuilder variant that appends raw teacher-output arrays
    (arbitrary shape) straight to the .bin file."""

    def add_item(self, data):
        """Serialise one item and update the offset/size bookkeeping."""
        arr = np.array(data, dtype=self.dtype)
        n_written = self.out_file.write(arr)
        self.data_offsets.append(self.data_offsets[-1] + n_written / self.element_size)
        self.sizes.extend(arr.shape)
        self.dim_offsets.append(self.dim_offsets[-1] + len(arr.shape))
class TeacherOutputDataset(IndexedCachedDataset):
    """Indexed dataset of per-sentence teacher top-k outputs.

    Items are written by :class:`TeacherOutputDatasetBuilder` and read back
    as torch tensors: LongTensor for integer dtypes, FloatTensor otherwise.
    """

    # Bytes per element for the supported python scalar types.
    dtype2size = {
        float: 8,
        int: 4,
    }

    def __init__(self, prefix):
        self.cache_index = {}
        super().__init__(prefix, fix_lua_indexing=False)

    @staticmethod
    def save_bin(prefix, data_list, dtype=float):
        """Write *data_list* to <prefix>.bin / <prefix>.idx with *dtype*.

        Fix: the default was ``np.float``, an alias of the builtin ``float``
        that NumPy deprecated in 1.20 and removed in 1.24; the builtin is
        behaviourally identical on every NumPy version.
        """
        bin_path = prefix + '.bin'
        idx_path = prefix + '.idx'
        builder = TeacherOutputDatasetBuilder(bin_path, dtype)
        for d in data_list:
            builder.add_item(d)
        builder.finalize(idx_path)

    def __getitem__(self, i):
        """Return item *i* as a torch tensor, caching the raw numpy array."""
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        if i in self.cache:
            np.copyto(a, self.cache[i])
        else:
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            self.cache[i] = a
        item = torch.from_numpy(a)
        # `int` replaces the removed np.int alias (it was the same object).
        if self.dtype in (np.int32, int, np.int64):
            item = item.long()
        else:
            item = item.float()
        return item
def gen_outputs(args, task, trainer):
    """Run the (teacher) model over the whole 'train' split and collect its
    top-k outputs for every target position.

    Returns a list with one slot per training sentence: ``outputs[id]`` is a
    pair ``(topk_idx, topk_v)`` of nested Python lists restricted to
    non-padding target positions, or ``None`` for sentences this distributed
    shard did not process (iteration is sharded by rank).
    """
    trainer.model.eval()
    itr = task.get_batch_iterator(
        dataset=task.dataset('train'),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences_valid,
        max_positions=utils.resolve_max_positions(
            task.max_positions(),
            trainer.get_model().max_positions(),
        ),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=8,
        seed=args.seed,
        num_shards=args.distributed_world_size,  # each rank gets a disjoint shard
        shard_id=args.distributed_rank,
    ).next_epoch_itr(shuffle=False)
    # One slot per sentence; slots belonging to other shards stay None.
    outputs = [None for _ in range(len(task.dataset('train')))]
    for sample in tqdm(itr, mininterval=5):
        with torch.no_grad():
            if sample is None or len(sample) == 0:
                continue
            sample = utils.move_to_cuda(sample)
            bs, srclen = sample['net_input']['src_tokens'].shape
            # First element of the model output tuple is taken as the decoder
            # logits, presumably shaped (bs, tgtlen, vocab) -- TODO confirm.
            output = trainer.model(**sample['net_input'])[0].detach()
            non_padding_mask = sample['target'].ne(task.target_dictionary.pad()).cpu()
            _, tgtlen = sample['target'].shape
            topk_idx, topk_v = output2topk(output, args.distill_topk)
            topk_x_shape = (bs, tgtlen, args.distill_topk)
            topk_idx, topk_v = topk_idx.view(*topk_x_shape).cpu().numpy(), topk_v.view(*topk_x_shape).cpu().numpy()
            non_padding_mask = non_padding_mask.view(*topk_x_shape[:2]).cpu().numpy().astype(bool)
            # Keep only entries at non-padding target positions, keyed by the
            # sentence's dataset id so shards can later be merged.
            for b in range(bs):
                outputs[sample['id'][b].item()] = \
                    topk_idx[b, non_padding_mask[b]].tolist(), \
                    topk_v[b, non_padding_mask[b]].tolist()
    return outputs
def save_expert_outputs(args, task, trainer):
    """Dump the expert (teacher) model's top-k outputs for the training set.

    Every distributed worker writes its shard to
    ``<save_dir>/train_output.json.<rank>``; after a barrier the master
    merges the shards, removes the temporary files, copies the validation
    BLEU record next to the data, and writes the merged top-k
    indices/probabilities as indexed binary datasets.
    """
    import shutil
    print("| Start saving expert outputs..")
    expert_outputs = gen_outputs(args, task, trainer)
    output_path = os.path.join(args.save_dir, 'train_output.json.{}'.format(args.distributed_rank))
    # Context manager guarantees the shard is flushed/closed before the
    # barrier (the old bare open() leaked the file handle).
    with open(output_path, 'w') as f:
        json.dump(expert_outputs, f)
    distributed_utils.barrier(args, 'save_expert_outputs')
    if distributed_utils.is_master(args):
        expert_outputs_ = []
        val_bleu_path1 = os.path.join(args.save_dir, 'val_bleu.json')
        val_bleu_path2 = os.path.join(args.data[0], 'expert_bleu_{}_{}.json'.format(args.sources, args.targets))
        # shutil.copyfile replaces the shelled-out `cp`: portable and safe
        # with whitespace/shell metacharacters in the paths.
        shutil.copyfile(val_bleu_path1, val_bleu_path2)
        for i in range(args.distributed_world_size):
            output_path = os.path.join(args.save_dir, 'train_output.json.{}'.format(i))
            with open(output_path, 'r') as f:
                expert_outputs_.append(json.load(f))
            try:
                os.remove(output_path)
            except OSError:
                # Best-effort cleanup; a leftover shard file is harmless.
                pass
        # Merge: for every sentence, take the output from whichever shard
        # processed it; every sentence must be covered by exactly one shard.
        for j in range(len(expert_outputs_[0])):
            for i in range(args.distributed_world_size):
                if expert_outputs_[i][j] is not None:
                    expert_outputs[j] = expert_outputs_[i][j]
                    break
            assert expert_outputs[j] is not None
        path = os.path.join(args.data[0], '{}_{}_topk_idx'.format(args.sources, args.targets))
        TeacherOutputDataset.save_bin(path, [o[0] for o in expert_outputs], np.int32)
        path = os.path.join(args.data[0], '{}_{}_topk_prob'.format(args.sources, args.targets))
        # builtin `float` == the np.float alias removed in NumPy 1.24
        TeacherOutputDataset.save_bin(path, [o[1] for o in expert_outputs], float)
        print("| Save expert@{}_{}".format(args.sources, args.targets))
# def save_master_outputs(args, task, trainer, version, dev_scores, force_save=False):
# assert dev_scores is not None
# master_outputs = None
#
# try:
# with open(os.path.join(args.fed_path, 'all_{}'.format(args.target_lang), FED_VERSION_FN)) as f:
# old_version_data = json.load(f)
# except:
# old_version_data = None
#
# dataset = task.dataset('train')
# division = dataset.src_cumsum + [len(dataset)]
# version_path = os.path.join(args.save_dir, FED_VERSION_FN)
# version_data = {
# 'version': version,
# }
#
# for lng_idx, lng in enumerate(dataset.fed_lngs):
# start, end = division[lng_idx], division[lng_idx + 1]
# if force_save or old_version_data is None or dev_scores['bleu_{}'.format(lng)] > old_version_data[
# 'bleu_{}'.format(lng)]:
# output_path = os.path.join(args.save_dir, 'train_output.{}.json.{}'.format(lng, args.distributed_rank))
# if master_outputs is None:
# master_outputs = gen_outputs(args, task, trainer)
# json.dump(master_outputs[start:end], open(output_path, 'w'))
# version_data['bleu_{}'.format(lng)] = dev_scores['bleu_{}'.format(lng)]
# else:
# version_data['bleu_{}'.format(lng)] = old_version_data['bleu_{}'.format(lng)]
#
# if distributed_utils.is_master(args):
# with open(version_path, 'w') as f:
# json.dump(version_data, f)
# print("| Save master, data:{}".format(json.dumps(version_data)))
#
#
# def load_master_outputs(args, score, old_master_version=None, old_master_outputs=None):
# assert score is not None
# master_outputs = old_master_outputs
# master_version = old_master_version
#
# files = glob.glob(os.path.join(args.fed_path, 'all_{}'.format(args.target_lang),
# 'train_output.{}.*.*'.format(args.source_lang)))
# if len(files) == 0:
# files = glob.glob(os.path.join(args.fed_path, 'train_output.{}.*.*'.format(args.source_lang)))
# if len(files) == 0:
# print("| Master not found.")
# return master_version, master_outputs
#
# try:
# version_fn = os.path.join(args.fed_path, 'all_{}'.format(args.target_lang), FED_VERSION_FN)
# if not os.path.exists(version_fn):
# version_fn = os.path.join(args.fed_path, FED_VERSION_FN)
# with open(version_fn) as f:
# version_data = json.load(f)
# version = version_data['version']
#
# if old_master_version is not None and old_master_outputs is not None:
# if version <= old_master_version:
# print("| Master has not updated yet.")
# return master_version, master_outputs
# except FileNotFoundError:
# print("| Master version not found.")
# return master_version, master_outputs
#
# outputs = []
# for f in files:
# outputs.append(json.load(open(f, 'r')))
# outputs_flatten = [None for _ in range(len(outputs[0]))]
# for i in range(len(outputs[0])):
# for j in range(len(files)):
# if outputs[j][i] is not None:
# outputs_flatten[i] = outputs[j][i]
# break
# assert outputs_flatten[i] is not None
# print("| Load master@{}.".format(version))
# return version, outputs_flatten
| import glob
import hashlib
import os
import torch
from tqdm import tqdm
from fairseq import utils, distributed_utils
import numpy as np
import ujson as json
from fairseq.data.indexed_dataset import IndexedDatasetBuilder, IndexedCachedDataset
FED_VERSION_FN = 'fed_version.v3.idx'
def dist2topk(out_dist, k):
topk_prob, topk_idx = torch.topk(out_dist, k, dim=-1)
topk_prob = topk_prob.view(-1, k) # (B x T) x k
topk_prob = topk_prob / topk_prob.sum(1, keepdim=True)
topk_idx = topk_idx.view(-1, k) # (B x T) x k
return topk_idx, topk_prob
def output2topk(output, k):
topk_outp, topk_idx = torch.topk(output, k, dim=-1)
topk_outp = topk_outp.view(-1, k) # (B x T) x k
topk_idx = topk_idx.view(-1, k) # (B x T) x k
return topk_idx, topk_outp
def get_sample_key(ids):
if not hasattr(get_sample_key, 'sample_key_cache'):
get_sample_key.sample_key_cache = {}
ids_str = ','.join([str(id) for id in sorted(ids)])
if ids_str not in get_sample_key.sample_key_cache:
hash_object = hashlib.md5(ids_str.encode())
get_sample_key.sample_key_cache[ids_str] = hash_object.hexdigest()
return get_sample_key.sample_key_cache[ids_str]
class TeacherOutputDatasetBuilder(IndexedDatasetBuilder):
def add_item(self, data):
# +1 for Lua compatibility
data = np.array(data, dtype=self.dtype)
bytes = self.out_file.write(data)
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in data.shape:
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(data.shape))
class TeacherOutputDataset(IndexedCachedDataset):
dtype2size = {
float: 8,
int: 4,
}
def __init__(self, prefix):
self.cache_index = {}
super().__init__(prefix, fix_lua_indexing=False)
@staticmethod
def save_bin(prefix, data_list, dtype=np.float):
bin_path = prefix + '.bin'
idx_path = prefix + '.idx'
builder = TeacherOutputDatasetBuilder(bin_path, dtype)
for d in data_list:
builder.add_item(d)
builder.finalize(idx_path)
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
if i in self.cache:
np.copyto(a, self.cache[i])
else:
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
self.cache[i] = a
item = torch.from_numpy(a)
if self.dtype == np.int32 or self.dtype == np.int or self.dtype == np.int64:
item = item.long()
else:
item = item.float()
return item
def gen_outputs(args, task, trainer):
trainer.model.eval()
itr = task.get_batch_iterator(
dataset=task.dataset('train'),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
).next_epoch_itr(shuffle=False)
outputs = [None for _ in range(len(task.dataset('train')))]
for sample in tqdm(itr, mininterval=5):
with torch.no_grad():
if sample is None or len(sample) == 0:
continue
sample = utils.move_to_cuda(sample)
bs, srclen = sample['net_input']['src_tokens'].shape
output = trainer.model(**sample['net_input'])[0].detach()
non_padding_mask = sample['target'].ne(task.target_dictionary.pad()).cpu()
_, tgtlen = sample['target'].shape
topk_idx, topk_v = output2topk(output, args.distill_topk)
topk_x_shape = (bs, tgtlen, args.distill_topk)
topk_idx, topk_v = topk_idx.view(*topk_x_shape).cpu().numpy(), topk_v.view(*topk_x_shape).cpu().numpy()
non_padding_mask = non_padding_mask.view(*topk_x_shape[:2]).cpu().numpy().astype(bool)
for b in range(bs):
outputs[sample['id'][b].item()] = \
topk_idx[b, non_padding_mask[b]].tolist(), \
topk_v[b, non_padding_mask[b]].tolist()
return outputs
def save_expert_outputs(args, task, trainer):
print("| Start saving expert outputs..")
expert_outputs = gen_outputs(args, task, trainer)
output_path = os.path.join(args.save_dir, 'train_output.json.{}'.format(args.distributed_rank))
json.dump(expert_outputs, open(output_path, 'w'))
distributed_utils.barrier(args, 'save_expert_outputs')
if distributed_utils.is_master(args):
expert_outputs_ = []
val_bleu_path1 = os.path.join(args.save_dir, 'val_bleu.json')
val_bleu_path2 = os.path.join(args.data[0], 'expert_bleu_{}_{}.json'.format(args.sources, args.targets))
os.system('cp {} {}'.format(val_bleu_path1, val_bleu_path2))
for i in range(args.distributed_world_size):
output_path = os.path.join(args.save_dir, 'train_output.json.{}'.format(i))
expert_outputs_.append(json.load(open(output_path, 'r')))
try:
os.remove(output_path)
except:
pass
for j in range(len(expert_outputs_[0])):
for i in range(args.distributed_world_size):
if expert_outputs_[i][j] is not None:
expert_outputs[j] = expert_outputs_[i][j]
break
assert expert_outputs[j] is not None
path = os.path.join(args.data[0], '{}_{}_topk_idx'.format(args.sources, args.targets))
TeacherOutputDataset.save_bin(path, [o[0] for o in expert_outputs], np.int32)
path = os.path.join(args.data[0], '{}_{}_topk_prob'.format(args.sources, args.targets))
TeacherOutputDataset.save_bin(path, [o[1] for o in expert_outputs], np.float)
print("| Save expert@{}_{}".format(args.sources, args.targets))
# def save_master_outputs(args, task, trainer, version, dev_scores, force_save=False):
# assert dev_scores is not None
# master_outputs = None
#
# try:
# with open(os.path.join(args.fed_path, 'all_{}'.format(args.target_lang), FED_VERSION_FN)) as f:
# old_version_data = json.load(f)
# except:
# old_version_data = None
#
# dataset = task.dataset('train')
# division = dataset.src_cumsum + [len(dataset)]
# version_path = os.path.join(args.save_dir, FED_VERSION_FN)
# version_data = {
# 'version': version,
# }
#
# for lng_idx, lng in enumerate(dataset.fed_lngs):
# start, end = division[lng_idx], division[lng_idx + 1]
# if force_save or old_version_data is None or dev_scores['bleu_{}'.format(lng)] > old_version_data[
# 'bleu_{}'.format(lng)]:
# output_path = os.path.join(args.save_dir, 'train_output.{}.json.{}'.format(lng, args.distributed_rank))
# if master_outputs is None:
# master_outputs = gen_outputs(args, task, trainer)
# json.dump(master_outputs[start:end], open(output_path, 'w'))
# version_data['bleu_{}'.format(lng)] = dev_scores['bleu_{}'.format(lng)]
# else:
# version_data['bleu_{}'.format(lng)] = old_version_data['bleu_{}'.format(lng)]
#
# if distributed_utils.is_master(args):
# with open(version_path, 'w') as f:
# json.dump(version_data, f)
# print("| Save master, data:{}".format(json.dumps(version_data)))
#
#
# def load_master_outputs(args, score, old_master_version=None, old_master_outputs=None):
# assert score is not None
# master_outputs = old_master_outputs
# master_version = old_master_version
#
# files = glob.glob(os.path.join(args.fed_path, 'all_{}'.format(args.target_lang),
# 'train_output.{}.*.*'.format(args.source_lang)))
# if len(files) == 0:
# files = glob.glob(os.path.join(args.fed_path, 'train_output.{}.*.*'.format(args.source_lang)))
# if len(files) == 0:
# print("| Master not found.")
# return master_version, master_outputs
#
# try:
# version_fn = os.path.join(args.fed_path, 'all_{}'.format(args.target_lang), FED_VERSION_FN)
# if not os.path.exists(version_fn):
# version_fn = os.path.join(args.fed_path, FED_VERSION_FN)
# with open(version_fn) as f:
# version_data = json.load(f)
# version = version_data['version']
#
# if old_master_version is not None and old_master_outputs is not None:
# if version <= old_master_version:
# print("| Master has not updated yet.")
# return master_version, master_outputs
# except FileNotFoundError:
# print("| Master version not found.")
# return master_version, master_outputs
#
# outputs = []
# for f in files:
# outputs.append(json.load(open(f, 'r')))
# outputs_flatten = [None for _ in range(len(outputs[0]))]
# for i in range(len(outputs[0])):
# for j in range(len(files)):
# if outputs[j][i] is not None:
# outputs_flatten[i] = outputs[j][i]
# break
# assert outputs_flatten[i] is not None
# print("| Load master@{}.".format(version))
# return version, outputs_flatten
| en | 0.385545 | # (B x T) x k # (B x T) x k # (B x T) x k # (B x T) x k # +1 for Lua compatibility # def save_master_outputs(args, task, trainer, version, dev_scores, force_save=False): # assert dev_scores is not None # master_outputs = None # # try: # with open(os.path.join(args.fed_path, 'all_{}'.format(args.target_lang), FED_VERSION_FN)) as f: # old_version_data = json.load(f) # except: # old_version_data = None # # dataset = task.dataset('train') # division = dataset.src_cumsum + [len(dataset)] # version_path = os.path.join(args.save_dir, FED_VERSION_FN) # version_data = { # 'version': version, # } # # for lng_idx, lng in enumerate(dataset.fed_lngs): # start, end = division[lng_idx], division[lng_idx + 1] # if force_save or old_version_data is None or dev_scores['bleu_{}'.format(lng)] > old_version_data[ # 'bleu_{}'.format(lng)]: # output_path = os.path.join(args.save_dir, 'train_output.{}.json.{}'.format(lng, args.distributed_rank)) # if master_outputs is None: # master_outputs = gen_outputs(args, task, trainer) # json.dump(master_outputs[start:end], open(output_path, 'w')) # version_data['bleu_{}'.format(lng)] = dev_scores['bleu_{}'.format(lng)] # else: # version_data['bleu_{}'.format(lng)] = old_version_data['bleu_{}'.format(lng)] # # if distributed_utils.is_master(args): # with open(version_path, 'w') as f: # json.dump(version_data, f) # print("| Save master, data:{}".format(json.dumps(version_data))) # # # def load_master_outputs(args, score, old_master_version=None, old_master_outputs=None): # assert score is not None # master_outputs = old_master_outputs # master_version = old_master_version # # files = glob.glob(os.path.join(args.fed_path, 'all_{}'.format(args.target_lang), # 'train_output.{}.*.*'.format(args.source_lang))) # if len(files) == 0: # files = glob.glob(os.path.join(args.fed_path, 'train_output.{}.*.*'.format(args.source_lang))) # if len(files) == 0: # print("| Master not found.") # return master_version, master_outputs # # try: # 
version_fn = os.path.join(args.fed_path, 'all_{}'.format(args.target_lang), FED_VERSION_FN) # if not os.path.exists(version_fn): # version_fn = os.path.join(args.fed_path, FED_VERSION_FN) # with open(version_fn) as f: # version_data = json.load(f) # version = version_data['version'] # # if old_master_version is not None and old_master_outputs is not None: # if version <= old_master_version: # print("| Master has not updated yet.") # return master_version, master_outputs # except FileNotFoundError: # print("| Master version not found.") # return master_version, master_outputs # # outputs = [] # for f in files: # outputs.append(json.load(open(f, 'r'))) # outputs_flatten = [None for _ in range(len(outputs[0]))] # for i in range(len(outputs[0])): # for j in range(len(files)): # if outputs[j][i] is not None: # outputs_flatten[i] = outputs[j][i] # break # assert outputs_flatten[i] is not None # print("| Load master@{}.".format(version)) # return version, outputs_flatten | 2.002997 | 2 |
scripts/tinypng.py | svegio/MEE5002 | 3 | 6620197 | <reponame>svegio/MEE5002<filename>scripts/tinypng.py
import os
import shutil
import subprocess
import tempfile
def tinypng(input_path: str, output_path: str) -> bool:
'''
- Argument:
- input_path: 输入路径
- output_path: 输出路径
'''
if not os.path.exists(input_path):
return False
with tempfile.NamedTemporaryFile(
suffix=os.path.splitext(input_path)[-1], delete=False
) as f:
for command in (
(
'ffmpeg', '-i', input_path,
'-vf', 'palettegen=max_colors=256:stats_mode=single',
'-y', f.name,
), (
'ffmpeg', '-i', input_path, '-i', f.name,
'-lavfi', '[0][1:v] paletteuse', '-pix_fmt', 'pal8',
'-y', output_path,
)
):
subprocess.run(command)
os.unlink(f.name)
if os.path.exists(output_path):
if os.path.getsize(output_path) > os.path.getsize(input_path):
shutil.copy(input_path, output_path) # 未起到压缩目的
else:
shutil.copy(input_path, output_path) # 压缩失败
return True
| import os
import shutil
import subprocess
import tempfile
def tinypng(input_path: str, output_path: str) -> bool:
'''
- Argument:
- input_path: 输入路径
- output_path: 输出路径
'''
if not os.path.exists(input_path):
return False
with tempfile.NamedTemporaryFile(
suffix=os.path.splitext(input_path)[-1], delete=False
) as f:
for command in (
(
'ffmpeg', '-i', input_path,
'-vf', 'palettegen=max_colors=256:stats_mode=single',
'-y', f.name,
), (
'ffmpeg', '-i', input_path, '-i', f.name,
'-lavfi', '[0][1:v] paletteuse', '-pix_fmt', 'pal8',
'-y', output_path,
)
):
subprocess.run(command)
os.unlink(f.name)
if os.path.exists(output_path):
if os.path.getsize(output_path) > os.path.getsize(input_path):
shutil.copy(input_path, output_path) # 未起到压缩目的
else:
shutil.copy(input_path, output_path) # 压缩失败
return True | ja | 0.412991 | - Argument: - input_path: 输入路径 - output_path: 输出路径 # 未起到压缩目的 # 压缩失败 | 2.720801 | 3 |
TLG.py | ankitpipalia/codechef-solutions | 1 | 6620198 | tcase = int(input())
name = dict()
while(tcase):
p1, p2 = map(int, input().split(" "))
if p1 >= p2:
name[p1 - p2] = 1
else :
name[p2-p1] = 2
tcase -= 1
x = max(name.keys())
print(name[int(x)] , int(x)) | tcase = int(input())
name = dict()
while(tcase):
p1, p2 = map(int, input().split(" "))
if p1 >= p2:
name[p1 - p2] = 1
else :
name[p2-p1] = 2
tcase -= 1
x = max(name.keys())
print(name[int(x)] , int(x)) | none | 1 | 3.223801 | 3 | |
housekeeper/cli/include.py | Clinical-Genomics/housekeeper | 2 | 6620199 | """Module for including files via CLI"""
import datetime as dt
import logging
import click
from housekeeper.exc import VersionIncludedError
from housekeeper.include import include_version
LOG = logging.getLogger(__name__)
@click.command()
@click.option("--version-id", type=int, help="version id of the bundle version")
@click.argument("bundle_name", required=False)
@click.pass_context
def include(context: click.Context, bundle_name: str, version_id: int):
"""Include a bundle of files into the internal space.
Use bundle name if you simply want to include the latest version.
"""
LOG.info("Running include")
store = context.obj["store"]
if not (version_id or bundle_name):
LOG.warning("Please use bundle name or version-id")
raise click.Abort
if version_id:
LOG.info("Use version %s", version_id)
version_obj = store.Version.get(version_id)
if version_obj is None:
LOG.warning("version not found")
raise click.Abort
if bundle_name:
bundle_obj = store.bundle(bundle_name)
if bundle_obj is None:
LOG.warning("bundle %s not found", bundle_name)
raise click.Abort
if len(bundle_obj.versions) == 0:
LOG.error("Could not find any versions for bundle %s", bundle_name)
raise click.Abort
LOG.info("Including latest version for %s", bundle_name)
version_obj = bundle_obj.versions[0]
try:
include_version(context.obj["root"], version_obj)
except VersionIncludedError as error:
LOG.warning(error.message)
raise click.Abort
version_obj.included_at = dt.datetime.now()
store.commit()
click.echo(click.style("included all files!", fg="green"))
| """Module for including files via CLI"""
import datetime as dt
import logging
import click
from housekeeper.exc import VersionIncludedError
from housekeeper.include import include_version
LOG = logging.getLogger(__name__)
@click.command()
@click.option("--version-id", type=int, help="version id of the bundle version")
@click.argument("bundle_name", required=False)
@click.pass_context
def include(context: click.Context, bundle_name: str, version_id: int):
"""Include a bundle of files into the internal space.
Use bundle name if you simply want to include the latest version.
"""
LOG.info("Running include")
store = context.obj["store"]
if not (version_id or bundle_name):
LOG.warning("Please use bundle name or version-id")
raise click.Abort
if version_id:
LOG.info("Use version %s", version_id)
version_obj = store.Version.get(version_id)
if version_obj is None:
LOG.warning("version not found")
raise click.Abort
if bundle_name:
bundle_obj = store.bundle(bundle_name)
if bundle_obj is None:
LOG.warning("bundle %s not found", bundle_name)
raise click.Abort
if len(bundle_obj.versions) == 0:
LOG.error("Could not find any versions for bundle %s", bundle_name)
raise click.Abort
LOG.info("Including latest version for %s", bundle_name)
version_obj = bundle_obj.versions[0]
try:
include_version(context.obj["root"], version_obj)
except VersionIncludedError as error:
LOG.warning(error.message)
raise click.Abort
version_obj.included_at = dt.datetime.now()
store.commit()
click.echo(click.style("included all files!", fg="green"))
| en | 0.879769 | Module for including files via CLI Include a bundle of files into the internal space. Use bundle name if you simply want to include the latest version. | 2.349948 | 2 |
tests/actors/test_network_event.py | reapler/geckordp | 1 | 6620200 | # pylint: disable=unused-import
from time import sleep
import pytest
import tests.helpers.constants as constants
from tests.helpers.utils import *
from geckordp.rdp_client import RDPClient
from geckordp.actors.root import RootActor
from geckordp.actors.descriptors.tab import TabActor
from geckordp.actors.network_event import NetworkEventActor
from geckordp.actors.web_console import WebConsoleActor
from geckordp.actors.targets.window_global import WindowGlobalActor
from geckordp.actors.watcher import WatcherActor
from geckordp.actors.descriptors.process import ProcessActor
from geckordp.actors.thread import ThreadActor
from geckordp.actors.events import Events
from geckordp.logger import log, logdict
def test_network_event():
cl = None
try:
cl = RDPClient(3)
cl.connect(constants.REMOTE_HOST, constants.REMOTE_PORT)
root = RootActor(cl)
process_descriptors = root.list_processes()
for descriptor in process_descriptors:
actor_id = descriptor["actor"]
process_actor_ids = ProcessActor(
cl, actor_id).get_target()
console = WebConsoleActor(
cl, process_actor_ids["consoleActor"])
console.start_listeners([])
current_tab = root.current_tab()
tab = TabActor(cl, current_tab["actor"])
actor_ids = tab.get_target()
browser = WindowGlobalActor(
cl, actor_ids["actor"])
console = WebConsoleActor(
cl, actor_ids["consoleActor"])
console.start_listeners([])
watcher_ctx = tab.get_watcher()
watcher = WatcherActor(
cl, watcher_ctx["actor"])
thread = ThreadActor(
cl, actor_ids["threadActor"])
thread.attach()
# todo add TargetConfigurationActor
watcher.watch_resources([
WatcherActor.Resources.CONSOLE_MESSAGE,
WatcherActor.Resources.ERROR_MESSAGE,
WatcherActor.Resources.NETWORK_EVENT,
WatcherActor.Resources.NETWORK_EVENT_STACKTRACE,
WatcherActor.Resources.DOCUMENT_EVENT,
])
network_event_ids = []
def on_resource_available(data):
resources = data["resources"]
if (len(resources) <= 0):
return
resources = resources[0]
if (resources["resourceType"] != "network-event"):
return
network_event_actor_id = resources["actor"]
resource_id = resources.get("resourceId", -1)
if (resource_id == -1):
return
network_event_ids.append(network_event_actor_id)
cl.add_event_listener(
watcher_ctx["actor"],
Events.Watcher.RESOURCE_AVAILABLE_FORM,
on_resource_available)
browser.navigate_to("https://example.com/")
sleep(1.5)
network_event = NetworkEventActor(cl, network_event_ids[0])
# get_request_headers
val = network_event.get_request_headers()["headers"]
assert len(val) > 2
# get_request_cookies
val = network_event.get_request_cookies().get("cookies", 0)
assert val != 0
# get_request_post_data
val = network_event.get_request_post_data().get("postData", 0)
assert val != 0
# get_response_headers
val = network_event.get_response_headers()["headers"]
assert isinstance(val, list)
# get_response_cookies
val = network_event.get_response_cookies().get("cookies", 0)
assert val != 0
# get_response_cache
val = network_event.get_response_cache()
assert response_valid("netEvent", val), str(val)
# get_response_content
val = network_event.get_response_content()["content"]["size"]
assert val > 100
# get_event_timings
val = network_event.get_event_timings().get("timings", 0)
assert val != 0
# get_security_info
val = network_event.get_security_info()["securityInfo"]["state"]
assert val == "secure"
finally:
cl.disconnect()
| # pylint: disable=unused-import
from time import sleep
import pytest
import tests.helpers.constants as constants
from tests.helpers.utils import *
from geckordp.rdp_client import RDPClient
from geckordp.actors.root import RootActor
from geckordp.actors.descriptors.tab import TabActor
from geckordp.actors.network_event import NetworkEventActor
from geckordp.actors.web_console import WebConsoleActor
from geckordp.actors.targets.window_global import WindowGlobalActor
from geckordp.actors.watcher import WatcherActor
from geckordp.actors.descriptors.process import ProcessActor
from geckordp.actors.thread import ThreadActor
from geckordp.actors.events import Events
from geckordp.logger import log, logdict
def test_network_event():
cl = None
try:
cl = RDPClient(3)
cl.connect(constants.REMOTE_HOST, constants.REMOTE_PORT)
root = RootActor(cl)
process_descriptors = root.list_processes()
for descriptor in process_descriptors:
actor_id = descriptor["actor"]
process_actor_ids = ProcessActor(
cl, actor_id).get_target()
console = WebConsoleActor(
cl, process_actor_ids["consoleActor"])
console.start_listeners([])
current_tab = root.current_tab()
tab = TabActor(cl, current_tab["actor"])
actor_ids = tab.get_target()
browser = WindowGlobalActor(
cl, actor_ids["actor"])
console = WebConsoleActor(
cl, actor_ids["consoleActor"])
console.start_listeners([])
watcher_ctx = tab.get_watcher()
watcher = WatcherActor(
cl, watcher_ctx["actor"])
thread = ThreadActor(
cl, actor_ids["threadActor"])
thread.attach()
# todo add TargetConfigurationActor
watcher.watch_resources([
WatcherActor.Resources.CONSOLE_MESSAGE,
WatcherActor.Resources.ERROR_MESSAGE,
WatcherActor.Resources.NETWORK_EVENT,
WatcherActor.Resources.NETWORK_EVENT_STACKTRACE,
WatcherActor.Resources.DOCUMENT_EVENT,
])
network_event_ids = []
def on_resource_available(data):
resources = data["resources"]
if (len(resources) <= 0):
return
resources = resources[0]
if (resources["resourceType"] != "network-event"):
return
network_event_actor_id = resources["actor"]
resource_id = resources.get("resourceId", -1)
if (resource_id == -1):
return
network_event_ids.append(network_event_actor_id)
cl.add_event_listener(
watcher_ctx["actor"],
Events.Watcher.RESOURCE_AVAILABLE_FORM,
on_resource_available)
browser.navigate_to("https://example.com/")
sleep(1.5)
network_event = NetworkEventActor(cl, network_event_ids[0])
# get_request_headers
val = network_event.get_request_headers()["headers"]
assert len(val) > 2
# get_request_cookies
val = network_event.get_request_cookies().get("cookies", 0)
assert val != 0
# get_request_post_data
val = network_event.get_request_post_data().get("postData", 0)
assert val != 0
# get_response_headers
val = network_event.get_response_headers()["headers"]
assert isinstance(val, list)
# get_response_cookies
val = network_event.get_response_cookies().get("cookies", 0)
assert val != 0
# get_response_cache
val = network_event.get_response_cache()
assert response_valid("netEvent", val), str(val)
# get_response_content
val = network_event.get_response_content()["content"]["size"]
assert val > 100
# get_event_timings
val = network_event.get_event_timings().get("timings", 0)
assert val != 0
# get_security_info
val = network_event.get_security_info()["securityInfo"]["state"]
assert val == "secure"
finally:
cl.disconnect()
| en | 0.146862 | # pylint: disable=unused-import # todo add TargetConfigurationActor # get_request_headers # get_request_cookies # get_request_post_data # get_response_headers # get_response_cookies # get_response_cache # get_response_content # get_event_timings # get_security_info | 1.94874 | 2 |
naming/core/migrations/0006_auto_20201003_1520.py | icky-baker/sonny | 0 | 6620201 | # Generated by Django 3.1.2 on 2020-10-03 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0005_storedfile_meta"),
]
operations = [
migrations.AlterField(
model_name="storedfile",
name="meta",
field=models.JSONField(default=dict, verbose_name="Meta information about file"),
),
]
| # Generated by Django 3.1.2 on 2020-10-03 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0005_storedfile_meta"),
]
operations = [
migrations.AlterField(
model_name="storedfile",
name="meta",
field=models.JSONField(default=dict, verbose_name="Meta information about file"),
),
]
| en | 0.806611 | # Generated by Django 3.1.2 on 2020-10-03 15:20 | 1.566045 | 2 |
model2/attentions.py | hyyc116/SIP_v1 | 0 | 6620202 | <filename>model2/attentions.py
#coding:utf-8
'''
定义各种attention
'''
import tensorflow as tf
class BahdanauAttention(tf.keras.Model):
def __init__(self,units):
super(BahdanauAttention,self).__init__()
self._W1 = tf.keras.layers.Dense(units)
self._W2 = tf.keras.layers.Dense(units)
self._V = tf.keras.layers.Dense(1)
def call(self,query,values):
hidden_with_time_axis = tf.expand_dims(query,1)
score = self._V(tf.nn.tanh(self._W1(values)+self._W2(hidden_with_time_axis)))
attention_weights = tf.nn.softmax(score,axis=1)
context_vector = attention_weights*values
context_vector = tf.reduce_sum(context_vector,axis=1)
return context_vector,attention_weights | <filename>model2/attentions.py
#coding:utf-8
'''
定义各种attention
'''
import tensorflow as tf
class BahdanauAttention(tf.keras.Model):
def __init__(self,units):
super(BahdanauAttention,self).__init__()
self._W1 = tf.keras.layers.Dense(units)
self._W2 = tf.keras.layers.Dense(units)
self._V = tf.keras.layers.Dense(1)
def call(self,query,values):
hidden_with_time_axis = tf.expand_dims(query,1)
score = self._V(tf.nn.tanh(self._W1(values)+self._W2(hidden_with_time_axis)))
attention_weights = tf.nn.softmax(score,axis=1)
context_vector = attention_weights*values
context_vector = tf.reduce_sum(context_vector,axis=1)
return context_vector,attention_weights | en | 0.446874 | #coding:utf-8 定义各种attention | 2.509476 | 3 |
p7_flask_app/auth.py | crazynayan/tpf1 | 1 | 6620203 | import os
from base64 import b64encode
from typing import Optional, Dict
from firestore_ci import FirestoreDocument
from flask import g, Response, jsonify
from flask_httpauth import HTTPBasicAuth, HTTPTokenAuth
from werkzeug.security import generate_password_hash, check_password_hash
from config import config
from p7_flask_app import tpf1_app
from p7_flask_app.errors import error_response
basic_auth = HTTPBasicAuth()
token_auth = HTTPTokenAuth()
@basic_auth.verify_password
def verify_password(email: str, password: str) -> bool:
g.current_user = User.get_by_email(email)
return g.current_user is not None and g.current_user.check_password(password)
@basic_auth.error_handler
def basic_auth_error() -> Response:
return error_response(401)
@tpf1_app.route("/tokens", methods=["POST"])
@basic_auth.login_required
def generate_token() -> Response:
g.current_user.generate_token()
user_response: dict = {
"email": g.current_user.email,
"id": g.current_user.id,
"initial": g.current_user.initial,
"role": g.current_user.role,
"token": g.current_user.token,
}
return jsonify(user_response)
@token_auth.verify_token
def verify_token(token: str) -> bool:
g.current_user = User.get_by_token(token) if token else None
return g.current_user is not None
@token_auth.error_handler
def token_auth_error() -> Response:
return error_response(401)
class User(FirestoreDocument):
def __init__(self):
super().__init__()
self.email: str = str() # email address is the username
self.initial: str = str() # 2 character initial in uppercase
self.role: str = config.MEMBER # Role of the user. Refer config.ROLES
self.password_hash: str = config.DEFAULT_PASSWORD
self.token: str = config.DEFAULT_TOKEN
def __repr__(self):
return f"{self.email}|{self.initial}|{self.role}"
def set_password(self, password) -> None:
self.password_hash = generate_password_hash(password)
self.save()
def check_password(self, password) -> bool:
return check_password_hash(self.password_hash, password)
def generate_token(self) -> str:
self.token = b64encode(os.urandom(24)).decode()
self.save()
return self.token
def revoke_token(self) -> None:
self.token = b64encode(os.urandom(24)).decode()
@classmethod
def get_user(cls, doc_id: str) -> Optional[Dict[str, str]]:
user: Optional[cls] = cls.get_by_id(doc_id)
if not user:
return None
user_dict: Dict[str, str] = dict()
user_dict["email"] = user.email
return user_dict
@classmethod
def get_by_token(cls, token: str) -> Optional["User"]:
return cls.objects.filter_by(token=token).first()
@classmethod
def get_by_email(cls, email: str) -> Optional["User"]:
return cls.objects.filter_by(email=email).first()
User.init()
| import os
from base64 import b64encode
from typing import Optional, Dict
from firestore_ci import FirestoreDocument
from flask import g, Response, jsonify
from flask_httpauth import HTTPBasicAuth, HTTPTokenAuth
from werkzeug.security import generate_password_hash, check_password_hash
from config import config
from p7_flask_app import tpf1_app
from p7_flask_app.errors import error_response
basic_auth = HTTPBasicAuth()
token_auth = HTTPTokenAuth()
@basic_auth.verify_password
def verify_password(email: str, password: str) -> bool:
g.current_user = User.get_by_email(email)
return g.current_user is not None and g.current_user.check_password(password)
@basic_auth.error_handler
def basic_auth_error() -> Response:
return error_response(401)
@tpf1_app.route("/tokens", methods=["POST"])
@basic_auth.login_required
def generate_token() -> Response:
g.current_user.generate_token()
user_response: dict = {
"email": g.current_user.email,
"id": g.current_user.id,
"initial": g.current_user.initial,
"role": g.current_user.role,
"token": g.current_user.token,
}
return jsonify(user_response)
@token_auth.verify_token
def verify_token(token: str) -> bool:
g.current_user = User.get_by_token(token) if token else None
return g.current_user is not None
@token_auth.error_handler
def token_auth_error() -> Response:
return error_response(401)
class User(FirestoreDocument):
def __init__(self):
super().__init__()
self.email: str = str() # email address is the username
self.initial: str = str() # 2 character initial in uppercase
self.role: str = config.MEMBER # Role of the user. Refer config.ROLES
self.password_hash: str = config.DEFAULT_PASSWORD
self.token: str = config.DEFAULT_TOKEN
def __repr__(self):
return f"{self.email}|{self.initial}|{self.role}"
def set_password(self, password) -> None:
self.password_hash = generate_password_hash(password)
self.save()
def check_password(self, password) -> bool:
return check_password_hash(self.password_hash, password)
def generate_token(self) -> str:
self.token = b64encode(os.urandom(24)).decode()
self.save()
return self.token
def revoke_token(self) -> None:
self.token = b64encode(os.urandom(24)).decode()
@classmethod
def get_user(cls, doc_id: str) -> Optional[Dict[str, str]]:
user: Optional[cls] = cls.get_by_id(doc_id)
if not user:
return None
user_dict: Dict[str, str] = dict()
user_dict["email"] = user.email
return user_dict
@classmethod
def get_by_token(cls, token: str) -> Optional["User"]:
return cls.objects.filter_by(token=token).first()
@classmethod
def get_by_email(cls, email: str) -> Optional["User"]:
return cls.objects.filter_by(email=email).first()
User.init()
| en | 0.825134 | # email address is the username # 2 character initial in uppercase # Role of the user. Refer config.ROLES | 2.627638 | 3 |
goid/plate_layout.py | fmi-basel/zinneretal-methods | 0 | 6620204 | import pandas as pd
import numpy as np
from threading import RLock
from functools import lru_cache
_NOT_FOUND = object()
class cached_property:
# cached_property from functools python 3.8
# https://github.com/python/cpython/blob/master/Lib/functools.py#L1169
# NOTE functools.lru_cache() applied on class method/properties creates
# class level cache and prevents garbage collection
def __init__(self, func):
self.func = func
self.attrname = None
self.__doc__ = func.__doc__
self.lock = RLock()
def __set_name__(self, owner, name):
if self.attrname is None:
self.attrname = name
elif name != self.attrname:
raise TypeError(
"Cannot assign the same cached_property to two different names "
f"({self.attrname!r} and {name!r}).")
def __get__(self, instance, owner=None):
if instance is None:
return self
if self.attrname is None:
raise TypeError(
"Cannot use cached_property instance without calling __set_name__ on it."
)
try:
cache = instance.__dict__
except AttributeError: # not all objects have __dict__ (e.g. class defines slots)
msg = (f"No '__dict__' attribute on {type(instance).__name__!r} "
f"instance to cache {self.attrname!r} property.")
raise TypeError(msg) from None
val = cache.get(self.attrname, _NOT_FOUND)
if val is _NOT_FOUND:
with self.lock:
# check if another thread filled cache while we awaited lock
val = cache.get(self.attrname, _NOT_FOUND)
if val is _NOT_FOUND:
val = self.func(instance)
try:
cache[self.attrname] = val
except TypeError:
msg = (
f"The '__dict__' attribute on {type(instance).__name__!r} instance "
f"does not support item assignment for caching {self.attrname!r} property."
)
raise TypeError(msg) from None
return val
class ExperimentLayoutParser():
'''Basic experiment layout parser. Can be used to get stain to channel
mapping and vice versa.
Args:
path: path of .xlsx layout
sheet: name of sheet to parse in .xlsx layout
'''
# TODO read condition section
# TODO handle exception, inform which section does not comply with template
# TODO handle edge cases (empty array, etc.)
def __init__(self, path, sheet):
self.path = path
self.sheet = sheet
self.barcode, self.plate_df, self.condition_df, self.stain_df = self._parse_sheet(
)
def _parse_sheet(self):
# load raw sheet array
template = pd.read_excel(self.path,
sheet_name=self.sheet,
header=None,
index_col=None,
engine='openpyxl').values
# find top left corner (and trim if necessary)
row0, col0 = np.argwhere(template == 'Barcode')[0]
template = template[row0:, col0:]
# read barcode
barcode = template[0, 1]
# read plate section
plate_row0 = np.argwhere(template[:, 0] == 'A')[0][0] - 1
plate_index, stain_nrows = self._trim_at_first_nan(
template[plate_row0 + 1:, 0])
plate_columns, stain_ncols = self._trim_at_first_nan(
template[plate_row0, 1:])
plate_df = pd.DataFrame(template[plate_row0 + 1:plate_row0 +
stain_nrows + 1, 1:stain_ncols + 1],
index=plate_index,
columns=plate_columns.astype(int))
# read condition section
condition_row0 = np.argwhere(template[:, 0] == 'Well*')[0][0]
condition_index, condition_nrows = self._trim_at_first_nan(
template[condition_row0 + 1:, 0])
condition_columns, condition_ncols = self._trim_at_first_nan(
template[condition_row0, 1:])
condition_df = pd.DataFrame(
template[condition_row0 + 1:condition_row0 + 1 + condition_nrows,
1:condition_ncols + 1],
index=condition_index,
columns=condition_columns)
# read stain section
stain_row0 = np.argwhere(template[:, 0] == 'Well*')[-1][0]
stain_index, stain_nrows = self._trim_at_first_nan(
template[stain_row0 + 1:, 0])
stain_columns, stain_ncols = self._trim_at_first_nan(
template[stain_row0, 1:])
stain_df = pd.DataFrame(template[stain_row0 + 1:stain_row0 + 1 +
stain_nrows, 1:stain_ncols + 1],
index=stain_index,
columns=stain_columns)
return barcode, plate_df, condition_df, stain_df
@cached_property
def condition_plate_df(self):
'''Flat dataframe with 'col', 'row', 'condition_id' columns'''
# yapf: disable
df = (self.plate_df
.applymap(lambda x: x.split('-')[0].strip() if isinstance(x, str) else np.nan)
.unstack()
.dropna()
.reset_index())
# yapf: enable
df.columns = ['col', 'row', 'condition_id']
return df
@cached_property
def stain_plate_df(self):
'''Flat Dataframe with 'col', 'row', 'stain_id' columns'''
# yapf: disable
df = (self.plate_df
.applymap(lambda x: x.split('-')[1].strip() if isinstance(x, str) else np.nan)
.unstack()
.dropna()
.astype(np.int)
.reset_index())
# yapf: enable
df.columns = ['col', 'row', 'stain_id']
return df
@cached_property
def condition_mapping_df(self):
'''Dataframe mapping plate row,col to experimental conditions'''
# lookup staining for each well
df = self.condition_df.loc[
self.condition_plate_df.condition_id].reset_index(drop=True)
# add columns indicating plate row/col
df = self.condition_plate_df[['col', 'row']].join(df)
return df.set_index(['row', 'col'])
@cached_property
def well_to_condition(self):
'''condition series with ['row', 'col'] multi-index'''
return self.condition_mapping_df['Condition*'].rename('condition')
@cached_property
def stain_mapping_df(self):
'''Dataframe mapping plate row,col to staining conditions'''
# lookup staining for each well
df = self.stain_df.loc[self.stain_plate_df.stain_id].reset_index(
drop=True)
# add columns indicating plate row/col
df = self.stain_plate_df[['col', 'row']].join(df)
return df.set_index(['row', 'col'])
@cached_property
def ch_to_stain(self):
'''stain series with ['row', 'col', 'channel'] multi-index'''
df = (self.stain_mapping_df.rename(
columns={
'Channel01*': 1,
'Channel02*': 2,
'Channel03*': 3,
'Channel04*': 4
})[[1, 2, 3, 4]].stack())
df.index.names = ['row', 'col', 'channel']
df.name = 'stain'
return df
@cached_property
def stain_to_ch(self):
'''channel series with ['row', 'col', 'stain'] multi-index'''
return self.ch_to_stain.reset_index().set_index(
['row', 'col', 'stain']).iloc[:, 0]
def get_first_matching_stain_to_ch(self, row, col, stain_candidates):
'''Returns the channel id of the first matched staining or None if there is no match'''
for stain in stain_candidates:
try:
return self.stain_to_ch[row, col, stain]
except KeyError as e:
pass
return None
@staticmethod
def _trim_at_first_nan(arr):
'''trim a 1d array (e.g. index or columns) at the first encountered nan.
returns the trimmed array and its length'''
arr_length = len(arr)
nan_idxs = np.nonzero(pd.isnull(arr))[0]
if len(nan_idxs) > 0:
arr_length = nan_idxs[0]
return arr[:arr_length], arr_length
@lru_cache(maxsize=32)
def cached_experiment_layout_parser(path, sheet):
return ExperimentLayoutParser(path, sheet)
| import pandas as pd
import numpy as np
from threading import RLock
from functools import lru_cache
_NOT_FOUND = object()
class cached_property:
# cached_property from functools python 3.8
# https://github.com/python/cpython/blob/master/Lib/functools.py#L1169
# NOTE functools.lru_cache() applied on class method/properties creates
# class level cache and prevents garbage collection
def __init__(self, func):
self.func = func
self.attrname = None
self.__doc__ = func.__doc__
self.lock = RLock()
def __set_name__(self, owner, name):
if self.attrname is None:
self.attrname = name
elif name != self.attrname:
raise TypeError(
"Cannot assign the same cached_property to two different names "
f"({self.attrname!r} and {name!r}).")
def __get__(self, instance, owner=None):
if instance is None:
return self
if self.attrname is None:
raise TypeError(
"Cannot use cached_property instance without calling __set_name__ on it."
)
try:
cache = instance.__dict__
except AttributeError: # not all objects have __dict__ (e.g. class defines slots)
msg = (f"No '__dict__' attribute on {type(instance).__name__!r} "
f"instance to cache {self.attrname!r} property.")
raise TypeError(msg) from None
val = cache.get(self.attrname, _NOT_FOUND)
if val is _NOT_FOUND:
with self.lock:
# check if another thread filled cache while we awaited lock
val = cache.get(self.attrname, _NOT_FOUND)
if val is _NOT_FOUND:
val = self.func(instance)
try:
cache[self.attrname] = val
except TypeError:
msg = (
f"The '__dict__' attribute on {type(instance).__name__!r} instance "
f"does not support item assignment for caching {self.attrname!r} property."
)
raise TypeError(msg) from None
return val
class ExperimentLayoutParser():
'''Basic experiment layout parser. Can be used to get stain to channel
mapping and vice versa.
Args:
path: path of .xlsx layout
sheet: name of sheet to parse in .xlsx layout
'''
# TODO read condition section
# TODO handle exception, inform which section does not comply with template
# TODO handle edge cases (empty array, etc.)
def __init__(self, path, sheet):
self.path = path
self.sheet = sheet
self.barcode, self.plate_df, self.condition_df, self.stain_df = self._parse_sheet(
)
def _parse_sheet(self):
# load raw sheet array
template = pd.read_excel(self.path,
sheet_name=self.sheet,
header=None,
index_col=None,
engine='openpyxl').values
# find top left corner (and trim if necessary)
row0, col0 = np.argwhere(template == 'Barcode')[0]
template = template[row0:, col0:]
# read barcode
barcode = template[0, 1]
# read plate section
plate_row0 = np.argwhere(template[:, 0] == 'A')[0][0] - 1
plate_index, stain_nrows = self._trim_at_first_nan(
template[plate_row0 + 1:, 0])
plate_columns, stain_ncols = self._trim_at_first_nan(
template[plate_row0, 1:])
plate_df = pd.DataFrame(template[plate_row0 + 1:plate_row0 +
stain_nrows + 1, 1:stain_ncols + 1],
index=plate_index,
columns=plate_columns.astype(int))
# read condition section
condition_row0 = np.argwhere(template[:, 0] == 'Well*')[0][0]
condition_index, condition_nrows = self._trim_at_first_nan(
template[condition_row0 + 1:, 0])
condition_columns, condition_ncols = self._trim_at_first_nan(
template[condition_row0, 1:])
condition_df = pd.DataFrame(
template[condition_row0 + 1:condition_row0 + 1 + condition_nrows,
1:condition_ncols + 1],
index=condition_index,
columns=condition_columns)
# read stain section
stain_row0 = np.argwhere(template[:, 0] == 'Well*')[-1][0]
stain_index, stain_nrows = self._trim_at_first_nan(
template[stain_row0 + 1:, 0])
stain_columns, stain_ncols = self._trim_at_first_nan(
template[stain_row0, 1:])
stain_df = pd.DataFrame(template[stain_row0 + 1:stain_row0 + 1 +
stain_nrows, 1:stain_ncols + 1],
index=stain_index,
columns=stain_columns)
return barcode, plate_df, condition_df, stain_df
@cached_property
def condition_plate_df(self):
'''Flat dataframe with 'col', 'row', 'condition_id' columns'''
# yapf: disable
df = (self.plate_df
.applymap(lambda x: x.split('-')[0].strip() if isinstance(x, str) else np.nan)
.unstack()
.dropna()
.reset_index())
# yapf: enable
df.columns = ['col', 'row', 'condition_id']
return df
@cached_property
def stain_plate_df(self):
'''Flat Dataframe with 'col', 'row', 'stain_id' columns'''
# yapf: disable
df = (self.plate_df
.applymap(lambda x: x.split('-')[1].strip() if isinstance(x, str) else np.nan)
.unstack()
.dropna()
.astype(np.int)
.reset_index())
# yapf: enable
df.columns = ['col', 'row', 'stain_id']
return df
@cached_property
def condition_mapping_df(self):
'''Dataframe mapping plate row,col to experimental conditions'''
# lookup staining for each well
df = self.condition_df.loc[
self.condition_plate_df.condition_id].reset_index(drop=True)
# add columns indicating plate row/col
df = self.condition_plate_df[['col', 'row']].join(df)
return df.set_index(['row', 'col'])
@cached_property
def well_to_condition(self):
'''condition series with ['row', 'col'] multi-index'''
return self.condition_mapping_df['Condition*'].rename('condition')
@cached_property
def stain_mapping_df(self):
'''Dataframe mapping plate row,col to staining conditions'''
# lookup staining for each well
df = self.stain_df.loc[self.stain_plate_df.stain_id].reset_index(
drop=True)
# add columns indicating plate row/col
df = self.stain_plate_df[['col', 'row']].join(df)
return df.set_index(['row', 'col'])
@cached_property
def ch_to_stain(self):
'''stain series with ['row', 'col', 'channel'] multi-index'''
df = (self.stain_mapping_df.rename(
columns={
'Channel01*': 1,
'Channel02*': 2,
'Channel03*': 3,
'Channel04*': 4
})[[1, 2, 3, 4]].stack())
df.index.names = ['row', 'col', 'channel']
df.name = 'stain'
return df
@cached_property
def stain_to_ch(self):
'''channel series with ['row', 'col', 'stain'] multi-index'''
return self.ch_to_stain.reset_index().set_index(
['row', 'col', 'stain']).iloc[:, 0]
def get_first_matching_stain_to_ch(self, row, col, stain_candidates):
'''Returns the channel id of the first matched staining or None if there is no match'''
for stain in stain_candidates:
try:
return self.stain_to_ch[row, col, stain]
except KeyError as e:
pass
return None
@staticmethod
def _trim_at_first_nan(arr):
'''trim a 1d array (e.g. index or columns) at the first encountered nan.
returns the trimmed array and its length'''
arr_length = len(arr)
nan_idxs = np.nonzero(pd.isnull(arr))[0]
if len(nan_idxs) > 0:
arr_length = nan_idxs[0]
return arr[:arr_length], arr_length
@lru_cache(maxsize=32)
def cached_experiment_layout_parser(path, sheet):
return ExperimentLayoutParser(path, sheet)
| en | 0.642946 | # cached_property from functools python 3.8 # https://github.com/python/cpython/blob/master/Lib/functools.py#L1169 # NOTE functools.lru_cache() applied on class method/properties creates # class level cache and prevents garbage collection # not all objects have __dict__ (e.g. class defines slots) # check if another thread filled cache while we awaited lock Basic experiment layout parser. Can be used to get stain to channel mapping and vice versa. Args: path: path of .xlsx layout sheet: name of sheet to parse in .xlsx layout # TODO read condition section # TODO handle exception, inform which section does not comply with template # TODO handle edge cases (empty array, etc.) # load raw sheet array # find top left corner (and trim if necessary) # read barcode # read plate section # read condition section # read stain section Flat dataframe with 'col', 'row', 'condition_id' columns # yapf: disable # yapf: enable Flat Dataframe with 'col', 'row', 'stain_id' columns # yapf: disable # yapf: enable Dataframe mapping plate row,col to experimental conditions # lookup staining for each well # add columns indicating plate row/col condition series with ['row', 'col'] multi-index Dataframe mapping plate row,col to staining conditions # lookup staining for each well # add columns indicating plate row/col stain series with ['row', 'col', 'channel'] multi-index channel series with ['row', 'col', 'stain'] multi-index Returns the channel id of the first matched staining or None if there is no match trim a 1d array (e.g. index or columns) at the first encountered nan. returns the trimmed array and its length | 2.934844 | 3 |
hackerrank/Python/06_Itertools/itertools_combinations_with_replacement.py | mizukirc/python-snippets | 0 | 6620205 | <reponame>mizukirc/python-snippets
from itertools import combinations_with_replacement
instr, num = input().split()
[print("".join(i)) for i in combinations_with_replacement(sorted(instr), int(num))] | from itertools import combinations_with_replacement
instr, num = input().split()
[print("".join(i)) for i in combinations_with_replacement(sorted(instr), int(num))] | none | 1 | 3.280838 | 3 | |
src/genie/libs/parser/iosxe/tests/ShowIpNatStatistics/cli/equal/golden_output_4_expected.py | balmasea/genieparser | 204 | 6620206 | <filename>src/genie/libs/parser/iosxe/tests/ShowIpNatStatistics/cli/equal/golden_output_4_expected.py
expected_output = {
"active_translations": {"dynamic": 0, "extended": 0, "static": 0, "total": 0},
"appl_doors": 0,
"cef_punted_pkts": 0,
"cef_translated_pkts": 0,
"dynamic_mappings": {},
"expired_translations": 0,
"hits": 0,
"interfaces": {},
"misses": 0,
"normal_doors": 0,
"peak_translations": 0,
"queued_pkts": 0,
"total_doors": 0,
}
| <filename>src/genie/libs/parser/iosxe/tests/ShowIpNatStatistics/cli/equal/golden_output_4_expected.py
expected_output = {
"active_translations": {"dynamic": 0, "extended": 0, "static": 0, "total": 0},
"appl_doors": 0,
"cef_punted_pkts": 0,
"cef_translated_pkts": 0,
"dynamic_mappings": {},
"expired_translations": 0,
"hits": 0,
"interfaces": {},
"misses": 0,
"normal_doors": 0,
"peak_translations": 0,
"queued_pkts": 0,
"total_doors": 0,
}
| none | 1 | 0.889242 | 1 | |
Judger/model/C_CPP.py | YeGuangjun/eagle-oj-judger | 0 | 6620207 |
class C_CPP:
def __init__(self,config, data):
self.exe_path = config['exe_path']
self.max_memory = data['memory_limit']*1024*1024
self.source_name = config['source_name']
self.complication = config['complication']
self.compile_command =config['compile_command']
self.run_args = config['run']['args']
self.run_rule = config['run']['seccomp_rule']
def replace(self,outfile):
self.exe_path = self.exe_path.format(exe_path =outfile)
self.compile_command = self.compile_command.format(exe_path = outfile)
def to_list(self):
self.run_args =[]
self.compile_command = self.compile_command.split(' ')
def getSourceName(self):
return self.source_name
def getComplication(self):
return self.complication
def getCompileCommand(self):
return self.compile_command
def getExe_path(self):
return self.exe_path
def getMax_memory(self):
return self.max_memory
def getRun_args(self):
return self.run_args
def getRun_rule(self):
return self.run_rule
|
class C_CPP:
def __init__(self,config, data):
self.exe_path = config['exe_path']
self.max_memory = data['memory_limit']*1024*1024
self.source_name = config['source_name']
self.complication = config['complication']
self.compile_command =config['compile_command']
self.run_args = config['run']['args']
self.run_rule = config['run']['seccomp_rule']
def replace(self,outfile):
self.exe_path = self.exe_path.format(exe_path =outfile)
self.compile_command = self.compile_command.format(exe_path = outfile)
def to_list(self):
self.run_args =[]
self.compile_command = self.compile_command.split(' ')
def getSourceName(self):
return self.source_name
def getComplication(self):
return self.complication
def getCompileCommand(self):
return self.compile_command
def getExe_path(self):
return self.exe_path
def getMax_memory(self):
return self.max_memory
def getRun_args(self):
return self.run_args
def getRun_rule(self):
return self.run_rule
| none | 1 | 2.532247 | 3 | |
FastAPI Machine learning Apps/app/heartdiseaseprediction/train.py | rexsimiloluwah/Python-Experiments | 1 | 6620208 | <filename>FastAPI Machine learning Apps/app/heartdiseaseprediction/train.py
import os
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.metrics import f1_score, classification_report, accuracy_score, confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import seaborn as sns
import joblib
# Importing the data, Full data description --> https://www.kaggle.com/amanajmera1/framingham-heart-study-dataset
BASE_DIR = os.path.dirname(__file__)
DATASET_DIR = os.path.join(BASE_DIR, 'data')
MODELS_DIR = os.path.join(BASE_DIR, 'models')
class HeartDiseaseModel:
def __init__(self):
self.features = ['sysBP','glucose','age','totChol','cigsPerDay','diaBP','prevalentHyp','diabetes','BPMeds','male','BMI','prevalentStroke']
self.model_path = os.path.join(MODELS_DIR, 'heart-prediction-model.pkl')
def save_model(self):
""" Model Serialization """
self.model = self.train_model()
joblib.dump(self.model, self.model_path)
def load_model(self):
""" Model De-serialization """
self.model = joblib.load(self.model_path)
def train_model(self):
self.df = pd.read_csv(os.path.join(DATASET_DIR, 'framingham.csv')).dropna()
model = RandomForestClassifier(max_depth=8, max_features='sqrt', n_estimators=900)
X = self.df[self.features]
target = self.df.iloc[:, -1]
model.fit(X, target)
return model
def predict(self, data : dict):
print(data)
male_enc = {"female" : 0, "male" : 1}
bpmeds_enc = {"yes" : 1, "no" : 0}
diabetes_enc = {"yes" : 1, "no" : 0}
prevalence_enc = {"yes" : 1, "no" : 0}
bmi = data["weight"] / data["height"]
features = [data["sysBP"], data["glucose"], data["age"], data["totChol"], data["cigsPerDay"], data["diaBP"], prevalence_enc[data["prevalentHyp"].lower()], diabetes_enc[data["diabetes"].lower()], bpmeds_enc[data["BPMeds"].lower()], male_enc[data["gender"].lower()], bmi, prevalence_enc[data["prevalentStroke"].lower()]]
data = np.array([features])
prediction = self.model.predict(data)
prediction_prob = self.model.predict_proba(data).max()
response = {
"prediction" : int(prediction[0]),
"probability" : np.round(prediction_prob, 2)
}
print(response)
return response
if __name__ == "__main__":
heartdisease = HeartDiseaseModel()
# heartdisease.save_model()
heartdisease.load_model()
data = {
"sysBP" : 40, "glucose" : 20, "age" : 12, "totChol" : 130,
"cigsPerDay" : 1, "diaBP" : 10, "prevalentHyp" : "Yes", "diabetes" : "Yes", "BPMeds" : "No",
"gender" : "Male", "weight" : 23, "height" : 4, "prevalentStroke" : "Yes"
}
print(heartdisease.predict(data))
| <filename>FastAPI Machine learning Apps/app/heartdiseaseprediction/train.py
import os
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.metrics import f1_score, classification_report, accuracy_score, confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import seaborn as sns
import joblib
# Importing the data, Full data description --> https://www.kaggle.com/amanajmera1/framingham-heart-study-dataset
BASE_DIR = os.path.dirname(__file__)
DATASET_DIR = os.path.join(BASE_DIR, 'data')
MODELS_DIR = os.path.join(BASE_DIR, 'models')
class HeartDiseaseModel:
def __init__(self):
self.features = ['sysBP','glucose','age','totChol','cigsPerDay','diaBP','prevalentHyp','diabetes','BPMeds','male','BMI','prevalentStroke']
self.model_path = os.path.join(MODELS_DIR, 'heart-prediction-model.pkl')
def save_model(self):
""" Model Serialization """
self.model = self.train_model()
joblib.dump(self.model, self.model_path)
def load_model(self):
""" Model De-serialization """
self.model = joblib.load(self.model_path)
def train_model(self):
self.df = pd.read_csv(os.path.join(DATASET_DIR, 'framingham.csv')).dropna()
model = RandomForestClassifier(max_depth=8, max_features='sqrt', n_estimators=900)
X = self.df[self.features]
target = self.df.iloc[:, -1]
model.fit(X, target)
return model
def predict(self, data : dict):
print(data)
male_enc = {"female" : 0, "male" : 1}
bpmeds_enc = {"yes" : 1, "no" : 0}
diabetes_enc = {"yes" : 1, "no" : 0}
prevalence_enc = {"yes" : 1, "no" : 0}
bmi = data["weight"] / data["height"]
features = [data["sysBP"], data["glucose"], data["age"], data["totChol"], data["cigsPerDay"], data["diaBP"], prevalence_enc[data["prevalentHyp"].lower()], diabetes_enc[data["diabetes"].lower()], bpmeds_enc[data["BPMeds"].lower()], male_enc[data["gender"].lower()], bmi, prevalence_enc[data["prevalentStroke"].lower()]]
data = np.array([features])
prediction = self.model.predict(data)
prediction_prob = self.model.predict_proba(data).max()
response = {
"prediction" : int(prediction[0]),
"probability" : np.round(prediction_prob, 2)
}
print(response)
return response
if __name__ == "__main__":
heartdisease = HeartDiseaseModel()
# heartdisease.save_model()
heartdisease.load_model()
data = {
"sysBP" : 40, "glucose" : 20, "age" : 12, "totChol" : 130,
"cigsPerDay" : 1, "diaBP" : 10, "prevalentHyp" : "Yes", "diabetes" : "Yes", "BPMeds" : "No",
"gender" : "Male", "weight" : 23, "height" : 4, "prevalentStroke" : "Yes"
}
print(heartdisease.predict(data))
| en | 0.538699 | # Importing the data, Full data description --> https://www.kaggle.com/amanajmera1/framingham-heart-study-dataset Model Serialization Model De-serialization # heartdisease.save_model() | 2.393618 | 2 |
Code_Annually-accumulated-deficit.py | chandrakant6492/Drought-coping-strategy | 0 | 6620209 | import xarray as xr
import numpy as np
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
## Calculate maximum precipiration month
P_CHIRPS_argmax = xr.open_mfdataset('/chirps-v2.0.*.days_p25.nc').precip.sel(time = slice('2001','2012'))
P_CHIRPS_argmax = P_CHIRPS_argmax.resample(time = '1MS').sum('time')
P_CHIRPS_argmax = (P_CHIRPS_argmax.groupby('time.month').mean('time'))
P_CHIRPS_argmax = P_CHIRPS_argmax.argmax(axis = 0).values
def data_analysis(year,lat_top, lat_bottom, lon_min, lon_max):
global P_CHIRPS, E_Ensemble
P_CHIRPS = xr.open_mfdataset('/chirps-v2.0.*.days_p25.nc').precip.sel(time = slice(str(year),str(year+1)))
P_CHIRPS = P_CHIRPS.sel(latitude = slice(lat_bottom,lat_top), longitude = slice(lon_min,lon_max))
## Evaporation dataset
E_Ensemble = xr.open_mfdataset('/Evaporation.Ensemble_(equal_wgt).FLUXCOM(RS).BESS.PML_0.25res_daily_*.nc').Evaporation.sel(time = slice(str(year),str(year+1)))
E_Ensemble = E_Ensemble.sel(lat = slice(lat_bottom,lat_top), lon = slice(lon_min,lon_max))
############################################################################################################
def Ensemble_RZSC(year,lat_top = 50, lat_bottom = -50, lon_min = 0, lon_max = 360):
data_analysis(year,lat_top, lat_bottom, lon_min, lon_max)
sum_of_days = [0,31,59,90,120,151,181,212,243,273,304,334]
global max_deficit_annual
print('Calculation for year: '+ str(year))
print('Dataset: Ensemble')
deficit_all = np.array(E_Ensemble) - np.array(P_CHIRPS)
max_deficit_annual = np.zeros((P_CHIRPS_argmax.shape[0],P_CHIRPS_argmax.shape[1]))
for lat in tqdm(range(P_CHIRPS_argmax.shape[0])):
for long in range(P_CHIRPS_argmax.shape[1]):
cummul_deficit = np.zeros((730))
for i in range(sum_of_days[P_CHIRPS_argmax[lat,long]],sum_of_days[P_CHIRPS_argmax[lat,long]]+365):
cummul_deficit[i] = deficit_all[i,lat,long] + cummul_deficit[i-1]
if cummul_deficit[i] < 0:
cummul_deficit[i] = 0
else:
continue
max_deficit_annual[lat,long] = np.nanmax(cummul_deficit)
import datetime
from netCDF4 import Dataset,num2date,date2num
# -----------------------
nyears = 1;
unout = 'days since '+str(year)+'-01-01 00:00:00'
# -----------------------
ny, nx = (400, 1440)
lon = np.linspace(0.125,359.875,nx);
lat = np.linspace(-49.875,49.875,ny);
dataout = max_deficit_annual;
datesout = [datetime.datetime(int(year)+iyear,1,1) for iyear in range(nyears)]; # create datevalues
# =========================
ncout = Dataset('/home/chandra/data/Max_RZSC_annual_Chirps_Ensemble(BESS+PML+FLUXCOM)/Simulation7 (Sensitivity dataset)/Ensemble/Max_Rootzone_Ensemble_(BESS+PML+FLUXCOM)_Chirps_0.25res'+str(year)+'.nc', 'w','NETCDF4');
ncout.createDimension('lon',nx);
ncout.createDimension('lat',ny);
ncout.createDimension('time',nyears);
lonvar = ncout.createVariable('lon','float32',('lon'));lonvar[:] = lon;
latvar = ncout.createVariable('lat','float32',('lat'));latvar[:] = lat;
timevar = ncout.createVariable('time','float64',('time'));timevar.setncattr('units',unout);timevar[:]=date2num(datesout,unout);
# Do not add space between names
myvar = ncout.createVariable('RootZone_SC','float32',('time','lat','lon'));myvar.setncattr('units','mm');myvar[:] = dataout;
ncout.close();
for year in range(2001,2012):
Ensemble_RZSC(year)
| import xarray as xr
import numpy as np
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
## Calculate maximum precipiration month
P_CHIRPS_argmax = xr.open_mfdataset('/chirps-v2.0.*.days_p25.nc').precip.sel(time = slice('2001','2012'))
P_CHIRPS_argmax = P_CHIRPS_argmax.resample(time = '1MS').sum('time')
P_CHIRPS_argmax = (P_CHIRPS_argmax.groupby('time.month').mean('time'))
P_CHIRPS_argmax = P_CHIRPS_argmax.argmax(axis = 0).values
def data_analysis(year,lat_top, lat_bottom, lon_min, lon_max):
global P_CHIRPS, E_Ensemble
P_CHIRPS = xr.open_mfdataset('/chirps-v2.0.*.days_p25.nc').precip.sel(time = slice(str(year),str(year+1)))
P_CHIRPS = P_CHIRPS.sel(latitude = slice(lat_bottom,lat_top), longitude = slice(lon_min,lon_max))
## Evaporation dataset
E_Ensemble = xr.open_mfdataset('/Evaporation.Ensemble_(equal_wgt).FLUXCOM(RS).BESS.PML_0.25res_daily_*.nc').Evaporation.sel(time = slice(str(year),str(year+1)))
E_Ensemble = E_Ensemble.sel(lat = slice(lat_bottom,lat_top), lon = slice(lon_min,lon_max))
############################################################################################################
def Ensemble_RZSC(year,lat_top = 50, lat_bottom = -50, lon_min = 0, lon_max = 360):
data_analysis(year,lat_top, lat_bottom, lon_min, lon_max)
sum_of_days = [0,31,59,90,120,151,181,212,243,273,304,334]
global max_deficit_annual
print('Calculation for year: '+ str(year))
print('Dataset: Ensemble')
deficit_all = np.array(E_Ensemble) - np.array(P_CHIRPS)
max_deficit_annual = np.zeros((P_CHIRPS_argmax.shape[0],P_CHIRPS_argmax.shape[1]))
for lat in tqdm(range(P_CHIRPS_argmax.shape[0])):
for long in range(P_CHIRPS_argmax.shape[1]):
cummul_deficit = np.zeros((730))
for i in range(sum_of_days[P_CHIRPS_argmax[lat,long]],sum_of_days[P_CHIRPS_argmax[lat,long]]+365):
cummul_deficit[i] = deficit_all[i,lat,long] + cummul_deficit[i-1]
if cummul_deficit[i] < 0:
cummul_deficit[i] = 0
else:
continue
max_deficit_annual[lat,long] = np.nanmax(cummul_deficit)
import datetime
from netCDF4 import Dataset,num2date,date2num
# -----------------------
nyears = 1;
unout = 'days since '+str(year)+'-01-01 00:00:00'
# -----------------------
ny, nx = (400, 1440)
lon = np.linspace(0.125,359.875,nx);
lat = np.linspace(-49.875,49.875,ny);
dataout = max_deficit_annual;
datesout = [datetime.datetime(int(year)+iyear,1,1) for iyear in range(nyears)]; # create datevalues
# =========================
ncout = Dataset('/home/chandra/data/Max_RZSC_annual_Chirps_Ensemble(BESS+PML+FLUXCOM)/Simulation7 (Sensitivity dataset)/Ensemble/Max_Rootzone_Ensemble_(BESS+PML+FLUXCOM)_Chirps_0.25res'+str(year)+'.nc', 'w','NETCDF4');
ncout.createDimension('lon',nx);
ncout.createDimension('lat',ny);
ncout.createDimension('time',nyears);
lonvar = ncout.createVariable('lon','float32',('lon'));lonvar[:] = lon;
latvar = ncout.createVariable('lat','float32',('lat'));latvar[:] = lat;
timevar = ncout.createVariable('time','float64',('time'));timevar.setncattr('units',unout);timevar[:]=date2num(datesout,unout);
# Do not add space between names
myvar = ncout.createVariable('RootZone_SC','float32',('time','lat','lon'));myvar.setncattr('units','mm');myvar[:] = dataout;
ncout.close();
for year in range(2001,2012):
Ensemble_RZSC(year)
| de | 0.400016 | ## Calculate maximum precipiration month ## Evaporation dataset ############################################################################################################ # ----------------------- # ----------------------- # create datevalues # ========================= # Do not add space between names | 2.102083 | 2 |
kyu_4/sudoku_solution_validator/test_valid_solution.py | pedrocodacyorg2/codewars | 1 | 6620210 | # Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# ALGORITHMS DATA STRUCTURES VALIDATION
import allure
import unittest
from utils.log_func import print_log
from kyu_4.sudoku_solution_validator.valid_solution import validSolution
@allure.epic('4 kyu')
@allure.parent_suite('Competent')
@allure.suite("Algorithms")
@allure.sub_suite("Unit Tests")
@allure.feature("Validation")
@allure.story('Sudoku Solution Validator')
@allure.tag('ALGORITHMS', 'DATA STRUCTURES', 'VALIDATION')
@allure.link(url='https://www.codewars.com/kata/529bf0e9bdf7657179000008/train/python',
name='Source/Kata')
class ValidSolutionTestCase(unittest.TestCase):
"""
Testing validSolution function
"""
def test_valid_solution(self):
"""
Test a function validSolution/ValidateSolution/valid_solution()
that accepts a 2D array representing a Sudoku board, and returns
true if it is a valid solution, or false otherwise. The cells of
the sudoku board may also contain 0's, which will represent empty
cells. Boards containing one or more zeroes are considered to be
invalid solutions.
The board is always 9 cells by 9 cells, and every
cell only contains integers from 0 to 9.
:return:
"""
allure.dynamic.title("Testing validSolution")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p>Test a function validSolution/ValidateSolution/valid_solution()"
" that accepts a 2D array representing a Sudoku board, and returns"
" true if it is a valid solution, or false otherwise. The cells of"
" the sudoku board may also contain 0's, which will represent empty"
" cells. Boards containing one or more zeroes are considered to be"
" invalid solutions.</p>"
"<p>The board is always 9 cells by 9 cells, and every "
"cell only contains integers from 0 to 9.</p>")
test_data = [
([[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 5, 3, 4, 8],
[1, 9, 8, 3, 4, 2, 5, 6, 7],
[8, 5, 9, 7, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9]], True),
([[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 0, 3, 4, 9],
[1, 0, 0, 3, 4, 2, 5, 6, 0],
[8, 5, 9, 7, 6, 1, 0, 2, 0],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 0, 1, 5, 3, 7, 2, 1, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 0, 0, 4, 8, 1, 1, 7, 9]], False),
([[1, 2, 3, 4, 5, 6, 7, 8, 9],
[2, 3, 4, 5, 6, 7, 8, 9, 1],
[3, 4, 5, 6, 7, 8, 9, 1, 2],
[4, 5, 6, 7, 8, 9, 1, 2, 3],
[5, 6, 7, 8, 9, 1, 2, 3, 4],
[6, 7, 8, 9, 1, 2, 3, 4, 5],
[7, 8, 9, 1, 2, 3, 4, 5, 6],
[8, 9, 1, 2, 3, 4, 5, 6, 7],
[9, 1, 2, 3, 4, 5, 6, 7, 8]], False)]
for data in test_data:
board = data[0]
expected = data[1]
actual_result = validSolution(board)
print_log(board=board,
expected=expected,
actual_result=actual_result)
with allure.step("Enter test list ({}) and "
"verify the output ({}) vs "
"expected ({})".format(board,
actual_result,
expected)):
self.assertEqual(expected, actual_result)
| # Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# ALGORITHMS DATA STRUCTURES VALIDATION
import allure
import unittest
from utils.log_func import print_log
from kyu_4.sudoku_solution_validator.valid_solution import validSolution
@allure.epic('4 kyu')
@allure.parent_suite('Competent')
@allure.suite("Algorithms")
@allure.sub_suite("Unit Tests")
@allure.feature("Validation")
@allure.story('Sudoku Solution Validator')
@allure.tag('ALGORITHMS', 'DATA STRUCTURES', 'VALIDATION')
@allure.link(url='https://www.codewars.com/kata/529bf0e9bdf7657179000008/train/python',
name='Source/Kata')
class ValidSolutionTestCase(unittest.TestCase):
"""
Testing validSolution function
"""
def test_valid_solution(self):
"""
Test a function validSolution/ValidateSolution/valid_solution()
that accepts a 2D array representing a Sudoku board, and returns
true if it is a valid solution, or false otherwise. The cells of
the sudoku board may also contain 0's, which will represent empty
cells. Boards containing one or more zeroes are considered to be
invalid solutions.
The board is always 9 cells by 9 cells, and every
cell only contains integers from 0 to 9.
:return:
"""
allure.dynamic.title("Testing validSolution")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p>Test a function validSolution/ValidateSolution/valid_solution()"
" that accepts a 2D array representing a Sudoku board, and returns"
" true if it is a valid solution, or false otherwise. The cells of"
" the sudoku board may also contain 0's, which will represent empty"
" cells. Boards containing one or more zeroes are considered to be"
" invalid solutions.</p>"
"<p>The board is always 9 cells by 9 cells, and every "
"cell only contains integers from 0 to 9.</p>")
test_data = [
([[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 5, 3, 4, 8],
[1, 9, 8, 3, 4, 2, 5, 6, 7],
[8, 5, 9, 7, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9]], True),
([[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 0, 3, 4, 9],
[1, 0, 0, 3, 4, 2, 5, 6, 0],
[8, 5, 9, 7, 6, 1, 0, 2, 0],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 0, 1, 5, 3, 7, 2, 1, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 0, 0, 4, 8, 1, 1, 7, 9]], False),
([[1, 2, 3, 4, 5, 6, 7, 8, 9],
[2, 3, 4, 5, 6, 7, 8, 9, 1],
[3, 4, 5, 6, 7, 8, 9, 1, 2],
[4, 5, 6, 7, 8, 9, 1, 2, 3],
[5, 6, 7, 8, 9, 1, 2, 3, 4],
[6, 7, 8, 9, 1, 2, 3, 4, 5],
[7, 8, 9, 1, 2, 3, 4, 5, 6],
[8, 9, 1, 2, 3, 4, 5, 6, 7],
[9, 1, 2, 3, 4, 5, 6, 7, 8]], False)]
for data in test_data:
board = data[0]
expected = data[1]
actual_result = validSolution(board)
print_log(board=board,
expected=expected,
actual_result=actual_result)
with allure.step("Enter test list ({}) and "
"verify the output ({}) vs "
"expected ({})".format(board,
actual_result,
expected)):
self.assertEqual(expected, actual_result)
| en | 0.788299 | # Created by <NAME>. # GitHub: https://github.com/ikostan # LinkedIn: https://www.linkedin.com/in/egor-kostan/ # ALGORITHMS DATA STRUCTURES VALIDATION Testing validSolution function Test a function validSolution/ValidateSolution/valid_solution() that accepts a 2D array representing a Sudoku board, and returns true if it is a valid solution, or false otherwise. The cells of the sudoku board may also contain 0's, which will represent empty cells. Boards containing one or more zeroes are considered to be invalid solutions. The board is always 9 cells by 9 cells, and every cell only contains integers from 0 to 9. :return: | 3.357426 | 3 |
day-09/part-1/jon.py | lypnol/adventofcode-2021 | 6 | 6620211 | <gh_stars>1-10
from tool.runners.python import SubmissionPy
class JonSubmission(SubmissionPy):
def run(self, s):
m = s.strip().splitlines()
ny = len(m)
nx = len(m[0])
def val(x, y):
if x < 0 or x >= nx or y < 0 or y >= ny:
return 10
return int(m[y][x])
risk = 0
for x in range(nx):
for y in range(ny):
v = val(x, y)
if v < val(x-1, y) and v < val(x+1, y) and v < val(x, y-1) and v < val(x, y+1):
risk += 1 + v
return risk
def test_jon():
"""
Run `python -m pytest ./day-09/part-1/jon.py` to test the submission.
"""
assert (
JonSubmission().run(
"""
2199943210
3987894921
9856789892
8767896789
9899965678
""".strip()
)
== 15
)
| from tool.runners.python import SubmissionPy
class JonSubmission(SubmissionPy):
def run(self, s):
m = s.strip().splitlines()
ny = len(m)
nx = len(m[0])
def val(x, y):
if x < 0 or x >= nx or y < 0 or y >= ny:
return 10
return int(m[y][x])
risk = 0
for x in range(nx):
for y in range(ny):
v = val(x, y)
if v < val(x-1, y) and v < val(x+1, y) and v < val(x, y-1) and v < val(x, y+1):
risk += 1 + v
return risk
def test_jon():
"""
Run `python -m pytest ./day-09/part-1/jon.py` to test the submission.
"""
assert (
JonSubmission().run(
"""
2199943210
3987894921
9856789892
8767896789
9899965678
""".strip()
)
== 15
) | en | 0.424642 | Run `python -m pytest ./day-09/part-1/jon.py` to test the submission. 2199943210 3987894921 9856789892 8767896789 9899965678 | 2.953412 | 3 |
CODE/model/AlbertBurger.py | Zaaachary/CSQA | 0 | 6620212 | #! -*- encoding:utf-8 -*-
"""
@File : AlbertBurger.py
@Author : <NAME>
@Contact : <EMAIL>
@Dscpt :
"""
import math
import os
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
# from transformers import AlbertModel as tfms_AlbertModel
from transformers import AlbertPreTrainedModel, AlbertConfig
from .AlbertModel import AlbertModel
from .BurgerBase import CSLinearBase, BurgerBase
from utils import common
class CSDecoderLayer(nn.Module):
def __init__(self, config, cs_num):
super().__init__()
self.hidden_size = config.hidden_size
self.cs_num = cs_num
self.tfm_decoder = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=8)
def forward(self, qa_expand, cs, qa_padding_mask_expand, cs_padding_mask):
'''
qa_expand [B, cs_num, qa_len, H] -> [qa_len, B*cs_num, H]
cs [B, cs_num, cs_len, H] -> [cs_len, B*cs_num, H]
qa_padding [B, cs_num, qa_len] -> [B*cs_num, qa_len]
cs_padding [B, cs_num, cs_len] -> [B*cs_num, cs_len]
decoder_output [cs_len, B*cs_num, H] -> [B, cs_num, cs_len, H]
'''
batch_size, cs_num, qa_len, hidden_size = qa_expand.shape
cs_len = cs.shape[-2]
qa_expand = qa_expand.contiguous().view(batch_size*cs_num, qa_len, hidden_size)
qa = qa_expand.transpose(0, 1)
cs = cs.contiguous().view(batch_size*cs_num, cs_len, hidden_size)
cs = cs.transpose(0, 1)
qa_padding = qa_padding_mask_expand.contiguous().view(batch_size*cs_num, qa_len)
cs_padding = cs_padding_mask.contiguous().view(batch_size*cs_num, cs_len)
# import pdb; pdb.set_trace()
decoder_output = self.tfm_decoder(tgt=cs, memory=qa, tgt_key_padding_mask=cs_padding, memory_key_padding_mask=qa_padding)
decoder_output = decoder_output.transpose(0, 1)
decoder_output = decoder_output.contiguous().view(batch_size, cs_num, cs_len, hidden_size)
return decoder_output
class AttentionLayer(nn.Module):
def __init__(self, config, cs_num):
super().__init__()
self.hidden_size = config.hidden_size
self.cs_num = cs_num
self.mult_attn = nn.MultiheadAttention(self.hidden_size, num_heads=1)
def forward(self, query, keyvalue, attn_mask):
'''
input:
- query: [b, cs_num, Lq, hidden]
- keyvalue: [b, cs_num, Lkv, hidden]
output:
- attn_output_weights: [B, cs_num, Lq, Lkv]
- attn_output: [B, cs_num, Lq, H]
'''
Batch_size, cs_num, Lq, hidden_size = query.shape
Lkv = keyvalue.shape[-2]
# [B, cs_num, L, H] -> [B * cs_num, L, H] -> [L, B*cs_num, H]
query = query.contiguous().view(-1, query.size(-2), query.size(-1))
query = query.transpose(0, 1)
keyvalue = keyvalue.contiguous().view(-1, keyvalue.size(-2), keyvalue.size(-1))
keyvalue = keyvalue.transpose(0, 1)
# [B, cs_num, L] -> [B*cs_num, L]
attn_mask = attn_mask.contiguous().view(-1, attn_mask.size(-1))
# [Lq, B*cs_num, H], [B*cs_num, Lq, Ls]
attn_output, attn_output_weights = self.mult_attn(query, keyvalue, keyvalue, key_padding_mask=attn_mask)
# [Lq, B*cs_num, H] -> [B*cs_num, Lq, H] -> [B, cs_num, Lq, H]
attn_output = attn_output.transpose(0, 1)
attn_output = attn_output.view(Batch_size, cs_num, Lq, hidden_size)
# [B*cs_num, Lq, Lkv] -> [B, cs_num, Lq, Lkv]
attn_output_weights = attn_output_weights.view(Batch_size, self.cs_num, Lq, Lkv)
return attn_output, attn_output_weights
class AttentionMerge(nn.Module):
def __init__(self, input_size, attention_size, dropout_prob):
super(AttentionMerge, self).__init__()
self.attention_size = attention_size
self.hidden_layer = nn.Linear(input_size, self.attention_size)
self.query_ = nn.Parameter(torch.Tensor(self.attention_size, 1))
self.dropout = nn.Dropout(dropout_prob)
self.query_.data.normal_(mean=0.0, std=0.02)
def forward(self, values, mask=None):
"""
H (B, L, hidden_size) => h (B, hidden_size)
"""
if mask is None:
mask = torch.zeros_like(values)
# mask = mask.data.normal_(mean=0.0, std=0.02)
else:
mask = (1 - mask.unsqueeze(-1).type(torch.float)) * -1000.
# values [batch*5, len, hidden]
keys = self.hidden_layer(values)
keys = torch.tanh(keys)
query_var = torch.var(self.query_)
# (b, l, h) + (h, 1) -> (b, l, 1)
attention_probs = keys @ self.query_ / math.sqrt(self.attention_size * query_var)
# attention_probs = keys @ self.query_ / math.sqrt(self.attention_size)
# import pdb; pdb.set_trace()
attention_probs = F.softmax(attention_probs * mask, dim=-2) # [batch*5, len, 1]
attention_probs = self.dropout(attention_probs)
context = torch.sum(attention_probs + values, dim=-2) # [batch*5, hidden]
return context
# Burger Here
class AlbertBurgerAlpha5(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha5, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
# self.cs_attention_scorer = AttentionLayer(config, self.cs_num)
self.cs_qa_attn = CSDecoderLayer(self.config, self.cs_num)
self.albert2 = AlbertModel(self.config2)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# import pdb; pdb.set_trace()
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
decoder_output = self.cs_qa_attn(qa_encoding_expand, cs_encoding, qa_padding_mask_expand, cs_padding_mask)
# import pdb; pdb.set_trace()
middle_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(decoder_output, middle_hidden_state)
outputs = self.albert2(inputs_embeds=middle_hidden_state)
merged_output = self.attention_merge(outputs.last_hidden_state, flat_attention_mask)
logits = self.scorer(merged_output).view(-1, 5)
# pooler_output = outputs.pooler_output # [CLS]
# [B*5, H] => [B*5, 1] => [B, 5]
# logits = self.scorer(pooler_output).view(-1, 5)
return logits
class AlbertBurgerAlpha4(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha4, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
self.cs_attention = AttentionLayer(config, self.cs_num)
self.cs_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.cs_scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.albert2 = AlbertModel(self.config2)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
attn_output, attn_weights = self.cs_attention(cs_encoding, qa_encoding_expand, qa_padding_mask_expand)
# import pdb; pdb.set_trace()
merge = self.cs_merge(attn_output, cs_padding_mask) # merge: [5B, cs_num, H]
cs_score = self.cs_scorer(merge)
cs_score = F.softmax(cs_score, dim=-2).unsqueeze(-1)
cs_encoding = cs_score * cs_encoding
middle_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(cs_encoding, middle_hidden_state)
outputs = self.albert2(inputs_embeds=middle_hidden_state)
pooler_output = outputs.pooler_output # [CLS]
# [B*5, H] => [B*5, 1] => [B, 5]
logits = self.scorer(pooler_output).view(-1, 5)
return logits
class AlbertBurgerAlpha3(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha3, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
self.cs_attention_scorer = AttentionLayer(config, self.cs_num)
self.albert2 = AlbertModel(self.config2)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, _, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
attn_output, attn_weights = self.cs_attention_scorer(cs_encoding, qa_encoding_expand, qa_padding_mask_expand)
middle_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(attn_output, middle_hidden_state)
outputs = self.albert2(inputs_embeds=middle_hidden_state)
merged_output = self.attention_merge(outputs.last_hidden_state, flat_attention_mask)
logits = self.scorer(merged_output).view(-1, 5)
return logits
class AlbertBurgerAlpha2(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha2, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
self.cs_attention_scorer = AttentionLayer(config, self.cs_num)
self.albert2 = AlbertModel(self.config2)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, _, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
attn_output, attn_weights = self.cs_attention_scorer(cs_encoding, qa_encoding_expand, qa_padding_mask_expand)
middle_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(attn_output, middle_hidden_state)
outputs = self.albert2(inputs_embeds=middle_hidden_state, attention_mask=flat_attention_mask)
outputs = outputs.last_hidden_state
merged_output = self.attention_merge(outputs, flat_attention_mask)
logits = self.scorer(merged_output).view(-1, 5)
return logits
def predict(self, input_ids, attention_mask, token_type_ids):
"""
return: [B, 5]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
logits = F.softmax(logits, dim=1)
return logits
class AlbertBurgerAlpha6(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha6, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
self.cs_qa_attn = CSDecoderLayer(self.config, self.cs_num)
self.albert2 = AlbertModel(self.config2)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# qa_expand, cs, qa_padding_mask_expand, cs_padding_mask
qa_encoding_expand = self.cs_qa_attn(cs_encoding, qa_encoding_expand, cs_padding_mask, qa_padding_mask_expand)
qa_encoding = qa_encoding_expand.mean(dim=1)
# TODO
# import pdb; pdb.set_trace()
middle_hidden_state = torch.cat((middle_hidden_state[:,0,:].unsqueeze(1), qa_encoding), dim=1)
middle_padding_mask = torch.cat((flat_attention_mask[:,0].unsqueeze(1), qa_padding_mask), dim=1)
outputs = self.albert2(inputs_embeds=middle_hidden_state, attention_mask=middle_padding_mask)
pooler_output = outputs.pooler_output # [CLS]
# [B*5, H] => [B*5, 1] => [B, 5]
logits = self.scorer(pooler_output).view(-1, 5)
return logits
class AlbertBurgerAlpha1(nn.Module):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha1, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
self.albert1 = AlbertModel(self.config1)
self.albert2 = AlbertModel(self.config2)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
outputs = self.albert2(inputs_embeds=middle_hidden_state)
pooler_output = outputs.pooler_output # [CLS]
# [B*5, H] => [B*5, 1] => [B, 5]
logits = self.scorer(pooler_output).view(-1, 5)
return logits
@staticmethod
def init_weights(module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, model_path_or_name, **kwargs):
config = AlbertConfig()
config.without_embedding = False
if "xxlarge" in model_path_or_name:
config.hidden_size = 4096
config.intermediate_size = 16384
config.num_attention_heads = 64
config.num_hidden_layers = 12
elif "xlarge" in model_path_or_name:
config.hidden_size = 2048
config.intermediate_size = 8192
config.num_attention_heads = 16
config.num_hidden_layers = 24
elif "large" in model_path_or_name:
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_attention_heads = 16
config.num_hidden_layers = 24
elif "base" in model_path_or_name:
config.hidden_size = 768
config.intermediate_size = 3072
config.num_attention_heads = 12
config.num_hidden_layers = 12
model = cls(config, **kwargs)
model.albert1 = model.albert1.from_pretrained(model_path_or_name, config=model.config1)
model.albert2 = model.albert2.from_pretrained(model_path_or_name, config=model.config2)
return model
class AlbertBurgerAlpha0(AlbertPreTrainedModel):
'''
input_ids [b, 5, seq_len] => [5b, seq_len]
=> PTM
cs_encoding [5b, cs_num, cs_seq_len, hidden]
'''
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha0, self).__init__(config)
# length config
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
# modules
self.albert = AlbertModel(config)
self.cs_attention_scorer = AttentionLayer(config, self.cs_num)
# self.albert2 = AlbertModel(config)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.init_weights()
def forward(self, input_ids, attention_mask, token_type_ids, labels):
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels)
with torch.no_grad():
logits = F.softmax(logits, dim=1)
predicts = torch.argmax(logits, dim=1)
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids=None, attention_mask=None, token_type_ids=None):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert(
input_ids = flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids
)
# pooler_output = outputs.pooler_output # outputs[1] [5B, H]
last_hidden_state = outputs.last_hidden_state # outputs[0] [5B, L, H]
# separate query and commonsense encoding
# encoding:[5B, cs_num, L, H] mask:[5B, cs_num, L]
cs_encoding, _, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, last_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# import pdb; pdb.set_trace()
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
attn_output, attn_weights = self.cs_attention_scorer(cs_encoding, qa_encoding_expand, qa_padding_mask_expand)
new_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(attn_output, last_hidden_state)
merge = self.attention_merge(new_hidden_state, flat_attention_mask)
logits = self.scorer(merge).view(-1,5)
return logits
def _pad_qacs_to_maxlen(self, flat_input_ids, last_hidden_state):
'''
input
- last_hidden_state [5B, seq_len, hidden]
return
- cs_range_list: [B*5, cs_num] (start, end) sep+1, sep
- qa_range_list: [B*5] (end)
- cs_encoding: [B*5, cs_num, max_cs_len, H]
- qa_encoding: [B*5, cs_num, max_qa_len, H]
- cs_attn_mask
- qa_attn_mask
'''
# Locate SEP token
input_ids = flat_input_ids.cpu().clone().detach().numpy()
sep_ids = input_ids == 3 # sep toekn in albert is 3
sep_locate = [[] for _ in range(len(sep_ids))] # [B*5, seq_num]
for index_1, case in enumerate(sep_ids):
for index_2, token in enumerate(case):
if token:
sep_locate[index_1].append(index_2)
# Get CS, QA range
self.cs_range_list = [[] for _ in range(len(sep_ids))] # [B*5, cs_num]
self.qa_range_list = []
for index, case in enumerate(sep_locate):
# Q [S] QC [S] Choice [S] cs_1[S] cs_2[S]
# qa: Q [S] QC [S] Choice [S]; cs: cs_1[S]
self.qa_range_list.append(case[2]+1)
start = case[2]
for end in case[3:]:
cs_tuple = (start+1, end+1)
start = end
self.cs_range_list[index].append(cs_tuple)
# Get CS and stack to tensor
hidden_size = last_hidden_state.shape[-1]
cs_batch_list, cs_padding_batch_list = [],[]
for index, case in enumerate(self.cs_range_list):
cs_case_list = []
cs_padding_list = []
for cs in case:
start, end = cs
pad_len = self.max_cs_len - (end-start)
cs = last_hidden_state[index, start:end, :]
zero = torch.zeros(pad_len, hidden_size, dtype=last_hidden_state.dtype)
zero = zero.to(last_hidden_state.device)
cs_case_list.append(torch.cat((cs, zero), dim=-2))
mask = torch.cat((torch.zeros(cs.shape[:-1]), torch.ones(pad_len))).bool()
mask = mask.to(last_hidden_state.device)
cs_padding_list.append(mask)
cs_batch_list.append(torch.stack(cs_case_list))
cs_padding_batch_list.append(torch.stack(cs_padding_list))
cs_encoding = torch.stack(cs_batch_list)
cs_padding_mask = torch.stack(cs_padding_batch_list)
# Get QA and stack to tensor
qa_batch_list, qa_padding_batch_list = [], []
for index, case in enumerate(self.qa_range_list):
end = case
pad_len = self.max_qa_len - (end-1)
qa = last_hidden_state[index, 1:end, :] # [CLS] -> [SEP] doesn't contain CLS
zero = torch.zeros(pad_len, hidden_size, dtype=last_hidden_state.dtype)
zero = zero.to(last_hidden_state.device)
qa_batch_list.append(torch.cat((qa, zero), dim=-2))
mask = torch.cat((torch.zeros(qa.shape[:-1]), torch.ones(pad_len))).bool()
mask = mask.to(last_hidden_state.device)
qa_padding_batch_list.append(mask)
qa_encoding = torch.stack(qa_batch_list)
# qa_encoding = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask = torch.stack(qa_padding_batch_list)
# qa_padding_mask = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
return cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask
def _remvoe_cs_pad_add_to_last_hidden_state(self, cs_encoding, last_hidden_state):
self.cs_range_list # [[(start, end), (start, end)], [], [],]
self.qa_range_list # [end, end, end,]
for index, cs_range in enumerate(self.cs_range_list):
for cs_index, cs_case in enumerate(cs_range):
start, end = cs_case
last_hidden_state[index, start:end] = cs_encoding[index, cs_index,:end-start,:]
return last_hidden_state
| #! -*- encoding:utf-8 -*-
"""
@File : AlbertBurger.py
@Author : <NAME>
@Contact : <EMAIL>
@Dscpt :
"""
import math
import os
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
# from transformers import AlbertModel as tfms_AlbertModel
from transformers import AlbertPreTrainedModel, AlbertConfig
from .AlbertModel import AlbertModel
from .BurgerBase import CSLinearBase, BurgerBase
from utils import common
class CSDecoderLayer(nn.Module):
def __init__(self, config, cs_num):
super().__init__()
self.hidden_size = config.hidden_size
self.cs_num = cs_num
self.tfm_decoder = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=8)
def forward(self, qa_expand, cs, qa_padding_mask_expand, cs_padding_mask):
'''
qa_expand [B, cs_num, qa_len, H] -> [qa_len, B*cs_num, H]
cs [B, cs_num, cs_len, H] -> [cs_len, B*cs_num, H]
qa_padding [B, cs_num, qa_len] -> [B*cs_num, qa_len]
cs_padding [B, cs_num, cs_len] -> [B*cs_num, cs_len]
decoder_output [cs_len, B*cs_num, H] -> [B, cs_num, cs_len, H]
'''
batch_size, cs_num, qa_len, hidden_size = qa_expand.shape
cs_len = cs.shape[-2]
qa_expand = qa_expand.contiguous().view(batch_size*cs_num, qa_len, hidden_size)
qa = qa_expand.transpose(0, 1)
cs = cs.contiguous().view(batch_size*cs_num, cs_len, hidden_size)
cs = cs.transpose(0, 1)
qa_padding = qa_padding_mask_expand.contiguous().view(batch_size*cs_num, qa_len)
cs_padding = cs_padding_mask.contiguous().view(batch_size*cs_num, cs_len)
# import pdb; pdb.set_trace()
decoder_output = self.tfm_decoder(tgt=cs, memory=qa, tgt_key_padding_mask=cs_padding, memory_key_padding_mask=qa_padding)
decoder_output = decoder_output.transpose(0, 1)
decoder_output = decoder_output.contiguous().view(batch_size, cs_num, cs_len, hidden_size)
return decoder_output
class AttentionLayer(nn.Module):
def __init__(self, config, cs_num):
super().__init__()
self.hidden_size = config.hidden_size
self.cs_num = cs_num
self.mult_attn = nn.MultiheadAttention(self.hidden_size, num_heads=1)
def forward(self, query, keyvalue, attn_mask):
'''
input:
- query: [b, cs_num, Lq, hidden]
- keyvalue: [b, cs_num, Lkv, hidden]
output:
- attn_output_weights: [B, cs_num, Lq, Lkv]
- attn_output: [B, cs_num, Lq, H]
'''
Batch_size, cs_num, Lq, hidden_size = query.shape
Lkv = keyvalue.shape[-2]
# [B, cs_num, L, H] -> [B * cs_num, L, H] -> [L, B*cs_num, H]
query = query.contiguous().view(-1, query.size(-2), query.size(-1))
query = query.transpose(0, 1)
keyvalue = keyvalue.contiguous().view(-1, keyvalue.size(-2), keyvalue.size(-1))
keyvalue = keyvalue.transpose(0, 1)
# [B, cs_num, L] -> [B*cs_num, L]
attn_mask = attn_mask.contiguous().view(-1, attn_mask.size(-1))
# [Lq, B*cs_num, H], [B*cs_num, Lq, Ls]
attn_output, attn_output_weights = self.mult_attn(query, keyvalue, keyvalue, key_padding_mask=attn_mask)
# [Lq, B*cs_num, H] -> [B*cs_num, Lq, H] -> [B, cs_num, Lq, H]
attn_output = attn_output.transpose(0, 1)
attn_output = attn_output.view(Batch_size, cs_num, Lq, hidden_size)
# [B*cs_num, Lq, Lkv] -> [B, cs_num, Lq, Lkv]
attn_output_weights = attn_output_weights.view(Batch_size, self.cs_num, Lq, Lkv)
return attn_output, attn_output_weights
class AttentionMerge(nn.Module):
def __init__(self, input_size, attention_size, dropout_prob):
super(AttentionMerge, self).__init__()
self.attention_size = attention_size
self.hidden_layer = nn.Linear(input_size, self.attention_size)
self.query_ = nn.Parameter(torch.Tensor(self.attention_size, 1))
self.dropout = nn.Dropout(dropout_prob)
self.query_.data.normal_(mean=0.0, std=0.02)
def forward(self, values, mask=None):
"""
H (B, L, hidden_size) => h (B, hidden_size)
"""
if mask is None:
mask = torch.zeros_like(values)
# mask = mask.data.normal_(mean=0.0, std=0.02)
else:
mask = (1 - mask.unsqueeze(-1).type(torch.float)) * -1000.
# values [batch*5, len, hidden]
keys = self.hidden_layer(values)
keys = torch.tanh(keys)
query_var = torch.var(self.query_)
# (b, l, h) + (h, 1) -> (b, l, 1)
attention_probs = keys @ self.query_ / math.sqrt(self.attention_size * query_var)
# attention_probs = keys @ self.query_ / math.sqrt(self.attention_size)
# import pdb; pdb.set_trace()
attention_probs = F.softmax(attention_probs * mask, dim=-2) # [batch*5, len, 1]
attention_probs = self.dropout(attention_probs)
context = torch.sum(attention_probs + values, dim=-2) # [batch*5, hidden]
return context
# Burger Here
class AlbertBurgerAlpha5(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha5, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
# self.cs_attention_scorer = AttentionLayer(config, self.cs_num)
self.cs_qa_attn = CSDecoderLayer(self.config, self.cs_num)
self.albert2 = AlbertModel(self.config2)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# import pdb; pdb.set_trace()
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
decoder_output = self.cs_qa_attn(qa_encoding_expand, cs_encoding, qa_padding_mask_expand, cs_padding_mask)
# import pdb; pdb.set_trace()
middle_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(decoder_output, middle_hidden_state)
outputs = self.albert2(inputs_embeds=middle_hidden_state)
merged_output = self.attention_merge(outputs.last_hidden_state, flat_attention_mask)
logits = self.scorer(merged_output).view(-1, 5)
# pooler_output = outputs.pooler_output # [CLS]
# [B*5, H] => [B*5, 1] => [B, 5]
# logits = self.scorer(pooler_output).view(-1, 5)
return logits
class AlbertBurgerAlpha4(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha4, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
self.cs_attention = AttentionLayer(config, self.cs_num)
self.cs_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.cs_scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.albert2 = AlbertModel(self.config2)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
attn_output, attn_weights = self.cs_attention(cs_encoding, qa_encoding_expand, qa_padding_mask_expand)
# import pdb; pdb.set_trace()
merge = self.cs_merge(attn_output, cs_padding_mask) # merge: [5B, cs_num, H]
cs_score = self.cs_scorer(merge)
cs_score = F.softmax(cs_score, dim=-2).unsqueeze(-1)
cs_encoding = cs_score * cs_encoding
middle_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(cs_encoding, middle_hidden_state)
outputs = self.albert2(inputs_embeds=middle_hidden_state)
pooler_output = outputs.pooler_output # [CLS]
# [B*5, H] => [B*5, 1] => [B, 5]
logits = self.scorer(pooler_output).view(-1, 5)
return logits
class AlbertBurgerAlpha3(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha3, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
self.cs_attention_scorer = AttentionLayer(config, self.cs_num)
self.albert2 = AlbertModel(self.config2)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, _, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
attn_output, attn_weights = self.cs_attention_scorer(cs_encoding, qa_encoding_expand, qa_padding_mask_expand)
middle_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(attn_output, middle_hidden_state)
outputs = self.albert2(inputs_embeds=middle_hidden_state)
merged_output = self.attention_merge(outputs.last_hidden_state, flat_attention_mask)
logits = self.scorer(merged_output).view(-1, 5)
return logits
class AlbertBurgerAlpha2(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha2, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
self.cs_attention_scorer = AttentionLayer(config, self.cs_num)
self.albert2 = AlbertModel(self.config2)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, _, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
attn_output, attn_weights = self.cs_attention_scorer(cs_encoding, qa_encoding_expand, qa_padding_mask_expand)
middle_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(attn_output, middle_hidden_state)
outputs = self.albert2(inputs_embeds=middle_hidden_state, attention_mask=flat_attention_mask)
outputs = outputs.last_hidden_state
merged_output = self.attention_merge(outputs, flat_attention_mask)
logits = self.scorer(merged_output).view(-1, 5)
return logits
def predict(self, input_ids, attention_mask, token_type_ids):
"""
return: [B, 5]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
logits = F.softmax(logits, dim=1)
return logits
class AlbertBurgerAlpha6(nn.Module, CSLinearBase, BurgerBase):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha6, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
# modules
self.albert1 = AlbertModel(self.config1)
self.cs_qa_attn = CSDecoderLayer(self.config, self.cs_num)
self.albert2 = AlbertModel(self.config2)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, middle_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# qa_expand, cs, qa_padding_mask_expand, cs_padding_mask
qa_encoding_expand = self.cs_qa_attn(cs_encoding, qa_encoding_expand, cs_padding_mask, qa_padding_mask_expand)
qa_encoding = qa_encoding_expand.mean(dim=1)
# TODO
# import pdb; pdb.set_trace()
middle_hidden_state = torch.cat((middle_hidden_state[:,0,:].unsqueeze(1), qa_encoding), dim=1)
middle_padding_mask = torch.cat((flat_attention_mask[:,0].unsqueeze(1), qa_padding_mask), dim=1)
outputs = self.albert2(inputs_embeds=middle_hidden_state, attention_mask=middle_padding_mask)
pooler_output = outputs.pooler_output # [CLS]
# [B*5, H] => [B*5, 1] => [B, 5]
logits = self.scorer(pooler_output).view(-1, 5)
return logits
class AlbertBurgerAlpha1(nn.Module):
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha1, self).__init__()
self.albert1_layers = kwargs['albert1_layers']
self.config = config
self.config1 = deepcopy(config)
self.config1.num_hidden_layers = self.albert1_layers
self.config2 = deepcopy(config)
self.config2.num_hidden_layers = config.num_hidden_layers - self.albert1_layers
self.config2.without_embedding = True
self.albert1 = AlbertModel(self.config1)
self.albert2 = AlbertModel(self.config2)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
"""
input_ids: [B, 5, L]
labels: [B, ]
"""
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels) # get the CELoss
with torch.no_grad():
logits = F.softmax(logits, dim=1) # get the score
predicts = torch.argmax(logits, dim=1) # find the result
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids, attention_mask, token_type_ids):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert1(
input_ids = flat_input_ids,
attention_mask = flat_attention_mask,
token_type_ids = flat_token_type_ids
)
middle_hidden_state = outputs.last_hidden_state
outputs = self.albert2(inputs_embeds=middle_hidden_state)
pooler_output = outputs.pooler_output # [CLS]
# [B*5, H] => [B*5, 1] => [B, 5]
logits = self.scorer(pooler_output).view(-1, 5)
return logits
@staticmethod
def init_weights(module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, model_path_or_name, **kwargs):
config = AlbertConfig()
config.without_embedding = False
if "xxlarge" in model_path_or_name:
config.hidden_size = 4096
config.intermediate_size = 16384
config.num_attention_heads = 64
config.num_hidden_layers = 12
elif "xlarge" in model_path_or_name:
config.hidden_size = 2048
config.intermediate_size = 8192
config.num_attention_heads = 16
config.num_hidden_layers = 24
elif "large" in model_path_or_name:
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_attention_heads = 16
config.num_hidden_layers = 24
elif "base" in model_path_or_name:
config.hidden_size = 768
config.intermediate_size = 3072
config.num_attention_heads = 12
config.num_hidden_layers = 12
model = cls(config, **kwargs)
model.albert1 = model.albert1.from_pretrained(model_path_or_name, config=model.config1)
model.albert2 = model.albert2.from_pretrained(model_path_or_name, config=model.config2)
return model
class AlbertBurgerAlpha0(AlbertPreTrainedModel):
'''
input_ids [b, 5, seq_len] => [5b, seq_len]
=> PTM
cs_encoding [5b, cs_num, cs_seq_len, hidden]
'''
def __init__(self, config, **kwargs):
super(AlbertBurgerAlpha0, self).__init__(config)
# length config
self.cs_num = kwargs['model_cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
# modules
self.albert = AlbertModel(config)
self.cs_attention_scorer = AttentionLayer(config, self.cs_num)
# self.albert2 = AlbertModel(config)
self.attention_merge = AttentionMerge(config.hidden_size, config.hidden_size//4, 0.1)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size, 1)
)
self.init_weights()
def forward(self, input_ids, attention_mask, token_type_ids, labels):
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels)
with torch.no_grad():
logits = F.softmax(logits, dim=1)
predicts = torch.argmax(logits, dim=1)
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids=None, attention_mask=None, token_type_ids=None):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert(
input_ids = flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids
)
# pooler_output = outputs.pooler_output # outputs[1] [5B, H]
last_hidden_state = outputs.last_hidden_state # outputs[0] [5B, L, H]
# separate query and commonsense encoding
# encoding:[5B, cs_num, L, H] mask:[5B, cs_num, L]
cs_encoding, _, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, last_hidden_state)
qa_encoding_expand = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask_expand = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
# import pdb; pdb.set_trace()
# attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc]
attn_output, attn_weights = self.cs_attention_scorer(cs_encoding, qa_encoding_expand, qa_padding_mask_expand)
new_hidden_state = self._remvoe_cs_pad_add_to_last_hidden_state(attn_output, last_hidden_state)
merge = self.attention_merge(new_hidden_state, flat_attention_mask)
logits = self.scorer(merge).view(-1,5)
return logits
def _pad_qacs_to_maxlen(self, flat_input_ids, last_hidden_state):
'''
input
- last_hidden_state [5B, seq_len, hidden]
return
- cs_range_list: [B*5, cs_num] (start, end) sep+1, sep
- qa_range_list: [B*5] (end)
- cs_encoding: [B*5, cs_num, max_cs_len, H]
- qa_encoding: [B*5, cs_num, max_qa_len, H]
- cs_attn_mask
- qa_attn_mask
'''
# Locate SEP token
input_ids = flat_input_ids.cpu().clone().detach().numpy()
sep_ids = input_ids == 3 # sep toekn in albert is 3
sep_locate = [[] for _ in range(len(sep_ids))] # [B*5, seq_num]
for index_1, case in enumerate(sep_ids):
for index_2, token in enumerate(case):
if token:
sep_locate[index_1].append(index_2)
# Get CS, QA range
self.cs_range_list = [[] for _ in range(len(sep_ids))] # [B*5, cs_num]
self.qa_range_list = []
for index, case in enumerate(sep_locate):
# Q [S] QC [S] Choice [S] cs_1[S] cs_2[S]
# qa: Q [S] QC [S] Choice [S]; cs: cs_1[S]
self.qa_range_list.append(case[2]+1)
start = case[2]
for end in case[3:]:
cs_tuple = (start+1, end+1)
start = end
self.cs_range_list[index].append(cs_tuple)
# Get CS and stack to tensor
hidden_size = last_hidden_state.shape[-1]
cs_batch_list, cs_padding_batch_list = [],[]
for index, case in enumerate(self.cs_range_list):
cs_case_list = []
cs_padding_list = []
for cs in case:
start, end = cs
pad_len = self.max_cs_len - (end-start)
cs = last_hidden_state[index, start:end, :]
zero = torch.zeros(pad_len, hidden_size, dtype=last_hidden_state.dtype)
zero = zero.to(last_hidden_state.device)
cs_case_list.append(torch.cat((cs, zero), dim=-2))
mask = torch.cat((torch.zeros(cs.shape[:-1]), torch.ones(pad_len))).bool()
mask = mask.to(last_hidden_state.device)
cs_padding_list.append(mask)
cs_batch_list.append(torch.stack(cs_case_list))
cs_padding_batch_list.append(torch.stack(cs_padding_list))
cs_encoding = torch.stack(cs_batch_list)
cs_padding_mask = torch.stack(cs_padding_batch_list)
# Get QA and stack to tensor
qa_batch_list, qa_padding_batch_list = [], []
for index, case in enumerate(self.qa_range_list):
end = case
pad_len = self.max_qa_len - (end-1)
qa = last_hidden_state[index, 1:end, :] # [CLS] -> [SEP] doesn't contain CLS
zero = torch.zeros(pad_len, hidden_size, dtype=last_hidden_state.dtype)
zero = zero.to(last_hidden_state.device)
qa_batch_list.append(torch.cat((qa, zero), dim=-2))
mask = torch.cat((torch.zeros(qa.shape[:-1]), torch.ones(pad_len))).bool()
mask = mask.to(last_hidden_state.device)
qa_padding_batch_list.append(mask)
qa_encoding = torch.stack(qa_batch_list)
# qa_encoding = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask = torch.stack(qa_padding_batch_list)
# qa_padding_mask = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
return cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask
def _remvoe_cs_pad_add_to_last_hidden_state(self, cs_encoding, last_hidden_state):
self.cs_range_list # [[(start, end), (start, end)], [], [],]
self.qa_range_list # [end, end, end,]
for index, cs_range in enumerate(self.cs_range_list):
for cs_index, cs_case in enumerate(cs_range):
start, end = cs_case
last_hidden_state[index, start:end] = cs_encoding[index, cs_index,:end-start,:]
return last_hidden_state
| en | 0.425878 | #! -*- encoding:utf-8 -*- @File : AlbertBurger.py @Author : <NAME> @Contact : <EMAIL> @Dscpt : # from transformers import AlbertModel as tfms_AlbertModel qa_expand [B, cs_num, qa_len, H] -> [qa_len, B*cs_num, H] cs [B, cs_num, cs_len, H] -> [cs_len, B*cs_num, H] qa_padding [B, cs_num, qa_len] -> [B*cs_num, qa_len] cs_padding [B, cs_num, cs_len] -> [B*cs_num, cs_len] decoder_output [cs_len, B*cs_num, H] -> [B, cs_num, cs_len, H] # import pdb; pdb.set_trace() input: - query: [b, cs_num, Lq, hidden] - keyvalue: [b, cs_num, Lkv, hidden] output: - attn_output_weights: [B, cs_num, Lq, Lkv] - attn_output: [B, cs_num, Lq, H] # [B, cs_num, L, H] -> [B * cs_num, L, H] -> [L, B*cs_num, H] # [B, cs_num, L] -> [B*cs_num, L] # [Lq, B*cs_num, H], [B*cs_num, Lq, Ls] # [Lq, B*cs_num, H] -> [B*cs_num, Lq, H] -> [B, cs_num, Lq, H] # [B*cs_num, Lq, Lkv] -> [B, cs_num, Lq, Lkv] H (B, L, hidden_size) => h (B, hidden_size) # mask = mask.data.normal_(mean=0.0, std=0.02) # values [batch*5, len, hidden] # (b, l, h) + (h, 1) -> (b, l, 1) # attention_probs = keys @ self.query_ / math.sqrt(self.attention_size) # import pdb; pdb.set_trace() # [batch*5, len, 1] # [batch*5, hidden] # Burger Here # modules # self.cs_attention_scorer = AttentionLayer(config, self.cs_num) input_ids: [B, 5, L] labels: [B, ] # get the CELoss # get the score # find the result # [B, 5, L] => [B * 5, L] # import pdb; pdb.set_trace() # attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc] # import pdb; pdb.set_trace() # pooler_output = outputs.pooler_output # [CLS] # [B*5, H] => [B*5, 1] => [B, 5] # logits = self.scorer(pooler_output).view(-1, 5) # modules input_ids: [B, 5, L] labels: [B, ] # get the CELoss # get the score # find the result # [B, 5, L] => [B * 5, L] # attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc] # import pdb; pdb.set_trace() # merge: [5B, cs_num, H] # [CLS] # [B*5, H] => [B*5, 1] => [B, 5] # modules input_ids: [B, 5, L] labels: [B, ] # get the CELoss # get 
the score # find the result # [B, 5, L] => [B * 5, L] # attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc] # modules input_ids: [B, 5, L] labels: [B, ] # get the CELoss # get the score # find the result # [B, 5, L] => [B * 5, L] # attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc] return: [B, 5] # modules input_ids: [B, 5, L] labels: [B, ] # get the CELoss # get the score # find the result # [B, 5, L] => [B * 5, L] # qa_expand, cs, qa_padding_mask_expand, cs_padding_mask # TODO # import pdb; pdb.set_trace() # [CLS] # [B*5, H] => [B*5, 1] => [B, 5] input_ids: [B, 5, L] labels: [B, ] # get the CELoss # get the score # find the result # [B, 5, L] => [B * 5, L] # [CLS] # [B*5, H] => [B*5, 1] => [B, 5] input_ids [b, 5, seq_len] => [5b, seq_len] => PTM cs_encoding [5b, cs_num, cs_seq_len, hidden] # length config # modules # self.albert2 = AlbertModel(config) # [B, 5, L] => [B * 5, L] # pooler_output = outputs.pooler_output # outputs[1] [5B, H] # outputs[0] [5B, L, H] # separate query and commonsense encoding # encoding:[5B, cs_num, L, H] mask:[5B, cs_num, L] # import pdb; pdb.set_trace() # attn_output:[5B, cs_num, L, H] attn_weights:[5B, cs_num, Lq, Lc] input - last_hidden_state [5B, seq_len, hidden] return - cs_range_list: [B*5, cs_num] (start, end) sep+1, sep - qa_range_list: [B*5] (end) - cs_encoding: [B*5, cs_num, max_cs_len, H] - qa_encoding: [B*5, cs_num, max_qa_len, H] - cs_attn_mask - qa_attn_mask # Locate SEP token # sep toekn in albert is 3 # [B*5, seq_num] # Get CS, QA range # [B*5, cs_num] # Q [S] QC [S] Choice [S] cs_1[S] cs_2[S] # qa: Q [S] QC [S] Choice [S]; cs: cs_1[S] # Get CS and stack to tensor # Get QA and stack to tensor # [CLS] -> [SEP] doesn't contain CLS # qa_encoding = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1) # qa_padding_mask = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1) # [[(start, end), (start, end)], [], [],] # [end, end, end,] | 2.335596 | 2 |
tests/test_provinsi.py | hexatester/covid19-id | 0 | 6620213 | from covid19_id import get_prov
from covid19_id.provinsi import DataProvinsi
def test_get_prov():
prov = get_prov()
assert isinstance(prov, DataProvinsi)
| from covid19_id import get_prov
from covid19_id.provinsi import DataProvinsi
def test_get_prov():
prov = get_prov()
assert isinstance(prov, DataProvinsi)
| none | 1 | 1.967512 | 2 | |
end-to-end-Machine-Learning/signal/src/transforms.py | lapisco/Lapisco_Courses | 2 | 6620214 | <filename>end-to-end-Machine-Learning/signal/src/transforms.py<gh_stars>1-10
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
from config import *
'''
The transformations are applied in the dataframe in the following order:
1 - Data cleaning: remove NaN and split into categorical and numerical features
2 - Remove unwanted features in numerical features
3 - Remove unwanted features in categorical features
4 - Feature scaling in numerical features (This one is created by using StandardScaler from sklearn)
5 - Concat two features set
6 - Feature selction (Optional)
7 - Drop NaN that might appear from the scaling tranformation
'''
class FeatureSelector(BaseEstimator, TransformerMixin):
'''
Nothing yet
'''
def __init__(self, features_names):
self.features_names = features_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.features_names].values
class FeatureAppend(BaseEstimator):
'''
Only works for SENSORC and SENSORA
'''
def __init__(self, sensor, feature_set):
self.sensor = sensor
self.feature_set = feature_set
def fit(self, X, y=None):
f1 = SELECTED_FEATURES[self.sensor][self.feature_set]['SF1']
f2 = SELECTED_FEATURES[self.sensor][self.feature_set]['SF2']
f3 = SELECTED_FEATURES[self.sensor][self.feature_set]['SF3']
f4 = SELECTED_FEATURES[self.sensor][self.feature_set]['SF4']
X_out = np.concatenate(
(
X[f1].values,
X[f2].values,
X[f3].values
),
axis = 0
)
# frequency and cc_bus
X_2 = np.concatenate((X[f4], X[f4], X[f4]), axis=0)
X_out = np.concatenate((X_out, X_2), axis=1)
y = np.concatenate((X['Class'], X['Class'], X['Class']), axis=0)
self.X_out = X_out
self.y = y
return X_out
def transform(self, X):
return X
class DataCleaning(BaseEstimator, TransformerMixin):
'''
Clean data according the procedures studied in the notebook analyses-02. In short:
(i) Drops Nan; (ii) split data in categorical and numerical features;
(iii) 1-hot-enconding of categorical features; (iv) Get a unique categorical features
of an user in a period of 16 weeks; (v) Get a unique numerical features of an user
in a period of 16 weeks; (vi) Average the numerical features in a period of 16-week;
(vii) contat both feature set;
-----
Methods
------------------------
> fit(df)
Parameters:
df: dataframe of the dataset, in which the user name must be set as index;
-----
Returns:
self
> transform(df)
Parameters:
- df: dataframe of the dataset, in which the user name must be set as index;
-----
Returns:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def fit(self, df):
return self
def transform(self, df):
# Remove NaN:
df_clean = df.dropna(how='any', inplace=False)
return df_clean
class RemoveFeatures(BaseEstimator, TransformerMixin):
'''
Remove unwanted features from the dataframes;
-----
Initialized parameters:
- features: str or list cointaining the field that ought to be removed. Default: 'week'.
Methods
------------------------
> fit(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
self
> transform(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def __init__(self, features='week'):
self.features = features
def fit(self, df):
return self
def transform(self, df):
return {'numerical': df['numerical'].drop(columns=self.features),
'categorical': df['categorical'].drop(columns=self.features)}
class FeatureScaling(BaseEstimator, TransformerMixin):
'''
Scale features by standardization;
-----
Initialized parameters:
- type: str cointaining the scaling method. Default: 'std'.
- 'std': StandardScaler()
Methods
------------------------
> fit(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
self
Atrributes:
self._scaler: saved object that sould be used along with the trained model.
> transform(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def __init__(self, type='std'):
self.type = type
def fit(self, X):
self._scaler = StandardScaler().fit(X)
return self
def transform(self, X):
if self.type == 'std':
return self._scaler.transform(X)
class MergeFeatures(TransformerMixin):
'''
Concat the numerical and categorical dataframes into a single one.
-----
Methods
------------------------
> fit(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
self
> transform(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
- dataframe: a daframe with both feature set.
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def fit(self, df):
return self
def transform(self, df):
return pd.concat([df['numerical'], df['categorical']], axis=1)
class DropNaN(TransformerMixin):
'''
Drop any row from the dataframe that contains a NaN.
-----
Methods
------------------------
> fit(df)
Parameters:
- df: a dataframe
-----
Returns:
self
> transform(df)
Parameters:
- df: a dataframe
-----
Returns:
- dataframe: a daframe withou NaN.
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def fit(self, df):
return self
def transform(self, df):
return df.dropna()
# class FeatureSelection(TransformerMixin):
# '''
# Select the relevant features.
# -----
# Initialized parameters:
# - features: str or list of str containing the fields the should be kept
# Atrributes:
# self.features: feature names.
# Methods
# ------------------------
# > fit(df)
# Parameters:
# - df: a dataframe.
# -----
# Returns:
# self
# > transform(df)
# Parameters:
# - df: a dataframe.
# -----
# Returns:
# - df: a dataframe.
# -----------------
# OBS.: fit_transform method is available, inherited from TransformerMixin class.
# '''
# def __init__(self, extractor='FOURIER', features=None):
# if not features:
# self.features = SELECTED_FEATURES[extractor]
# self.extractor = extractor
# def fit(self, df):
# return self
# def transform(self, df):
# return df[self.features]
class GetLables(TransformerMixin):
'''
Get the labels following the user index in the feature dataframe.
-----
Methods
------------------------
> fit(df_user, df_features)
Parameters:
- df_user: dataframe containing the user's data
- df_features: dataframe the outta be used as the feature set. It MUST contain
the user's name as index.
-----
Returns:
self
> transform(df_user, df_features)
Parameters:
- df_user: dataframe containing the user's data
- df_features: dataframe the outta be used as the feature set. It MUST contain
the user's name as index.
-----
Returns:
- df: a dataframe.
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def fit(self, df):
return self
def transform(self, df):
return df['Class'].values
if __name__ == "__main__":
import pandas as pd
from os.path import join
from config import *
df = pd.read_csv(join(DATA_FOLDER, DATA['SENSORV']['FOURIER']) + '.csv')
tf = FeatureAppend(sensor='SENSORV', feature_set='FOURIER')
X = tf.fit(df)
print(X.shape) | <filename>end-to-end-Machine-Learning/signal/src/transforms.py<gh_stars>1-10
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
from config import *
'''
The transformations are applied in the dataframe in the following order:
1 - Data cleaning: remove NaN and split into categorical and numerical features
2 - Remove unwanted features in numerical features
3 - Remove unwanted features in categorical features
4 - Feature scaling in numerical features (This one is created by using StandardScaler from sklearn)
5 - Concat two features set
6 - Feature selction (Optional)
7 - Drop NaN that might appear from the scaling tranformation
'''
class FeatureSelector(BaseEstimator, TransformerMixin):
'''
Nothing yet
'''
def __init__(self, features_names):
self.features_names = features_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.features_names].values
class FeatureAppend(BaseEstimator):
'''
Only works for SENSORC and SENSORA
'''
def __init__(self, sensor, feature_set):
self.sensor = sensor
self.feature_set = feature_set
def fit(self, X, y=None):
f1 = SELECTED_FEATURES[self.sensor][self.feature_set]['SF1']
f2 = SELECTED_FEATURES[self.sensor][self.feature_set]['SF2']
f3 = SELECTED_FEATURES[self.sensor][self.feature_set]['SF3']
f4 = SELECTED_FEATURES[self.sensor][self.feature_set]['SF4']
X_out = np.concatenate(
(
X[f1].values,
X[f2].values,
X[f3].values
),
axis = 0
)
# frequency and cc_bus
X_2 = np.concatenate((X[f4], X[f4], X[f4]), axis=0)
X_out = np.concatenate((X_out, X_2), axis=1)
y = np.concatenate((X['Class'], X['Class'], X['Class']), axis=0)
self.X_out = X_out
self.y = y
return X_out
def transform(self, X):
return X
class DataCleaning(BaseEstimator, TransformerMixin):
'''
Clean data according the procedures studied in the notebook analyses-02. In short:
(i) Drops Nan; (ii) split data in categorical and numerical features;
(iii) 1-hot-enconding of categorical features; (iv) Get a unique categorical features
of an user in a period of 16 weeks; (v) Get a unique numerical features of an user
in a period of 16 weeks; (vi) Average the numerical features in a period of 16-week;
(vii) contat both feature set;
-----
Methods
------------------------
> fit(df)
Parameters:
df: dataframe of the dataset, in which the user name must be set as index;
-----
Returns:
self
> transform(df)
Parameters:
- df: dataframe of the dataset, in which the user name must be set as index;
-----
Returns:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def fit(self, df):
return self
def transform(self, df):
# Remove NaN:
df_clean = df.dropna(how='any', inplace=False)
return df_clean
class RemoveFeatures(BaseEstimator, TransformerMixin):
'''
Remove unwanted features from the dataframes;
-----
Initialized parameters:
- features: str or list cointaining the field that ought to be removed. Default: 'week'.
Methods
------------------------
> fit(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
self
> transform(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def __init__(self, features='week'):
self.features = features
def fit(self, df):
return self
def transform(self, df):
return {'numerical': df['numerical'].drop(columns=self.features),
'categorical': df['categorical'].drop(columns=self.features)}
class FeatureScaling(BaseEstimator, TransformerMixin):
'''
Scale features by standardization;
-----
Initialized parameters:
- type: str cointaining the scaling method. Default: 'std'.
- 'std': StandardScaler()
Methods
------------------------
> fit(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
self
Atrributes:
self._scaler: saved object that sould be used along with the trained model.
> transform(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def __init__(self, type='std'):
self.type = type
def fit(self, X):
self._scaler = StandardScaler().fit(X)
return self
def transform(self, X):
if self.type == 'std':
return self._scaler.transform(X)
class MergeFeatures(TransformerMixin):
'''
Concat the numerical and categorical dataframes into a single one.
-----
Methods
------------------------
> fit(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
self
> transform(df)
Parameters:
- dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame}
-----
Returns:
- dataframe: a daframe with both feature set.
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def fit(self, df):
return self
def transform(self, df):
return pd.concat([df['numerical'], df['categorical']], axis=1)
class DropNaN(TransformerMixin):
'''
Drop any row from the dataframe that contains a NaN.
-----
Methods
------------------------
> fit(df)
Parameters:
- df: a dataframe
-----
Returns:
self
> transform(df)
Parameters:
- df: a dataframe
-----
Returns:
- dataframe: a daframe withou NaN.
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def fit(self, df):
return self
def transform(self, df):
return df.dropna()
# class FeatureSelection(TransformerMixin):
# '''
# Select the relevant features.
# -----
# Initialized parameters:
# - features: str or list of str containing the fields the should be kept
# Atrributes:
# self.features: feature names.
# Methods
# ------------------------
# > fit(df)
# Parameters:
# - df: a dataframe.
# -----
# Returns:
# self
# > transform(df)
# Parameters:
# - df: a dataframe.
# -----
# Returns:
# - df: a dataframe.
# -----------------
# OBS.: fit_transform method is available, inherited from TransformerMixin class.
# '''
# def __init__(self, extractor='FOURIER', features=None):
# if not features:
# self.features = SELECTED_FEATURES[extractor]
# self.extractor = extractor
# def fit(self, df):
# return self
# def transform(self, df):
# return df[self.features]
class GetLables(TransformerMixin):
'''
Get the labels following the user index in the feature dataframe.
-----
Methods
------------------------
> fit(df_user, df_features)
Parameters:
- df_user: dataframe containing the user's data
- df_features: dataframe the outta be used as the feature set. It MUST contain
the user's name as index.
-----
Returns:
self
> transform(df_user, df_features)
Parameters:
- df_user: dataframe containing the user's data
- df_features: dataframe the outta be used as the feature set. It MUST contain
the user's name as index.
-----
Returns:
- df: a dataframe.
-----------------
OBS.: fit_transform method is available, inherited from TransformerMixin class.
'''
def fit(self, df):
return self
def transform(self, df):
return df['Class'].values
if __name__ == "__main__":
import pandas as pd
from os.path import join
from config import *
df = pd.read_csv(join(DATA_FOLDER, DATA['SENSORV']['FOURIER']) + '.csv')
tf = FeatureAppend(sensor='SENSORV', feature_set='FOURIER')
X = tf.fit(df)
print(X.shape) | en | 0.575097 | The transformations are applied in the dataframe in the following order: 1 - Data cleaning: remove NaN and split into categorical and numerical features 2 - Remove unwanted features in numerical features 3 - Remove unwanted features in categorical features 4 - Feature scaling in numerical features (This one is created by using StandardScaler from sklearn) 5 - Concat two features set 6 - Feature selction (Optional) 7 - Drop NaN that might appear from the scaling tranformation Nothing yet Only works for SENSORC and SENSORA # frequency and cc_bus Clean data according the procedures studied in the notebook analyses-02. In short: (i) Drops Nan; (ii) split data in categorical and numerical features; (iii) 1-hot-enconding of categorical features; (iv) Get a unique categorical features of an user in a period of 16 weeks; (v) Get a unique numerical features of an user in a period of 16 weeks; (vi) Average the numerical features in a period of 16-week; (vii) contat both feature set; ----- Methods ------------------------ > fit(df) Parameters: df: dataframe of the dataset, in which the user name must be set as index; ----- Returns: self > transform(df) Parameters: - df: dataframe of the dataset, in which the user name must be set as index; ----- Returns: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----------------- OBS.: fit_transform method is available, inherited from TransformerMixin class. # Remove NaN: Remove unwanted features from the dataframes; ----- Initialized parameters: - features: str or list cointaining the field that ought to be removed. Default: 'week'. 
Methods ------------------------ > fit(df) Parameters: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----- Returns: self > transform(df) Parameters: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----- Returns: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----------------- OBS.: fit_transform method is available, inherited from TransformerMixin class. Scale features by standardization; ----- Initialized parameters: - type: str cointaining the scaling method. Default: 'std'. - 'std': StandardScaler() Methods ------------------------ > fit(df) Parameters: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----- Returns: self Atrributes: self._scaler: saved object that sould be used along with the trained model. > transform(df) Parameters: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----- Returns: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----------------- OBS.: fit_transform method is available, inherited from TransformerMixin class. Concat the numerical and categorical dataframes into a single one. ----- Methods ------------------------ > fit(df) Parameters: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----- Returns: self > transform(df) Parameters: - dict: a dictonary variable of dataframes: {'numerical': DataFrame, 'categorical': DataFrame} ----- Returns: - dataframe: a daframe with both feature set. ----------------- OBS.: fit_transform method is available, inherited from TransformerMixin class. Drop any row from the dataframe that contains a NaN. 
----- Methods ------------------------ > fit(df) Parameters: - df: a dataframe ----- Returns: self > transform(df) Parameters: - df: a dataframe ----- Returns: - dataframe: a daframe withou NaN. ----------------- OBS.: fit_transform method is available, inherited from TransformerMixin class. # class FeatureSelection(TransformerMixin): # ''' # Select the relevant features. # ----- # Initialized parameters: # - features: str or list of str containing the fields the should be kept # Atrributes: # self.features: feature names. # Methods # ------------------------ # > fit(df) # Parameters: # - df: a dataframe. # ----- # Returns: # self # > transform(df) # Parameters: # - df: a dataframe. # ----- # Returns: # - df: a dataframe. # ----------------- # OBS.: fit_transform method is available, inherited from TransformerMixin class. # ''' # def __init__(self, extractor='FOURIER', features=None): # if not features: # self.features = SELECTED_FEATURES[extractor] # self.extractor = extractor # def fit(self, df): # return self # def transform(self, df): # return df[self.features] Get the labels following the user index in the feature dataframe. ----- Methods ------------------------ > fit(df_user, df_features) Parameters: - df_user: dataframe containing the user's data - df_features: dataframe the outta be used as the feature set. It MUST contain the user's name as index. ----- Returns: self > transform(df_user, df_features) Parameters: - df_user: dataframe containing the user's data - df_features: dataframe the outta be used as the feature set. It MUST contain the user's name as index. ----- Returns: - df: a dataframe. ----------------- OBS.: fit_transform method is available, inherited from TransformerMixin class. | 2.997401 | 3 |
content/migrations/0008_auto_20160302_1020.py | RachellCalhoun/lightandleadership | 1 | 6620215 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('content', '0007_merge'),
]
operations = [
migrations.CreateModel(
name='ChildrensProgram',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('title', models.TextField(blank=True, null=True)),
('img', models.ImageField(blank=True, null=True, upload_to='')),
('subtitle', models.TextField(blank=True, null=True)),
('text', models.TextField(blank=True, null=True)),
('color', models.CharField(max_length=20, blank=True)),
('order', models.PositiveIntegerField()),
],
),
migrations.AlterField(
model_name='ourstory',
name='img',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('content', '0007_merge'),
]
operations = [
migrations.CreateModel(
name='ChildrensProgram',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('title', models.TextField(blank=True, null=True)),
('img', models.ImageField(blank=True, null=True, upload_to='')),
('subtitle', models.TextField(blank=True, null=True)),
('text', models.TextField(blank=True, null=True)),
('color', models.CharField(max_length=20, blank=True)),
('order', models.PositiveIntegerField()),
],
),
migrations.AlterField(
model_name='ourstory',
name='img',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.691149 | 2 |
osrm/router.py | BeJulien/TP-Python | 0 | 6620216 | # coding: utf-8
import json
import requests
from http import HTTPStatus
from types import SimpleNamespace
from . import OSRMError
from . import find_config
from . import find_uri
from .parser import Parser
from .types.route import Route
from .utils.profile import Profile
from .utils.service import Service
from .utils.version import Version
class OSRMRouterError(OSRMError):
"""
Classe pour les erreurs liées au router du module OSRM.
"""
pass
class Router:
"""
Calculateur d'un itinéraire entre deux points définis. Attention, aucune
vérification n'est effectuée à l'intérieur de celle-ci, les deux points
peuvent donc être dans des villes différentes.
Attributes:
lat_a (float): La latitude du point de départ.
lon_a (float): La longitude du point de départ.
lat_b (float): La latitude du point d'arrivée.
lon_b (float): La longitude du point d'arrivée.
"""
def __init__(self, lat_a: float, lon_a: float, lat_b: float, lon_b: float):
self.lat_a = lat_a
self.lon_a = lon_a
self.lat_b = lat_b
self.lon_b = lon_b
def shortest_route(self) -> Route:
"""
Récupère l'itinéraire le plus court en utilisant l'API OSRM.
Returns:
Une route contenant toutes les étapes de parcours.
"""
request = requests.get(f"{find_uri()}/{Service.ROUTE}/{Version.V1}/{Profile.FOOT}/"
f"{self.lon_a},{self.lat_a};{self.lon_b},{self.lat_b}", params=find_config())
response = json.loads(request.text, object_hook=lambda d: SimpleNamespace(**d))
if request.status_code != HTTPStatus.OK or not response:
raise OSRMRouterError()
parser = Parser(response)
return parser.shortest_route()
| # coding: utf-8
import json
import requests
from http import HTTPStatus
from types import SimpleNamespace
from . import OSRMError
from . import find_config
from . import find_uri
from .parser import Parser
from .types.route import Route
from .utils.profile import Profile
from .utils.service import Service
from .utils.version import Version
class OSRMRouterError(OSRMError):
"""
Classe pour les erreurs liées au router du module OSRM.
"""
pass
class Router:
"""
Calculateur d'un itinéraire entre deux points définis. Attention, aucune
vérification n'est effectuée à l'intérieur de celle-ci, les deux points
peuvent donc être dans des villes différentes.
Attributes:
lat_a (float): La latitude du point de départ.
lon_a (float): La longitude du point de départ.
lat_b (float): La latitude du point d'arrivée.
lon_b (float): La longitude du point d'arrivée.
"""
def __init__(self, lat_a: float, lon_a: float, lat_b: float, lon_b: float):
self.lat_a = lat_a
self.lon_a = lon_a
self.lat_b = lat_b
self.lon_b = lon_b
def shortest_route(self) -> Route:
"""
Récupère l'itinéraire le plus court en utilisant l'API OSRM.
Returns:
Une route contenant toutes les étapes de parcours.
"""
request = requests.get(f"{find_uri()}/{Service.ROUTE}/{Version.V1}/{Profile.FOOT}/"
f"{self.lon_a},{self.lat_a};{self.lon_b},{self.lat_b}", params=find_config())
response = json.loads(request.text, object_hook=lambda d: SimpleNamespace(**d))
if request.status_code != HTTPStatus.OK or not response:
raise OSRMRouterError()
parser = Parser(response)
return parser.shortest_route()
| fr | 0.990614 | # coding: utf-8 Classe pour les erreurs liées au router du module OSRM. Calculateur d'un itinéraire entre deux points définis. Attention, aucune
vérification n'est effectuée à l'intérieur de celle-ci, les deux points
peuvent donc être dans des villes différentes.
Attributes:
lat_a (float): La latitude du point de départ.
lon_a (float): La longitude du point de départ.
lat_b (float): La latitude du point d'arrivée.
lon_b (float): La longitude du point d'arrivée. Récupère l'itinéraire le plus court en utilisant l'API OSRM.
Returns:
Une route contenant toutes les étapes de parcours. | 2.588409 | 3 |
Chapter10/programs/prog03.py | gits00/raspberry-pi-computer-vision-programming | 17 | 6620217 | import numpy as np
import matplotlib.pyplot as plt
import cv2
img = cv2.imread('/home/pi/book/dataset/house.tiff', 1)
b = img[:, :, 0]
g = img[:, :, 1]
r = img[:, :, 2]
plt.subplots_adjust(hspace=0.5, wspace=0.25)
plt.subplot(2, 2, 1)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
cmap='gray')
plt.axis('off')
plt.title('Original Image')
plt.subplot(2, 2, 2)
plt.hist(r.ravel(), bins=256, range=(0, 255), color='r')
plt.title('Red Histogram')
plt.subplot(2, 2, 3)
plt.hist(g.ravel(), bins=256, range=(0, 255), color='g')
plt.title('Green Histogram')
plt.subplot(2, 2, 4)
plt.hist(b.ravel(), bins=256, range=(0, 255), color='b')
plt.title('Blue Histogram')
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import cv2
img = cv2.imread('/home/pi/book/dataset/house.tiff', 1)
b = img[:, :, 0]
g = img[:, :, 1]
r = img[:, :, 2]
plt.subplots_adjust(hspace=0.5, wspace=0.25)
plt.subplot(2, 2, 1)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
cmap='gray')
plt.axis('off')
plt.title('Original Image')
plt.subplot(2, 2, 2)
plt.hist(r.ravel(), bins=256, range=(0, 255), color='r')
plt.title('Red Histogram')
plt.subplot(2, 2, 3)
plt.hist(g.ravel(), bins=256, range=(0, 255), color='g')
plt.title('Green Histogram')
plt.subplot(2, 2, 4)
plt.hist(b.ravel(), bins=256, range=(0, 255), color='b')
plt.title('Blue Histogram')
plt.show()
| none | 1 | 3.08326 | 3 | |
src/cfec/constraints/_max_difference.py | LoGosX/counterfactuals | 0 | 6620218 | <filename>src/cfec/constraints/_max_difference.py
from dataclasses import dataclass
from typing import List
@dataclass
class ValueMaxDiff:
columns: List[str]
max_difference: float
| <filename>src/cfec/constraints/_max_difference.py
from dataclasses import dataclass
from typing import List
@dataclass
class ValueMaxDiff:
columns: List[str]
max_difference: float
| none | 1 | 1.676233 | 2 | |
quantify_tokenization/decompress.py | Anonymous-ARR/code | 0 | 6620219 | import zstandard, sys
filename = sys.argv[1]
outfile = filename.split('/')[-1].split('.')[0] + ".jsonl"
dctx = zstandard.ZstdDecompressor()
with open(filename, 'rb') as ifh, open(outfile, 'wb') as ofh:
dctx.copy_stream(ifh, ofh)
| import zstandard, sys
filename = sys.argv[1]
outfile = filename.split('/')[-1].split('.')[0] + ".jsonl"
dctx = zstandard.ZstdDecompressor()
with open(filename, 'rb') as ifh, open(outfile, 'wb') as ofh:
dctx.copy_stream(ifh, ofh)
| none | 1 | 2.40732 | 2 | |
hotkeys.py | kkuba91/ProficyMachineEditionHotkeys | 2 | 6620220 | <filename>hotkeys.py
import signal
from pynput import mouse
from pynput.keyboard import Key, KeyCode, Listener, Controller
from pynput.mouse import Listener as MouseListener
#Exceptions from keybord system shortcuts:
#signal.signal(signal.SIGINT, signal_handler1)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGSEGV, signal.SIG_IGN)
signal.signal(signal.SIGBREAK, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
# Your functions
keyboard = Controller()
def function_1():
keyboard.type('NOCON')
keyboard.type('\n')
def function_2():
keyboard.type('NCCON')
keyboard.type('\n')
def function_3():
keyboard.type('COIL')
keyboard.type('\n')
def function_4():
keyboard.type('H_WIRE')
keyboard.type('\n')
def function_5():
keyboard.type('V_WIRE')
keyboard.type('\n')
# Create a mapping of keys to function (use frozenset as sets are not hashable - so they can't be used as keys)
combination_to_function = {
frozenset([Key.ctrl_l, Key.f2]): function_1, # NOCON
frozenset([Key.ctrl_l, Key.f3]): function_2, # NCCON
frozenset([Key.ctrl_l, Key.alt_l]): function_3, # COIL
frozenset([Key.ctrl_l, Key.shift_l]): function_4, # h_wire
frozenset([Key.ctrl_l, Key.f9]): function_5, # v_wire
}
# Currently pressed keys
current_keys = set()
def on_press(key):
# When a key is pressed, add it to the set we are keeping track of and check if this set is in the dictionary
current_keys.add(key)
def on_release(key):
# When a key is released, remove it from the set of keys we are keeping track of
if frozenset(current_keys) in combination_to_function:
# If the current set of keys are in the mapping, execute the function
combination_to_function[frozenset(current_keys)]()
current_keys.remove(key)
def on_click(x, y, button, pressed):
if button == mouse.Button.middle:
keyboard.type('\n')
with MouseListener(on_click=on_click) as listener:
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
| <filename>hotkeys.py
import signal
from pynput import mouse
from pynput.keyboard import Key, KeyCode, Listener, Controller
from pynput.mouse import Listener as MouseListener
#Exceptions from keybord system shortcuts:
#signal.signal(signal.SIGINT, signal_handler1)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGSEGV, signal.SIG_IGN)
signal.signal(signal.SIGBREAK, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
# Your functions
keyboard = Controller()
def function_1():
keyboard.type('NOCON')
keyboard.type('\n')
def function_2():
keyboard.type('NCCON')
keyboard.type('\n')
def function_3():
keyboard.type('COIL')
keyboard.type('\n')
def function_4():
keyboard.type('H_WIRE')
keyboard.type('\n')
def function_5():
keyboard.type('V_WIRE')
keyboard.type('\n')
# Create a mapping of keys to function (use frozenset as sets are not hashable - so they can't be used as keys)
combination_to_function = {
frozenset([Key.ctrl_l, Key.f2]): function_1, # NOCON
frozenset([Key.ctrl_l, Key.f3]): function_2, # NCCON
frozenset([Key.ctrl_l, Key.alt_l]): function_3, # COIL
frozenset([Key.ctrl_l, Key.shift_l]): function_4, # h_wire
frozenset([Key.ctrl_l, Key.f9]): function_5, # v_wire
}
# Currently pressed keys
current_keys = set()
def on_press(key):
# When a key is pressed, add it to the set we are keeping track of and check if this set is in the dictionary
current_keys.add(key)
def on_release(key):
# When a key is released, remove it from the set of keys we are keeping track of
if frozenset(current_keys) in combination_to_function:
# If the current set of keys are in the mapping, execute the function
combination_to_function[frozenset(current_keys)]()
current_keys.remove(key)
def on_click(x, y, button, pressed):
if button == mouse.Button.middle:
keyboard.type('\n')
with MouseListener(on_click=on_click) as listener:
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
| en | 0.916948 | #Exceptions from keybord system shortcuts: #signal.signal(signal.SIGINT, signal_handler1) # Your functions # Create a mapping of keys to function (use frozenset as sets are not hashable - so they can't be used as keys) # NOCON # NCCON # COIL # h_wire # v_wire # Currently pressed keys # When a key is pressed, add it to the set we are keeping track of and check if this set is in the dictionary # When a key is released, remove it from the set of keys we are keeping track of # If the current set of keys are in the mapping, execute the function | 2.917634 | 3 |
spookynet/__init__.py | OUnke/SpookyNet | 29 | 6620221 | <filename>spookynet/__init__.py
from .spookynet import SpookyNet
from .spookynet_ensemble import SpookyNetEnsemble
from .spookynet_calculator import SpookyNetCalculator
| <filename>spookynet/__init__.py
from .spookynet import SpookyNet
from .spookynet_ensemble import SpookyNetEnsemble
from .spookynet_calculator import SpookyNetCalculator
| none | 1 | 1.186995 | 1 | |
desafios/desafio 047.py | juaoantonio/curso_video_python | 0 | 6620222 | from time import sleep
print('Os números pares entre 1 e 50 são: ')
for p in range(2, 51, 2):
print(p)
sleep(0.25)
print('Fim da contagem.') | from time import sleep
print('Os números pares entre 1 e 50 são: ')
for p in range(2, 51, 2):
print(p)
sleep(0.25)
print('Fim da contagem.') | none | 1 | 3.315898 | 3 | |
back-end/app/models/models.py | arsummers/city-explorer-python | 0 | 6620223 | from sqlalchemy import inspect
from app import db
class ModelToDictMixin:
def convert_to_dict(self, fields = None):
inst = inspect(self.__class__)
fields = fields or [c_attr.key for c_attr in inst.mapper.column_attrs]
as_dict = {}
for field in fields:
as_dict[field] = getattr(self, field)
return as_dict
class LocationsModel(ModelToDictMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
search_query = db.Column(db.String(256), unique=True)
formatted_query = db.Column(db.String(256), unique=True)
latitude = db.Column(db.Float(10.7))
longitude = db.Column(db.Float(10.7))
def __repr__(self):
return f'<Location {self.formatted_query}>'
class Forecasts(ModelToDictMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
forecast = db.Column(db.Text)
time = db.Column(db.DateTime)
def __repr__(self):
return f'<Forecast {self.forecast}>'
| from sqlalchemy import inspect
from app import db
class ModelToDictMixin:
def convert_to_dict(self, fields = None):
inst = inspect(self.__class__)
fields = fields or [c_attr.key for c_attr in inst.mapper.column_attrs]
as_dict = {}
for field in fields:
as_dict[field] = getattr(self, field)
return as_dict
class LocationsModel(ModelToDictMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
search_query = db.Column(db.String(256), unique=True)
formatted_query = db.Column(db.String(256), unique=True)
latitude = db.Column(db.Float(10.7))
longitude = db.Column(db.Float(10.7))
def __repr__(self):
return f'<Location {self.formatted_query}>'
class Forecasts(ModelToDictMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
forecast = db.Column(db.Text)
time = db.Column(db.DateTime)
def __repr__(self):
return f'<Forecast {self.forecast}>'
| none | 1 | 2.864389 | 3 | |
Tflow/app/pqc/parametric_circuits.py | mahabubul-alam/iccad_2021_invited_QML | 10 | 6620224 | import pennylane as qml
import numpy as np
def add_dummy_measurements_for_test(func):
def inner(*args, **kwargs):
func(*args, **kwargs)
if test == True:
return qml.expval(qml.PauliY(0))
return inner
class ParametricCircuitsPennylane:
def __init__(self, pqc = None, qubit = None, layers = None):
self.choices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
assert pqc in self.choices
self.pqc = pqc
self.qubit = qubit
self.layers = layers
def get_pqc(self, weights0, weights1):
if self.pqc == 1:
return self.__pqc_1(weights0)
if self.pqc == 2:
return self.__pqc_2(weights0)
if self.pqc == 3:
return self.__pqc_3(weights0, weights1)
if self.pqc == 4:
return self.__pqc_4(weights0, weights1)
if self.pqc == 5:
return self.__pqc_5(weights0, weights1)
if self.pqc == 6:
return self.__pqc_6(weights0, weights1)
if self.pqc == 7:
return self.__pqc_7(weights0, weights1)
if self.pqc == 8:
return self.__pqc_8(weights0, weights1)
if self.pqc == 9:
return self.__pqc_9(weights0)
if self.pqc == 10:
return self.__pqc_10(weights0)
if self.pqc == 11:
return self.__pqc_11(weights0, weights1)
if self.pqc == 12:
return self.__pqc_12(weights0, weights1)
if self.pqc == 13:
return self.__pqc_13(weights0, weights1)
if self.pqc == 14:
return self.__pqc_14(weights0, weights1)
if self.pqc == 15:
return self.__pqc_15(weights0)
if self.pqc == 16:
return self.__pqc_16(weights0, weights1)
if self.pqc == 17:
return self.__pqc_17(weights0, weights1)
if self.pqc == 18:
return self.__pqc_18(weights0)
if self.pqc == 19:
return self.__pqc_19(weights0)
def weigths_shape(self):
if self.pqc == 1:
return (self.layers, self.qubit, 2)
if self.pqc == 2:
return (self.layers, self.qubit, 2)
if self.pqc == 3:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit - 1))
if self.pqc == 4:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit - 1))
if self.pqc == 5:
return ((self.layers, self.qubit, 4), (self.layers, self.qubit, self.qubit - 1))
if self.pqc == 6:
return ((self.layers, self.qubit, 4), (self.layers, self.qubit, self.qubit - 1))
if self.pqc == 7:
return ((self.layers, self.qubit, 4), (self.layers, self.qubit - 1))
if self.pqc == 8:
return ((self.layers, self.qubit, 4), (self.layers, self.qubit - 1))
if self.pqc == 9:
return (self.layers, self.qubit)
if self.pqc == 10:
return (self.layers, self.qubit, 2)
if self.pqc == 11:
assert self.qubit > 1
return ((self.layers, self.qubit, 2), (self.layers, (self.qubit - 1) if self.qubit % 2 == 1 else self.qubit - 2, 4))
if self.pqc == 12:
assert self.qubit > 1
return ((self.layers, self.qubit, 2), (self.layers, (self.qubit - 1) if self.qubit % 2 == 1 else self.qubit - 2, 4))
if self.pqc == 13:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit, 2))
if self.pqc == 14:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit, 2))
if self.pqc == 15:
return (self.layers, self.qubit, 2)
if self.pqc == 16:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit - 1))
if self.pqc == 17:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit - 1))
if self.pqc == 18:
return (self.layers, self.qubit, 3)
if self.pqc == 19:
return (self.layers, self.qubit, 3)
@add_dummy_measurements_for_test
def __pqc_1(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights[l, i, 0], wires = i)
qml.RZ(weights[l, i, 1], wires = i)
@add_dummy_measurements_for_test
def __pqc_2(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights[l, i, 0], wires = i)
qml.RZ(weights[l, i, 1], wires = i)
for i in range(self.qubit - 1):
qml.CNOT(wires=[i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_3(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(self.qubit - 1):
qml.CRZ(weights1[l, i], wires = [i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_4(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(self.qubit - 1):
qml.CRX(weights1[l, i], wires = [i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_5(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(self.qubit):
for j in range(self.qubit - 1):
qml.CRZ(weights1[l, i, j], wires = [i, (i + j + 1)%self.qubit])
for i in range(self.qubit):
qml.RX(weights0[l, i, 2], wires = i)
qml.RZ(weights0[l, i, 3], wires = i)
@add_dummy_measurements_for_test
def __pqc_6(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(self.qubit):
for j in range(self.qubit - 1):
qml.CRX(weights1[l, i, j], wires = [i, (i + j + 1)%self.qubit])
for i in range(self.qubit):
qml.RX(weights0[l, i, 2], wires = i)
qml.RZ(weights0[l, i, 3], wires = i)
@add_dummy_measurements_for_test
def __pqc_7(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
j = 0
for i in range(0, self.qubit - 1, 2):
qml.CRZ(weights1[l, j], wires = [i, (i+1)])
j += 1
for i in range(self.qubit):
qml.RX(weights0[l, i, 2], wires = i)
qml.RZ(weights0[l, i, 3], wires = i)
for i in range(1, self.qubit - 1, 2):
qml.CRZ(weights1[l, j], wires = [i, (i+1)])
j += 1
@add_dummy_measurements_for_test
def __pqc_8(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
j = 0
for i in range(0, self.qubit - 1, 2):
qml.CRX(weights1[l, j], wires = [i, (i+1)])
j += 1
for i in range(self.qubit):
qml.RX(weights0[l, i, 2], wires = i)
qml.RZ(weights0[l, i, 3], wires = i)
for i in range(1, self.qubit - 1, 2):
qml.CRX(weights1[l, j], wires = [i, (i+1)])
j += 1
@add_dummy_measurements_for_test
def __pqc_9(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.Hadamard(wires = i)
for i in range(self.qubit - 1):
qml.CZ(wires=[i, (i + 1)])
for i in range(self.qubit):
qml.RX(weights[l, i], wires = i)
@add_dummy_measurements_for_test
def __pqc_10(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights[l, i, 0], wires = i)
for i in range(self.qubit):
qml.CZ(wires=[i, (i + 1)%self.qubit])
for i in range(self.qubit):
qml.RY(weights[l, i, 1], wires = i)
@add_dummy_measurements_for_test
def __pqc_11(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(0, self.qubit - 1, 2):
qml.CNOT(wires=[i, (i + 1)])
for j, i in enumerate(range(1, self.qubit - 1, 2)):
qml.RY(weights1[l, j, 0], wires = i)
qml.RZ(weights1[l, j, 1], wires = i)
qml.RY(weights1[l, j, 2], wires = i+1)
qml.RZ(weights1[l, j, 3], wires = i+1)
for i in range(1, self.qubit - 1, 2):
qml.CNOT(wires=[i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_12(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(0, self.qubit - 1, 2):
qml.CZ(wires=[i, (i + 1)])
for j, i in enumerate(range(1, self.qubit - 1, 2)):
qml.RY(weights1[l, j, 0], wires = i)
qml.RZ(weights1[l, j, 1], wires = i)
qml.RY(weights1[l, j, 2], wires = i+1)
qml.RZ(weights1[l, j, 3], wires = i+1)
for i in range(1, self.qubit - 1, 2):
qml.CZ(wires=[i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_13(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights0[l, i, 0], wires = i)
for i in range(0, self.qubit):
qml.CRZ(weights1[l, i, 0], wires = [i, (i + self.qubit - 1) % self.qubit])
for i in range(self.qubit):
qml.RY(weights0[l, i, 1], wires = i)
temp = list(range(self.qubit))[1:]
temp.reverse()
temp.insert(0,0)
controls = temp.copy()
temp = list(range(self.qubit))[2:]
temp.reverse()
temp.insert(0, 0)
temp.insert(0, 1)
targets = temp.copy()
for i, control in enumerate(controls):
qml.CRZ(weights1[l, i, 1], wires = [control, targets[i]])
@add_dummy_measurements_for_test
def __pqc_14(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights0[l, i, 0], wires = i)
for i in range(0, self.qubit):
qml.CRX(weights1[l, i, 0], wires = [i, (i + self.qubit - 1) % self.qubit])
for i in range(self.qubit):
qml.RY(weights0[l, i, 1], wires = i)
temp = list(range(self.qubit))[1:]
temp.reverse()
temp.insert(0,0)
controls = temp.copy()
temp = list(range(self.qubit))[2:]
temp.reverse()
temp.insert(0, 0)
temp.insert(0, 1)
targets = temp.copy()
for i, control in enumerate(controls):
qml.CRX(weights1[l, i, 1], wires = [control, targets[i]])
@add_dummy_measurements_for_test
def __pqc_15(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights[l, i, 0], wires = i)
for i in range(0, self.qubit):
qml.CNOT(wires = [i, (i + self.qubit - 1) % self.qubit])
for i in range(self.qubit):
qml.RY(weights[l, i, 1], wires = i)
temp = list(range(self.qubit))[1:]
temp.reverse()
temp.insert(0,0)
controls = temp.copy()
temp = list(range(self.qubit))[2:]
temp.reverse()
temp.insert(0, 0)
temp.insert(0, 1)
targets = temp.copy()
for i, control in enumerate(controls):
qml.CNOT(wires = [control, targets[i]])
@add_dummy_measurements_for_test
def __pqc_16(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(0, self.qubit - 1, 2):
qml.CRZ(weights1[l, i], wires = [i, (i + 1)])
for i in range(1, self.qubit - 1, 2):
qml.CRZ(weights1[l, i], wires = [i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_17(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(0, self.qubit - 1, 2):
qml.CRX(weights1[l, i], wires = [i, (i + 1)])
for i in range(1, self.qubit - 1, 2):
qml.CRX(weights1[l, i], wires = [i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_18(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights[l, i, 0], wires = i)
qml.RZ(weights[l, i, 1], wires = i)
for i in range(0, self.qubit):
qml.CRZ(weights[l, i, 2], wires = [i, (i + self.qubit - 1) % self.qubit])
@add_dummy_measurements_for_test
def __pqc_19(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights[l, i, 0], wires = i)
qml.RZ(weights[l, i, 1], wires = i)
for i in range(0, self.qubit):
qml.CRX(weights[l, i, 2], wires = [i, (i + self.qubit - 1) % self.qubit])
if __name__ == '__main__':
test = True
pqc = ParametricCircuitsPennylane(pqc = 19, qubit = 7, layers = 1)
dev = qml.device("default.qubit", wires = 10) #target pennylane device
qnode = qml.QNode(pqc.get_pqc, dev) #circuit
weight_shape = pqc.weigths_shape()
if isinstance(weight_shape[0], tuple):
weights0 = np.random.random(weight_shape[0])
weights1 = np.random.random(weight_shape[1])
qnode(weights0, weights1)
else:
weights = np.random.random(weight_shape)
qnode(weights)
print(qnode.draw())
else:
test = False
| import pennylane as qml
import numpy as np
def add_dummy_measurements_for_test(func):
def inner(*args, **kwargs):
func(*args, **kwargs)
if test == True:
return qml.expval(qml.PauliY(0))
return inner
class ParametricCircuitsPennylane:
def __init__(self, pqc = None, qubit = None, layers = None):
self.choices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
assert pqc in self.choices
self.pqc = pqc
self.qubit = qubit
self.layers = layers
def get_pqc(self, weights0, weights1):
if self.pqc == 1:
return self.__pqc_1(weights0)
if self.pqc == 2:
return self.__pqc_2(weights0)
if self.pqc == 3:
return self.__pqc_3(weights0, weights1)
if self.pqc == 4:
return self.__pqc_4(weights0, weights1)
if self.pqc == 5:
return self.__pqc_5(weights0, weights1)
if self.pqc == 6:
return self.__pqc_6(weights0, weights1)
if self.pqc == 7:
return self.__pqc_7(weights0, weights1)
if self.pqc == 8:
return self.__pqc_8(weights0, weights1)
if self.pqc == 9:
return self.__pqc_9(weights0)
if self.pqc == 10:
return self.__pqc_10(weights0)
if self.pqc == 11:
return self.__pqc_11(weights0, weights1)
if self.pqc == 12:
return self.__pqc_12(weights0, weights1)
if self.pqc == 13:
return self.__pqc_13(weights0, weights1)
if self.pqc == 14:
return self.__pqc_14(weights0, weights1)
if self.pqc == 15:
return self.__pqc_15(weights0)
if self.pqc == 16:
return self.__pqc_16(weights0, weights1)
if self.pqc == 17:
return self.__pqc_17(weights0, weights1)
if self.pqc == 18:
return self.__pqc_18(weights0)
if self.pqc == 19:
return self.__pqc_19(weights0)
def weigths_shape(self):
if self.pqc == 1:
return (self.layers, self.qubit, 2)
if self.pqc == 2:
return (self.layers, self.qubit, 2)
if self.pqc == 3:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit - 1))
if self.pqc == 4:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit - 1))
if self.pqc == 5:
return ((self.layers, self.qubit, 4), (self.layers, self.qubit, self.qubit - 1))
if self.pqc == 6:
return ((self.layers, self.qubit, 4), (self.layers, self.qubit, self.qubit - 1))
if self.pqc == 7:
return ((self.layers, self.qubit, 4), (self.layers, self.qubit - 1))
if self.pqc == 8:
return ((self.layers, self.qubit, 4), (self.layers, self.qubit - 1))
if self.pqc == 9:
return (self.layers, self.qubit)
if self.pqc == 10:
return (self.layers, self.qubit, 2)
if self.pqc == 11:
assert self.qubit > 1
return ((self.layers, self.qubit, 2), (self.layers, (self.qubit - 1) if self.qubit % 2 == 1 else self.qubit - 2, 4))
if self.pqc == 12:
assert self.qubit > 1
return ((self.layers, self.qubit, 2), (self.layers, (self.qubit - 1) if self.qubit % 2 == 1 else self.qubit - 2, 4))
if self.pqc == 13:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit, 2))
if self.pqc == 14:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit, 2))
if self.pqc == 15:
return (self.layers, self.qubit, 2)
if self.pqc == 16:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit - 1))
if self.pqc == 17:
return ((self.layers, self.qubit, 2), (self.layers, self.qubit - 1))
if self.pqc == 18:
return (self.layers, self.qubit, 3)
if self.pqc == 19:
return (self.layers, self.qubit, 3)
@add_dummy_measurements_for_test
def __pqc_1(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights[l, i, 0], wires = i)
qml.RZ(weights[l, i, 1], wires = i)
@add_dummy_measurements_for_test
def __pqc_2(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights[l, i, 0], wires = i)
qml.RZ(weights[l, i, 1], wires = i)
for i in range(self.qubit - 1):
qml.CNOT(wires=[i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_3(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(self.qubit - 1):
qml.CRZ(weights1[l, i], wires = [i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_4(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(self.qubit - 1):
qml.CRX(weights1[l, i], wires = [i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_5(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(self.qubit):
for j in range(self.qubit - 1):
qml.CRZ(weights1[l, i, j], wires = [i, (i + j + 1)%self.qubit])
for i in range(self.qubit):
qml.RX(weights0[l, i, 2], wires = i)
qml.RZ(weights0[l, i, 3], wires = i)
@add_dummy_measurements_for_test
def __pqc_6(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(self.qubit):
for j in range(self.qubit - 1):
qml.CRX(weights1[l, i, j], wires = [i, (i + j + 1)%self.qubit])
for i in range(self.qubit):
qml.RX(weights0[l, i, 2], wires = i)
qml.RZ(weights0[l, i, 3], wires = i)
@add_dummy_measurements_for_test
def __pqc_7(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
j = 0
for i in range(0, self.qubit - 1, 2):
qml.CRZ(weights1[l, j], wires = [i, (i+1)])
j += 1
for i in range(self.qubit):
qml.RX(weights0[l, i, 2], wires = i)
qml.RZ(weights0[l, i, 3], wires = i)
for i in range(1, self.qubit - 1, 2):
qml.CRZ(weights1[l, j], wires = [i, (i+1)])
j += 1
@add_dummy_measurements_for_test
def __pqc_8(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
j = 0
for i in range(0, self.qubit - 1, 2):
qml.CRX(weights1[l, j], wires = [i, (i+1)])
j += 1
for i in range(self.qubit):
qml.RX(weights0[l, i, 2], wires = i)
qml.RZ(weights0[l, i, 3], wires = i)
for i in range(1, self.qubit - 1, 2):
qml.CRX(weights1[l, j], wires = [i, (i+1)])
j += 1
@add_dummy_measurements_for_test
def __pqc_9(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.Hadamard(wires = i)
for i in range(self.qubit - 1):
qml.CZ(wires=[i, (i + 1)])
for i in range(self.qubit):
qml.RX(weights[l, i], wires = i)
@add_dummy_measurements_for_test
def __pqc_10(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights[l, i, 0], wires = i)
for i in range(self.qubit):
qml.CZ(wires=[i, (i + 1)%self.qubit])
for i in range(self.qubit):
qml.RY(weights[l, i, 1], wires = i)
@add_dummy_measurements_for_test
def __pqc_11(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(0, self.qubit - 1, 2):
qml.CNOT(wires=[i, (i + 1)])
for j, i in enumerate(range(1, self.qubit - 1, 2)):
qml.RY(weights1[l, j, 0], wires = i)
qml.RZ(weights1[l, j, 1], wires = i)
qml.RY(weights1[l, j, 2], wires = i+1)
qml.RZ(weights1[l, j, 3], wires = i+1)
for i in range(1, self.qubit - 1, 2):
qml.CNOT(wires=[i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_12(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(0, self.qubit - 1, 2):
qml.CZ(wires=[i, (i + 1)])
for j, i in enumerate(range(1, self.qubit - 1, 2)):
qml.RY(weights1[l, j, 0], wires = i)
qml.RZ(weights1[l, j, 1], wires = i)
qml.RY(weights1[l, j, 2], wires = i+1)
qml.RZ(weights1[l, j, 3], wires = i+1)
for i in range(1, self.qubit - 1, 2):
qml.CZ(wires=[i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_13(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights0[l, i, 0], wires = i)
for i in range(0, self.qubit):
qml.CRZ(weights1[l, i, 0], wires = [i, (i + self.qubit - 1) % self.qubit])
for i in range(self.qubit):
qml.RY(weights0[l, i, 1], wires = i)
temp = list(range(self.qubit))[1:]
temp.reverse()
temp.insert(0,0)
controls = temp.copy()
temp = list(range(self.qubit))[2:]
temp.reverse()
temp.insert(0, 0)
temp.insert(0, 1)
targets = temp.copy()
for i, control in enumerate(controls):
qml.CRZ(weights1[l, i, 1], wires = [control, targets[i]])
@add_dummy_measurements_for_test
def __pqc_14(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights0[l, i, 0], wires = i)
for i in range(0, self.qubit):
qml.CRX(weights1[l, i, 0], wires = [i, (i + self.qubit - 1) % self.qubit])
for i in range(self.qubit):
qml.RY(weights0[l, i, 1], wires = i)
temp = list(range(self.qubit))[1:]
temp.reverse()
temp.insert(0,0)
controls = temp.copy()
temp = list(range(self.qubit))[2:]
temp.reverse()
temp.insert(0, 0)
temp.insert(0, 1)
targets = temp.copy()
for i, control in enumerate(controls):
qml.CRX(weights1[l, i, 1], wires = [control, targets[i]])
@add_dummy_measurements_for_test
def __pqc_15(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RY(weights[l, i, 0], wires = i)
for i in range(0, self.qubit):
qml.CNOT(wires = [i, (i + self.qubit - 1) % self.qubit])
for i in range(self.qubit):
qml.RY(weights[l, i, 1], wires = i)
temp = list(range(self.qubit))[1:]
temp.reverse()
temp.insert(0,0)
controls = temp.copy()
temp = list(range(self.qubit))[2:]
temp.reverse()
temp.insert(0, 0)
temp.insert(0, 1)
targets = temp.copy()
for i, control in enumerate(controls):
qml.CNOT(wires = [control, targets[i]])
@add_dummy_measurements_for_test
def __pqc_16(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(0, self.qubit - 1, 2):
qml.CRZ(weights1[l, i], wires = [i, (i + 1)])
for i in range(1, self.qubit - 1, 2):
qml.CRZ(weights1[l, i], wires = [i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_17(self, weights0, weights1):
assert weights0.shape == self.weigths_shape()[0]
assert weights1.shape == self.weigths_shape()[1]
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights0[l, i, 0], wires = i)
qml.RZ(weights0[l, i, 1], wires = i)
for i in range(0, self.qubit - 1, 2):
qml.CRX(weights1[l, i], wires = [i, (i + 1)])
for i in range(1, self.qubit - 1, 2):
qml.CRX(weights1[l, i], wires = [i, (i + 1)])
@add_dummy_measurements_for_test
def __pqc_18(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights[l, i, 0], wires = i)
qml.RZ(weights[l, i, 1], wires = i)
for i in range(0, self.qubit):
qml.CRZ(weights[l, i, 2], wires = [i, (i + self.qubit - 1) % self.qubit])
@add_dummy_measurements_for_test
def __pqc_19(self, weights):
assert weights.shape == self.weigths_shape()
for l in range(self.layers):
for i in range(self.qubit):
qml.RX(weights[l, i, 0], wires = i)
qml.RZ(weights[l, i, 1], wires = i)
for i in range(0, self.qubit):
qml.CRX(weights[l, i, 2], wires = [i, (i + self.qubit - 1) % self.qubit])
if __name__ == '__main__':
test = True
pqc = ParametricCircuitsPennylane(pqc = 19, qubit = 7, layers = 1)
dev = qml.device("default.qubit", wires = 10) #target pennylane device
qnode = qml.QNode(pqc.get_pqc, dev) #circuit
weight_shape = pqc.weigths_shape()
if isinstance(weight_shape[0], tuple):
weights0 = np.random.random(weight_shape[0])
weights1 = np.random.random(weight_shape[1])
qnode(weights0, weights1)
else:
weights = np.random.random(weight_shape)
qnode(weights)
print(qnode.draw())
else:
test = False
| en | 0.41117 | #target pennylane device #circuit | 2.249302 | 2 |
2021/HANFS/fence-agents/fence/agents/autodetect/b.py | BryanWhitehurst/HPCCEA | 10 | 6620225 | def myf():
return 3
| def myf():
return 3
| none | 1 | 1.527146 | 2 | |
setup.py | veit/cookiecutter-namespace-template | 3 | 6620226 | <reponame>veit/cookiecutter-namespace-template
# !/usr/bin/env python
import codecs
import os
from setuptools import setup, find_packages
###################################################################
PACKAGES = []
META_PATH = os.path.join("__about__.py")
KEYWORDS = ['cookiecutter', 'template', 'package', ]
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development',
]
INSTALL_REQUIRES = []
EXTRAS_REQUIRE = {
"docs": ["sphinx", "furo"],
"tests": [
"pytest",
"pytest-cookies",
],
}
###################################################################
about = {}
with codecs.open(os.path.join("__about__.py")) as f:
exec(f.read(), about)
with codecs.open("README.rst", "r") as fh:
long_description = fh.read()
setup(
name=about["__title__"],
packages=find_packages(include=PACKAGES),
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
long_description_content_type="text/x-rst",
author=about["__author__"],
license=about["__license__"],
author_email=about["__email__"],
url=about["__url__"],
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
)
| # !/usr/bin/env python
import codecs
import os
from setuptools import setup, find_packages
###################################################################
PACKAGES = []
META_PATH = os.path.join("__about__.py")
KEYWORDS = ['cookiecutter', 'template', 'package', ]
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development',
]
INSTALL_REQUIRES = []
EXTRAS_REQUIRE = {
"docs": ["sphinx", "furo"],
"tests": [
"pytest",
"pytest-cookies",
],
}
###################################################################
about = {}
with codecs.open(os.path.join("__about__.py")) as f:
exec(f.read(), about)
with codecs.open("README.rst", "r") as fh:
long_description = fh.read()
setup(
name=about["__title__"],
packages=find_packages(include=PACKAGES),
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
long_description_content_type="text/x-rst",
author=about["__author__"],
license=about["__license__"],
author_email=about["__email__"],
url=about["__url__"],
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
) | de | 0.818313 | # !/usr/bin/env python ################################################################### ################################################################### | 1.370315 | 1 |
machine_learning/classification/cifar10/models/simple_net.py | tsubame-mz/reinforcement_learning | 0 | 6620227 | import torch.nn as nn
import torch.nn.functional as F
from models.octave_conv import OctaveConv
class Conv_BN_Act(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, bias=False):
super(Conv_BN_Act, self).__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def forward(self, x):
return self.layers(x)
class OctConv_BN_Act(nn.Module):
    """OctaveConv followed by per-branch BatchNorm2d and ReLU.

    The wrapped :class:`OctaveConv` produces a (high-frequency,
    low-frequency) pair of feature maps; each branch gets its own batch
    norm, with channel counts split according to ``alpha_out``.

    NOTE(review): the ``bias`` parameter is accepted but never forwarded to
    OctaveConv — confirm whether that is intentional.
    """
    def __init__(self, in_channels, out_channels, kernel_size, alpha_in, alpha_out, stride=1, padding=1, bias=False):
        super(OctConv_BN_Act, self).__init__()
        self.conv = OctaveConv(in_channels, out_channels, kernel_size, alpha_in, alpha_out, stride, padding)
        # alpha_out is the fraction of output channels routed to the
        # low-frequency branch; the remainder stays high-frequency.
        l_out_channels = int(alpha_out * out_channels)
        h_out_channels = out_channels - l_out_channels
        self.bn_h = nn.BatchNorm2d(h_out_channels)
        self.bn_l = nn.BatchNorm2d(l_out_channels)
        self.act = nn.ReLU(inplace=True)
    def forward(self, x):
        """Return the (high, low) feature pair after conv + BN + ReLU.

        The low-frequency output may be ``None`` (presumably when
        ``alpha_out == 0`` — confirm against OctaveConv).
        """
        x_h, x_l = self.conv(x)
        x_h = self.act(self.bn_h(x_h))
        # The low branch can be absent (None); skip BN/ReLU in that case.
        x_l = self.act(self.bn_l(x_l)) if x_l is not None else None
        return x_h, x_l
class SimpleConvNet(nn.Module):
    """Plain 9-conv CNN with a 10-way linear head.

    NOTE(review): ``forward`` returns softmax probabilities, not logits —
    confirm the training loss expects probabilities.
    """

    def __init__(self, innter_channnels):
        super().__init__()
        c1 = innter_channnels
        c2 = innter_channnels * 2
        c4 = innter_channnels * 4
        self.layers = nn.Sequential(
            # (N, C, H, W)
            Conv_BN_Act(3, c1, 3),
            Conv_BN_Act(c1, c1, 3),
            Conv_BN_Act(c1, c1, 3),
            nn.AvgPool2d(kernel_size=(2, 2), stride=2),
            Conv_BN_Act(c1, c2, 3),
            Conv_BN_Act(c2, c2, 3),
            Conv_BN_Act(c2, c2, 3),
            nn.AvgPool2d(kernel_size=(2, 2), stride=2),
            Conv_BN_Act(c2, c4, 3),
            Conv_BN_Act(c4, c4, 3),
            Conv_BN_Act(c4, c4, 3),
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.AdaptiveAvgPool2d((1, 1)),
        )
        self.fc = nn.Linear(c4, 10)
        # Initialize the network weights (xavier for linear, kaiming for
        # conv, unit/zero affine for batch norm).
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Return per-class probabilities of shape (N, 10)."""
        features = self.layers(x)
        features = features.flatten(1)
        return F.softmax(self.fc(features), dim=-1)
class SimpleOctConvNet(nn.Module):
    """Octave-convolution variant of SimpleConvNet with a 10-way head.

    Maintains separate high-/low-frequency feature streams (alpha = 0.25)
    through three stages; the last octave conv uses alpha_out = 0 and the
    low stream is discarded before the classifier head.
    """
    def __init__(self, innter_channnels):
        super(SimpleOctConvNet, self).__init__()
        alpha = 0.25
        # Stage 1: alpha_in = 0, so layer1 is fed a plain tensor (see forward).
        self.layer1 = nn.Sequential(
            OctConv_BN_Act(3, innter_channnels, 3, 0, alpha),
            OctConv_BN_Act(innter_channnels, innter_channnels, 3, alpha, alpha),
            OctConv_BN_Act(innter_channnels, innter_channnels, 3, alpha, alpha),
        )
        self.layer2 = nn.Sequential(
            OctConv_BN_Act(innter_channnels, innter_channnels * 2, 3, alpha, alpha),
            OctConv_BN_Act(innter_channnels * 2, innter_channnels * 2, 3, alpha, alpha),
            OctConv_BN_Act(innter_channnels * 2, innter_channnels * 2, 3, alpha, alpha),
        )
        # Stage 3 ends with alpha_out = 0; forward() keeps only the high
        # stream afterwards.
        self.layer3 = nn.Sequential(
            OctConv_BN_Act(innter_channnels * 2, innter_channnels * 4, 3, alpha, alpha),
            OctConv_BN_Act(innter_channnels * 4, innter_channnels * 4, 3, alpha, alpha),
            OctConv_BN_Act(innter_channnels * 4, innter_channnels * 4, 3, alpha, 0),
        )
        self.layer4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1), nn.AdaptiveAvgPool2d((1, 1)))
        self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
        self.fc = nn.Linear(innter_channnels * 4, 10)
        # Initialize the network weights
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        """Return per-class probabilities of shape (N, 10)."""
        x_h, x_l = self.layer1(x)
        # Downsample both frequency streams between stages.
        x_h, x_l = self.downsample(x_h), self.downsample(x_l)
        x_h, x_l = self.layer2((x_h, x_l))
        x_h, x_l = self.downsample(x_h), self.downsample(x_l)
        # alpha_out = 0 in layer3's last conv: the low stream is dropped.
        x_h, _ = self.layer3((x_h, x_l))
        x = self.layer4(x_h)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return F.softmax(x, dim=-1)
| import torch.nn as nn
import torch.nn.functional as F
from models.octave_conv import OctaveConv
class Conv_BN_Act(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, bias=False):
super(Conv_BN_Act, self).__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def forward(self, x):
return self.layers(x)
class OctConv_BN_Act(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, alpha_in, alpha_out, stride=1, padding=1, bias=False):
super(OctConv_BN_Act, self).__init__()
self.conv = OctaveConv(in_channels, out_channels, kernel_size, alpha_in, alpha_out, stride, padding)
l_out_channels = int(alpha_out * out_channels)
h_out_channels = out_channels - l_out_channels
self.bn_h = nn.BatchNorm2d(h_out_channels)
self.bn_l = nn.BatchNorm2d(l_out_channels)
self.act = nn.ReLU(inplace=True)
def forward(self, x):
x_h, x_l = self.conv(x)
x_h = self.act(self.bn_h(x_h))
x_l = self.act(self.bn_l(x_l)) if x_l is not None else None
return x_h, x_l
class SimpleConvNet(nn.Module):
def __init__(self, innter_channnels):
super(SimpleConvNet, self).__init__()
self.layers = nn.Sequential(
# (N, C, H, W)
Conv_BN_Act(3, innter_channnels, 3),
Conv_BN_Act(innter_channnels, innter_channnels, 3),
Conv_BN_Act(innter_channnels, innter_channnels, 3),
nn.AvgPool2d(kernel_size=(2, 2), stride=2),
Conv_BN_Act(innter_channnels, innter_channnels * 2, 3),
Conv_BN_Act(innter_channnels * 2, innter_channnels * 2, 3),
Conv_BN_Act(innter_channnels * 2, innter_channnels * 2, 3),
nn.AvgPool2d(kernel_size=(2, 2), stride=2),
Conv_BN_Act(innter_channnels * 2, innter_channnels * 4, 3),
Conv_BN_Act(innter_channnels * 4, innter_channnels * 4, 3),
Conv_BN_Act(innter_channnels * 4, innter_channnels * 4, 3),
nn.MaxPool2d(3, stride=1, padding=1),
nn.AdaptiveAvgPool2d((1, 1)),
)
self.fc = nn.Linear(innter_channnels * 4, 10)
# ネットワークの重みを初期化
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.layers(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return F.softmax(x, dim=-1)
class SimpleOctConvNet(nn.Module):
def __init__(self, innter_channnels):
super(SimpleOctConvNet, self).__init__()
alpha = 0.25
self.layer1 = nn.Sequential(
OctConv_BN_Act(3, innter_channnels, 3, 0, alpha),
OctConv_BN_Act(innter_channnels, innter_channnels, 3, alpha, alpha),
OctConv_BN_Act(innter_channnels, innter_channnels, 3, alpha, alpha),
)
self.layer2 = nn.Sequential(
OctConv_BN_Act(innter_channnels, innter_channnels * 2, 3, alpha, alpha),
OctConv_BN_Act(innter_channnels * 2, innter_channnels * 2, 3, alpha, alpha),
OctConv_BN_Act(innter_channnels * 2, innter_channnels * 2, 3, alpha, alpha),
)
self.layer3 = nn.Sequential(
OctConv_BN_Act(innter_channnels * 2, innter_channnels * 4, 3, alpha, alpha),
OctConv_BN_Act(innter_channnels * 4, innter_channnels * 4, 3, alpha, alpha),
OctConv_BN_Act(innter_channnels * 4, innter_channnels * 4, 3, alpha, 0),
)
self.layer4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1), nn.AdaptiveAvgPool2d((1, 1)))
self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
self.fc = nn.Linear(innter_channnels * 4, 10)
# ネットワークの重みを初期化
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x_h, x_l = self.layer1(x)
x_h, x_l = self.downsample(x_h), self.downsample(x_l)
x_h, x_l = self.layer2((x_h, x_l))
x_h, x_l = self.downsample(x_h), self.downsample(x_l)
x_h, _ = self.layer3((x_h, x_l))
x = self.layer4(x_h)
x = x.view(x.size(0), -1)
x = self.fc(x)
return F.softmax(x, dim=-1)
| ja | 0.997786 | # (N, C, H, W) # ネットワークの重みを初期化 # ネットワークの重みを初期化 | 2.46111 | 2 |
ffmpeg_streaming/_format.py | ace-smart/py-streaming | 455 | 6620228 | <reponame>ace-smart/py-streaming
"""
ffmpeg_streaming.media
~~~~~~~~~~~~
Video and audio formats
:copyright: (c) 2020 by <NAME>.
:website: https://www.aminyazdanpanah.com
:email: <EMAIL>
:license: MIT, see LICENSE for more details.
"""
import abc
MULTIPLY_BY_ONE = 1
MULTIPLY_BY_TWO = 2
MULTIPLY_BY_FOUR = 4
MULTIPLY_BY_Eight = 8
MULTIPLY_BY_SIXTEEN = 16
MULTIPLY_BY_THIRTY_TWO = 32
def _verify_codecs(codec, codecs):
if codec is None:
return
elif codec not in codecs:
ValueError("The codec is not available!")
else:
return str(codec)
class Format(abc.ABC):
    """Abstract encoding format: a video codec, an audio codec and extra
    codec options that are passed through to ffmpeg.
    """

    def __init__(self, video: str, audio: str, **codec_options):
        self.video = video
        self.audio = audio
        self.codec_options = codec_options

    @property
    def all(self) -> dict:
        """Complete ffmpeg argument mapping: codecs plus codec options."""
        return {'c:v': self.video, 'c:a': self.audio, **self.get_codec_options()}

    @abc.abstractmethod
    def multiply(self) -> int:
        """Return one of the MULTIPLY_BY_* constants for this format."""

    @abc.abstractmethod
    def get_codec_options(self) -> dict:
        """Return the codec-option mapping for this format."""
class H264(Format):
    """H.264/AVC format: an x264-family video codec plus an AAC/MP3 audio codec."""

    def __init__(self, video: str = "libx264", audio: str = 'aac', **codec_options):
        supported_video = ['libx264', 'h264', 'h264_afm', 'h264_nvenc']
        supported_audio = ['copy', 'aac', 'libvo_aacenc', 'libfaac', 'libmp3lame', 'libfdk_aac']
        super().__init__(
            _verify_codecs(video, supported_video),
            _verify_codecs(audio, supported_audio),
            **codec_options,
        )

    def multiply(self) -> int:
        return MULTIPLY_BY_TWO

    def get_codec_options(self) -> dict:
        """Default h264 codec options, overridden by any user-supplied options.

        See https://ffmpeg.org/ffmpeg-codecs.html#Options-28 for details.
        """
        defaults = {
            'bf': 1,
            'keyint_min': 25,
            'g': 250,
            'sc_threshold': 40,
        }
        return {**defaults, **self.codec_options}
class HEVC(Format):
    """HEVC/H.265 format: an x265-family video codec plus an AAC/MP3 audio codec."""

    def __init__(self, video: str = "libx265", audio: str = 'aac', **codec_options):
        supported_video = ['libx265', 'h265']
        supported_audio = ['copy', 'aac', 'libvo_aacenc', 'libfaac', 'libmp3lame', 'libfdk_aac']
        super().__init__(
            _verify_codecs(video, supported_video),
            _verify_codecs(audio, supported_audio),
            **codec_options,
        )

    def multiply(self) -> int:
        return MULTIPLY_BY_TWO

    def get_codec_options(self) -> dict:
        """Default hevc (h265) codec options, overridden by user options.

        See https://ffmpeg.org/ffmpeg-codecs.html#Options-29 for details.
        """
        defaults = {
            'keyint_min': 25,
            'g': 250,
            'sc_threshold': 40,
        }
        return {**defaults, **self.codec_options}
class VP9(Format):
    """VP9 format: a libvpx-family video codec plus an AAC/MP3 audio codec."""

    def __init__(self, video: str = "libvpx-vp9", audio: str = 'aac', **codec_options):
        supported_video = ['libvpx', 'libvpx-vp9']
        supported_audio = ['copy', 'aac', 'libvo_aacenc', 'libfaac', 'libmp3lame', 'libfdk_aac']
        super().__init__(
            _verify_codecs(video, supported_video),
            _verify_codecs(audio, supported_audio),
            **codec_options,
        )

    def multiply(self) -> int:
        return MULTIPLY_BY_TWO

    def get_codec_options(self) -> dict:
        """User-supplied vp9 codec options (there are no defaults).

        See https://ffmpeg.org/ffmpeg-codecs.html#Options-26 for details.
        """
        return dict(self.codec_options)
class Formats:
    """Static factory methods producing the supported :class:`Format` objects."""

    @staticmethod
    def h264(video: str = "libx264", audio: str = 'aac', **codec_options) -> Format:
        """Build an H.264 format description."""
        return H264(video, audio, **codec_options)

    @staticmethod
    def hevc(video: str = "libx265", audio: str = 'aac', **codec_options) -> Format:
        """Build an HEVC (H.265) format description."""
        return HEVC(video, audio, **codec_options)

    @staticmethod
    def vp9(video: str = "libvpx-vp9", audio: str = 'aac', **codec_options) -> Format:
        """Build a VP9 format description."""
        return VP9(video, audio, **codec_options)
__all__ = [
'Format',
'Formats'
]
| """
ffmpeg_streaming.media
~~~~~~~~~~~~
Video and audio formats
:copyright: (c) 2020 by <NAME>.
:website: https://www.aminyazdanpanah.com
:email: <EMAIL>
:license: MIT, see LICENSE for more details.
"""
import abc
MULTIPLY_BY_ONE = 1
MULTIPLY_BY_TWO = 2
MULTIPLY_BY_FOUR = 4
MULTIPLY_BY_Eight = 8
MULTIPLY_BY_SIXTEEN = 16
MULTIPLY_BY_THIRTY_TWO = 32
def _verify_codecs(codec, codecs):
if codec is None:
return
elif codec not in codecs:
ValueError("The codec is not available!")
else:
return str(codec)
class Format(abc.ABC):
"""
@TODO: add documentation
"""
def __init__(self, video: str, audio: str, **codec_options):
self.video = video
self.audio = audio
self.codec_options = codec_options
@property
def all(self) -> dict:
args = {
'c:v': self.video,
'c:a': self.audio,
}
args.update(self.get_codec_options())
return args
@abc.abstractmethod
def multiply(self) -> int:
pass
@abc.abstractmethod
def get_codec_options(self) -> dict:
pass
class H264(Format):
def __init__(self, video: str = "libx264", audio: str = 'aac', **codec_options):
"""
@TODO: add documentation
"""
videos = ['libx264', 'h264', 'h264_afm', 'h264_nvenc']
audios = ['copy', 'aac', 'libvo_aacenc', 'libfaac', 'libmp3lame', 'libfdk_aac']
super(H264, self).__init__(_verify_codecs(video, videos), _verify_codecs(audio, audios), **codec_options)
def multiply(self) -> int:
return MULTIPLY_BY_TWO
def get_codec_options(self) -> dict:
"""
set the default value of h264 codec options and update the value with the specified value by user
see https://ffmpeg.org/ffmpeg-codecs.html#Options-28 for more information about options
:return: dict
"""
h264_codec_options = {
'bf': 1,
'keyint_min': 25,
'g': 250,
'sc_threshold': 40
}
h264_codec_options.update(self.codec_options)
return h264_codec_options
class HEVC(Format):
"""
@TODO: add documentation
"""
def __init__(self, video: str = "libx265", audio: str = 'aac', **codec_options):
videos = ['libx265', 'h265']
audios = ['copy', 'aac', 'libvo_aacenc', 'libfaac', 'libmp3lame', 'libfdk_aac']
super(HEVC, self).__init__(_verify_codecs(video, videos), _verify_codecs(audio, audios), **codec_options)
def multiply(self) -> int:
return MULTIPLY_BY_TWO
def get_codec_options(self) -> dict:
"""
set the default value of hevc(h265) codec options and update the value with the specified value by user
see https://ffmpeg.org/ffmpeg-codecs.html#Options-29 for more information about options
:return: dict
"""
h265_codec_options = {
'keyint_min': 25,
'g': 250,
'sc_threshold': 40
}
h265_codec_options.update(self.codec_options)
return h265_codec_options
class VP9(Format):
"""
@TODO: add documentation
"""
def __init__(self, video: str = "libvpx-vp9", audio: str = 'aac', **codec_options):
videos = ['libvpx', 'libvpx-vp9']
audios = ['copy', 'aac', 'libvo_aacenc', 'libfaac', 'libmp3lame', 'libfdk_aac']
super(VP9, self).__init__(_verify_codecs(video, videos), _verify_codecs(audio, audios), **codec_options)
def multiply(self) -> int:
return MULTIPLY_BY_TWO
def get_codec_options(self) -> dict:
"""
set the default value of vp9 codec options and update the value with the specified value by user
see https://ffmpeg.org/ffmpeg-codecs.html#Options-26 for more information about options
:return: dict
"""
vp9_codec_options = {}
vp9_codec_options.update(self.codec_options)
return vp9_codec_options
class Formats:
@staticmethod
def h264(video: str = "libx264", audio: str = 'aac', **codec_options) -> Format:
"""
@TODO: add documentation
"""
return H264(video, audio, **codec_options)
@staticmethod
def hevc(video: str = "libx265", audio: str = 'aac', **codec_options) -> Format:
"""
@TODO: add documentation
"""
return HEVC(video, audio, **codec_options)
@staticmethod
def vp9(video: str = "libvpx-vp9", audio: str = 'aac', **codec_options) -> Format:
"""
@TODO: add documentation
"""
return VP9(video, audio, **codec_options)
__all__ = [
'Format',
'Formats'
] | en | 0.355199 | ffmpeg_streaming.media ~~~~~~~~~~~~ Video and audio formats :copyright: (c) 2020 by <NAME>. :website: https://www.aminyazdanpanah.com :email: <EMAIL> :license: MIT, see LICENSE for more details. @TODO: add documentation @TODO: add documentation set the default value of h264 codec options and update the value with the specified value by user see https://ffmpeg.org/ffmpeg-codecs.html#Options-28 for more information about options :return: dict @TODO: add documentation set the default value of hevc(h265) codec options and update the value with the specified value by user see https://ffmpeg.org/ffmpeg-codecs.html#Options-29 for more information about options :return: dict @TODO: add documentation set the default value of vp9 codec options and update the value with the specified value by user see https://ffmpeg.org/ffmpeg-codecs.html#Options-26 for more information about options :return: dict @TODO: add documentation @TODO: add documentation @TODO: add documentation | 2.70049 | 3 |
StoppingCondition.py | joancafom/GeneticAlgorithm | 0 | 6620229 | <gh_stars>0
from abc import ABCMeta, abstractmethod
import datetime
class StoppingCondition(metaclass=ABCMeta):
    """Abstract base class for stopping criteria.

    Bug fix: the original assigned ``__metaclass__ = ABCMeta``, which is the
    Python 2 protocol and is silently ignored by Python 3, so the
    ``@abstractmethod`` decorators were never enforced.  Declaring the
    metaclass with the ``metaclass=`` keyword restores abstract-class
    behaviour; both existing subclasses override both abstract methods, so
    they remain instantiable.
    """

    @abstractmethod
    def is_satisfied(self):
        """Return True when the algorithm should stop."""

    @abstractmethod
    def update(self):
        """Advance the condition's internal state by one step/generation."""
class ElapsedTimeStoppingCondition(StoppingCondition):
    """Stop once a wall-clock time budget has elapsed since construction."""

    def __init__(self, time):
        # Time in seconds must be provided
        self.initTime = datetime.datetime.now()
        self.time = datetime.timedelta(seconds=time)

    def is_satisfied(self):
        """True once the configured duration has passed."""
        elapsed = datetime.datetime.now() - self.initTime
        return elapsed >= self.time

    def update(self):
        """Time-based condition: nothing to track per generation."""
        pass
class NumGenerationsStoppingCondition(StoppingCondition):
    """Stop after a fixed number of generations have been counted."""

    def __init__(self, max_generations):
        self.maxGenerations = max_generations
        self.currentGen = 0

    def is_satisfied(self):
        """True once the generation counter reaches the configured maximum."""
        return self.maxGenerations == self.currentGen

    def update(self):
        """Count one completed generation."""
        self.currentGen += 1
| from abc import ABCMeta, abstractmethod
import datetime
class StoppingCondition:
__metaclass__ = ABCMeta
@abstractmethod
def is_satisfied(self):
pass
@abstractmethod
def update(self):
pass
class ElapsedTimeStoppingCondition(StoppingCondition):
# Time in seconds must be provided
def __init__(self, time):
self.initTime = datetime.datetime.now()
self.time = datetime.timedelta(seconds=time)
def is_satisfied(self):
now = datetime.datetime.now()
return (now - self.initTime) >= self.time
def update(self):
pass
class NumGenerationsStoppingCondition(StoppingCondition):
def __init__(self, max_generations):
self.maxGenerations = max_generations
self.currentGen = 0
def is_satisfied(self):
return self.currentGen == self.maxGenerations
def update(self):
self.currentGen += 1 | en | 0.822193 | # Time in seconds must be provided | 3.369144 | 3 |
chat_archive/html/keywords.py | shadowmoon-waltz/python-chat-archive | 15 | 6620230 | # Easy to use offline chat archive.
#
# Author: <NAME> <<EMAIL>>
# Last Change: July 22, 2018
# URL: https://github.com/xolox/python-chat-archive
"""Utility functions for working with the HTML encoded text."""
# Standard library modules.
import html
import html.entities
import html.parser
import io
import re
# Public identifiers that require documentation.
__all__ = ("KeywordHighlighter",)
class KeywordHighlighter(html.parser.HTMLParser):
    """Highlight keywords in HTML text while passing the markup through intact."""

    def __init__(self, *args, **kw):
        """
        Initialize a :class:`KeywordHighlighter` object.

        :param keywords: A list of strings with keywords to highlight.
        :param highlight_template: A template string with the ``{text}``
                                   placeholder that's used to highlight
                                   keyword matches.
        """
        # Consume our keyword arguments before they reach the superclass.
        self.highlight_template = kw.pop("highlight_template")
        # One alternation group matching any keyword, case-insensitively.
        alternation = "|".join(map(re.escape, kw.pop("keywords")))
        self.pattern = re.compile("({})".format(alternation), re.IGNORECASE)
        super().__init__(*args, **kw)

    def __call__(self, data):
        """Return *data* (an HTML string) with all keyword matches highlighted."""
        self.reset()
        self.feed(data)
        self.close()
        return self.output.getvalue()

    def handle_charref(self, value):
        """Write numeric character references through unchanged."""
        self.output.write(f"&#{value};")

    def handle_data(self, data):
        """Escape text and wrap keyword matches in the highlight template."""
        for token in self.pattern.split(data):
            text = html.escape(token)
            if self.pattern.match(token):
                text = self.highlight_template.format(text=text)
            self.output.write(text)

    def handle_endtag(self, tag):
        """Write end tags through unchanged."""
        self.output.write(f"</{tag}>")

    def handle_entityref(self, name):
        """Write named character references through unchanged."""
        self.output.write(f"&{name};")

    def handle_starttag(self, tag, attrs):
        """Write start tags (with attributes) through unchanged."""
        self.output.write(f"<{tag}")
        self.render_attrs(attrs)
        self.output.write(">")

    def handle_startendtag(self, tag, attrs):
        """Write self-closing tags through unchanged."""
        self.output.write(f"<{tag}")
        self.render_attrs(attrs)
        self.output.write("/>")

    def render_attrs(self, attrs):
        """Write a tag's attributes, HTML-escaping their values.

        NOTE(review): valueless attributes arrive from HTMLParser with
        ``value=None`` and would raise here — confirm inputs always use
        ``name="value"`` style attributes.
        """
        for name, value in attrs:
            self.output.write(' {}="{}"'.format(name, html.escape(value, quote=True)))

    def reset(self):
        """Reset the parser state and clear the output buffer.

        Preserves the compiled keyword pattern and the highlight template;
        called implicitly during initialization.
        """
        super().reset()
        self.output = io.StringIO()
| # Easy to use offline chat archive.
#
# Author: <NAME> <<EMAIL>>
# Last Change: July 22, 2018
# URL: https://github.com/xolox/python-chat-archive
"""Utility functions for working with the HTML encoded text."""
# Standard library modules.
import html
import html.entities
import html.parser
import io
import re
# Public identifiers that require documentation.
__all__ = ("KeywordHighlighter",)
class KeywordHighlighter(html.parser.HTMLParser):
"""A simple keyword highlighter for HTML based on :class:`html.parser.HTMLParser`."""
def __init__(self, *args, **kw):
"""
Initialize a :class:`KeywordHighlighter` object.
:param keywords: A list of strings with keywords to highlight.
:param highlight_template: A template string with the ``{text}``
placeholder that's used to highlight keyword
matches.
"""
# Hide keyword arguments from our superclass.
self.highlight_template = kw.pop("highlight_template")
# Generate a regular expression to find keywords.
regex = "(%s)" % "|".join(map(re.escape, kw.pop("keywords")))
self.pattern = re.compile(regex, re.IGNORECASE)
# Initialize our superclass.
super(KeywordHighlighter, self).__init__(*args, **kw)
def __call__(self, data):
"""
Highlight keywords in the given HTML fragment.
:param data: The HTML in which to highlight keywords (a string).
:returns: The highlighted HTML (a string).
"""
self.reset()
self.feed(data)
self.close()
return self.output.getvalue()
def handle_charref(self, value):
"""Process a numeric character reference."""
self.output.write("&#%s;" % value)
def handle_data(self, data):
"""Process textual data."""
for token in self.pattern.split(data):
escaped = html.escape(token)
if self.pattern.match(token):
self.output.write(self.highlight_template.format(text=escaped))
else:
self.output.write(escaped)
def handle_endtag(self, tag):
"""Process an end tag."""
self.output.write("</%s>" % tag)
def handle_entityref(self, name):
"""Process a named character reference."""
self.output.write("&%s;" % name)
def handle_starttag(self, tag, attrs):
"""Process a start tag."""
self.output.write("<%s" % tag)
self.render_attrs(attrs)
self.output.write(">")
def handle_startendtag(self, tag, attrs):
"""Process a start tag without end tag."""
self.output.write("<%s" % tag)
self.render_attrs(attrs)
self.output.write("/>")
def render_attrs(self, attrs):
"""Process the attributes of a tag."""
for name, value in attrs:
value = html.escape(value, quote=True)
self.output.write(' %s="%s"' % (name, value))
def reset(self):
"""
Reset the state of the keyword highlighter.
Clears the output buffer but preserves the keywords to be highlighted.
This method is called implicitly during initialization.
"""
# Reset our superclass.
super(KeywordHighlighter, self).reset()
# Clear the output buffer.
self.output = io.StringIO()
| en | 0.653123 | # Easy to use offline chat archive. # # Author: <NAME> <<EMAIL>> # Last Change: July 22, 2018 # URL: https://github.com/xolox/python-chat-archive Utility functions for working with the HTML encoded text. # Standard library modules. # Public identifiers that require documentation. A simple keyword highlighter for HTML based on :class:`html.parser.HTMLParser`. Initialize a :class:`KeywordHighlighter` object. :param keywords: A list of strings with keywords to highlight. :param highlight_template: A template string with the ``{text}`` placeholder that's used to highlight keyword matches. # Hide keyword arguments from our superclass. # Generate a regular expression to find keywords. # Initialize our superclass. Highlight keywords in the given HTML fragment. :param data: The HTML in which to highlight keywords (a string). :returns: The highlighted HTML (a string). Process a numeric character reference. #%s;" % value) Process textual data. Process an end tag. Process a named character reference. Process a start tag. Process a start tag without end tag. Process the attributes of a tag. Reset the state of the keyword highlighter. Clears the output buffer but preserves the keywords to be highlighted. This method is called implicitly during initialization. # Reset our superclass. # Clear the output buffer. | 2.922888 | 3 |
flaskProject/secret_santa/routes.py | SoerenMLS/SecretSanta | 0 | 6620231 | <gh_stars>0
from santahandler import generate_session, join_session, get_session
from flask import render_template, flash, redirect, request, make_response
from secret_santa import app
from secret_santa.forms import RegistrationForm, JoinForm
from dbhandler import reg_user, get_user, session_exists
from secret_santa.tables import table
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Landing page: register a new user, or redirect already-known users.

    A ``userid`` cookie marks an already-registered browser and skips the
    registration form entirely.
    """
    form = RegistrationForm()
    cookie = request.cookies.get("userid")
    print(cookie)  # NOTE(review): debug print — consider logging or removing.
    if cookie is not None:
        # Cookie present -> user already registered, go to the main page.
        return redirect('/secretsanta')
    if form.validate_on_submit():
        userid = reg_user(form.name.data, form.address.data, form.email.data)
        # NOTE(review): the user id is flashed to the page; confirm it is not
        # sensitive beyond this session.
        flash(f'{form.name.data}, You have been registered! your user-id is: {userid}, write it down!')
        resp = make_response(redirect('/secretsanta'))
        # Remember the user for one week (604800 s = 7 days).
        resp.set_cookie('userid', str(userid), max_age=604800)
        return resp
    return render_template('Index.html', form=form)
@app.route('/secretsanta', methods=['GET', 'POST'])
def secret_santa():
    """Main page: lets a registered user join a session via an invitation code."""
    # NOTE(review): request.cookies.get('userid') may be None for visitors
    # without a cookie — confirm get_user handles that case.
    user = get_user(request.cookies.get('userid'))
    form = JoinForm()
    if form.validate_on_submit():
        joined_session = join_session(user, form.invitation.data)
        if joined_session:
            return redirect(f'/session/{joined_session}')
        else:
            flash(f"I'm sorry but you're not invited to this session")
    elif request.method == 'POST':
        # Non-form POST (e.g. JSON body) — currently only printed.
        print(request.json)
    # user[0] is rendered as the display name — presumably the name column
    # of the user row; verify against get_user.
    return render_template('SecretSanta.html', form=form, name=user[0])
@app.route('/session/<session_id>')
def session(session_id):
    """Show a session page, or bounce back to /secretsanta if it does not exist."""
    if session_exists(session_id):
        user = get_user(request.cookies.get('userid'))
        print(user)  # NOTE(review): debug print of user data — consider removing.
        current_session = get_session(session_id)
        # NOTE(review): this flashes the invitation code to anyone who knows
        # the session id — confirm that disclosure is intended.
        flash(f"session invitation is: {current_session.invitation}")
        return render_template('session.html', table=table, session_name=session_id)
    flash(f"session: '{session_id}' does not exist")
    return redirect('/secretsanta')
@app.route('/generate', methods=['POST'])
def generate():
    """Create a new session for the user id supplied in the JSON body."""
    if request.method == 'POST':
        # Only POST is routed here, so this branch always runs; the 400
        # fallback below is effectively unreachable.
        request_json = request.json
        generated_session = generate_session(request_json['userid'])
        return redirect(f'/session/{generated_session.session_id}')
    return 'bad request', 400
| from santahandler import generate_session, join_session, get_session
from flask import render_template, flash, redirect, request, make_response
from secret_santa import app
from secret_santa.forms import RegistrationForm, JoinForm
from dbhandler import reg_user, get_user, session_exists
from secret_santa.tables import table
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
form = RegistrationForm()
cookie = request.cookies.get("userid")
print(cookie)
if cookie is not None:
return redirect('/secretsanta')
if form.validate_on_submit():
userid = reg_user(form.name.data, form.address.data, form.email.data)
flash(f'{form.name.data}, You have been registered! your user-id is: {userid}, write it down!')
resp = make_response(redirect('/secretsanta'))
resp.set_cookie('userid', str(userid), max_age=604800)
return resp
return render_template('Index.html', form=form)
@app.route('/secretsanta', methods=['GET', 'POST'])
def secret_santa():
user = get_user(request.cookies.get('userid'))
form = JoinForm()
if form.validate_on_submit():
joined_session = join_session(user, form.invitation.data)
if joined_session:
return redirect(f'/session/{joined_session}')
else:
flash(f"I'm sorry but you're not invited to this session")
elif request.method == 'POST':
print(request.json)
return render_template('SecretSanta.html', form=form, name=user[0])
@app.route('/session/<session_id>')
def session(session_id):
if session_exists(session_id):
user = get_user(request.cookies.get('userid'))
print(user)
current_session = get_session(session_id)
flash(f"session invitation is: {current_session.invitation}")
return render_template('session.html', table=table, session_name=session_id)
flash(f"session: '{session_id}' does not exist")
return redirect('/secretsanta')
@app.route('/generate', methods=['POST'])
def generate():
if request.method == 'POST':
request_json = request.json
generated_session = generate_session(request_json['userid'])
return redirect(f'/session/{generated_session.session_id}')
return 'bad request', 400 | none | 1 | 2.358382 | 2 | |
lib/JumpScale/lib/html/__init__.py | Jumpscale/jumpscale6_core | 1 | 6620232 | <filename>lib/JumpScale/lib/html/__init__.py
from JumpScale import j
j.base.loader.makeAvailable(j, 'tools')
from HTMLFactory import HTMLFactory
j.tools.html = HTMLFactory()
| <filename>lib/JumpScale/lib/html/__init__.py
from JumpScale import j
j.base.loader.makeAvailable(j, 'tools')
from HTMLFactory import HTMLFactory
j.tools.html = HTMLFactory()
| none | 1 | 1.446796 | 1 | |
energymeter/test_pafal.py | msw1970/sensorReporter | 99 | 6620233 | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to test connection to the pafal smart meter.
"""
from em_connections import Pafal20ec3grConnector
import argparse
_SERIAL_DEVICE_DEFAULT = "/dev/ttyUSB0"
parser = argparse.ArgumentParser(description='Perform a test read to the pafal device.')
parser.add_argument( '--port',
nargs = 1,
required = False,
help = 'The device file to use for accessing the serial connection to Pafal '
'(defaults to {deflt})'.format(deflt = _SERIAL_DEVICE_DEFAULT)
)
args = parser.parse_args()
myPort = _SERIAL_DEVICE_DEFAULT
if args.port:
myPort = args.port[0]
print("Setting up class ...")
serdev = Pafal20ec3grConnector( devicePort=myPort )
print("Requesting data ...")
result = serdev.readData( {
"0.0.0": [False],
"0.2.0": [False],
"1.8.0*00": [True],
"2.8.0*00": [True]
} )
print("Result:")
print("Meter number: " + result.get("0.0.0", "<could not be acquired>"))
print("Meter firmware: " + result.get("0.2.0", "<could not be acquired>"))
print("Total import: " + str(result.get("1.8.0*00", "<could not be acquired>")))
print("Total export: " + str(result.get("2.8.0*00", "<could not be acquired>")))
print("Closing connection")
serdev.close()
print("Finished") | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to test connection to the pafal smart meter.
"""
from em_connections import Pafal20ec3grConnector
import argparse
_SERIAL_DEVICE_DEFAULT = "/dev/ttyUSB0"
parser = argparse.ArgumentParser(description='Perform a test read to the pafal device.')
parser.add_argument( '--port',
nargs = 1,
required = False,
help = 'The device file to use for accessing the serial connection to Pafal '
'(defaults to {deflt})'.format(deflt = _SERIAL_DEVICE_DEFAULT)
)
args = parser.parse_args()
myPort = _SERIAL_DEVICE_DEFAULT
if args.port:
myPort = args.port[0]
print("Setting up class ...")
serdev = Pafal20ec3grConnector( devicePort=myPort )
print("Requesting data ...")
result = serdev.readData( {
"0.0.0": [False],
"0.2.0": [False],
"1.8.0*00": [True],
"2.8.0*00": [True]
} )
print("Result:")
print("Meter number: " + result.get("0.0.0", "<could not be acquired>"))
print("Meter firmware: " + result.get("0.2.0", "<could not be acquired>"))
print("Total import: " + str(result.get("1.8.0*00", "<could not be acquired>")))
print("Total export: " + str(result.get("2.8.0*00", "<could not be acquired>")))
print("Closing connection")
serdev.close()
print("Finished") | en | 0.839911 | # Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Simple script to test connection to the pafal smart meter. | 2.446033 | 2 |
insta/instagram/urls.py | toelapiut/Insta | 1 | 6620234 | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url( r'^$', views.index, name="timeline"),
url( r'^new/post/$', views.post, name="new-post"),
url( r'^profile/(\d+)', views.profile, name="profile"),
url( r'^comment(\d+)', views.comment, name="comment" ),
url( r'^following/(\d+)', views.follow, name="follow"),
url( r'^look_up/(\d+)', views.look_up, name="look-up"),
url( r'^like/(\d+)', views.like, name="liker"),
url( r'^post/(\d+)', views.post_look, name="post_look"),
]
if settings.DEBUG:
urlpatterns += static( settings.MEDIA_URL, document_root=settings.MEDIA_ROOT )
| from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url( r'^$', views.index, name="timeline"),
url( r'^new/post/$', views.post, name="new-post"),
url( r'^profile/(\d+)', views.profile, name="profile"),
url( r'^comment(\d+)', views.comment, name="comment" ),
url( r'^following/(\d+)', views.follow, name="follow"),
url( r'^look_up/(\d+)', views.look_up, name="look-up"),
url( r'^like/(\d+)', views.like, name="liker"),
url( r'^post/(\d+)', views.post_look, name="post_look"),
]
if settings.DEBUG:
urlpatterns += static( settings.MEDIA_URL, document_root=settings.MEDIA_ROOT )
| none | 1 | 1.968792 | 2 | |
ligature/update.py | CorsoSource/ligature | 0 | 6620235 | from weakref import WeakSet
class UpdateModel(object):
"""Provide the object with a way to notify other objects
that depend on it.
"""
# Slots ensures we're explicit and fast
__slots__ = ('_sources', '_listeners', '_up_to_date',
'_metadata', '_notify_callback',
'__weakref__')
def __init__(self, metadata=None, notify_callback=None, *args, **kwargs):
"""Initialize the chain.
By tracking both sources and listeners, we can make a graph of what
gets updated by what.
The metadata is a bucket for runtime info to be checked during notifications.
"""
# Initialize mixins - NOPE Update is not cooperative.
# It expects to be a base class
#super(UpdateModel, self).__init__(*args, **kwargs)
self._sources = tuple()
self._listeners = WeakSet()
self._metadata = metadata
self._notify_callback = notify_callback
self._up_to_date = True
# Initialize mixins
super(UpdateModel, self).__init__(*args, **kwargs)
@property
def metadata(self):
return self._metadata
def subscribe(self, listener):
"""Add a listener to the subscriber list.
This isn't a set - order will likely help efficiency,
the list will be updated infrequently, and the list
should never get very big anyhow.
Note that Calc objects have a source to act as their publisher list.
(In case we want to backtrace.)
"""
if not listener in self._listeners:
self._listeners.add(listener)
def unsubscribe(self, listener):
"""Remove a listener from the subscriber list.
"""
while listener in self._listeners:
self._listeners.remove(listener)
def notify(self, old_selector, new_selector, source=None, depth=0):
"""Fires an update to make sure dependents are updated, if needed.
The selectors show what happened in the update.
"""
for dependent in self._listeners:
try:
# TODO: verify that for each update, only one update is marshalled and fired
# for example, if an update forces a Composable to clear,
# then we can expect that it'll fire for both the clear _and_ the pass-thru update
if dependent.up_to_date or old_selector:
dependent.update(old_selector, new_selector, source or self, depth+1)
except NotImplementedError:
pass
except AttributeError:
pass
if self._notify_callback:
try:
self._notify_callback(old_selector, new_selector, source or self, depth)
except:
pass # isolate event failures
def update(self, old_selector, new_selector, source=None, depth=0):
"""Execute the update. Each class will have its own way to implement this."""
# (None, None) signals that the data is out of date,
# but there is nothing for dependents to do yet.
self._up_to_date = False
# Pass-through updates without triggering
self.notify(old_selector, new_selector, source or self, depth)
# NOTE: super calls in subclasses should mark up_to_date when they're brought up
@property
def listeners(self):
return self._listeners
@property
def up_to_date(self):
return self._up_to_date
@listeners.setter
def listeners(self, new_listeners):
self._replace_listeners(new_listeners)
def _replace_listeners(self, new_listeners):
"""If the listeners are changed en masse, break
all the subscriptions.
This setter makes sure the subscription methods are never skipped.
"""
while self._listeners:
listener = self._listeners[0]
self.unsubscribe(listener)
for listener in new_listeners:
self.subscribe(listener)
@property
def sources(self):
return self._sources
@sources.setter
def sources(self, new_sources):
self._replace_sources(new_sources)
def _replace_sources(self, new_sources):
for source in set(self._sources).difference(set(new_sources)):
source.unsubscribe(self)
for source in new_sources:
source.subscribe(self)
self._sources = new_sources
| from weakref import WeakSet
class UpdateModel(object):
"""Provide the object with a way to notify other objects
that depend on it.
"""
# Slots ensures we're explicit and fast
__slots__ = ('_sources', '_listeners', '_up_to_date',
'_metadata', '_notify_callback',
'__weakref__')
def __init__(self, metadata=None, notify_callback=None, *args, **kwargs):
"""Initialize the chain.
By tracking both sources and listeners, we can make a graph of what
gets updated by what.
The metadata is a bucket for runtime info to be checked during notifications.
"""
# Initialize mixins - NOPE Update is not cooperative.
# It expects to be a base class
#super(UpdateModel, self).__init__(*args, **kwargs)
self._sources = tuple()
self._listeners = WeakSet()
self._metadata = metadata
self._notify_callback = notify_callback
self._up_to_date = True
# Initialize mixins
super(UpdateModel, self).__init__(*args, **kwargs)
@property
def metadata(self):
return self._metadata
def subscribe(self, listener):
"""Add a listener to the subscriber list.
This isn't a set - order will likely help efficiency,
the list will be updated infrequently, and the list
should never get very big anyhow.
Note that Calc objects have a source to act as their publisher list.
(In case we want to backtrace.)
"""
if not listener in self._listeners:
self._listeners.add(listener)
def unsubscribe(self, listener):
"""Remove a listener from the subscriber list.
"""
while listener in self._listeners:
self._listeners.remove(listener)
def notify(self, old_selector, new_selector, source=None, depth=0):
"""Fires an update to make sure dependents are updated, if needed.
The selectors show what happened in the update.
"""
for dependent in self._listeners:
try:
# TODO: verify that for each update, only one update is marshalled and fired
# for example, if an update forces a Composable to clear,
# then we can expect that it'll fire for both the clear _and_ the pass-thru update
if dependent.up_to_date or old_selector:
dependent.update(old_selector, new_selector, source or self, depth+1)
except NotImplementedError:
pass
except AttributeError:
pass
if self._notify_callback:
try:
self._notify_callback(old_selector, new_selector, source or self, depth)
except:
pass # isolate event failures
def update(self, old_selector, new_selector, source=None, depth=0):
"""Execute the update. Each class will have its own way to implement this."""
# (None, None) signals that the data is out of date,
# but there is nothing for dependents to do yet.
self._up_to_date = False
# Pass-through updates without triggering
self.notify(old_selector, new_selector, source or self, depth)
# NOTE: super calls in subclasses should mark up_to_date when they're brought up
@property
def listeners(self):
return self._listeners
@property
def up_to_date(self):
return self._up_to_date
@listeners.setter
def listeners(self, new_listeners):
self._replace_listeners(new_listeners)
def _replace_listeners(self, new_listeners):
"""If the listeners are changed en masse, break
all the subscriptions.
This setter makes sure the subscription methods are never skipped.
"""
while self._listeners:
listener = self._listeners[0]
self.unsubscribe(listener)
for listener in new_listeners:
self.subscribe(listener)
@property
def sources(self):
return self._sources
@sources.setter
def sources(self, new_sources):
self._replace_sources(new_sources)
def _replace_sources(self, new_sources):
for source in set(self._sources).difference(set(new_sources)):
source.unsubscribe(self)
for source in new_sources:
source.subscribe(self)
self._sources = new_sources
| en | 0.909085 | Provide the object with a way to notify other objects that depend on it. # Slots ensures we're explicit and fast Initialize the chain. By tracking both sources and listeners, we can make a graph of what gets updated by what. The metadata is a bucket for runtime info to be checked during notifications. # Initialize mixins - NOPE Update is not cooperative. # It expects to be a base class #super(UpdateModel, self).__init__(*args, **kwargs) # Initialize mixins Add a listener to the subscriber list. This isn't a set - order will likely help efficiency, the list will be updated infrequently, and the list should never get very big anyhow. Note that Calc objects have a source to act as their publisher list. (In case we want to backtrace.) Remove a listener from the subscriber list. Fires an update to make sure dependents are updated, if needed. The selectors show what happened in the update. # TODO: verify that for each update, only one update is marshalled and fired # for example, if an update forces a Composable to clear, # then we can expect that it'll fire for both the clear _and_ the pass-thru update # isolate event failures Execute the update. Each class will have its own way to implement this. # (None, None) signals that the data is out of date, # but there is nothing for dependents to do yet. # Pass-through updates without triggering # NOTE: super calls in subclasses should mark up_to_date when they're brought up If the listeners are changed en masse, break all the subscriptions. This setter makes sure the subscription methods are never skipped. | 2.548155 | 3 |
src/bobbit/modules/vaporwave.py | ginglis13/bobbit | 10 | 6620236 | # vaporwave.py
# Metdata
NAME = 'vapor'
ENABLE = True
PATTERN = r'^!vapor (?P<phrase>.*)'
USAGE = '''Usage: !vapor <phrase>
Given a phrase, this transforms it into a vaporwave a e s t h e t i c phrase
with full-width characters.
Example:
> !vapor it works!
i t w o r k s !
'''
# Command
# Note - thanks to https://github.com/jonesmartins/vapyrwave for figuring out
# how to do full width :)
def transform_vaporwave(sentence):
new_sentence = ''
char_distance = ord('A') - ord('A') # 65248
for character in sentence:
ord_char = ord(character)
if ord('!') <= ord_char <= ord('~'):
character = chr(ord_char + char_distance)
new_sentence += character
return new_sentence
def make_horizontal(sentence, spaces=1):
spaces_str = ' ' * spaces
new_sentence = spaces_str.join([s for s in sentence])
return new_sentence
async def vapor(bot, message, phrase):
phrase = phrase.lower().rstrip()
response = make_horizontal(
transform_vaporwave(phrase)
)
return message.with_body(response)
# Register
def register(bot):
return (
('command', PATTERN, vapor),
)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
| # vaporwave.py
# Metdata
NAME = 'vapor'
ENABLE = True
PATTERN = r'^!vapor (?P<phrase>.*)'
USAGE = '''Usage: !vapor <phrase>
Given a phrase, this transforms it into a vaporwave a e s t h e t i c phrase
with full-width characters.
Example:
> !vapor it works!
i t w o r k s !
'''
# Command
# Note - thanks to https://github.com/jonesmartins/vapyrwave for figuring out
# how to do full width :)
def transform_vaporwave(sentence):
new_sentence = ''
char_distance = ord('A') - ord('A') # 65248
for character in sentence:
ord_char = ord(character)
if ord('!') <= ord_char <= ord('~'):
character = chr(ord_char + char_distance)
new_sentence += character
return new_sentence
def make_horizontal(sentence, spaces=1):
spaces_str = ' ' * spaces
new_sentence = spaces_str.join([s for s in sentence])
return new_sentence
async def vapor(bot, message, phrase):
phrase = phrase.lower().rstrip()
response = make_horizontal(
transform_vaporwave(phrase)
)
return message.with_body(response)
# Register
def register(bot):
return (
('command', PATTERN, vapor),
)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
| en | 0.584401 | # vaporwave.py # Metdata Usage: !vapor <phrase> Given a phrase, this transforms it into a vaporwave a e s t h e t i c phrase with full-width characters. Example: > !vapor it works! i t w o r k s ! # Command # Note - thanks to https://github.com/jonesmartins/vapyrwave for figuring out # how to do full width :) # 65248 # Register # vim: set sts=4 sw=4 ts=8 expandtab ft=python: | 2.978206 | 3 |
main.py | AlexNilsson/FIFA18 | 0 | 6620237 | import os
import numpy as np
from utility import csv_to_dict, keep_entries, group_by_key, unique_members_from_columns
import config as C
from FIFANET import FIFANET
""" FILE PATHS """
PATH_TO_ROOT = os.path.dirname(os.path.abspath(__file__))
PATH_TO_DATA = os.path.join(PATH_TO_ROOT, 'data')
PATH_TO_PREDICT = os.path.join(PATH_TO_ROOT, 'predict')
""" LOAD DATA """
WORLD_CUPS = csv_to_dict(os.path.join(PATH_TO_DATA, 'WorldCups.csv'))
WORLD_CUP_PLAYERS = csv_to_dict(os.path.join(PATH_TO_DATA, 'WorldCupPlayers.csv'))
WORLD_CUP_MATCHES = csv_to_dict(os.path.join(PATH_TO_DATA, 'WorldCupMatches.csv'))
PREDICT_MATCHES = csv_to_dict(os.path.join(PATH_TO_PREDICT, 'matches.csv'))
""" PREPROCESS DATA"""
# EXTRACT FEATURES OF INTEREST
WORLD_CUP_MATCHES = keep_entries(WORLD_CUP_MATCHES, ['Year','Home Team Name','Away Team Name','Home Team Goals','Away Team Goals'])
# EXTRACT LIST OF ALL TEAMS
ALL_TEAMS = unique_members_from_columns(WORLD_CUP_MATCHES, ['Home Team Name', 'Away Team Name'])
# ONE HOT ENCODINGS OF ALL TEAMS
ALL_TEAMS_ENCODING = dict(zip(ALL_TEAMS, np.eye(len(ALL_TEAMS))))
# GROUP BY YEAR/CUP
WORLD_CUP_MATCHES = group_by_key(WORLD_CUP_MATCHES, 'Year')
# Decay scores to account for athletic improvements over time
N_GAMES = len(WORLD_CUP_MATCHES)
for i, year in enumerate(WORLD_CUP_MATCHES):
scoreDecay = 1 - C.SCORE_DECAY_FACTOR * (N_GAMES-i-1)/(N_GAMES-1)
for i, match in enumerate(WORLD_CUP_MATCHES[year]):
try:
match[2] = scoreDecay * int(match[2])
match[3] = scoreDecay * int(match[2])
except:
# Remove matches written in the wrong format in the database
# Some entries have home team & scores mixed up and can not be trusted :(
del match
# Matches to Predict
temp = []
for x in PREDICT_MATCHES:
teams = list(x.values())
match = []
temp.append(list(np.append(
ALL_TEAMS_ENCODING[teams[0]],
ALL_TEAMS_ENCODING[teams[1]]
)))
PREDICT_MATCHES = np.array(temp, dtype=int)
""" SPLIT DATA INTO TRAIN/TEST SETS """
""" AND SEPARATE FEATURES FROM LABELS """
if C.USE_TEST_DATA:
TEST_YEAR = '2014'
TEST_DATA_RAW = WORLD_CUP_MATCHES.pop(TEST_YEAR)
TRAIN_DATA_RAW = []
for x in WORLD_CUP_MATCHES:
for m in WORLD_CUP_MATCHES[x]:
TRAIN_DATA_RAW.append(m)
TRAIN_DATA_FEATURES = []
TRAIN_DATA_LABELS = []
for x in TRAIN_DATA_RAW:
try:
np.array([x[2],x[3]],dtype=float)
combined = np.append( ALL_TEAMS_ENCODING[x[0]], ALL_TEAMS_ENCODING[x[1]] )
TRAIN_DATA_FEATURES.append(list(combined))
TRAIN_DATA_LABELS.append(
list(np.array([x[2], x[3]], dtype=float))
)
except: continue
TRAIN_DATA_FEATURES = np.array(TRAIN_DATA_FEATURES, dtype=int)
TRAIN_DATA_LABELS = np.array(TRAIN_DATA_LABELS, dtype=float)
if C.USE_TEST_DATA:
TEST_DATA_FEATURES = []
TEST_DATA_LABELS = []
for x in TEST_DATA_RAW:
try:
np.array([x[2],x[3]],dtype=float)
combined = np.append(ALL_TEAMS_ENCODING[x[0]], ALL_TEAMS_ENCODING[x[1]])
TEST_DATA_FEATURES.append(list(combined))
TEST_DATA_LABELS.append(
list(np.array([x[2], x[3]], dtype=float))
)
except: continue
TEST_DATA_FEATURES = np.array(TEST_DATA_FEATURES, dtype=int)
TEST_DATA_LABELS = np.array(TEST_DATA_LABELS, dtype=float)
""" EXECUTE """
fifaNet = FIFANET()
if C.USE_TEST_DATA:
fifaNet.train(
np.array(TRAIN_DATA_FEATURES),
np.array(TRAIN_DATA_LABELS),
np.array(TEST_DATA_FEATURES),
np.array(TEST_DATA_LABELS),
epochs = C.EPOCHS,
batch_size = C.BATCH_SIZE
)
else:
fifaNet.train(
np.array(TRAIN_DATA_FEATURES),
np.array(TRAIN_DATA_LABELS),
np.array(TRAIN_DATA_FEATURES),
np.array(TRAIN_DATA_LABELS),
epochs = C.EPOCHS,
batch_size = C.BATCH_SIZE
)
fifaNet.predict(np.array(PREDICT_MATCHES))
| import os
import numpy as np
from utility import csv_to_dict, keep_entries, group_by_key, unique_members_from_columns
import config as C
from FIFANET import FIFANET
""" FILE PATHS """
PATH_TO_ROOT = os.path.dirname(os.path.abspath(__file__))
PATH_TO_DATA = os.path.join(PATH_TO_ROOT, 'data')
PATH_TO_PREDICT = os.path.join(PATH_TO_ROOT, 'predict')
""" LOAD DATA """
WORLD_CUPS = csv_to_dict(os.path.join(PATH_TO_DATA, 'WorldCups.csv'))
WORLD_CUP_PLAYERS = csv_to_dict(os.path.join(PATH_TO_DATA, 'WorldCupPlayers.csv'))
WORLD_CUP_MATCHES = csv_to_dict(os.path.join(PATH_TO_DATA, 'WorldCupMatches.csv'))
PREDICT_MATCHES = csv_to_dict(os.path.join(PATH_TO_PREDICT, 'matches.csv'))
""" PREPROCESS DATA"""
# EXTRACT FEATURES OF INTEREST
WORLD_CUP_MATCHES = keep_entries(WORLD_CUP_MATCHES, ['Year','Home Team Name','Away Team Name','Home Team Goals','Away Team Goals'])
# EXTRACT LIST OF ALL TEAMS
ALL_TEAMS = unique_members_from_columns(WORLD_CUP_MATCHES, ['Home Team Name', 'Away Team Name'])
# ONE HOT ENCODINGS OF ALL TEAMS
ALL_TEAMS_ENCODING = dict(zip(ALL_TEAMS, np.eye(len(ALL_TEAMS))))
# GROUP BY YEAR/CUP
WORLD_CUP_MATCHES = group_by_key(WORLD_CUP_MATCHES, 'Year')
# Decay scores to account for athletic improvements over time
N_GAMES = len(WORLD_CUP_MATCHES)
for i, year in enumerate(WORLD_CUP_MATCHES):
scoreDecay = 1 - C.SCORE_DECAY_FACTOR * (N_GAMES-i-1)/(N_GAMES-1)
for i, match in enumerate(WORLD_CUP_MATCHES[year]):
try:
match[2] = scoreDecay * int(match[2])
match[3] = scoreDecay * int(match[2])
except:
# Remove matches written in the wrong format in the database
# Some entries have home team & scores mixed up and can not be trusted :(
del match
# Matches to Predict
temp = []
for x in PREDICT_MATCHES:
teams = list(x.values())
match = []
temp.append(list(np.append(
ALL_TEAMS_ENCODING[teams[0]],
ALL_TEAMS_ENCODING[teams[1]]
)))
PREDICT_MATCHES = np.array(temp, dtype=int)
""" SPLIT DATA INTO TRAIN/TEST SETS """
""" AND SEPARATE FEATURES FROM LABELS """
if C.USE_TEST_DATA:
TEST_YEAR = '2014'
TEST_DATA_RAW = WORLD_CUP_MATCHES.pop(TEST_YEAR)
TRAIN_DATA_RAW = []
for x in WORLD_CUP_MATCHES:
for m in WORLD_CUP_MATCHES[x]:
TRAIN_DATA_RAW.append(m)
TRAIN_DATA_FEATURES = []
TRAIN_DATA_LABELS = []
for x in TRAIN_DATA_RAW:
try:
np.array([x[2],x[3]],dtype=float)
combined = np.append( ALL_TEAMS_ENCODING[x[0]], ALL_TEAMS_ENCODING[x[1]] )
TRAIN_DATA_FEATURES.append(list(combined))
TRAIN_DATA_LABELS.append(
list(np.array([x[2], x[3]], dtype=float))
)
except: continue
TRAIN_DATA_FEATURES = np.array(TRAIN_DATA_FEATURES, dtype=int)
TRAIN_DATA_LABELS = np.array(TRAIN_DATA_LABELS, dtype=float)
if C.USE_TEST_DATA:
TEST_DATA_FEATURES = []
TEST_DATA_LABELS = []
for x in TEST_DATA_RAW:
try:
np.array([x[2],x[3]],dtype=float)
combined = np.append(ALL_TEAMS_ENCODING[x[0]], ALL_TEAMS_ENCODING[x[1]])
TEST_DATA_FEATURES.append(list(combined))
TEST_DATA_LABELS.append(
list(np.array([x[2], x[3]], dtype=float))
)
except: continue
TEST_DATA_FEATURES = np.array(TEST_DATA_FEATURES, dtype=int)
TEST_DATA_LABELS = np.array(TEST_DATA_LABELS, dtype=float)
""" EXECUTE """
fifaNet = FIFANET()
if C.USE_TEST_DATA:
fifaNet.train(
np.array(TRAIN_DATA_FEATURES),
np.array(TRAIN_DATA_LABELS),
np.array(TEST_DATA_FEATURES),
np.array(TEST_DATA_LABELS),
epochs = C.EPOCHS,
batch_size = C.BATCH_SIZE
)
else:
fifaNet.train(
np.array(TRAIN_DATA_FEATURES),
np.array(TRAIN_DATA_LABELS),
np.array(TRAIN_DATA_FEATURES),
np.array(TRAIN_DATA_LABELS),
epochs = C.EPOCHS,
batch_size = C.BATCH_SIZE
)
fifaNet.predict(np.array(PREDICT_MATCHES))
| en | 0.751991 | FILE PATHS LOAD DATA PREPROCESS DATA # EXTRACT FEATURES OF INTEREST # EXTRACT LIST OF ALL TEAMS # ONE HOT ENCODINGS OF ALL TEAMS # GROUP BY YEAR/CUP # Decay scores to account for athletic improvements over time # Remove matches written in the wrong format in the database # Some entries have home team & scores mixed up and can not be trusted :( # Matches to Predict SPLIT DATA INTO TRAIN/TEST SETS AND SEPARATE FEATURES FROM LABELS EXECUTE | 2.65286 | 3 |
pyspider/database/sqlite/resultdb.py | hxs91/pyspider | 1 | 6620238 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<<EMAIL>>
# http://binux.me
# Created on 2014-10-13 17:08:43
import re
import time
import json
import thread
import sqlite3
from pyspider.database.base.resultdb import ResultDB as BaseResultDB
from pyspider.database.basedb import BaseDB
class ResultDB(BaseResultDB, BaseDB):
__tablename__ = 'resultdb'
placeholder = '?'
def __init__(self, path):
self.path = path
self.last_pid = 0
self.conn = None
self._list_project()
@property
def dbcur(self):
pid = thread.get_ident()
if not (self.conn and pid == self.last_pid):
self.last_pid = pid
self.conn = sqlite3.connect(self.path, isolation_level=None)
return self.conn.cursor()
def _list_project(self):
self.projects = set()
if self.__tablename__:
prefix = '%s_' % self.__tablename__
else:
prefix = ''
for project, in self._select('sqlite_master', what='name',
where='type = "table"'):
if project.startswith(prefix):
project = project[len(prefix):]
self.projects.add(project)
def _tablename(self, project):
if self.__tablename__:
return '%s_%s' % (self.__tablename__, project)
else:
return project
def _create_project(self, project):
assert re.match(r'^\w+$', project) is not None
tablename = self._tablename(project)
self._execute('''CREATE TABLE IF NOT EXISTS `%s` (
taskid PRIMARY KEY,
url,
result,
updatetime
)''' % tablename)
def _parse(self, data):
if 'result' in data:
data['result'] = json.loads(data['result'])
return data
def _stringify(self, data):
if 'result' in data:
data['result'] = json.dumps(data['result'])
return data
def save(self, project, taskid, url, result):
tablename = self._tablename(project)
if project not in self.projects:
self._create_project(project)
self._list_project()
obj = {
'taskid': taskid,
'url': url,
'result': result,
'updatetime': time.time(),
}
return self._replace(tablename, **self._stringify(obj))
def select(self, project, fields=None, offset=0, limit=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
tablename = self._tablename(project)
for task in self._select2dic(tablename, what=fields, offset=offset, limit=limit):
yield self._parse(task)
def count(self, project):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return 0
tablename = self._tablename(project)
for count, in self._execute("SELECT count(1) FROM %s" % self.escape(tablename)):
return count
def get(self, project, taskid, fields=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
tablename = self._tablename(project)
where = "`taskid` = %s" % self.placeholder
for task in self._select2dic(tablename, what=fields,
where=where, where_values=(taskid, )):
return self._parse(task)
def drop(self, project):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
tablename = self._tablename(project)
self._execute("DROP TABLE %s" % self.escape(tablename))
self._list_project()
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<<EMAIL>>
# http://binux.me
# Created on 2014-10-13 17:08:43
import re
import time
import json
import thread
import sqlite3
from pyspider.database.base.resultdb import ResultDB as BaseResultDB
from pyspider.database.basedb import BaseDB
class ResultDB(BaseResultDB, BaseDB):
__tablename__ = 'resultdb'
placeholder = '?'
def __init__(self, path):
self.path = path
self.last_pid = 0
self.conn = None
self._list_project()
@property
def dbcur(self):
pid = thread.get_ident()
if not (self.conn and pid == self.last_pid):
self.last_pid = pid
self.conn = sqlite3.connect(self.path, isolation_level=None)
return self.conn.cursor()
def _list_project(self):
self.projects = set()
if self.__tablename__:
prefix = '%s_' % self.__tablename__
else:
prefix = ''
for project, in self._select('sqlite_master', what='name',
where='type = "table"'):
if project.startswith(prefix):
project = project[len(prefix):]
self.projects.add(project)
def _tablename(self, project):
if self.__tablename__:
return '%s_%s' % (self.__tablename__, project)
else:
return project
def _create_project(self, project):
assert re.match(r'^\w+$', project) is not None
tablename = self._tablename(project)
self._execute('''CREATE TABLE IF NOT EXISTS `%s` (
taskid PRIMARY KEY,
url,
result,
updatetime
)''' % tablename)
def _parse(self, data):
if 'result' in data:
data['result'] = json.loads(data['result'])
return data
def _stringify(self, data):
if 'result' in data:
data['result'] = json.dumps(data['result'])
return data
def save(self, project, taskid, url, result):
tablename = self._tablename(project)
if project not in self.projects:
self._create_project(project)
self._list_project()
obj = {
'taskid': taskid,
'url': url,
'result': result,
'updatetime': time.time(),
}
return self._replace(tablename, **self._stringify(obj))
def select(self, project, fields=None, offset=0, limit=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
tablename = self._tablename(project)
for task in self._select2dic(tablename, what=fields, offset=offset, limit=limit):
yield self._parse(task)
def count(self, project):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return 0
tablename = self._tablename(project)
for count, in self._execute("SELECT count(1) FROM %s" % self.escape(tablename)):
return count
def get(self, project, taskid, fields=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
tablename = self._tablename(project)
where = "`taskid` = %s" % self.placeholder
for task in self._select2dic(tablename, what=fields,
where=where, where_values=(taskid, )):
return self._parse(task)
def drop(self, project):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
tablename = self._tablename(project)
self._execute("DROP TABLE %s" % self.escape(tablename))
self._list_project()
| en | 0.264259 | #!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<<EMAIL>> # http://binux.me # Created on 2014-10-13 17:08:43 CREATE TABLE IF NOT EXISTS `%s` ( taskid PRIMARY KEY, url, result, updatetime ) | 2.092113 | 2 |
mobsec/forms.py | droidsec-cn/mobsec | 0 | 6620239 | <gh_stars>0
from flask_wtf import Form
from wtforms import validators
| from flask_wtf import Form
from wtforms import validators | none | 1 | 1.141161 | 1 | |
students/k3340/practical_works/Zakoulov_Ilya/lr1/simple_django_app/admin.py | TonikX/ITMO_ICT_-WebProgramming_2020 | 10 | 6620240 | from django.contrib import admin
from .models import Driver, Car, DriverLicense, CarDriverOwn
admin.site.register(Driver)
admin.site.register(Car)
admin.site.register(DriverLicense)
admin.site.register(CarDriverOwn)
| from django.contrib import admin
from .models import Driver, Car, DriverLicense, CarDriverOwn
admin.site.register(Driver)
admin.site.register(Car)
admin.site.register(DriverLicense)
admin.site.register(CarDriverOwn)
| none | 1 | 1.445995 | 1 | |
utils/linux_build.py | swarminglogic/scons-x-compile | 10 | 6620241 | <filename>utils/linux_build.py
import subprocess
# General setup
##############################
sdl_prefix = subprocess.check_output(["sdl2-config", "--prefix"]).strip()
sdl_libs = ['GLEW', 'GL', 'GLU',
'SDL2', 'SDL2_image',
'SDL2_mixer', 'SDL2_ttf', 'pthread']
other_libs = ['m', 'dl', 'rt']
# Required output variables
##############################
targetSuffix = ''
base ='#/src'
libs = sdl_libs + other_libs
libpaths = ['#/lib/',
sdl_prefix + '/lib']
cppflags = ['-D_REENTRANT']
sourcepaths = [base,
sdl_prefix + '/include']
linkflags = []
| <filename>utils/linux_build.py
import subprocess
# General setup
##############################
sdl_prefix = subprocess.check_output(["sdl2-config", "--prefix"]).strip()
sdl_libs = ['GLEW', 'GL', 'GLU',
'SDL2', 'SDL2_image',
'SDL2_mixer', 'SDL2_ttf', 'pthread']
other_libs = ['m', 'dl', 'rt']
# Required output variables
##############################
targetSuffix = ''
base ='#/src'
libs = sdl_libs + other_libs
libpaths = ['#/lib/',
sdl_prefix + '/lib']
cppflags = ['-D_REENTRANT']
sourcepaths = [base,
sdl_prefix + '/include']
linkflags = []
| de | 0.652951 | # General setup ############################## # Required output variables ############################## | 2.259864 | 2 |
do_username/generate.py | shreyas44/do_username_py | 0 | 6620242 | <gh_stars>0
import math
import random
sea_creatures = "walrus seal fish shark clam coral whale crab lobster starfish eel dolphin squid jellyfish ray shrimp mantaRay angler snorkler scubaDiver urchin anemone morel axolotl".split(" ")
sea_objects = "boat ship submarine yacht dinghy raft kelp seaweed anchor".split(" ")
adjective_descriptors = "cute adorable lovable happy sandy bubbly friendly floating drifting".split(" ")
size_descriptors = "large big small giant massive tiny little yuge".split(" ")
creature_descriptors = "swimming sleeping eating hiding".split(" ")
sea_list = sea_creatures + sea_objects
descriptors = adjective_descriptors + size_descriptors
colors = "blue blueGreen darkCyan electricBlue greenBlue lightCyan lightSeaGreen seaGreen turquoise aqua aquamarine teal cyan gray darkBlue cerulean azure lapis navy".split(" ")
rand = lambda array: array[math.floor(random.random()*len(array))]
random_noun = lambda: rand(sea_list)
random_descriptor = lambda noun: rand(descriptors) if noun not in sea_creatures else rand(descriptors + creature_descriptors)
random_color = lambda: rand(colors)
format = lambda array: "".join(map(lambda word: word.title(), array))
def generate(max_size=30):
noun = random_noun()
descriptor = random_descriptor(noun)
color = random_color()
if len(descriptor + noun + color) <= max_size:
return format([descriptor, color, noun])
elif len(descriptor + noun) <= max_size:
return format([descriptor, color])
elif len(color + noun) <= max_size:
return format([color, noun])
else:
return format([noun])
| import math
import random
sea_creatures = "walrus seal fish shark clam coral whale crab lobster starfish eel dolphin squid jellyfish ray shrimp mantaRay angler snorkler scubaDiver urchin anemone morel axolotl".split(" ")
sea_objects = "boat ship submarine yacht dinghy raft kelp seaweed anchor".split(" ")
adjective_descriptors = "cute adorable lovable happy sandy bubbly friendly floating drifting".split(" ")
size_descriptors = "large big small giant massive tiny little yuge".split(" ")
creature_descriptors = "swimming sleeping eating hiding".split(" ")
sea_list = sea_creatures + sea_objects
descriptors = adjective_descriptors + size_descriptors
colors = "blue blueGreen darkCyan electricBlue greenBlue lightCyan lightSeaGreen seaGreen turquoise aqua aquamarine teal cyan gray darkBlue cerulean azure lapis navy".split(" ")
rand = lambda array: array[math.floor(random.random()*len(array))]
random_noun = lambda: rand(sea_list)
random_descriptor = lambda noun: rand(descriptors) if noun not in sea_creatures else rand(descriptors + creature_descriptors)
random_color = lambda: rand(colors)
format = lambda array: "".join(map(lambda word: word.title(), array))
def generate(max_size=30):
noun = random_noun()
descriptor = random_descriptor(noun)
color = random_color()
if len(descriptor + noun + color) <= max_size:
return format([descriptor, color, noun])
elif len(descriptor + noun) <= max_size:
return format([descriptor, color])
elif len(color + noun) <= max_size:
return format([color, noun])
else:
return format([noun]) | none | 1 | 3.833351 | 4 | |
codepipeline/update_product_files.py | kkvinjam/aws-custom-sc-pipeline | 0 | 6620243 | <filename>codepipeline/update_product_files.py
'''
Update the respective SC product file when a source file changes.
'''
import json
import base64
import argparse
from datetime import datetime
from github import Github
from github import InputGitTreeElement
import boto3
from botocore.exceptions import ClientError
def update_sc_product_version(file_name):
'''
Update the file name with latest version
Increment last digit and add new provisioning artifact.
'''
try:
print('Reading from:', file_name)
with open(file_name, 'r') as content:
data = json.load(content)
content.close()
prod_obj = data['Resources']['SCProduct']
artifacts = prod_obj['Properties']['ProvisioningArtifactParameters']
latest_artifact = artifacts[-1]
latest_version = latest_artifact['Name']
temp_list = latest_version.split('.')
temp_list[-1] = str(int(latest_version.split('.').pop())+1)
updated_version = ".".join(temp_list)
new_artifact=latest_artifact.copy()
new_artifact['Name'] = updated_version
artifacts.append(new_artifact)
print('Writing to:', file_name)
with open(file_name, 'w') as new_content:
json.dump(data, new_content)
new_content.close()
print('File updated')
except ClientError as exe:
raise exe
def get_secret(secret_name):
'''
Get the value of secret stored in secrets manager
'''
session = boto3.session.Session()
client = session.client('secretsmanager')
get_secret_value_response = None
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'DecryptionFailureException':
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
# You provided a parameter value that is not valid for
# the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
else:
# Decrypts secret using the associated KMS key.
# Depending on whether the secret is a string or binary,
# one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return(get_secret_value_response)
def checkin_to_git_repo(access_key, repo_name, file_path):
'''
Checkin the updated files to Git Repository
'''
git = Github(access_key)
repo = git.get_user().get_repo(repo_name)
file_list = [ file_path ]
file_name = file_path.split('/')[-1]
file_names = [ file_name ]
time_stamp = datetime.now().strftime("%m%d%y-%H%M%S")
commit_message = 'Commit for ' + file_name + ' at ' + time_stamp
main_ref = repo.get_git_ref('heads/main')
main_sha = main_ref.object.sha
base_tree = repo.get_git_tree(main_sha)
element_list = list()
for i, entry in enumerate(file_list):
with open(entry) as input_file:
data = input_file.read()
print('Filename:', file_names[i])
print('Filelist:', file_list[i])
element = InputGitTreeElement(file_list[i], '100644', 'blob', data)
element_list.append(element)
tree = repo.create_git_tree(element_list, base_tree)
parent = repo.get_git_commit(main_sha)
commit = repo.create_git_commit(commit_message, tree, [parent])
main_ref.edit(commit.sha)
print('Code check in complete')
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(prog='update_product_files.py', usage='%(prog)s -p -s', \
description='Add a new version to the product.')
#PARSER.add_argument("-a", "--artifact", type=str, required=True, help="Artifact file")
PARSER.add_argument("-p", "--port_file", type=str, required=True, help="Portfolio name")
PARSER.add_argument("-s", "--secret_name", type=str, default='github/kkvinjam', \
help="secrets manager secret name")
PARSER.add_argument("-r", "--repo", type=str, default='aws-custom-sc-pipeline', \
help="repository name in GitHub repo")
ARGS = PARSER.parse_args()
#ARTIFACT = ARGS.artifact
PORT_FILE = ARGS.port_file
SECRET = ARGS.secret_name
REPO = ARGS.repo
# FILE = 'templates/dev-portfolio/sc-dev-product-ec2-linux.json'
print('PORT File:', PORT_FILE)
update_sc_product_version(PORT_FILE)
pers_access_key = get_secret(SECRET)
if pers_access_key:
secret_key = json.loads(pers_access_key['SecretString'])['Token']
checkin_to_git_repo(secret_key, REPO, PORT_FILE)
| <filename>codepipeline/update_product_files.py
'''
Update the respective SC product file when a source file changes.
'''
import json
import base64
import argparse
from datetime import datetime
from github import Github
from github import InputGitTreeElement
import boto3
from botocore.exceptions import ClientError
def update_sc_product_version(file_name):
'''
Update the file name with latest version
Increment last digit and add new provisioning artifact.
'''
try:
print('Reading from:', file_name)
with open(file_name, 'r') as content:
data = json.load(content)
content.close()
prod_obj = data['Resources']['SCProduct']
artifacts = prod_obj['Properties']['ProvisioningArtifactParameters']
latest_artifact = artifacts[-1]
latest_version = latest_artifact['Name']
temp_list = latest_version.split('.')
temp_list[-1] = str(int(latest_version.split('.').pop())+1)
updated_version = ".".join(temp_list)
new_artifact=latest_artifact.copy()
new_artifact['Name'] = updated_version
artifacts.append(new_artifact)
print('Writing to:', file_name)
with open(file_name, 'w') as new_content:
json.dump(data, new_content)
new_content.close()
print('File updated')
except ClientError as exe:
raise exe
def get_secret(secret_name):
'''
Get the value of secret stored in secrets manager
'''
session = boto3.session.Session()
client = session.client('secretsmanager')
get_secret_value_response = None
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'DecryptionFailureException':
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
# You provided a parameter value that is not valid for
# the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
else:
# Decrypts secret using the associated KMS key.
# Depending on whether the secret is a string or binary,
# one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return(get_secret_value_response)
def checkin_to_git_repo(access_key, repo_name, file_path):
'''
Checkin the updated files to Git Repository
'''
git = Github(access_key)
repo = git.get_user().get_repo(repo_name)
file_list = [ file_path ]
file_name = file_path.split('/')[-1]
file_names = [ file_name ]
time_stamp = datetime.now().strftime("%m%d%y-%H%M%S")
commit_message = 'Commit for ' + file_name + ' at ' + time_stamp
main_ref = repo.get_git_ref('heads/main')
main_sha = main_ref.object.sha
base_tree = repo.get_git_tree(main_sha)
element_list = list()
for i, entry in enumerate(file_list):
with open(entry) as input_file:
data = input_file.read()
print('Filename:', file_names[i])
print('Filelist:', file_list[i])
element = InputGitTreeElement(file_list[i], '100644', 'blob', data)
element_list.append(element)
tree = repo.create_git_tree(element_list, base_tree)
parent = repo.get_git_commit(main_sha)
commit = repo.create_git_commit(commit_message, tree, [parent])
main_ref.edit(commit.sha)
print('Code check in complete')
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(prog='update_product_files.py', usage='%(prog)s -p -s', \
description='Add a new version to the product.')
#PARSER.add_argument("-a", "--artifact", type=str, required=True, help="Artifact file")
PARSER.add_argument("-p", "--port_file", type=str, required=True, help="Portfolio name")
PARSER.add_argument("-s", "--secret_name", type=str, default='github/kkvinjam', \
help="secrets manager secret name")
PARSER.add_argument("-r", "--repo", type=str, default='aws-custom-sc-pipeline', \
help="repository name in GitHub repo")
ARGS = PARSER.parse_args()
#ARTIFACT = ARGS.artifact
PORT_FILE = ARGS.port_file
SECRET = ARGS.secret_name
REPO = ARGS.repo
# FILE = 'templates/dev-portfolio/sc-dev-product-ec2-linux.json'
print('PORT File:', PORT_FILE)
update_sc_product_version(PORT_FILE)
pers_access_key = get_secret(SECRET)
if pers_access_key:
secret_key = json.loads(pers_access_key['SecretString'])['Token']
checkin_to_git_repo(secret_key, REPO, PORT_FILE)
| en | 0.773699 | Update the respective SC product file when a source file changes. Update the file name with latest version Increment last digit and add new provisioning artifact. Get the value of secret stored in secrets manager # Secrets Manager can't decrypt the protected secret text using the provided KMS key. # Deal with the exception here, and/or rethrow at your discretion. # An error occurred on the server side. # Deal with the exception here, and/or rethrow at your discretion. # You provided an invalid value for a parameter. # Deal with the exception here, and/or rethrow at your discretion. # You provided a parameter value that is not valid for # the current state of the resource. # Deal with the exception here, and/or rethrow at your discretion. # We can't find the resource that you asked for. # Deal with the exception here, and/or rethrow at your discretion. # Decrypts secret using the associated KMS key. # Depending on whether the secret is a string or binary, # one of these fields will be populated. Checkin the updated files to Git Repository #PARSER.add_argument("-a", "--artifact", type=str, required=True, help="Artifact file") #ARTIFACT = ARGS.artifact # FILE = 'templates/dev-portfolio/sc-dev-product-ec2-linux.json' | 2.538658 | 3 |
utilities/pyPlotting/puffdata/puffDataClass.py | mightylorenzo/Puffin | 15 | 6620244 | <reponame>mightylorenzo/Puffin
# Copyright (c) 2012-2018, University of Strathclyde
# Authors: <NAME>
# License: BSD-3-Clause
"""
This file is part of the example post-processing tools for Puffin, a
multi-frequency FEL code absent of the averaging / SVEA approximations. It
contains the object definition of the puffData class, used for holding the
scaling and mesh data about the simulation.
"""
import sys, glob, os
import numpy as np
from numpy import pi
from numpy import arange
import tables
class puffData:
c0 = 2.99792458e8
qe = 1.60217653e-19
eps0 = 8.854187817e-12
me = 9.1093826e-31
h = 6.626e-34
def __init__(self):
self.rho = 0.001
self.gamma0 = 800.
self.au = 1.
self.lw = 0.04
self.lr = 1.0e-7
self.eta = 1.0e-5
self.kappa = 1.0
self.lg = 1.
self.lc = 1.e-6
self.npkbar = 1.0e9
self.qscale = 1
self.iMesh = 1
self.q1d = 1
self.dxbar = 1.0
self.dybar = 1.0
self.dz2 = 1.0
self.nx = 1
self.ny = 1
self.nz2 = 1
self.dx = 1.
self.dy = 1.
self.dzbar = 1.0e-3
self.zbar = 0.
self.zbarloc = 0.
self.z = 0.
self.zloc = 0.
self.powScale = self.lg * self.lc * self.c0 * self.eps0 * \
np.square((self.gamma0 * self.me * np.square(self.c0) ) \
/ (self.qe * self.kappa * self.lg ))
self.intensScale = self.c0 * self.eps0 * \
np.square((self.gamma0 * self.me * np.square(self.c0) ) \
/ (self.qe * self.kappa * self.lg ))
self.fieldScale = ((self.gamma0 * self.me * np.square(self.c0) ) \
/ (self.qe * self.kappa * self.lg ))
def unscale(self):
self.dx = self.dxbar * np.sqrt(self.lg * self.lc) # ...etc | # Copyright (c) 2012-2018, University of Strathclyde
# Authors: <NAME>
# License: BSD-3-Clause
"""
This file is part of the example post-processing tools for Puffin, a
multi-frequency FEL code absent of the averaging / SVEA approximations. It
contains the object definition of the puffData class, used for holding the
scaling and mesh data about the simulation.
"""
import sys, glob, os
import numpy as np
from numpy import pi
from numpy import arange
import tables
class puffData:
c0 = 2.99792458e8
qe = 1.60217653e-19
eps0 = 8.854187817e-12
me = 9.1093826e-31
h = 6.626e-34
def __init__(self):
self.rho = 0.001
self.gamma0 = 800.
self.au = 1.
self.lw = 0.04
self.lr = 1.0e-7
self.eta = 1.0e-5
self.kappa = 1.0
self.lg = 1.
self.lc = 1.e-6
self.npkbar = 1.0e9
self.qscale = 1
self.iMesh = 1
self.q1d = 1
self.dxbar = 1.0
self.dybar = 1.0
self.dz2 = 1.0
self.nx = 1
self.ny = 1
self.nz2 = 1
self.dx = 1.
self.dy = 1.
self.dzbar = 1.0e-3
self.zbar = 0.
self.zbarloc = 0.
self.z = 0.
self.zloc = 0.
self.powScale = self.lg * self.lc * self.c0 * self.eps0 * \
np.square((self.gamma0 * self.me * np.square(self.c0) ) \
/ (self.qe * self.kappa * self.lg ))
self.intensScale = self.c0 * self.eps0 * \
np.square((self.gamma0 * self.me * np.square(self.c0) ) \
/ (self.qe * self.kappa * self.lg ))
self.fieldScale = ((self.gamma0 * self.me * np.square(self.c0) ) \
/ (self.qe * self.kappa * self.lg ))
def unscale(self):
self.dx = self.dxbar * np.sqrt(self.lg * self.lc) # ...etc | en | 0.821073 | # Copyright (c) 2012-2018, University of Strathclyde # Authors: <NAME> # License: BSD-3-Clause This file is part of the example post-processing tools for Puffin, a multi-frequency FEL code absent of the averaging / SVEA approximations. It contains the object definition of the puffData class, used for holding the scaling and mesh data about the simulation. # ...etc | 2.401931 | 2 |
timm/models/efficientnet_blocks.py | abyssss52/pytorch-image-models | 0 | 6620245 | """ EfficientNet, MobileNetV3, etc Blocks
Hacked together by / Copyright 2020 <NAME>
"""
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from .layers import create_conv2d, drop_path, get_act_layer
from .layers.activations import sigmoid
# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per
# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
# NOTE: momentum varies btw .99 and .9997 depending on source
# .99 in official TF TPU impl
# .9997 (/w .999 in search space) for paper
BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
BN_EPS_TF_DEFAULT = 1e-3
_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)
def get_bn_args_tf():
return _BN_ARGS_TF.copy()
def resolve_bn_args(kwargs):
bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {}
bn_momentum = kwargs.pop('bn_momentum', None)
if bn_momentum is not None:
bn_args['momentum'] = bn_momentum
bn_eps = kwargs.pop('bn_eps', None)
if bn_eps is not None:
bn_args['eps'] = bn_eps
return bn_args
_SE_ARGS_DEFAULT = dict(
gate_fn=sigmoid,
act_layer=None,
reduce_mid=False,
divisor=1)
def resolve_se_args(kwargs, in_chs, act_layer=None):
se_kwargs = kwargs.copy() if kwargs is not None else {}
# fill in args that aren't specified with the defaults
for k, v in _SE_ARGS_DEFAULT.items():
se_kwargs.setdefault(k, v)
# some models, like MobilNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch
if not se_kwargs.pop('reduce_mid'):
se_kwargs['reduced_base_chs'] = in_chs
# act_layer override, if it remains None, the containing block's act_layer will be used
if se_kwargs['act_layer'] is None:
assert act_layer is not None
se_kwargs['act_layer'] = act_layer
return se_kwargs
def resolve_act_layer(kwargs, default='relu'):
act_layer = kwargs.pop('act_layer', default)
if isinstance(act_layer, str):
act_layer = get_act_layer(act_layer)
return act_layer
def make_divisible(v, divisor=8, min_value=None):
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
"""Round number of filters based on depth multiplier."""
if not multiplier:
return channels
channels *= multiplier
return make_divisible(channels, divisor, channel_min)
class ChannelShuffle(nn.Module):
# FIXME haven't used yet
def __init__(self, groups):
super(ChannelShuffle, self).__init__()
self.groups = groups
def forward(self, x):
"""Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]"""
N, C, H, W = x.size()
g = self.groups
assert C % g == 0, "Incompatible group size {} for input channel {}".format(
g, C
)
return (
x.view(N, g, int(C / g), H, W)
.permute(0, 2, 1, 3, 4)
.contiguous()
.view(N, C, H, W)
)
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_):
super(SqueezeExcite, self).__init__()
reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
self.gate_fn = gate_fn
def forward(self, x):
x_se = x.mean((2, 3), keepdim=True)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
return x * self.gate_fn(x_se)
class ConvBnAct(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(ConvBnAct, self).__init__()
norm_kwargs = norm_kwargs or {}
self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)
self.bn1 = norm_layer(out_chs, **norm_kwargs)
self.act1 = act_layer(inplace=True)
def feature_info(self, location):
if location == 'expansion': # output of conv after act, same as block coutput
info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv.out_channels)
return info
def forward(self, x):
x = self.conv(x)
x = self.bn1(x)
x = self.act1(x)
return x
class DepthwiseSeparableConv(nn.Module):
""" DepthwiseSeparable block
Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
(factor of 1.0). This is an alternative to having a IR with an optional first pw conv.
"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None,
norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.):
super(DepthwiseSeparableConv, self).__init__()
norm_kwargs = norm_kwargs or {}
has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
self.has_pw_act = pw_act # activation after point-wise conv
self.drop_path_rate = drop_path_rate
self.conv_dw = create_conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)
self.bn1 = norm_layer(in_chs, **norm_kwargs)
self.act1 = act_layer(inplace=True)
# Squeeze-and-excitation
if has_se:
se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs)
else:
self.se = None
self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_layer(out_chs, **norm_kwargs)
self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PW
info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
return info
def forward(self, x):
residual = x
x = self.conv_dw(x)
x = self.bn1(x)
x = self.act1(x)
if self.se is not None:
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
x = self.act2(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += residual
return x
class FinalLayer(nn.Module):
def __init__(self, in_chs, num_features, pad_type, norm_kwargs, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6):
super(FinalLayer, self).__init__()
self._in_chs = in_chs
self.num_features = num_features
norm_kwargs = norm_kwargs or {}
self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type)
self.bn2 = norm_layer(self.num_features, **norm_kwargs)
self.act2 = act_layer(inplace=True)
def forward(self, x):
x = self.conv_head(x)
x = self.bn2(x)
x = self.act2(x)
return x
class InvertedResidual_easy(nn.Module):
def __init__(self, in_chs, num_features, pad_type, norm_kwargs, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6):
super(InvertedResidual_easy, self).__init__()
self._in_chs = in_chs
self.num_features = num_features
norm_kwargs = norm_kwargs or {}
self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type)
self.bn2 = norm_layer(self.num_features, **norm_kwargs)
self.act2 = act_layer(inplace=True)
def forward(self, x):
x = self.conv_head(x)
x = self.bn2(x)
x = self.act2(x)
return x
class InvertedResidual(nn.Module):
""" Inverted residual block w/ optional SE and CondConv routing"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
conv_kwargs=None, drop_path_rate=0.):
super(InvertedResidual, self).__init__()
norm_kwargs = norm_kwargs or {}
conv_kwargs = conv_kwargs or {}
mid_chs = make_divisible(in_chs * exp_ratio)
has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
# Point-wise expansion
self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
self.bn1 = norm_layer(mid_chs, **norm_kwargs)
self.act1 = act_layer(inplace=True)
# Depth-wise convolution
self.conv_dw = create_conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
padding=pad_type, depthwise=True, **conv_kwargs)
self.bn2 = norm_layer(mid_chs, **norm_kwargs)
self.act2 = act_layer(inplace=True)
# Squeeze-and-excitation
if has_se:
se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
else:
self.se = None
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
self.bn3 = norm_layer(out_chs, **norm_kwargs)
def feature_info(self, location):
if location == 'expansion': # after SE, input to PWL
info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
return info
def forward(self, x):
residual = x
# Point-wise expansion
x = self.conv_pw(x)
x = self.bn1(x)
x = self.act1(x)
# Depth-wise convolution
x = self.conv_dw(x)
x = self.bn2(x)
x = self.act2(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += residual
return x
class I2RGhostBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
conv_kwargs=None, drop_path_rate=0., keep_3x3=False, group_1x1=1):
super().__init__()
norm_kwargs = norm_kwargs or {}
conv_kwargs = conv_kwargs or {}
mid_chs = make_divisible(in_chs * exp_ratio)
self.has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
self.expand_ratio = exp_ratio
# Get static or dynamic convolution depending on image size
# Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
Conv2d = nn.Conv2d
# Expansion phase
inp = in_chs
oup = in_chs // self.expand_ratio # number of output channels
final_oup = out_chs
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = make_divisible(oup, 16)
oup = make_divisible(oup, 2)
k = dw_kernel_size
s = stride
# apply repeat scheme
self.split_ratio = 2
self.ghost_idx_inp = inp // self.split_ratio
self.ghost_idx_oup = int(final_oup - self.ghost_idx_inp)
self.inp, self.final_oup, self.s = inp, final_oup, s
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self.expand_ratio == 2:
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
bias=False, groups=in_chs)
self.bn1 = norm_layer(in_chs, **norm_kwargs)
self.act = act_layer(inplace=True)
# first linear layer
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = norm_layer(oup, **norm_kwargs)
# sec linear layer
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False)
self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
# act_layer(inplace=True),
# expand layer
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
bias=False, groups=final_oup, stride=s)
self.bn4 = norm_layer(final_oup, **norm_kwargs)
# )
elif inp != final_oup and s == 1:
# self.features=nn.Sequential(
self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = norm_layer(oup, **norm_kwargs)
# only two linear layers are needed
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False,
groups=group_1x1)
self.bn3 = norm_layer(final_oup, **norm_kwargs)
self.act = act_layer(inplace=True)
# )
elif in_chs != final_oup and s == 2:
# self.features = nn.Sequential(
self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = norm_layer(oup, **norm_kwargs)
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = norm_layer(final_oup, **norm_kwargs)
self.act = act_layer(inplace=True)
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
bias=False, groups=final_oup, stride=s)
self.bn4 = norm_layer(final_oup, **norm_kwargs)
# )
else:
self.identity = True
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
bias=False, groups=in_chs)
self.bn1 = norm_layer(in_chs, **norm_kwargs)
self.act = act_layer(inplace=True)
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
groups=group_1x1)
self.bn2 = norm_layer(oup, **norm_kwargs)
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
groups=group_1x1)
self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
# act_layer(inplace=True),
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
bias=False, groups=final_oup)
self.bn4 = norm_layer(final_oup, **norm_kwargs)
# )
if self.has_se:
se_mode = 'large'
if se_mode == 'large':
se_frac = 0.5
se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac, **se_kwargs)
else:
se_frac = 1
se_kwargs = resolve_se_args(se_kwargs, out_chs, act_layer)
self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac / exp_ratio, **se_kwargs)
    def forward(self, inputs, drop_path_rate=None):
        """Apply the sandglass block: dwise conv -> 1x1 reduce -> 1x1 expand
        -> dwise conv, with a "ghost" channel bypass in the dwise branches.

        :param inputs: input tensor (NCHW)
        :param drop_path_rate: drop path rate (float, between 0 and 1).
            NOTE(review): this argument is currently ignored -- the stored
            ``self.drop_path_rate`` is used instead; confirm no caller
            relies on passing it here.
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        # import pdb;pdb.set_trace()
        # x = self.features(inputs)
        if self.expand_ratio == 2:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv; channels past ghost_idx_inp bypass the 1x1 pair
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # generate more features by re-attaching the bypassed channels
            x = torch.cat([x, ghost_id], dim=1)
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        elif self.inp != self.final_oup and self.s == 1:
            # channel-changing, stride-1 config: only the two 1x1 convs exist
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
        elif self.inp != self.final_oup and self.s == 2:
            # channel-changing, stride-2 config: 1x1 pair plus strided dwise
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        else:
            # identity config (same channels, stride 1): full four-layer stack
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = torch.cat([x, ghost_id], dim=1)
            x = self.bn4(self.dwise_conv2(x))
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Skip connection and drop connect
        # input_filters, output_filters = self.in_chs, self.out_chs
        # if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
        #     # import pdb;pdb.set_trace()
        #     if drop_connect_rate:
        #         x = drop_connect(x, p=drop_connect_rate, training=self.training)
        #     x = x + inputs  # skip connection
        #     return x
        if self.identity:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x = x + inputs
            return x
        else:
            return x
class I2RBlock(nn.Module):
    """
    Inverted-inverted ("sandglass" / MobileNeXt-style) residual bottleneck.
    Layout: dwise conv -> 1x1 reduce -> 1x1 expand -> dwise conv; the exact
    sub-layers created depend on exp_ratio, stride and channel counts.
    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
        identity (bool): Whether forward() adds a residual shortcut.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 conv_kwargs=None, drop_path_rate=0., keep_3x3=False, group_1x1=2):
        # NOTE(review): dilation, pad_type, exp_kernel_size, pw_kernel_size,
        # conv_kwargs and keep_3x3 are accepted for interface compatibility
        # but not used below.
        super().__init__()
        norm_kwargs = norm_kwargs or {}
        conv_kwargs = conv_kwargs or {}
        # NOTE(review): mid_chs is computed but never used in this block.
        mid_chs = make_divisible(in_chs * exp_ratio)
        self.has_se = se_ratio is not None and se_ratio > 0.
        # NOTE(review): has_residual is unused; forward() keys off self.identity.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_path_rate = drop_path_rate
        self.expand_ratio = exp_ratio
        # Get static or dynamic convolution depending on image size
        # Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        Conv2d = nn.Conv2d
        # Expansion phase
        inp = in_chs
        oup = in_chs // self.expand_ratio # number of output channels
        final_oup = out_chs
        self.inp, self.final_oup = inp, final_oup
        self.identity = False
        # NOTE(review): this condition can never be true for positive oup
        # (oup < oup / 6.); the MobileNeXt reference compares the hidden dim
        # against oup / 6 -- confirm intent before "fixing" (changing it
        # would change architecture widths).
        if oup < oup / 6.:
            oup = math.ceil(oup / 6.)
            oup = make_divisible(oup, 16)
        oup = make_divisible(oup, 2)
        k = dw_kernel_size  # padding k // 2 below assumes odd k -- confirm
        s = stride
        # apply repeat scheme
        # ghost_idx_inp == inp here, so the "ghost" slice taken in forward()
        # is empty -- the whole tensor goes through the 1x1 pair.
        self.ghost_idx_inp = inp
        self.ghost_idx_oup = final_oup
        self.inp, self.final_oup, self.s = inp, final_oup, s
        # if self._block_args.expand_ratio != 1:
        # self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
        # self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        if self.expand_ratio == 2:
            # self.features = nn.Sequential(
            self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
                                      bias=False, groups=in_chs)
            self.bn1 = norm_layer(in_chs, **norm_kwargs)
            self.act = act_layer(inplace=True)
            # first linear layer
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
                                        groups=group_1x1)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            # sec linear layer
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
            # act_layer(inplace=True),
            # expand layer
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup, stride=s)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
            # )
        elif inp != final_oup and s == 1:
            # self.features=nn.Sequential(
            self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            # only two linear layers are needed
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self.bn3 = norm_layer(final_oup, **norm_kwargs)
            self.act = act_layer(inplace=True)
            # )
        elif in_chs != final_oup and s == 2:
            # self.features = nn.Sequential(
            self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self.bn3 = norm_layer(final_oup, **norm_kwargs)
            self.act = act_layer(inplace=True)
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup, stride=s)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
            # )
        else:
            self.identity = True
            # self.features = nn.Sequential(
            self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
                                      bias=False, groups=in_chs)
            self.bn1 = norm_layer(in_chs, **norm_kwargs)
            self.act = act_layer(inplace=True)
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
                                        groups=group_1x1)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
            # act_layer(inplace=True),
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
            # )
        if self.has_se:
            # SE operates on the block's output channels; se_mode rescales
            # the effective se_ratio.
            se_mode = 'small'
            if se_mode == 'large':
                se_frac = 0.5
                se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
                self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac, **se_kwargs)
            else:
                se_frac = 1
                se_kwargs = resolve_se_args(se_kwargs, out_chs, act_layer)
                self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac / exp_ratio, **se_kwargs)
    def forward(self, inputs, drop_path_rate=None):
        """
        :param inputs: input tensor
        :param drop_path_rate: drop path rate (float, between 0 and 1).
            NOTE(review): currently ignored -- self.drop_path_rate is used.
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        # import pdb;pdb.set_trace()
        # x = self.features(inputs)
        if self.expand_ratio == 2:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv (ghost slice is empty for this class, see __init__)
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # generate more features
            x = torch.cat([x, ghost_id], dim=1)
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        elif self.inp != self.final_oup and self.s == 1:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
        elif self.inp != self.final_oup and self.s == 2:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        else:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = torch.cat([x, ghost_id], dim=1)
            x = self.bn4(self.dwise_conv2(x))
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Skip connection and drop connect
        # input_filters, output_filters = self.in_chs, self.out_chs
        # if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
        #     # import pdb;pdb.set_trace()
        #     if drop_connect_rate:
        #         x = drop_connect(x, p=drop_connect_rate, training=self.training)
        #     x = x + inputs  # skip connection
        #     return x
        if self.identity:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x = x + inputs
            return x
        else:
            return x
class CondConvResidual(InvertedResidual):
    """ Inverted residual block w/ CondConv routing.

    Same structure as InvertedResidual, but each conv receives per-example
    expert mixing weights produced by a routing function over the globally
    pooled input.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 num_experts=0, drop_path_rate=0.):
        self.num_experts = num_experts
        # Forward the expert count to every conv the parent builds
        # (create_conv2d presumably builds CondConv layers from it -- see
        # .layers; not visible here).
        conv_kwargs = dict(num_experts=self.num_experts)
        super(CondConvResidual, self).__init__(
            in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type,
            act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_kwargs=se_kwargs,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, conv_kwargs=conv_kwargs,
            drop_path_rate=drop_path_rate)
        # One routing logit per expert, computed from pooled input features.
        self.routing_fn = nn.Linear(in_chs, self.num_experts)
    def forward(self, x):
        residual = x
        # CondConv routing: sigmoid-gated per-example expert weights
        pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)
        routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
        # Point-wise expansion
        x = self.conv_pw(x, routing_weights)
        x = self.bn1(x)
        x = self.act1(x)
        # Depth-wise convolution
        x = self.conv_dw(x, routing_weights)
        x = self.bn2(x)
        x = self.act2(x)
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # Point-wise linear projection
        x = self.conv_pwl(x, routing_weights)
        x = self.bn3(x)
        if self.has_residual:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
class EdgeResidual(nn.Module):
    """ Residual block with expansion convolution followed by pointwise-linear w/ stride.

    No depthwise conv: a (possibly strided) full expansion conv, optional SE,
    then a 1x1 linear projection, with a residual shortcut when shapes allow.
    """
    def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 drop_path_rate=0.):
        super(EdgeResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        # fake_in_chs, when positive, overrides the channel count used to size
        # the expansion width (presumably for configs where the nominal input
        # differs from the actual -- confirm against model defs).
        if fake_in_chs > 0:
            mid_chs = make_divisible(fake_in_chs * exp_ratio)
        else:
            mid_chs = make_divisible(in_chs * exp_ratio)
        has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_path_rate = drop_path_rate
        # Expansion convolution
        self.conv_exp = create_conv2d(
            in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        # Squeeze-and-excitation
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None
        # Point-wise linear projection (no activation -- linear bottleneck)
        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
    def feature_info(self, location):
        # Describe where feature hooks should attach and their channel counts.
        if location == 'expansion':  # after SE, before PWL
            info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
        else:  # location == 'bottleneck', block output
            info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
        return info
    def forward(self, x):
        residual = x
        # Expansion convolution
        x = self.conv_exp(x)
        x = self.bn1(x)
        x = self.act1(x)
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # Point-wise linear projection
        x = self.conv_pwl(x)
        x = self.bn2(x)
        if self.has_residual:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
""" EfficientNet, MobileNetV3, etc Blocks
Hacked together by / Copyright 2020 <NAME>
"""
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from .layers import create_conv2d, drop_path, get_act_layer
from .layers.activations import sigmoid
# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per
# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
# NOTE: momentum varies btw .99 and .9997 depending on source
# .99 in official TF TPU impl
# .9997 (/w .999 in search space) for paper
# PyTorch momentum equivalent of the TF decay of 0.99 (momentum = 1 - decay).
BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
BN_EPS_TF_DEFAULT = 1e-3
# Packaged TF-style BatchNorm defaults; copied out via get_bn_args_tf().
_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)
def get_bn_args_tf():
    """Return a fresh copy of the TF-style BatchNorm default kwargs."""
    return dict(_BN_ARGS_TF)
def resolve_bn_args(kwargs):
    """Pop BN-related entries from *kwargs* and build a BatchNorm arg dict.

    Removes 'bn_tf', 'bn_momentum' and 'bn_eps' from *kwargs* (mutating it)
    and returns a dict suitable for passing to a norm layer constructor.
    """
    if kwargs.pop('bn_tf', False):
        resolved = get_bn_args_tf()
    else:
        resolved = {}
    # Explicit overrides win over (or extend) the TF defaults.
    for src_key, dst_key in (('bn_momentum', 'momentum'), ('bn_eps', 'eps')):
        value = kwargs.pop(src_key, None)
        if value is not None:
            resolved[dst_key] = value
    return resolved
# Default construction kwargs for SqueezeExcite; merged with per-model
# overrides in resolve_se_args().
_SE_ARGS_DEFAULT = dict(
    gate_fn=sigmoid,
    act_layer=None,
    reduce_mid=False,
    divisor=1)
def resolve_se_args(kwargs, in_chs, act_layer=None):
    """Merge user SE kwargs with module defaults and resolve derived fields.

    Returns a new dict; *kwargs* itself is not modified.
    """
    resolved = dict(kwargs) if kwargs is not None else {}
    # fill in args that aren't specified with the defaults
    for key, default in _SE_ARGS_DEFAULT.items():
        resolved.setdefault(key, default)
    # some models, like MobilNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch
    if not resolved.pop('reduce_mid'):
        resolved['reduced_base_chs'] = in_chs
    # act_layer override, if it remains None, the containing block's act_layer will be used
    if resolved['act_layer'] is None:
        assert act_layer is not None
        resolved['act_layer'] = act_layer
    return resolved
def resolve_act_layer(kwargs, default='relu'):
    """Pop 'act_layer' from *kwargs*, resolving string names via get_act_layer."""
    layer = kwargs.pop('act_layer', default)
    return get_act_layer(layer) if isinstance(layer, str) else layer
def make_divisible(v, divisor=8, min_value=None):
    """Round *v* to the nearest multiple of *divisor*, at least *min_value*.

    Mirrors the TF MobileNet channel-rounding rule.
    """
    floor = min_value or divisor
    rounded = max(floor, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if rounded < 0.9 * v:
        rounded += divisor
    return rounded
def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
    """Round number of filters based on depth multiplier."""
    if not multiplier:
        # Zero/None multiplier disables scaling entirely.
        return channels
    scaled = channels * multiplier
    return make_divisible(scaled, divisor, channel_min)
class ChannelShuffle(nn.Module):
    # FIXME haven't used yet
    """Interleave channels across *groups* (ShuffleNet-style channel shuffle)."""

    def __init__(self, groups):
        super(ChannelShuffle, self).__init__()
        self.groups = groups

    def forward(self, x):
        """Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]"""
        n, c, h, w = x.size()
        g = self.groups
        assert c % g == 0, "Incompatible group size {} for input channel {}".format(
            g, c
        )
        # Split channels into g groups, swap the group axis with the
        # per-group axis, then flatten back to the original layout.
        grouped = x.view(n, g, c // g, h, w)
        shuffled = grouped.transpose(1, 2).contiguous()
        return shuffled.view(n, c, h, w)
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation channel attention:
    global average pool -> 1x1 reduce -> act -> 1x1 expand -> gate.
    """
    def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
                 act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_):
        super(SqueezeExcite, self).__init__()
        # Reduction width derives from reduced_base_chs when given (e.g. the
        # containing block's base channels) rather than in_chs.
        reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
        self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
        self.act1 = act_layer(inplace=True)
        self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
        self.gate_fn = gate_fn
    def forward(self, x):
        # Squeeze: global average pool to (N, C, 1, 1)
        x_se = x.mean((2, 3), keepdim=True)
        x_se = self.conv_reduce(x_se)
        x_se = self.act1(x_se)
        x_se = self.conv_expand(x_se)
        # Excite: per-channel gate broadcast over spatial dims
        return x * self.gate_fn(x_se)
class ConvBnAct(nn.Module):
    """Plain conv -> norm -> activation block."""
    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None):
        super(ConvBnAct, self).__init__()
        norm_kwargs = norm_kwargs or {}
        self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)
        self.bn1 = norm_layer(out_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
    def feature_info(self, location):
        # Describe feature-hook attachment point and channel count.
        if location == 'expansion':  # output of conv after act, same as block coutput
            info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)
        else:  # location == 'bottleneck', block output
            info = dict(module='', hook_type='', num_chs=self.conv.out_channels)
        return info
    def forward(self, x):
        x = self.conv(x)
        x = self.bn1(x)
        x = self.act1(x)
        return x
class DepthwiseSeparableConv(nn.Module):
    """ DepthwiseSeparable block
    Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
    (factor of 1.0). This is an alternative to having a IR with an optional first pw conv.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.):
        super(DepthwiseSeparableConv, self).__init__()
        norm_kwargs = norm_kwargs or {}
        has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.has_pw_act = pw_act  # activation after point-wise conv
        self.drop_path_rate = drop_path_rate
        # Depthwise conv (one filter per input channel)
        self.conv_dw = create_conv2d(
            in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)
        self.bn1 = norm_layer(in_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        # Squeeze-and-excitation
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None
        # Pointwise 1x1 projection; linear unless pw_act was requested
        self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()
    def feature_info(self, location):
        # Describe feature-hook attachment point and channel count.
        if location == 'expansion':  # after SE, input to PW
            info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
        else:  # location == 'bottleneck', block output
            info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
        return info
    def forward(self, x):
        residual = x
        # depthwise -> norm -> act
        x = self.conv_dw(x)
        x = self.bn1(x)
        x = self.act1(x)
        if self.se is not None:
            x = self.se(x)
        # pointwise -> norm -> (optional act)
        x = self.conv_pw(x)
        x = self.bn2(x)
        x = self.act2(x)
        if self.has_residual:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
class FinalLayer(nn.Module):
    """Network head: 1x1 conv -> norm -> act projecting to num_features.

    NOTE(review): byte-identical to InvertedResidual_easy below -- consider
    consolidating.
    """
    def __init__(self, in_chs, num_features, pad_type, norm_kwargs, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6):
        super(FinalLayer, self).__init__()
        self._in_chs = in_chs
        self.num_features = num_features
        norm_kwargs = norm_kwargs or {}
        self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type)
        self.bn2 = norm_layer(self.num_features, **norm_kwargs)
        self.act2 = act_layer(inplace=True)
    def forward(self, x):
        x = self.conv_head(x)
        x = self.bn2(x)
        x = self.act2(x)
        return x
class InvertedResidual_easy(nn.Module):
    """1x1 conv -> norm -> act head.

    NOTE(review): despite the name, this is not an inverted residual -- it is
    byte-identical to FinalLayer above; consider consolidating/renaming.
    """
    def __init__(self, in_chs, num_features, pad_type, norm_kwargs, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6):
        super(InvertedResidual_easy, self).__init__()
        self._in_chs = in_chs
        self.num_features = num_features
        norm_kwargs = norm_kwargs or {}
        self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type)
        self.bn2 = norm_layer(self.num_features, **norm_kwargs)
        self.act2 = act_layer(inplace=True)
    def forward(self, x):
        x = self.conv_head(x)
        x = self.bn2(x)
        x = self.act2(x)
        return x
class InvertedResidual(nn.Module):
    """ Inverted residual block w/ optional SE and CondConv routing.

    MobileNetV2-style: 1x1 expand -> depthwise -> (SE) -> 1x1 linear
    projection, with a residual shortcut when shapes allow.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 conv_kwargs=None, drop_path_rate=0.):
        super(InvertedResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        conv_kwargs = conv_kwargs or {}
        mid_chs = make_divisible(in_chs * exp_ratio)
        has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_path_rate = drop_path_rate
        # Point-wise expansion
        self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        # Depth-wise convolution
        self.conv_dw = create_conv2d(
            mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
            padding=pad_type, depthwise=True, **conv_kwargs)
        self.bn2 = norm_layer(mid_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True)
        # Squeeze-and-excitation
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None
        # Point-wise linear projection (no activation -- linear bottleneck)
        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn3 = norm_layer(out_chs, **norm_kwargs)
    def feature_info(self, location):
        # Describe feature-hook attachment point and channel count.
        if location == 'expansion':  # after SE, input to PWL
            info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
        else:  # location == 'bottleneck', block output
            info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
        return info
    def forward(self, x):
        residual = x
        # Point-wise expansion
        x = self.conv_pw(x)
        x = self.bn1(x)
        x = self.act1(x)
        # Depth-wise convolution
        x = self.conv_dw(x)
        x = self.bn2(x)
        x = self.act2(x)
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # Point-wise linear projection
        x = self.conv_pwl(x)
        x = self.bn3(x)
        if self.has_residual:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
class I2RGhostBlock(nn.Module):
    """
    Inverted-inverted ("sandglass") residual block with ghost features.
    A slice of the depthwise-conv output (inp // split_ratio channels)
    bypasses the 1x1 reduce/expand pair and is concatenated back afterwards
    (in the expand_ratio == 2 and identity configurations).
    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
        identity (bool): Whether forward() adds a residual shortcut.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 conv_kwargs=None, drop_path_rate=0., keep_3x3=False, group_1x1=1):
        # NOTE(review): dilation, pad_type, exp_kernel_size, pw_kernel_size,
        # conv_kwargs and keep_3x3 are accepted for interface compatibility
        # but not used below.
        super().__init__()
        norm_kwargs = norm_kwargs or {}
        conv_kwargs = conv_kwargs or {}
        # NOTE(review): mid_chs is computed but never used in this block.
        mid_chs = make_divisible(in_chs * exp_ratio)
        self.has_se = se_ratio is not None and se_ratio > 0.
        # NOTE(review): has_residual is unused; forward() keys off self.identity.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_path_rate = drop_path_rate
        self.expand_ratio = exp_ratio
        # Get static or dynamic convolution depending on image size
        # Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        Conv2d = nn.Conv2d
        # Expansion phase
        inp = in_chs
        oup = in_chs // self.expand_ratio # number of output channels
        final_oup = out_chs
        self.inp, self.final_oup = inp, final_oup
        self.identity = False
        # NOTE(review): this condition can never be true for positive oup
        # (oup < oup / 6.); the MobileNeXt reference compares the hidden dim
        # against oup / 6 -- confirm intent before changing (it would alter
        # architecture widths).
        if oup < oup / 6.:
            oup = math.ceil(oup / 6.)
            oup = make_divisible(oup, 16)
        oup = make_divisible(oup, 2)
        k = dw_kernel_size  # padding k // 2 below assumes odd k -- confirm
        s = stride
        # apply repeat scheme
        # Half of the dwise output channels become "ghost" features that skip
        # the 1x1 pair; the 1x1 expand only has to produce the remainder.
        self.split_ratio = 2
        self.ghost_idx_inp = inp // self.split_ratio
        self.ghost_idx_oup = int(final_oup - self.ghost_idx_inp)
        self.inp, self.final_oup, self.s = inp, final_oup, s
        # if self._block_args.expand_ratio != 1:
        # self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
        # self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        if self.expand_ratio == 2:
            # self.features = nn.Sequential(
            self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
                                      bias=False, groups=in_chs)
            self.bn1 = norm_layer(in_chs, **norm_kwargs)
            self.act = act_layer(inplace=True)
            # first linear layer
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            # sec linear layer
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False)
            self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
            # act_layer(inplace=True),
            # expand layer
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup, stride=s)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
            # )
        elif inp != final_oup and s == 1:
            # self.features=nn.Sequential(
            self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            # only two linear layers are needed
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = norm_layer(final_oup, **norm_kwargs)
            self.act = act_layer(inplace=True)
            # )
        elif in_chs != final_oup and s == 2:
            # self.features = nn.Sequential(
            self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self.bn3 = norm_layer(final_oup, **norm_kwargs)
            self.act = act_layer(inplace=True)
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup, stride=s)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
            # )
        else:
            self.identity = True
            # self.features = nn.Sequential(
            self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
                                      bias=False, groups=in_chs)
            self.bn1 = norm_layer(in_chs, **norm_kwargs)
            self.act = act_layer(inplace=True)
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
                                        groups=group_1x1)
            self.bn2 = norm_layer(oup, **norm_kwargs)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
            # act_layer(inplace=True),
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
                                      bias=False, groups=final_oup)
            self.bn4 = norm_layer(final_oup, **norm_kwargs)
            # )
        if self.has_se:
            # SE operates on the block's output channels; se_mode rescales
            # the effective se_ratio.
            se_mode = 'large'
            if se_mode == 'large':
                se_frac = 0.5
                se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
                self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac, **se_kwargs)
            else:
                se_frac = 1
                se_kwargs = resolve_se_args(se_kwargs, out_chs, act_layer)
                self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac / exp_ratio, **se_kwargs)
    def forward(self, inputs, drop_path_rate=None):
        """
        :param inputs: input tensor
        :param drop_path_rate: drop path rate (float, between 0 and 1).
            NOTE(review): currently ignored -- self.drop_path_rate is used.
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        # import pdb;pdb.set_trace()
        # x = self.features(inputs)
        if self.expand_ratio == 2:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv; channels past ghost_idx_inp bypass the 1x1 pair
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # generate more features by re-attaching the bypassed channels
            x = torch.cat([x, ghost_id], dim=1)
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        elif self.inp != self.final_oup and self.s == 1:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
        elif self.inp != self.final_oup and self.s == 2:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        else:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = torch.cat([x, ghost_id], dim=1)
            x = self.bn4(self.dwise_conv2(x))
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Skip connection and drop connect
        # input_filters, output_filters = self.in_chs, self.out_chs
        # if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
        #     # import pdb;pdb.set_trace()
        #     if drop_connect_rate:
        #         x = drop_connect(x, p=drop_connect_rate, training=self.training)
        #     x = x + inputs  # skip connection
        #     return x
        if self.identity:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x = x + inputs
            return x
        else:
            return x
class I2RBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
conv_kwargs=None, drop_path_rate=0., keep_3x3=False, group_1x1=2):
super().__init__()
norm_kwargs = norm_kwargs or {}
conv_kwargs = conv_kwargs or {}
mid_chs = make_divisible(in_chs * exp_ratio)
self.has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
self.expand_ratio = exp_ratio
# Get static or dynamic convolution depending on image size
# Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
Conv2d = nn.Conv2d
# Expansion phase
inp = in_chs
oup = in_chs // self.expand_ratio # number of output channels
final_oup = out_chs
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = make_divisible(oup, 16)
oup = make_divisible(oup, 2)
k = dw_kernel_size
s = stride
# apply repeat scheme
self.ghost_idx_inp = inp
self.ghost_idx_oup = final_oup
self.inp, self.final_oup, self.s = inp, final_oup, s
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self.expand_ratio == 2:
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
bias=False, groups=in_chs)
self.bn1 = norm_layer(in_chs, **norm_kwargs)
self.act = act_layer(inplace=True)
# first linear layer
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
groups=group_1x1)
self.bn2 = norm_layer(oup, **norm_kwargs)
# sec linear layer
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
groups=group_1x1)
self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
# act_layer(inplace=True),
# expand layer
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
bias=False, groups=final_oup, stride=s)
self.bn4 = norm_layer(final_oup, **norm_kwargs)
# )
elif inp != final_oup and s == 1:
# self.features=nn.Sequential(
self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = norm_layer(oup, **norm_kwargs)
# only two linear layers are needed
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = norm_layer(final_oup, **norm_kwargs)
self.act = act_layer(inplace=True)
# )
elif in_chs != final_oup and s == 2:
# self.features = nn.Sequential(
self.project_layer = Conv2d(in_channels=in_chs, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = norm_layer(oup, **norm_kwargs)
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = norm_layer(final_oup, **norm_kwargs)
self.act = act_layer(inplace=True)
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
bias=False, groups=final_oup, stride=s)
self.bn4 = norm_layer(final_oup, **norm_kwargs)
# )
else:
self.identity = True
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=in_chs, out_channels=in_chs, kernel_size=k, padding=k // 2,
bias=False, groups=in_chs)
self.bn1 = norm_layer(in_chs, **norm_kwargs)
self.act = act_layer(inplace=True)
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
groups=group_1x1)
self.bn2 = norm_layer(oup, **norm_kwargs)
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
groups=group_1x1)
self.bn3 = norm_layer(self.ghost_idx_oup, **norm_kwargs)
# act_layer(inplace=True),
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, padding=k // 2,
bias=False, groups=final_oup)
self.bn4 = norm_layer(final_oup, **norm_kwargs)
# )
if self.has_se:
se_mode = 'small'
if se_mode == 'large':
se_frac = 0.5
se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac, **se_kwargs)
else:
se_frac = 1
se_kwargs = resolve_se_args(se_kwargs, out_chs, act_layer)
self.se = SqueezeExcite(out_chs, se_ratio=se_ratio * se_frac / exp_ratio, **se_kwargs)
    def forward(self, inputs, drop_path_rate=None):
        """
        Run the block using the layer sequence selected in ``__init__``.

        The branch taken mirrors the constructor's configuration: a "ghost"
        path that lets part of the channels bypass the 1x1 convs, or a
        reduced path when only the channel count and/or stride changes.

        :param inputs: input tensor (presumably NCHW -- TODO confirm)
        :param drop_path_rate: drop path rate (float, between 0 and 1)
            NOTE(review): this parameter is unused here; the stored
            ``self.drop_path_rate`` is what actually gates drop-path below.
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        # import pdb;pdb.set_trace()
        # x = self.features(inputs)
        if self.expand_ratio == 2:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv; channels from ghost_idx_inp onward bypass it
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # generate more features: re-attach the bypassed ("ghost") channels
            x = torch.cat([x, ghost_id], dim=1)
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        elif self.inp != self.final_oup and self.s == 1:
            # channel change at stride 1: only the two 1x1 convs are applied
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
        elif self.inp != self.final_oup and self.s == 2:
            # channel change with downsampling: 1x1 convs plus strided dwise conv
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        else:
            # same channels, stride 1: full ghost path (residual added below)
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = torch.cat([x, ghost_id], dim=1)
            x = self.bn4(self.dwise_conv2(x))
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Skip connection and drop connect
        # input_filters, output_filters = self.in_chs, self.out_chs
        # if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
        #     # import pdb;pdb.set_trace()
        #     if drop_connect_rate:
        #         x = drop_connect(x, p=drop_connect_rate, training=self.training)
        #     x = x + inputs  # skip connection
        #     return x
        if self.identity:
            # residual path; drop-path regularization only during training
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x = x + inputs
            return x
        else:
            return x
class CondConvResidual(InvertedResidual):
    """ Inverted residual block w/ CondConv routing"""

    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 num_experts=0, drop_path_rate=0.):
        # Record the expert count before the parent constructor runs, since the
        # CondConv layers it builds take num_experts through conv_kwargs.
        self.num_experts = num_experts
        super(CondConvResidual, self).__init__(
            in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type,
            act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_kwargs=se_kwargs,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs,
            conv_kwargs=dict(num_experts=self.num_experts),
            drop_path_rate=drop_path_rate)
        # Routing head: maps globally pooled input features to per-expert weights.
        self.routing_fn = nn.Linear(in_chs, self.num_experts)

    def forward(self, x):
        shortcut = x

        # CondConv routing: global average pool, then sigmoid-gated expert mix.
        pooled = F.adaptive_avg_pool2d(x, 1).flatten(1)
        routing = torch.sigmoid(self.routing_fn(pooled))

        # Point-wise expansion
        x = self.act1(self.bn1(self.conv_pw(x, routing)))
        # Depth-wise convolution
        x = self.act2(self.bn2(self.conv_dw(x, routing)))
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # Point-wise linear projection
        x = self.bn3(self.conv_pwl(x, routing))

        if self.has_residual:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += shortcut
        return x
class EdgeResidual(nn.Module):
    """ Residual block with expansion convolution followed by pointwise-linear w/ stride"""

    def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 drop_path_rate=0.):
        super(EdgeResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        # Mid width may be derived from a "fake" input width used by some search spaces.
        base_chs = fake_in_chs if fake_in_chs > 0 else in_chs
        mid_chs = make_divisible(base_chs * exp_ratio)
        has_se = se_ratio is not None and se_ratio > 0.
        # Identity shortcut only when shape is preserved and not explicitly disabled.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_path_rate = drop_path_rate

        # Expansion convolution
        self.conv_exp = create_conv2d(
            in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # Squeeze-and-excitation (optional)
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None

        # Point-wise linear projection
        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)

    def feature_info(self, location):
        """Describe the hook point for feature extraction at `location`."""
        if location == 'expansion':  # after SE, before PWL
            return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
        # location == 'bottleneck', block output
        return dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)

    def forward(self, x):
        shortcut = x

        # Expansion convolution
        x = self.act1(self.bn1(self.conv_exp(x)))
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # Point-wise linear projection
        x = self.bn2(self.conv_pwl(x))

        if self.has_residual:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += shortcut
        return x
| en | 0.705141 | EfficientNet, MobileNetV3, etc Blocks Hacked together by / Copyright 2020 <NAME> # Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per # papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay) # NOTE: momentum varies btw .99 and .9997 depending on source # .99 in official TF TPU impl # .9997 (/w .999 in search space) for paper # fill in args that aren't specified with the defaults # some models, like MobilNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch # act_layer override, if it remains None, the containing block's act_layer will be used # Make sure that round down does not go down by more than 10%. Round number of filters based on depth multiplier. # FIXME haven't used yet Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W] # output of conv after act, same as block coutput # location == 'bottleneck', block output DepthwiseSeparable block Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. # activation after point-wise conv # Squeeze-and-excitation # after SE, input to PW # location == 'bottleneck', block output Inverted residual block w/ optional SE and CondConv routing # Point-wise expansion # Depth-wise convolution # Squeeze-and-excitation # Point-wise linear projection # after SE, input to PWL # location == 'bottleneck', block output # Point-wise expansion # Depth-wise convolution # Squeeze-and-excitation # Point-wise linear projection Mobile Inverted Residual Bottleneck Block Args: block_args (namedtuple): BlockArgs, see above global_params (namedtuple): GlobalParam, see above Attributes: has_se (bool): Whether the block contains a Squeeze and Excitation layer. 
# Get static or dynamic convolution depending on image size # Conv2d = get_same_padding_conv2d(image_size=global_params.image_size) # Expansion phase # number of output channels # apply repeat scheme # if self._block_args.expand_ratio != 1: # self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False) # self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) # self.features = nn.Sequential( # first linear layer # sec linear layer # act_layer(inplace=True), # expand layer # ) # self.features=nn.Sequential( # only two linear layers are needed # ) # self.features = nn.Sequential( # ) # self.features = nn.Sequential( # act_layer(inplace=True), # ) :param inputs: input tensor :param drop_path_rate: drop path rate (float, between 0 and 1) :return: output of block # Expansion and Depthwise Convolution # import pdb;pdb.set_trace() # x = self.features(inputs) # first dwise conv # first 1x1 conv # second 1x1 conv # generate more features # second dwise conv # first 1x1 conv # second 1x1 conv # first 1x1 conv # second 1x1 conv # second dwise conv # first dwise conv # first 1x1 conv # second 1x1 conv # second dwise conv # Squeeze-and-excitation # Skip connection and drop connect # input_filters, output_filters = self.in_chs, self.out_chs # if self.identity and self._block_args.stride == 1 and input_filters == output_filters: # # import pdb;pdb.set_trace() # if drop_connect_rate: # x = drop_connect(x, p=drop_connect_rate, training=self.training) # x = x + inputs # skip connection # return x Mobile Inverted Residual Bottleneck Block Args: block_args (namedtuple): BlockArgs, see above global_params (namedtuple): GlobalParam, see above Attributes: has_se (bool): Whether the block contains a Squeeze and Excitation layer. 
# Get static or dynamic convolution depending on image size # Conv2d = get_same_padding_conv2d(image_size=global_params.image_size) # Expansion phase # number of output channels # apply repeat scheme # if self._block_args.expand_ratio != 1: # self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False) # self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) # self.features = nn.Sequential( # first linear layer # sec linear layer # act_layer(inplace=True), # expand layer # ) # self.features=nn.Sequential( # only two linear layers are needed # ) # self.features = nn.Sequential( # ) # self.features = nn.Sequential( # act_layer(inplace=True), # ) :param inputs: input tensor :param drop_path_rate: drop path rate (float, between 0 and 1) :return: output of block # Expansion and Depthwise Convolution # import pdb;pdb.set_trace() # x = self.features(inputs) # first dwise conv # first 1x1 conv # second 1x1 conv # generate more features # second dwise conv # first 1x1 conv # second 1x1 conv # first 1x1 conv # second 1x1 conv # second dwise conv # first dwise conv # first 1x1 conv # second 1x1 conv # second dwise conv # Squeeze-and-excitation # Skip connection and drop connect # input_filters, output_filters = self.in_chs, self.out_chs # if self.identity and self._block_args.stride == 1 and input_filters == output_filters: # # import pdb;pdb.set_trace() # if drop_connect_rate: # x = drop_connect(x, p=drop_connect_rate, training=self.training) # x = x + inputs # skip connection # return x Inverted residual block w/ CondConv routing # CondConv routing # Point-wise expansion # Depth-wise convolution # Squeeze-and-excitation # Point-wise linear projection Residual block with expansion convolution followed by pointwise-linear w/ stride # Expansion convolution # Squeeze-and-excitation # Point-wise linear projection # after SE, before PWL # location == 'bottleneck', block output # Expansion convolution # 
Squeeze-and-excitation # Point-wise linear projection | 2.27117 | 2 |
tuna/documentation/developer/tabu_experiments_code.py | russellnakamura/thetuna | 0 | 6620246 | <gh_stars>0
# python standard library
import timeit
import random
# third-party
import matplotlib.pyplot as plt
collection = None
value = None
repetitions = 10**4
def measure_list(size):
    """Time `repetitions` membership tests for the last element of a `size`-item list."""
    global collection
    global value
    collection = range(size)
    value = size - 1
    # Worst case for a list: the probed value sits at the very end.
    timer = timeit.Timer('{0} in collection'.format(value),
                         'from __main__ import collection, value')
    return timer.timeit(repetitions)
def measure_set(size):
    """Time `repetitions` membership tests for the last element of a `size`-item set."""
    global collection
    global value
    collection = set(range(size))
    value = size - 1
    # Hash lookup, so position in the original range is irrelevant for a set.
    timer = timeit.Timer('{0} in collection'.format(value),
                         'from __main__ import collection, value')
    return timer.timeit(repetitions)
# Sweep collection sizes from 80,000 up to 900,000 in steps of 100,000.
step = 100000
lower_bound = 80000
upper_bound = 9 * 10**5 + 1

# Measure worst-case membership time at each size for both collection types.
# NOTE: `xrange` makes this a Python 2 script.
list_sizes_times = [(size, measure_list(size)) for size in xrange(lower_bound, upper_bound, step)]
set_sizes_times = [(size, measure_set(size)) for size in xrange(lower_bound, upper_bound, step)]

# Plot size vs. lookup time: lists in red, sets in blue.
output = 'figures/set_list_times.png'
figure = plt.figure()
axe = figure.gca()
list_sizes = [size_time[0] for size_time in list_sizes_times]
list_times = [size_time[1] for size_time in list_sizes_times]
set_sizes = [size_time[0] for size_time in set_sizes_times]
set_times = [size_time[1] for size_time in set_sizes_times]
axe.plot(list_sizes, list_times, color='r', alpha=0.5, label='Lists')
axe.plot(set_sizes, set_times, color='b', alpha=0.5, label='Sets')
axe.set_xlabel("Collection Sizes")
axe.set_ylabel("Times (seconds)")
axe.set_title("Collection Size Vs Time")
axe.legend(loc='upper left')
figure.savefig(output)

# Persist the raw measurements as CSV for later analysis.
with open('data/list_times.csv', 'w') as lw:
    lw.write("Size,Time\n")
    for size, time in list_sizes_times:
        lw.write("{0},{1}\n".format(size,time))

with open('data/set_times.csv', 'w') as sw:
    sw.write("Size,Time\n")
    for size, time in set_sizes_times:
        sw.write("{0},{1}\n".format(size,time))
| # python standard library
import timeit
import random
# third-party
import matplotlib.pyplot as plt
collection = None
value = None
repetitions = 10**4
def measure_list(size):
global collection
global value
collection = range(size)
value = size - 1
command = '{0} in collection'.format(value)
import_string = 'from __main__ import collection, value'
timer = timeit.Timer(command, import_string)
return timer.timeit(repetitions)
def measure_set(size):
global collection
global value
collection = set(range(size))
value = size - 1
command = '{0} in collection'.format(value)
import_string = 'from __main__ import collection, value'
timer = timeit.Timer(command, import_string)
return timer.timeit(repetitions)
step = 100000
lower_bound = 80000
upper_bound = 9 * 10**5 + 1
list_sizes_times = [(size, measure_list(size)) for size in xrange(lower_bound, upper_bound, step)]
set_sizes_times = [(size, measure_set(size)) for size in xrange(lower_bound, upper_bound, step)]
output = 'figures/set_list_times.png'
figure = plt.figure()
axe = figure.gca()
list_sizes = [size_time[0] for size_time in list_sizes_times]
list_times = [size_time[1] for size_time in list_sizes_times]
set_sizes = [size_time[0] for size_time in set_sizes_times]
set_times = [size_time[1] for size_time in set_sizes_times]
axe.plot(list_sizes, list_times, color='r', alpha=0.5, label='Lists')
axe.plot(set_sizes, set_times, color='b', alpha=0.5, label='Sets')
axe.set_xlabel("Collection Sizes")
axe.set_ylabel("Times (seconds)")
axe.set_title("Collection Size Vs Time")
axe.legend(loc='upper left')
figure.savefig(output)
with open('data/list_times.csv', 'w') as lw:
lw.write("Size,Time\n")
for size, time in list_sizes_times:
lw.write("{0},{1}\n".format(size,time))
with open('data/set_times.csv', 'w') as sw:
sw.write("Size,Time\n")
for size, time in set_sizes_times:
sw.write("{0},{1}\n".format(size,time)) | en | 0.787306 | # python standard library # third-party | 3.096392 | 3 |
django/filters.py | hanjm/myutils | 0 | 6620247 | <reponame>hanjm/myutils
# coding=utf-8
from __future__ import unicode_literals, print_function
from datetime import date
from django import template
register = template.Library()
@register.filter(name='format_timestamp')
def format_timestamp(timestamp):
    """Render a Unix timestamp as a 'YYYY-MM-DD' date string."""
    day = date.fromtimestamp(timestamp)
    return day.strftime('%Y-%m-%d')
@register.filter(name='query_dict_string')
def query_dict_string(query_dict):
    """Join the second field of each values_list() row with single spaces.

    Returns an empty string when *query_dict* does not support
    ``values_list`` (AttributeError).
    """
    try:
        return ' '.join(item[1] for item in query_dict.values_list())
    except AttributeError:
        return ''
| # coding=utf-8
from __future__ import unicode_literals, print_function
from datetime import date
from django import template
register = template.Library()
@register.filter(name='format_timestamp')
def format_timestamp(timestamp):
return date.fromtimestamp(timestamp).strftime('%Y-%m-%d')
@register.filter(name='query_dict_string')
def query_dict_string(query_dict):
try:
return ' '.join([i[1] for i in query_dict.values_list()])
except AttributeError:
return '' | en | 0.644078 | # coding=utf-8 | 2.405689 | 2 |
inference.py | markshih91/peleenet_pytorch2keras_convertor | 0 | 6620248 | from keras_peleenet import peleenet_model
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
def softmax(x):
    """Softmax over axis 0, computed in a numerically stable way.

    Subtracting the per-axis maximum before exponentiating prevents
    overflow for large logits; the result is mathematically identical
    to exp(x) / sum(exp(x), axis=0).  Also evaluates np.exp only once.
    """
    shifted = x - np.max(x, axis=0)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=0)
# Build the Keras PeleeNet and load pretrained weights from disk.
model = peleenet_model(input_shape=(224, 224, 3))
model.load_weights('peleenet_keras_weights.h5')

# Load the class-index -> label mapping ("<idx>:<name>" per line).
file_name = 'synset_words.txt'
classes = {}
for line in open(file_name):
    line = line.rstrip().split(':')
    classes[int(line[0])] = line[1]
print(classes)

# Standard ImageNet preprocessing via torchvision: resize, tensor, normalize.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    normalize,
])

# Read the sample image and preprocess it into a 1x3x224x224 torch tensor.
img = 'images/pig.jpeg'
img = Image.open(img)
np_img = np.asarray(img)
img = transform(img)
img.unsqueeze_(dim=0)
print(img.shape)

# Keras expects channels-last input, so convert NCHW -> NHWC before predict.
img = img.cpu().numpy()
img = img.transpose((0, 2, 3, 1))
output = model.predict(img)[0]
print(output)

# Convert raw scores to probabilities (top-1 label is printed on the next line).
output = softmax(output)
print(classes[np.argmax(output)]) | from keras_peleenet import peleenet_model
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
def softmax(x):
return np.exp(x)/np.sum(np.exp(x),axis=0)
model = peleenet_model(input_shape=(224, 224, 3))
model.load_weights('peleenet_keras_weights.h5')
file_name = 'synset_words.txt'
classes = {}
for line in open(file_name):
line = line.rstrip().split(':')
classes[int(line[0])] = line[1]
print(classes)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize,
])
img = 'images/pig.jpeg'
img = Image.open(img)
np_img = np.asarray(img)
img = transform(img)
img.unsqueeze_(dim=0)
print(img.shape)
img = img.cpu().numpy()
img = img.transpose((0, 2, 3, 1))
output = model.predict(img)[0]
print(output)
output = softmax(output)
print(classes[np.argmax(output)]) | none | 1 | 2.87036 | 3 | |
src/octopus/core/http.py | smaragden/OpenRenderManagement | 35 | 6620249 | import httplib
import socket
class Request(object):
    '''A class that encapsulates a HTTP request and its execution.

    Example:

    >>> import httplib
    >>> conn = httplib.HTTPConnection("localhost", 8004)
    >>> def onResponse(request, response):
    ...     print "%s %s ==> %d %s" % (request.method, request.path, response.status, response.reason)
    ...     print response.read()
    >>> def onError(request, error):
    ...     print "%s %s ==> %s" % (request.method, request.path, error)
    >>> r = Request("GET", "/nodes/0", {"Accept": "application/json"}, "")
    >>> r.call(conn, onResponse, onError)
    GET /nodes/0 ==> 200 OK
    {}
    '''

    def __init__(self, method, path, headers={}, body=''):
        # HTTP verb ("GET", "POST", ...), request path, header dict and payload.
        # NOTE(review): the mutable default `headers={}` is shared between all
        # calls that omit the argument; safe only while callers never mutate it.
        self.method = method
        self.path = path
        self.headers = headers
        self.body = body

    def call(self, conn, onResponse, onError):
        # Issue the request on `conn` and invoke exactly one callback:
        # onResponse(self, response) on success, onError(self, error) on failure.
        try:
            conn.request(self.method, self.path, self.body, self.headers)
            response = conn.getresponse()
        except (EnvironmentError, httplib.error, socket.error), e:
            # Known connection/protocol failures (Python 2 except syntax).
            onError(self, e)
        except:
            # Anything unexpected: report with no error object.
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
            onError(self, None)
        else:
            onResponse(self, response)
            if response.length:
                # Drain any unread body so the connection can be reused.
                response.read()
| import httplib
import socket
class Request(object):
'''A class that encapsulates a HTTP request and its execution.
Example:
>>> import httplib
>>> conn = httplib.HTTPConnection("localhost", 8004)
>>> def onResponse(request, response):
... print "%s %s ==> %d %s" % (request.method, request.path, response.status, response.reason)
... print response.read()
>>> def onError(request, error):
... print "%s %s ==> %s" % (request.method, request.path, error)
>>> r = Request("GET", "/nodes/0", {"Accept": "application/json"}, "")
>>> r.call(conn, onResponse, onError)
GET /nodes/0 ==> 200 OK
{}
'''
def __init__(self, method, path, headers={}, body=''):
self.method = method
self.path = path
self.headers = headers
self.body = body
def call(self, conn, onResponse, onError):
try:
conn.request(self.method, self.path, self.body, self.headers)
response = conn.getresponse()
except (EnvironmentError, httplib.error, socket.error), e:
onError(self, e)
except:
onError(self, None)
else:
onResponse(self, response)
if response.length:
response.read()
| en | 0.547372 | A class that encapsulates a HTTP request and its execution. Example: >>> import httplib >>> conn = httplib.HTTPConnection("localhost", 8004) >>> def onResponse(request, response): ... print "%s %s ==> %d %s" % (request.method, request.path, response.status, response.reason) ... print response.read() >>> def onError(request, error): ... print "%s %s ==> %s" % (request.method, request.path, error) >>> r = Request("GET", "/nodes/0", {"Accept": "application/json"}, "") >>> r.call(conn, onResponse, onError) GET /nodes/0 ==> 200 OK {} | 3.563087 | 4 |
ops-tests/component/test_sysmond_ct_top.py | OpenSwitchNOS/openswitch-ops-sysmond | 0 | 6620250 | # (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
##########################################################################
"""
OpenSwitch Test for verifying output of top commands
"""
TOPOLOGY = """
# +-------+
# | ops1 |
# +-------+
# Nodes
[type=openswitch name="OpenSwitch 1"] ops1
# Links
"""
def test_top_command(topology, step):
    """Verify `top cpu` / `top memory` CLI output lists the five core daemons.

    Also samples the usage column of the first process row to detect
    unusually hungry processes.  Requires the `ops1` node from TOPOLOGY.
    """
    ops1 = topology.get('ops1')

    # Maximum cpu usage in terms of percentage
    max_cpu_usage_of_a_process = 30
    # Maximum memory usage in terms of percentage
    max_memory_usage_of_a_process = 10

    assert ops1 is not None

    step("Step 1: Test top cpu command")
    output = ops1("top cpu")
    lines = output.split('\n')
    assert len(lines) > 0,\
        'Test top cpu command - Failed'

    step("Test top cpu output to spot core processes available")
    # Expect one matching line for each of the five core system daemons.
    check_point = 0
    for line in lines:
        if "systemd-journald" in line or "dbus-daemon" in line or \
                "rsyslogd" in line or "system-logind" in line or \
                "ovsdb-server" in line:
            check_point += 1
            print(line)
    print("Number of check points achieved %d" % (check_point))
    assert check_point == 5,\
        'Test top cpu command output to spot core processes - Failed'

    step("Test top cpu and spot high cpu processes")
    # Parse the usage column of the first process row -- assumes top's batch
    # layout (header in lines 0-6, usage at whitespace field 8); TODO confirm.
    top_process_cpu_usage = int(lines[7].split()[8].split(".")[0])
    if top_process_cpu_usage >= max_cpu_usage_of_a_process:
        check_point += 1
    print("Number of check points achieved %d" % (check_point))
    # NOTE(review): the increment above makes this assertion fail exactly when
    # the top process reaches the CPU ceiling -- presumably intentional (fail
    # on a runaway process), but the reused message text suggests a copy-paste.
    assert check_point == 5,\
        'Test top cpu command output to spot core processes - Failed'

    step("Step 2: Test top memory command")
    output = ops1("top memory")
    lines = output.split('\n')
    assert len(lines) > 0,\
        'Test top memory command - Failed'

    step("Test top memory output to spot core processes available")
    # Same five-daemon presence check against the memory-sorted listing.
    check_point = 0
    for line in lines:
        if "systemd-journald" in line or "dbus-daemon" in line or \
                "rsyslogd" in line or "system-logind" in line or \
                "ovsdb-server" in line:
            check_point += 1
            print(line)
    print("Number of check points achieved %d" % (check_point))
    assert check_point == 5,\
        'Test top memory command output to spot core processes - Failed'

    step("Test top memory and spot high memory usage processes")
    # Same first-row/field-8 parsing assumption as the CPU check above.
    top_process_memory_usage = int(lines[7].split()[8].split(".")[0])
    if top_process_memory_usage >= max_memory_usage_of_a_process:
        check_point += 1
    print("Number of check points achieved %d" % (check_point))
    # NOTE(review): same inverted-looking check_point/assert interaction as the
    # CPU case -- confirm intended semantics before changing.
    assert check_point == 5,\
        'Test top memory command output to spot \
        high memory usage processes - Failed'
| # (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
##########################################################################
"""
OpenSwitch Test for verifying output of top commands
"""
TOPOLOGY = """
# +-------+
# | ops1 |
# +-------+
# Nodes
[type=openswitch name="OpenSwitch 1"] ops1
# Links
"""
def test_top_command(topology, step):
ops1 = topology.get('ops1')
# Maximum cpu usage in terms of percentage
max_cpu_usage_of_a_process = 30
# Maximum memory usage in terms of percentage
max_memory_usage_of_a_process = 10
assert ops1 is not None
step("Step 1: Test top cpu command")
output = ops1("top cpu")
lines = output.split('\n')
assert len(lines) > 0,\
'Test top cpu command - Failed'
step("Test top cpu output to spot core processes available")
check_point = 0
for line in lines:
if "systemd-journald" in line or "dbus-daemon" in line or \
"rsyslogd" in line or "system-logind" in line or \
"ovsdb-server" in line:
check_point += 1
print(line)
print("Number of check points achieved %d" % (check_point))
assert check_point == 5,\
'Test top cpu command output to spot core processes - Failed'
step("Test top cpu and spot high cpu processes")
top_process_cpu_usage = int(lines[7].split()[8].split(".")[0])
if top_process_cpu_usage >= max_cpu_usage_of_a_process:
check_point += 1
print("Number of check points achieved %d" % (check_point))
assert check_point == 5,\
'Test top cpu command output to spot core processes - Failed'
step("Step 2: Test top memory command")
output = ops1("top memory")
lines = output.split('\n')
assert len(lines) > 0,\
'Test top memory command - Failed'
step("Test top memory output to spot core processes available")
check_point = 0
for line in lines:
if "systemd-journald" in line or "dbus-daemon" in line or \
"rsyslogd" in line or "system-logind" in line or \
"ovsdb-server" in line:
check_point += 1
print(line)
print("Number of check points achieved %d" % (check_point))
assert check_point == 5,\
'Test top memory command output to spot core processes - Failed'
step("Test top memory and spot high memory usage processes")
top_process_memory_usage = int(lines[7].split()[8].split(".")[0])
if top_process_memory_usage >= max_memory_usage_of_a_process:
check_point += 1
print("Number of check points achieved %d" % (check_point))
assert check_point == 5,\
'Test top memory command output to spot \
high memory usage processes - Failed'
| en | 0.747433 | # (C) Copyright 2016 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # ########################################################################## OpenSwitch Test for verifying output of top commands # +-------+ # | ops1 | # +-------+ # Nodes [type=openswitch name="OpenSwitch 1"] ops1 # Links # Maximum cpu usage in terms of percentage # Maximum memory usage in terms of percentage | 2.141694 | 2 |