text string | size int64 | token_count int64 |
|---|---|---|
from .neural_network import Neural_network
| 43 | 13 |
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.utils.translation import ugettext_lazy as _
from .models import Ride
class RideForm(forms.ModelForm):
    """Form for creating or editing a Ride, rendered with crispy-forms."""

    date = forms.DateField(
        label=_('Date'),
        widget=forms.DateInput(
            format=('%Y-%m-%d'),
            attrs={'class': 'form-control input-group-alternative', 'type': 'date'},
        ),
    )
    time = forms.TimeField(
        label=_('Time'),
        required=False,
        input_formats=['%H:%M'],
        widget=forms.TimeInput(
            format=('%H:%M'),
            attrs={'class': 'form-control input-group-alternative', 'type': 'time'},
        ),
    )
    description = forms.CharField(
        label=_('Description'),
        required=False,
        help_text=_('Write here any additional information.'),
        widget=forms.Textarea(
            attrs={'class': 'form-control input-group-alternative'},
        ),
    )

    class Meta:
        model = Ride
        fields = ('date', 'time', 'origin', 'destination', 'seats', 'price',
                  'description')

    def __init__(self, *args, **kwargs):
        """Attach a crispy-forms helper and normalise widget CSS classes."""
        super().__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.add_input(
            Submit('submit', _('Save Ride'), css_class='btn-block'))
        self.helper = helper
        for visible_field in self.visible_fields():
            visible_field.field.widget.attrs['class'] = 'input-group-alternative'
| 1,496 | 447 |
import asyncio
from mlserver import MLModel
from mlserver.codecs import NumpyCodec
from mlserver.types import InferenceRequest, InferenceResponse
class SumModel(MLModel):
    """MLServer model that returns the row-wise sum of its first input."""

    async def predict(self, payload: InferenceRequest) -> InferenceResponse:
        """Decode the first request input, sum along axis 1, and re-encode."""
        data = self.decode(payload.inputs[0])
        row_totals = data.sum(axis=1, keepdims=True)
        encoded = NumpyCodec().encode(name="total", payload=row_totals)
        return InferenceResponse(
            id=payload.id,
            model_name=self.name,
            outputs=[encoded],
        )
class SlowModel(MLModel):
    """Test model that sleeps for 10 seconds in both load and infer."""

    async def load(self) -> bool:
        # Simulate an expensive model load.
        await asyncio.sleep(10)
        self.ready = True
        return self.ready

    async def infer(self, payload: InferenceRequest) -> InferenceResponse:
        # Simulate a slow inference call; returns no outputs.
        await asyncio.sleep(10)
        return InferenceResponse(
            id=payload.id,
            model_name=self.name,
            outputs=[],
        )
| 843 | 256 |
from time import time
import torch
import torch.nn as nn
class FastAttention(nn.Module):
    """Performer-style linear attention using orthogonal random features.

    Parameters
    ----------
    input_shape : int
        Model (embedding) dimension; must be divisible by `head`.
    head : int
        Number of attention heads.
    n_features : int
        Number of random features used to approximate the softmax kernel.
    """
    def __init__(self, input_shape, head, n_features):
        super(FastAttention, self).__init__()
        self.head = head
        self.input_shape = input_shape
        self.depth = int(input_shape // head)
        self.n_features = n_features
        # Fixed (non-trainable) random-feature matrices for keys and queries.
        self.key_ORF = self.OrthogonalRandomFeature()
        self.query_ORF = self.OrthogonalRandomFeature()
        self.query = nn.Linear(self.depth, self.depth)
        self.key = nn.Linear(self.depth, self.depth)
        self.value = nn.Linear(self.depth, self.depth)
        self.fc = nn.Linear(self.depth*head, input_shape)

    def kernel_function(self, x, flag):
        """Project per-head features into the random-feature space.

        Uses a ReLU kernel plus a small epsilon so features stay positive.
        """
        ORF = self.query_ORF if flag == 'query' else self.key_ORF
        normalization_factor = 1/ORF.shape[-1]**0.25
        # FIX: scale out-of-place. The original used `x *= ...`, mutating the
        # caller's tensor in place, which is unsafe for autograd and callers.
        x = x * normalization_factor
        out = torch.einsum('nhsd, fd -> nhsf', x, ORF)
        kernel_fn = nn.ReLU()(out) + 1e-3
        return kernel_fn

    def OrthogonalRandomFeature(self):
        """Build an (n_features, depth) matrix of orthogonal random features."""
        n = self.n_features//self.depth
        remainder = self.n_features%self.depth
        orthogonal_features = []
        for _ in range(n):
            normal_feature = torch.rand(self.depth, self.depth)
            # torch.qr is deprecated; torch.linalg.qr is the supported API.
            orthogonal_feature, _ = torch.linalg.qr(normal_feature)
            orthogonal_features.append(orthogonal_feature)
        if remainder > 0:
            normal_feature = torch.rand(self.depth, self.depth)
            orthogonal_feature, _ = torch.linalg.qr(normal_feature)
            orthogonal_features.append(orthogonal_feature[0: remainder])
        orthogonal_features = torch.cat(orthogonal_features)
        # Rescale rows so their norms match those of a Gaussian matrix.
        mutilplier = torch.randn(self.n_features, self.depth).norm(dim=1)
        final_features = torch.matmul(torch.diag(mutilplier), orthogonal_features)
        return final_features

    def causal_attention(self, q, k, v):
        """Causal (prefix-sum) linear attention.

        NOTE(review): the denominator einsum contracts over the key axis
        ('nhqf, nhkf -> nhqf'), which only shape-checks when q_len == k_len;
        confirm this is the intended prefix normalisation.
        """
        denominator = 1/torch.einsum('nhqf, nhkf -> nhqf', q, k.cumsum(dim=-2))
        x = torch.einsum('nhkf, nhkd -> nhkfd', k, v)
        x = x.cumsum(dim=-3)
        out = torch.einsum('nhqfd, nhqf, nhqf -> nhqd', x, q, denominator)
        return out

    def bidirectional_attention(self, q, k, v):
        """Non-causal linear attention via the (k^T v) factorisation (O(n))."""
        kt_i = torch.einsum('nhkf -> nhf', k)
        normalization_factor = 1/(torch.einsum('nhqf, nhf -> nhq', q, kt_i))
        k_v = torch.einsum('nhkf, nhkd -> nhfd', k, v)
        attention = torch.einsum('nhfd, nhqf, nhq-> nhqd', k_v, q, normalization_factor)
        return attention

    def forward(self, query, key, value, mask=None, casual_mask=False):
        """Compute multi-head linear attention.

        query/key/value: (batch, seq_len, input_shape) tensors.
        mask: optional tensor broadcastable to the per-head key shape;
              zero entries are masked out.
        casual_mask: when True, use causal (prefix) attention.
        """
        batch = query.shape[0]
        query_len, key_len, value_len = query.shape[1], key.shape[1], value.shape[1]
        query = query.reshape(batch, query_len, self.head, self.depth)
        key = key.reshape(batch, key_len, self.head, self.depth)
        value = value.reshape(batch, value_len, self.head, self.depth)
        query = query.permute(0, 2, 1, 3)
        key = key.permute(0, 2, 1, 3)
        value = value.permute(0, 2, 1, 3)
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)
        if mask is not None:
            # BUG FIX: masked_fill is out-of-place; the original discarded its
            # result, so the mask had no effect. Rebind the masked tensor.
            key = key.masked_fill(mask == 0, float("-1e20"))
        query = self.kernel_function(query, 'query')
        key = self.kernel_function(key, 'key')
        if casual_mask:
            out = self.causal_attention(query, key, value)
        else:
            out = self.bidirectional_attention(query, key, value)
        out = out.permute(0, 2, 1, 3)
        out = out.reshape(batch, query_len, self.head*self.depth)
        out = self.fc(out)
        return out
class PerformerBlock(nn.Module):
    """Transformer block whose attention is a FastAttention module.

    Layout: attention -> residual + norm + dropout -> FFN -> residual +
    norm + dropout.
    """

    def __init__(self, input_shape, head, n_features, dropout, forward_expansion):
        super(PerformerBlock, self).__init__()
        self.attention = FastAttention(input_shape, head, n_features)
        hidden = input_shape * forward_expansion
        self.feed_forward = nn.Sequential(
            nn.Linear(input_shape, hidden),
            nn.GELU(),
            nn.Linear(hidden, input_shape),
        )
        self.layernorm1 = nn.LayerNorm(input_shape)
        self.layernorm2 = nn.LayerNorm(input_shape)
        self.dropout = nn.Dropout(dropout)

    def forward(self, query, key, value, mask):
        """Run one attention + feed-forward round with residual connections."""
        attended = self.attention(query, key, value, mask)
        normed = self.dropout(self.layernorm1(attended + query))
        expanded = self.feed_forward(normed)
        return self.dropout(self.layernorm2(expanded + normed))
class Encoder(nn.Module):
    """Stack of PerformerBlocks over word + learned positional embeddings."""

    def __init__(
        self,
        vocab_size,
        embedding_out,
        num_layers,
        heads,
        n_features,
        forward_expansion,
        dropout,
        max_len
    ):
        super(Encoder, self).__init__()
        self.word_embedding = nn.Embedding(vocab_size, embedding_out)
        # Learned positional embedding, sliced to the input length in forward.
        self.postional_embedding = nn.Parameter(torch.zeros(1, max_len, embedding_out))
        self.dropout = nn.Dropout(dropout)
        blocks = [
            PerformerBlock(embedding_out, heads, n_features, dropout, forward_expansion)
            for _ in range(num_layers)
        ]
        self.layers = nn.Sequential(*blocks)

    def forward(self, x, mask):
        """Embed tokens, add positions, then self-attend through each block."""
        embedded = self.word_embedding(x)
        positions = self.postional_embedding[:, :x.shape[1], :]
        out = self.dropout(embedded + positions)
        for block in self.layers:
            out = block(out, out, out, mask)
        return out
class DecoderBlock(nn.Module):
    """Causal self-attention followed by a PerformerBlock over encoder output."""

    def __init__(
        self,
        embedding_out,
        head,
        n_features,
        forward_expansion,
        dropout
    ):
        super(DecoderBlock, self).__init__()
        self.attention = FastAttention(embedding_out, head, n_features)
        self.Performer_block = PerformerBlock(
            embedding_out, head, n_features, dropout, forward_expansion
        )
        self.dropout = nn.Dropout(dropout)
        self.norm = nn.LayerNorm(embedding_out)

    def forward(self, query, key, value, src_mask):
        """Self-attend causally over `query`, then cross-attend to key/value."""
        self_attended = self.attention(query, query, query, src_mask, True)
        gated = self.dropout(self.norm(self_attended + query))
        return self.Performer_block(gated, key, value, src_mask)
class Decoder(nn.Module):
    """Stack of DecoderBlocks plus the output projection to vocabulary logits."""

    def __init__(
        self,
        vocab_size,
        embedding_out,
        num_layers,
        head,
        n_features,
        forward_expansion,
        dropout,
        max_len
    ):
        super(Decoder, self).__init__()
        self.word_embedding = nn.Embedding(vocab_size, embedding_out)
        self.positional_embedding = nn.Parameter(torch.zeros(1, max_len, embedding_out))
        decoder_blocks = [
            DecoderBlock(embedding_out, head, n_features, forward_expansion, dropout)
            for _ in range(num_layers)
        ]
        self.layers = nn.Sequential(*decoder_blocks)
        self.fc = nn.Linear(embedding_out, vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, encoder_output, src_mask):
        """Embed targets, run each block against the encoder output, project."""
        positions = self.positional_embedding[:, :x.shape[1], :]
        x = self.dropout(self.word_embedding(x) + positions)
        for block in self.layers:
            x = block(x, encoder_output, encoder_output, src_mask)
        return self.fc(x)
class Performers(nn.Module):
    """Full encoder-decoder Performer (linear-attention transformer).

    pad_idx marks padding tokens in both input and target sequences and is
    used to build broadcastable attention masks.
    """
    def __init__(
        self,
        input_vocab_size,
        output_vocab_size,
        pad_idx,
        embedding_out,
        num_layers,
        forward_expansion,
        head,
        n_features,
        dropout,
        max_len
    ):
        super(Performers, self).__init__()
        self.encoder = Encoder(
            input_vocab_size,
            embedding_out,
            num_layers,
            head,
            n_features,
            forward_expansion,
            dropout,
            max_len
        )
        self.decoder = Decoder(
            output_vocab_size,
            embedding_out,
            num_layers,
            head,
            n_features,
            forward_expansion,
            dropout,
            max_len
        )
        self.pad_idx = pad_idx
        self.apply(self._init_weights)

    # From @HuggingFace
    def _init_weights(self, module):
        """GPT-style init for Linear/Embedding weights and LayerNorm gains."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def input_pad_mask(self, inputs):
        """Boolean non-pad mask shaped (batch, 1, seq, 1) for broadcasting."""
        pad_mask = (inputs != self.pad_idx).unsqueeze(1).unsqueeze(3)
        return pad_mask

    def output_pad_mask(self, targets):
        """Same as input_pad_mask, for the decoder side.

        BUG FIX: the original computed the mask but never returned it, so
        callers always received None and the decoder ran unmasked.
        """
        pad_mask = (targets != self.pad_idx).unsqueeze(1).unsqueeze(3)
        return pad_mask

    def forward(self, inputs, target):
        """Encode `inputs`, then decode `target` against the encoder output."""
        # BUG FIX: the original referenced `targets` (undefined here — the
        # parameter is `target`), which only worked by accident when a global
        # `targets` happened to exist.
        input_pad_mask = self.input_pad_mask(inputs)
        output_pad_mask = self.output_pad_mask(target)
        encoder_output = self.encoder(inputs, input_pad_mask)
        decoder_out = self.decoder(target, encoder_output, output_pad_mask)
        return decoder_out
if __name__ == "__main__":
    # Smoke test: build a Performers model and time one forward pass.
    # Vocabulary sizes depend on the tokenizer.
    input_vocab_size = 100
    output_vocab_size = 200
    # Default Performer hyper-parameters:
    pad_idx = 0
    embedding_out = 512
    num_layers = 6
    forward_expansion = 4
    head = 8
    n_features = 256
    dropout = 0.1
    max_len = 512
    inputs = torch.randint(0, 100, (32, 200))
    targets = torch.randint(0, 100, (32, 100))
    model = Performers(
        input_vocab_size,
        output_vocab_size,
        pad_idx,
        embedding_out,
        num_layers,
        forward_expansion,
        head,
        n_features,
        dropout,
        max_len
    )
    start = time()
    y = model(inputs, targets)
    print(f'INFERENCE TIME = {time() - start}sec')
    x = sum(p.numel() for p in model.parameters() if p.requires_grad)
    # FIX: removed stray extraction residue that was fused onto this line
    # and made the file a syntax error.
    print(f'NUMBER OF PARAMETERS ARE = {x}')
"""
gtp_connection.py
Module for playing games of Go using GoTextProtocol
Parts of this code were originally based on the gtp module
in the Deep-Go project by Isaac Henrion and Amos Storkey
at the University of Edinburgh.
"""
import signal, os
import traceback
from sys import stdin, stdout, stderr
from board_util import GoBoardUtil, BLACK, WHITE, EMPTY, BORDER, PASS, \
MAXSIZE, coord_to_point
import numpy as np
import re
import time
import random
class GtpConnection():
    """GTP (Go Text Protocol) connection for a NoGo-playing engine.

    Reads commands from stdin, dispatches them to handler methods, and
    writes GTP-formatted responses to stdout. Also contains the
    alpha-beta game solver (`ab_search` / `advanced_search`).
    """
    def __init__(self, go_engine, board, debug_mode = False):
        """
        Manage a GTP connection for a Go-playing engine

        Parameters
        ----------
        go_engine:
            a program that can reply to the set of GTP commands below
        board:
            Represents the current board state.
        """
        self.totalTime = 0   # accumulated search time (profiling only)
        self.count = 0       # moves skipped thanks to isomorphism reuse
        self.nodeExp = 0     # nodes expanded by advanced_search
        self.timeLimit = 1   # seconds allowed per solve (see timelimit_cmd)
        self.to_play = BLACK
        # H table is a dictionary that stores (state, value) pairs
        # value: Black win -> 1, White win -> -1
        self.H_table = {}
        self._winner = ''
        self._optimal_move = ''
        self._debug_mode = debug_mode
        self.go_engine = go_engine
        self.board = board
        # Dispatch table: GTP command name -> handler method.
        self.commands = {
            "protocol_version": self.protocol_version_cmd,
            "quit": self.quit_cmd,
            "name": self.name_cmd,
            "boardsize": self.boardsize_cmd,
            "showboard": self.showboard_cmd,
            "clear_board": self.clear_board_cmd,
            "komi": self.komi_cmd,
            "version": self.version_cmd,
            "known_command": self.known_command_cmd,
            "genmove": self.genmove_cmd,
            "list_commands": self.list_commands_cmd,
            "play": self.play_cmd,
            "legal_moves": self.legal_moves_cmd,
            "gogui-rules_game_id": self.gogui_rules_game_id_cmd,
            "gogui-rules_board_size": self.gogui_rules_board_size_cmd,
            "gogui-rules_legal_moves": self.gogui_rules_legal_moves_cmd,
            "gogui-rules_side_to_move": self.gogui_rules_side_to_move_cmd,
            "gogui-rules_board": self.gogui_rules_board_cmd,
            "gogui-rules_final_result": self.gogui_rules_final_result_cmd,
            "gogui-analyze_commands": self.gogui_analyze_cmd,
            "timelimit": self.timelimit_cmd,
            "solve":self.solve_cmd
        }

        # used for argument checking
        # values: (required number of arguments,
        #          error message on argnum failure)
        self.argmap = {
            "boardsize": (1, 'Usage: boardsize INT'),
            "komi": (1, 'Usage: komi FLOAT'),
            "known_command": (1, 'Usage: known_command CMD_NAME'),
            "genmove": (1, 'Usage: genmove {w,b}'),
            "play": (2, 'Usage: play {b,w} MOVE'),
            "legal_moves": (1, 'Usage: legal_moves {w,b}'),
            "timelimit": (1, 'Usage: timelimit INT, 1 <= INT <= 100'),
        }

    def write(self, data):
        """Write raw data to stdout (no GTP framing)."""
        stdout.write(data)

    def flush(self):
        stdout.flush()

    def start_connection(self):
        """
        Start a GTP connection.
        This function continuously monitors standard input for commands.
        """
        line = stdin.readline()
        while line:
            self.get_cmd(line)
            line = stdin.readline()

    def get_cmd(self, command):
        """
        Parse command string and execute it
        """
        if len(command.strip(' \r\t')) == 0:
            return
        if command[0] == '#':
            return
        # Strip leading numbers from regression tests
        # NOTE(review): "^\d+" should be a raw string (r"^\d+") to avoid a
        # DeprecationWarning for the invalid escape sequence.
        if command[0].isdigit():
            command = re.sub("^\d+", "", command).lstrip()
        elements = command.split()
        if not elements:
            return
        command_name = elements[0]; args = elements[1:]
        if self.has_arg_error(command_name, len(args)):
            return
        if command_name in self.commands:
            try:
                self.commands[command_name](args)
            except Exception as e:
                self.debug_msg("Error executing command {}\n".format(str(e)))
                self.debug_msg("Stack Trace:\n{}\n".
                               format(traceback.format_exc()))
                raise e
        else:
            self.debug_msg("Unknown command: {}\n".format(command_name))
            self.error('Unknown command')
            stdout.flush()

    def has_arg_error(self, cmd, argnum):
        """
        Verify the number of arguments of cmd.
        argnum is the number of parsed arguments
        """
        if cmd in self.argmap and self.argmap[cmd][0] != argnum:
            self.error(self.argmap[cmd][1])
            return True
        return False

    def debug_msg(self, msg):
        """ Write msg to the debug stream """
        if self._debug_mode:
            stderr.write(msg)
            stderr.flush()

    def error(self, error_msg):
        """ Send error msg to stdout """
        stdout.write('? {}\n\n'.format(error_msg))
        stdout.flush()

    def respond(self, response=''):
        """ Send response to stdout """
        stdout.write('= {}\n\n'.format(response))
        stdout.flush()

    def reset(self, size):
        """
        Reset the board to empty board of given size
        """
        self.board.reset(size)

    def board2d(self):
        """Return the board as a printable 2-D string."""
        return str(GoBoardUtil.get_twoD_board(self.board))

    def protocol_version_cmd(self, args):
        """ Return the GTP protocol version being used (always 2) """
        self.respond('2')

    def quit_cmd(self, args):
        """ Quit game and exit the GTP interface """
        self.respond()
        exit()

    def name_cmd(self, args):
        """ Return the name of the Go engine """
        self.respond(self.go_engine.name)

    def version_cmd(self, args):
        """ Return the version of the Go engine """
        self.respond(self.go_engine.version)

    def clear_board_cmd(self, args):
        """ clear the board """
        self.reset(self.board.size)
        self.respond()

    def boardsize_cmd(self, args):
        """
        Reset the game with new boardsize args[0]
        """
        self.reset(int(args[0]))
        self.respond()

    # newly added
    def timelimit_cmd(self, args):
        """
        Set the solver's time limit (seconds) to args[0]
        """
        self.timeLimit = int(args[0])
        self.respond()

    def showboard_cmd(self, args):
        self.respond('\n' + self.board2d())

    def komi_cmd(self, args):
        """
        Set the engine's komi to args[0]
        """
        self.go_engine.komi = float(args[0])
        self.respond()

    def known_command_cmd(self, args):
        """
        Check if command args[0] is known to the GTP interface
        """
        if args[0] in self.commands:
            self.respond("true")
        else:
            self.respond("false")

    def list_commands_cmd(self, args):
        """ list all supported GTP commands """
        self.respond(' '.join(list(self.commands.keys())))

    def legal_moves_cmd(self, args):
        """
        List legal moves for color args[0] in {'b','w'}
        """
        board_color = args[0].lower()
        color = color_to_int(board_color)
        moves = GoBoardUtil.generate_legal_moves(self.board, color)
        gtp_moves = []
        for move in moves:
            coords = point_to_coord(move, self.board.size)
            gtp_moves.append(format_point(coords))
        sorted_moves = ' '.join(sorted(gtp_moves))
        self.respond(sorted_moves)

    def play_cmd(self, args):
        """
        play a move args[1] for given color args[0] in {'b','w'}
        """
        try:
            board_color = args[0].lower()
            board_move = args[1]
            if board_color != "b" and board_color !="w":
                self.respond("illegal move: \"{}\" wrong color".format(board_color))
                return
            color = color_to_int(board_color)
            # change turn to the other player
            self.to_play = GoBoardUtil.opponent(color)
            # NoGo has no pass move; reject it as an illegal coordinate.
            if args[1].lower() == 'pass':
                self.respond("illegal move: \"{} {}\" wrong coordinate".format(args[0], args[1]))
                return
            coord = move_to_coord(args[1], self.board.size)
            if coord:
                move = coord_to_point(coord[0],coord[1], self.board.size)
            else:
                # NOTE(review): `move` is referenced here before assignment —
                # this branch would raise UnboundLocalError (caught by the
                # outer except). Should format args[1] only.
                self.error("Error executing move {} converted from {}"
                           .format(move, args[1]))
                return
            if not self.board.play_move(move, color):
                self.respond("illegal move: \"{} {}\" ".format(args[0], board_move))
                return
            else:
                self.debug_msg("Move: {}\nBoard:\n{}\n".
                               format(board_move, self.board2d()))
            self.respond()
        except Exception as e:
            self.respond('illegal move: \"{} {}\" {}'.format(args[0], args[1], str(e)))

    def solve_helper(self):
        """Run the timed solver; return (winner, move).

        winner is 'b', 'w' or 'unknown'. move is None when the search timed
        out, or when the solved winner is not the side to play.
        """
        winner = 'unknown'
        # the copy of board can be viewed as a state
        cp_board = self.board.copy()
        start = time.time()
        # SIGALRM aborts the search after self.timeLimit seconds; the handler
        # raises, which is caught below and treated as "unknown".
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(self.timeLimit)
        try:
            value,move = self.advanced_search(cp_board,81,-1,1)
        except Exception as e:
            value,move = 0,None
        #print("nodeExp",self.nodeExp)
        #print("count",self.count)
        signal.alarm(0)
        end = time.time()
        print("time: ",end - start)
        #print("partial time: ",self.totalTime)
        if value == 1:
            winner = 'b'
        elif value == -1:
            winner = 'w'
        # Only report a best move when the winner is the side to play.
        if (winner == 'b' and self.to_play !=BLACK) or (winner == 'w' and self.to_play !=WHITE):
            move = None
        return winner,move

    # newly added
    def solve_cmd(self,args):
        """GTP 'solve': respond with the winner and (if any) the best move."""
        moveStr = ''
        winner,move = self.solve_helper()
        if move:
            moveStr = ' '+ coord_to_move(move,self.board.size)
        self.respond(winner+moveStr)

    # alpha beta pruning, referencing from wikipedia:
    # https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning
    # color is the player. black is max player, white is min player
    def ab_search(self, color, copy_of_board, depth, alpha, beta):
        """Plain alpha-beta search; returns (value, bestMove).

        value is +1 for a Black win, -1 for a White win.
        """
        _alpha = alpha
        _beta = beta
        bestMove = None
        # base case, no more legal move
        #print(GoBoardUtil.generate_legal_moves(copy_of_board, color))
        if depth == 0 or (GoBoardUtil.generate_legal_moves(copy_of_board, color) == []):
            # depth should always be > 0
            # since NoGo cannot capture nor suicide, if the last move is by
            # WHITE/BLACK, it must be a BLACK/WHITE win.
            if color == WHITE:
                return 1,None
            # color == BLACK
            else:
                return -1,None
        # color is black; max player
        if color == BLACK:
            value = -1000000
            # make a copy of current state
            allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, color)
            #print("allmoves:")
            #print(allmoves)
            for move in allmoves:
                child = copy_of_board.copy()
                child.play_move(move, color)
                childValue,_ = self.ab_search(WHITE,child,depth-1,_alpha,_beta)
                value = max(value,childValue)
                _alpha = max(_alpha,value)
                bestMove = move
                # beta cut-off
                if _alpha >= _beta:
                    break
            return value,bestMove
        # color is white; min player
        else:
            value = 1000000
            allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, color)
            #print("allmoves:")
            #print(allmoves)
            for move in allmoves:
                child = copy_of_board.copy()
                child.play_move(move, color)
                childValue,_ = self.ab_search(BLACK,child,depth-1,_alpha,_beta)
                value = min(value,childValue)
                _beta = min(_beta,value)
                bestMove = move
                # alpha cut-off
                if _alpha >= _beta:
                    break
            return value,bestMove

    def advanced_search(self,copy_of_board,depth,alpha,beta):
        """Alpha-beta search with transposition table and isomorphism pruning.

        Uses self.H_table keyed on a string encoding of the board to reuse
        solved positions, prunes symmetric (rotated/reflected) children, and
        orders unsearched moves by a hand-tuned heuristic.
        Returns (value, bestMove) with value in {+1, -1} (0 only at depth 0).
        """
        _alpha = alpha
        _beta = beta
        bestMove = None
        self.nodeExp += 1
        # base case, depth 0
        if depth == 0:
            return 0,None
        #Start = time.time()
        allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, copy_of_board.current_player)
        #End =time.time()
        #self.totalTime += End-Start
        # base case, no more legal move
        if allmoves == []:
            # since NoGo cannot capture nor suicide, if the last move is by
            # WHITE/BLACK, it must be a BLACK/WHITE win.
            if copy_of_board.current_player == WHITE:
                self.H_table[self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))] = 1
                return 1,None
            # color == BLACK
            else:
                self.H_table[self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))] = -1
                return -1,None
        searchedMoves = []    # moves whose child value is already in H_table
        unsearchedMoves = []  # moves to be searched, ordered by heuristic
        unsearched = {}       # move -> heuristic score (lower searched first)
        searchedValue = {}    # move -> cached value from H_table
        isoSet = set()        # isomorphic positions seen among siblings
        singleMoveIsoSet = set()
        for move in allmoves:
            singleMoveIsoSet.clear()
            child = copy_of_board.copy()
            child.play_move(move, copy_of_board.current_player)
            # get all isomorphics of the board, in order to prune as many
            # redundant states as possible
            isomorphics = self.get_all_isomorphic(GoBoardUtil.get_twoD_board(child),child.size)
            found = False
            for iso in isomorphics:
                if self.tuple_to_str(iso) in self.H_table:
                    found = True
                    searchedMoves.append(move)
                    searchedValue[move] = self.H_table[self.tuple_to_str(iso)]
                    break
                if iso in isoSet:
                    # a symmetric sibling already covers this child
                    found = True
                    break
                else:
                    isoSet.add(iso)
                    singleMoveIsoSet.add(iso)
            if not found:
                '''
                the following is the heuristic I created for ordering the moves:
                (1) eye-filling is the last thing we want to do;
                (2) the fewer the number of player's stones with MD 1, the better;
                (3) the more the number of opponent's stones with MD 1, the better;
                (4) the more the number of player's stones with MD 2, the better;
                '''
                num_same = 49
                # neighbours at Manhattan distance 1 and 2 (point encoding
                # uses rows of size board.size + 1)
                dis1 = [move+1,move-1,move+child.size+1,move-child.size-1]
                dis2 = [move+2,move-2,move+2*(child.size+1),move-2*(child.size+1),move+child.size+2,move-child.size-2,move+child.size,move-child.size]
                valid1 = []
                for point in dis1:
                    x = point%(child.size+1)
                    y = point//(child.size+1)
                    if 1<=x<=child.size and 1<=y<=child.size:
                        valid1.append(point)
                valid2 = []
                for point in dis2:
                    x = point%(child.size+1)
                    y = point//(child.size+1)
                    if 1<=x<=child.size and 1<=y<=child.size:
                        valid2.append(point)
                if copy_of_board.is_eye(move,copy_of_board.current_player):
                    num_same += 1000
                for point in valid1:
                    if child.get_color(point)==copy_of_board.current_player:
                        num_same += 100
                    if child.get_color(point)== BLACK+WHITE-copy_of_board.current_player:
                        num_same -= 10
                for point in valid2:
                    if child.get_color(point)==copy_of_board.current_player:
                        num_same -= 1
                unsearched[move] = num_same
        #print("dic:",unsearched)
        #print("searched:",searchedMoves)
        # sorting unsearched moves by the heuristic value
        sorted_x = sorted(unsearched.items(), key=lambda kv: kv[1])
        for item in sorted_x:
            unsearchedMoves.append(item[0])
        orderedMoves = searchedMoves + unsearchedMoves
        self.count += len(allmoves) - len(orderedMoves)
        state = self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))
        # below is normal alpha-beta search
        # color is black; max player
        if copy_of_board.current_player == BLACK:
            value = -1000000
            # make a copy of current state
            for move in orderedMoves:
                if move in searchedMoves:
                    childValue = searchedValue[move]
                else:
                    child = copy_of_board.copy()
                    child.play_move(move, copy_of_board.current_player)
                    childValue,_ = self.advanced_search(child,depth-1,_alpha,_beta)
                #childValue,_ = self.advanced_search(copy_of_board,depth-1,_alpha,_beta)
                value = max(value,childValue)
                _alpha = max(_alpha,value)
                bestMove = move
                # beta cut-off
                if _alpha >= _beta:
                    break
            self.H_table[state] = value
            return value,bestMove
        # color is white; min player
        else:
            value = 1000000
            for move in orderedMoves:
                if move in searchedMoves:
                    childValue = searchedValue[move]
                else:
                    child = copy_of_board.copy()
                    child.play_move(move, copy_of_board.current_player)
                    #childValue,_ = self.advanced_search(copy_of_board,depth-1,_alpha,_beta)
                    childValue,_ = self.advanced_search(child,depth-1,_alpha,_beta)
                value = min(value,childValue)
                _beta = min(_beta,value)
                bestMove = move
                # alpha cut-off
                if _alpha >= _beta:
                    break
            self.H_table[state] = value
            return value,bestMove

    def get_all_isomorphic(self, board_2d,size):
        """
        input: matrix of a board
        output: a set of tuples — the board plus its reflections and the
        90/180/270-degree rotations (and reflections of the 90 rotation)
        """
        isomorphics = set()
        # original
        #print("mat to tuple:")
        #print(self.matrix_to_tuple(board_2d,size))
        isomorphics.add(self.matrix_to_tuple(board_2d,size))
        #return isomorphics
        tmp_board = []
        # reflectional sym, 2 cases
        # swap rows
        cp_board_2dx = board_2d.copy()
        for i in range(size//2):
            tmp = cp_board_2dx[i,:].copy()
            cp_board_2dx[i,:] = cp_board_2dx[size-1-i,:]
            cp_board_2dx[size-1-i,:]=tmp
        isomorphics.add(self.matrix_to_tuple(cp_board_2dx,size))
        # swap columns
        cp_board_2dy = board_2d.copy()
        for j in range(size//2):
            for i in range(size):
                tmp = cp_board_2dy[i,j]
                cp_board_2dy[i,j] = cp_board_2dy[i,size-1-j]
                cp_board_2dy[i,size-1-j] = tmp
        isomorphics.add(self.matrix_to_tuple(cp_board_2dy,size))
        # rotational sym, 3 cases
        board_90 = np.rot90(board_2d)
        #board_90 = self.rotateMatrix(board_2d,size)
        isomorphics.add(self.matrix_to_tuple(board_90,size))
        # reflectional sym of 90 degree, 2 cases
        # swap rows
        cp_board_90x = board_90.copy()
        for i in range(size//2):
            tmp = cp_board_90x[i,:].copy()
            cp_board_90x[i,:] = cp_board_90x[size-1-i,:]
            cp_board_90x[size-1-i,:] = tmp
        isomorphics.add(self.matrix_to_tuple(cp_board_90x,size))
        # swap columns
        cp_board_90y = board_90.copy()
        for j in range(size//2):
            for i in range(size):
                tmp = cp_board_90y[i,j]
                cp_board_90y[i,j] = cp_board_90y[i,size-1-j]
                cp_board_90y[i,size-1-j] = tmp
        isomorphics.add(self.matrix_to_tuple(cp_board_90y,size))
        #print("90",board_90)
        board_180 = np.rot90(board_90)
        #print("180",board_180)
        isomorphics.add(self.matrix_to_tuple(board_180,size))
        board_270 = np.rot90(board_180)
        #print("270",board_270)
        isomorphics.add(self.matrix_to_tuple(board_270,size))
        #board_180 = self.rotateMatrix(board_90,size)
        #isomorphics.add(self.matrix_to_tuple(board_180,size))
        #board_270 = self.rotateMatrix(board_180,size)
        #isomorphics.add(self.matrix_to_tuple(board_270,size))
        return isomorphics

    def matrix_to_tuple(self,matrix,dim):
        """Flatten a dim x dim board matrix into a hashable 1-D tuple."""
        board1d = np.zeros((dim* dim), dtype = np.int32)
        for i in range(dim):
            board1d[i*dim:i*dim+dim] = matrix[i,:]
        return tuple(board1d)

    def get_oneD_board(self,goboard):
        """
        Return: numpy array
        a 1-d numpy array with the stones as the goboard.
        Does not pad with BORDER
        Rows 1..size of goboard are copied into rows 0..size - 1 of board2d
        """
        size = goboard.size
        board1d = np.zeros((size* size), dtype = np.int32)
        for row in range(size):
            start = goboard.row_start(row + 1)
            board1d[row*size:row*size+size] = goboard.board[start : start + size]
        return board1d

    def tuple_to_str(self,tup):
        """Encode a board tuple as a digit string (H_table key)."""
        res = ''
        for i in tup:
            res += str(int(i))
        return res

    # genmove overridden
    def genmove_cmd(self, args):
        """
        Generate a move for the color args[0] in {'b', 'w'}.
        Solves the position first; plays the solved best move when the side
        to play wins, otherwise plays a random move.
        """
        board_color = args[0].lower()
        color = color_to_int(board_color)
        self.to_play = color
        winnerStr,optMove = self.solve_helper()
        winner = EMPTY
        if winnerStr=='b':
            winner = BLACK
        elif winnerStr =='w':
            winner = WHITE
        # if current player is winner, we will take bestmove; otherwise we
        # should take a random move
        # NOTE(review): board_color is 'b'/'w' (str) but winner is an int
        # constant, so this comparison is always False and the solved best
        # move is never used; it should compare `color == winner`.
        if board_color == winner:
            move = optMove
        else:
            move = GoBoardUtil.generate_random_move(self.board, color,False)
        move_coord = point_to_coord(move, self.board.size)
        move_as_string = format_point(move_coord)
        if self.board.is_legal(move, color):
            self.board.play_move(move, color)
            self.respond(move_as_string)
        else:
            self.respond("resign")

    def gogui_rules_game_id_cmd(self, args):
        self.respond("NoGo")

    def gogui_rules_board_size_cmd(self, args):
        self.respond(str(self.board.size))

    # NOTE(review): duplicate definition — this re-definition silently
    # overrides the identical legal_moves_cmd defined earlier in the class.
    def legal_moves_cmd(self, args):
        """
        List legal moves for color args[0] in {'b','w'}
        """
        board_color = args[0].lower()
        color = color_to_int(board_color)
        moves = GoBoardUtil.generate_legal_moves(self.board, color)
        gtp_moves = []
        for move in moves:
            coords = point_to_coord(move, self.board.size)
            gtp_moves.append(format_point(coords))
        sorted_moves = ' '.join(sorted(gtp_moves))
        self.respond(sorted_moves)

    def gogui_rules_legal_moves_cmd(self, args):
        """List legal moves for the side to move (gogui rules interface)."""
        empties = self.board.get_empty_points()
        color = self.board.current_player
        legal_moves = []
        for move in empties:
            if self.board.is_legal(move, color):
                legal_moves.append(move)
        gtp_moves = []
        for move in legal_moves:
            coords = point_to_coord(move, self.board.size)
            gtp_moves.append(format_point(coords))
        sorted_moves = ' '.join(sorted(gtp_moves))
        self.respond(sorted_moves)

    def gogui_rules_side_to_move_cmd(self, args):
        color = "black" if self.board.current_player == BLACK else "white"
        self.respond(color)

    def gogui_rules_board_cmd(self, args):
        """Respond with an ASCII rendering of the board (X/O/.)."""
        size = self.board.size
        # NOTE(review): `str` shadows the builtin within this method.
        str = ''
        for row in range(size-1, -1, -1):
            start = self.board.row_start(row + 1)
            for i in range(size):
                point = self.board.board[start + i]
                if point == BLACK:
                    str += 'X'
                elif point == WHITE:
                    str += 'O'
                elif point == EMPTY:
                    str += '.'
                else:
                    assert False
            str += '\n'
        self.respond(str)

    def gogui_rules_final_result_cmd(self, args):
        """Report the winner once the side to move has no legal moves."""
        empties = self.board.get_empty_points()
        color = self.board.current_player
        legal_moves = []
        for move in empties:
            if self.board.is_legal(move, color):
                legal_moves.append(move)
        if not legal_moves:
            result = "black" if self.board.current_player == WHITE else "white"
        else:
            result = "unknown"
        self.respond(result)

    def gogui_analyze_cmd(self, args):
        self.respond("pstring/Legal Moves For ToPlay/gogui-rules_legal_moves\n"
                     "pstring/Side to Play/gogui-rules_side_to_move\n"
                     "pstring/Final Result/gogui-rules_final_result\n"
                     "pstring/Board Size/gogui-rules_board_size\n"
                     "pstring/Rules GameID/gogui-rules_game_id\n"
                     "pstring/Show Board/gogui-rules_board\n"
                     )
def point_to_coord(point, boardsize):
    """Convert a board-array index into a (row, col) pair.

    Special case: PASS is returned unchanged.
    """
    if point == PASS:
        return PASS
    # Board rows are stored with a one-point border, hence boardsize + 1.
    return divmod(point, boardsize + 1)
def format_point(move):
    """Return move coordinates as a string such as 'a1', or 'pass'."""
    if move == PASS:
        return "pass"
    # GTP column letters skip 'I'.
    column_letters = "ABCDEFGHJKLMNOPQRSTUVWXYZ"
    row, col = move
    if not (0 <= row < MAXSIZE and 0 <= col < MAXSIZE):
        raise ValueError
    return "%s%d" % (column_letters[col - 1], row)
def move_to_coord(point_str, board_size):
    """Convert a GTP point string (e.g. 'a1') to (row, col), 1-based.

    'pass' maps to PASS. Raises ValueError for a malformed or
    out-of-range coordinate, or an out-of-range board_size.
    """
    if not 2 <= board_size <= MAXSIZE:
        raise ValueError("board_size out of range")
    s = point_str.lower()
    if s == "pass":
        return PASS
    try:
        col_c = s[0]
        if col_c < "a" or col_c > "z" or col_c == "i":
            raise ValueError
        # Column letters skip 'i' by GTP convention.
        col = ord(col_c) - ord("a")
        if col_c < "i":
            col += 1
        row = int(s[1:])
        if row < 1:
            raise ValueError
    except (IndexError, ValueError):
        # e.g. "a0" or an empty string
        raise ValueError("wrong coordinate")
    if col > board_size or row > board_size:
        # e.g. "a20" on a smaller board
        raise ValueError("wrong coordinate")
    return row, col
def coord_to_move(move, board_size):
    """Convert a board-array point index into a GTP move string like 'a1'.

    Inverse of move_to_coord for on-board points.
    Raises ValueError if board_size is out of range.
    """
    if not 2 <= board_size <= MAXSIZE:
        raise ValueError("board_size out of range")
    # BUG FIX: GTP column letters skip 'i'. The original used
    # chr(x - 1 + ord("a")), which produced 'i' for column 9 and shifted all
    # later columns, inconsistent with move_to_coord/format_point.
    column_letters = "abcdefghjklmnopqrstuvwxyz"
    x = move % (board_size + 1)
    y = move // (board_size + 1)
    return column_letters[x - 1] + str(y)
def color_to_int(c):
    """Convert a color character ('b'/'w'/'e'/'BORDER') to its integer code."""
    # Renamed the lookup table: the original local shadowed the function name.
    mapping = {"b": BLACK, "w": WHITE, "e": EMPTY, "BORDER": BORDER}
    return mapping[c]
def handler(signum, frame):
    """SIGALRM handler: abort the running search by raising an exception.

    Raises TimeoutError (an Exception subclass, so existing
    `except Exception` callers still catch it) instead of a bare Exception;
    the message is unchanged.
    """
    print('Signal handler called with signal', signum)
    raise TimeoutError("Timeout!")
| 28,970 | 8,959 |
import weakref
from .Cell import Cell
class SubCell(Cell):
    """A Cell-like view onto a sub-path (attribute/item chain) of another Cell.

    All reads and writes are delegated to the wrapped cell, which is held via
    a weak reference so the SubCell does not keep it alive.
    """
    def __init__(self, parent, cell, subpath, readonly):
        assert isinstance(cell, Cell)
        # SubCells always wrap the root Cell directly; they are never nested.
        assert not isinstance(cell, SubCell)
        fullpath = cell._path + subpath
        super().__init__(parent=parent, path=fullpath)
        self._cell = weakref.ref(cell)  # weakref: do not keep the cell alive
        self._readonly = readonly
        self._subpath = subpath

    def _get_hcell(self):
        # Delegate to the wrapped cell's highlevel-cell dict.
        return self._cell()._get_hcell()

    def _get_cell_subpath(self, cell, subpath):
        return cell

    def __setattr__(self, attr, value):
        # Private attributes are set normally; public attributes are
        # assignments into the wrapped cell at subpath + (attr,).
        if attr.startswith("_"):
            return object.__setattr__(self, attr, value)
        from .assign import assign_to_subcell
        parent = self._parent()
        path = self._subpath + (attr,)
        assign_to_subcell(self._cell(), path, value)

    def __getattr__(self, attr):
        # Private/known attributes resolve normally; any other attribute
        # produces a deeper SubCell view.
        if attr.startswith("_"):
            return super().__getattribute__(attr)
        if attr in type(self).__dict__ or attr in self.__dict__:
            return super().__getattribute__(attr)
        parent = self._parent()
        readonly = self._readonly
        return SubCell(self._parent(), self._cell(), self._subpath + (attr,), readonly=readonly)

    @property
    def authoritative(self):
        #TODO: determine if the subcell didn't get any inbound connections
        # If it did, you can't get another inbound connection, nor a link
        return True #stub

    @property
    def links(self):
        #TODO: return the other partner of all Link objects with self in it
        return [] #stub

    @property
    def value(self):
        """Resolve the wrapped cell's value and walk down the subpath."""
        cell = self._cell()
        cellvalue = cell.value
        if cellvalue.unsilk is None:
            raise ValueError
        for attr in self._subpath:
            # Integer path elements are item accesses; strings are attributes.
            if isinstance(attr, int):
                cellvalue = cellvalue[attr]
            else:
                cellvalue = getattr(cellvalue, attr)
        return cellvalue

    def set(self, value):
        """Assign `value` at this subpath (disallowed when readonly)."""
        assert not self._readonly
        cell = self._cell()
        attr = self._subpath[-1]
        if len(self._subpath) == 1:
            return setattr(cell, attr, value)
        else:
            # Delegate through a parent view so __setattr__ builds the path.
            parent_subcell = SubCell(self._parent(), cell, self._subpath[:-1], False)
            return setattr(parent_subcell, attr, value)

    @property
    def _virtual_path(self):
        cell = self._cell()
        p = cell._virtual_path
        if p is None:
            return None
        return p + self._subpath

    def _set_observers(self):
        # SubCells do not maintain their own observers.
        pass

    def __str__(self):
        # NOTE(review): this fails if _path contains integer elements
        # (str.join requires strings) — confirm paths here are all-str.
        return "Seamless SubCell: %s" % ".".join(self._path)
| 2,634 | 761 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The aim of this package is to :
# - guarantee protected code execution is safe and *will* happen (eventually)
# - report usage via colosstat
# - recover when code fails ( possibly recording previous state, for example )
# one possibility is to implement another level of abstraction (like a language - the cstk aim)
# another is to just isolate portions of python code with postconditions to guarantee success...
| 460 | 122 |
from frisky.events import MessageEvent
from frisky.plugin import FriskyPlugin, PluginRepositoryMixin
from frisky.responses import FriskyResponse
class HelpPlugin(FriskyPlugin, PluginRepositoryMixin):
    """Built-in plugin: lists installed plugins or shows one plugin's help."""

    commands = ['help']

    def command_help(self, message: MessageEvent) -> FriskyResponse:
        """Handle `?help` (list plugins) and `?help <name>` (per-plugin help)."""
        if len(message.args) != 1:
            # No (or too many) arguments: enumerate everything installed.
            names = self.get_plugin_names()
            return f'Available plugins: {", ".join(names)}'
        requested = message.args[0]
        if requested == 'help':
            return 'Usage: `?help` or `?help <plugin_name>`'
        plugin = self.get_plugin_by_name(requested)
        if plugin is None:
            return f'No such plugin: `{requested}`, try `?help` to list installed plugins'
        help_text = plugin.help_text()
        if help_text is None:
            return f'Plugin `{requested}` does not provide help text.'
        return help_text
| 965 | 274 |
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QGraphicsItem, QGraphicsRectItem, QGraphicsItemGroup
from PyQt5.QtCore import pyqtSlot
class MyItemGroup(QGraphicsItemGroup):
    """Plain QGraphicsItemGroup subclass (no itemChange override)."""

    Type = QGraphicsItem.UserType + 3

    def __init__(self, parent=None):
        super(MyItemGroup, self).__init__(parent)

    def __repr__(self):
        return type(self).__name__
class MyRectItemNOIC(QGraphicsRectItem):
    """Rect item that does not override itemChange."""

    Type = QGraphicsItem.UserType + 2

    def __init__(self, parent=None):
        super(MyRectItemNOIC, self).__init__(parent)

    def __repr__(self):
        return type(self).__name__
# end class
class MyRectItem(QGraphicsRectItem):
    """Rect item overriding itemChange -- the method hit by the PyQt5 5.6
    regression exercised below."""

    Type = QGraphicsItem.UserType + 1

    def __init__(self, parent=None):
        super(MyRectItem, self).__init__(parent)

    def itemChange(self, change, value):
        # Under the broken PyQt5 5.6 behavior, `self` could arrive mis-typed.
        assert isinstance(self, MyRectItem)
        return super(MyRectItem, self).itemChange(change, value)
# end def
def testItemChangeRegression():
    """Regression check: QGraphicsItemGroup.addToGroup must reparent items
    correctly (PyQt5 v5.6 broke QGraphicsItem.itemChange; fixed in v5.7).
    """
    plain_parent = MyRectItemNOIC()
    tracked_child = MyRectItem(plain_parent)
    group = MyItemGroup()
    assert tracked_child.parentItem() is plain_parent
    assert plain_parent.childItems()[0] is tracked_child
    group.addToGroup(tracked_child)
    assert group.childItems()[0] is tracked_child
    assert tracked_child.parentItem() is group
    standalone = MyRectItem()
    plain_child = MyRectItemNOIC(standalone)
    assert plain_child.parentItem() is standalone
    group.addToGroup(plain_child)
    assert plain_child.parentItem() is group
# end def
| 1,663 | 564 |
# Copyright 2022 MosaicML. All Rights Reserved.
from dataclasses import dataclass
import yahp as hp
from composer.models.model_hparams import ModelHparams
@dataclass
class SSDHparams(ModelHparams):
    """yahp hyperparameter dataclass for building an SSD detection model."""
    # Side length of the (square) input image, in pixels.
    input_size: int = hp.optional(
        doc="input size",
        default=300,
    )
    # Number of object classes the detector predicts.
    num_classes: int = hp.optional(
        doc="num_classes",
        default=80,
    )
    # Overlap threshold -- presumably IoU for anchor/target matching; confirm
    # against composer.models.ssd.ssd.SSD.
    overlap_threshold: float = hp.optional(
        doc="threshold",
        default=0.5,
    )
    # Maximum number of detections kept by non-maximum suppression.
    nms_max_detections: int = hp.optional(
        doc="nms max dets",
        default=200,
    )
    # Dataset root path (default points at a local COCO copy).
    data: str = hp.optional(
        doc="data",
        default="/localdisk/coco",
    )
    def initialize_object(self):
        """Construct the SSD model configured by these hyperparameters."""
        # Local import -- presumably to defer heavy model dependencies until
        # the model is actually built; confirm.
        from composer.models.ssd.ssd import SSD
        return SSD(
            input_size=self.input_size,
            overlap_threshold=self.overlap_threshold,
            nms_max_detections=self.nms_max_detections,
            num_classes=self.num_classes,
            data=self.data,
        )
| 998 | 337 |
from os.path import join
from utils import getFileList
class ImageFolder:
    """Index of paired image (.jpg) and annotation (.json) files under a root.

    Expected layout: ``<path>/images/...`` and ``<path>/<annot>/...`` with
    matching relative filenames; a parallel ``<annot>_tmp`` directory holds
    working copies, used while ``isTmp`` is True.
    """

    def __init__(self, path, sub=None, annot='annot') -> None:
        """Collect the paired image/annotation filename lists.

        :param path: dataset root directory
        :param sub: optional sub-directory to restrict the listing to
        :param annot: name of the annotation directory (default 'annot')
        """
        self.root = path
        self.image = 'images'
        self.annot = annot
        self.image_root = join(path, self.image)
        self.annot_root = join(path, self.annot)
        self.annot_root_tmp = join(path, self.annot + '_tmp')
        # Initialize on every path (not just one branch) so __getitem__ can
        # never observe the flag unset.
        self.isTmp = True
        if sub is None:
            self.imgnames = getFileList(self.image_root, ext='.jpg')
            self.annnames = getFileList(self.annot_root, ext='.json')
        else:
            self.imgnames = getFileList(join(self.image_root, sub), ext='.jpg')
            self.annnames = getFileList(join(self.annot_root, sub), ext='.json')
            self.imgnames = [join(sub, name) for name in self.imgnames]
            self.annnames = [join(sub, name) for name in self.annnames]
        # NOTE(review): index pairing relies on getFileList returning both
        # listings in the same order -- confirm it sorts deterministically.
        assert len(self.imgnames) == len(self.annnames), (
            'image/annotation count mismatch: '
            f'{len(self.imgnames)} vs {len(self.annnames)}')

    def __getitem__(self, index):
        """Return the (image_path, annotation_path) pair at `index`."""
        imgname = join(self.image_root, self.imgnames[index])
        # Working copies in '<annot>_tmp' take precedence while isTmp is set.
        if self.isTmp:
            annname = join(self.annot_root_tmp, self.annnames[index])
        else:
            annname = join(self.annot_root, self.annnames[index])
        return imgname, annname

    def __len__(self):
        return len(self.imgnames)
'''
Author: your name
Date: 2021-06-18 10:13:00
LastEditTime: 2021-07-08 14:13:07
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /genetic-drawing/main.py
'''
import cv2
import os
import time
from IPython.display import clear_output
from genetic_drawing import *
def _save_image_buffer(images):
    """Write every buffered frame to out/ as zero-padded PNGs.

    Creates the output directory first -- the original saved before ever
    checking for it, so the first batch of imwrite calls could silently fail.
    """
    if not os.path.exists('out'):
        os.mkdir('out')
    for frame_idx in range(len(images)):
        cv2.imwrite(os.path.join('out', f'{frame_idx:06d}.png'), images[frame_idx])


gen = GeneticDrawing('03.jpg', seed=time.time())
out = gen.generate(400, 50)

# Brush-size ranges ([min, max] fractions) shrunk progressively below.
brushesRange = np.array([[0.1, 0.3], [0.3, 0.7]])

_save_image_buffer(gen.imgBuffer)

try:
    for stage in range(5):
        # Halve the brush sizes at each refinement stage.
        gen.brushesRange = (brushesRange / (2 ** (stage + 1))).tolist()
        maskname = "masks-03/mask-{}.jpg".format(stage)
        gen.sampling_mask = cv2.cvtColor(cv2.imread(maskname), cv2.COLOR_BGR2GRAY)
        # keep drawing on top of our previous result
        out = gen.generate(100, 30)
        _save_image_buffer(gen.imgBuffer)
except Exception:
    # Best-effort: persist whatever has been generated so far.
    # (Narrowed from a bare `except:` so Ctrl-C still interrupts.)
    _save_image_buffer(gen.imgBuffer)

# Final flush of the image buffer.
_save_image_buffer(gen.imgBuffer)
import logging
from typing import List, Union, Optional
import torch
import torch.nn
import torch.nn.functional as F
from tqdm import tqdm
import flair.nn
from flair.data import Dictionary, Sentence, Label
from flair.datasets import SentenceDataset, DataLoader
from flair.embeddings import TokenEmbeddings
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class SimpleSequenceTagger(flair.nn.Classifier):
    """
    This class is a simple version of the SequenceTagger class.
    The purpose of this class is to demonstrate the basic hierarchy of a
    sequence tagger (this could be helpful for new developers).
    It only uses the given embeddings and maps them with a linear layer to
    the tag_dictionary dimension.
    Thus, this class misses following functionalities from the SequenceTagger:
    - CRF,
    - RNN,
    - Reprojection.
    As a result, only poor results can be expected.
    """
    def __init__(
        self,
        embeddings: TokenEmbeddings,
        tag_dictionary: Dictionary,
        tag_type: str,
    ):
        """
        Initializes a SimpleSequenceTagger
        :param embeddings: word embeddings used in tagger
        :param tag_dictionary: dictionary of tags you want to predict
        :param tag_type: string identifier for tag type
        """
        super(SimpleSequenceTagger, self).__init__()
        # embeddings
        self.embeddings = embeddings
        # dictionaries
        self.tag_dictionary: Dictionary = tag_dictionary
        self.tag_type: str = tag_type
        self.tagset_size: int = len(tag_dictionary)
        # linear layer: maps each token embedding to a score per tag
        self.linear = torch.nn.Linear(self.embeddings.embedding_length, len(tag_dictionary))
        # all parameters will be pushed internally to the specified device
        self.to(flair.device)
    def forward_loss(
        self, data_points: Union[List[Sentence], Sentence], sort=True
    ) -> torch.Tensor:
        """Embed and score the sentences, then return the training loss.

        Note: `sort` is accepted for interface compatibility but unused here.
        """
        features = self.forward(data_points)
        return self._calculate_loss(features, data_points)
    def _get_state_dict(self):
        """Collect everything needed to re-create this tagger on load."""
        model_state = {
            "state_dict": self.state_dict(),
            "embeddings": self.embeddings,
            "tag_dictionary": self.tag_dictionary,
            "tag_type": self.tag_type,
        }
        return model_state
    @staticmethod
    def _init_model_with_state_dict(state):
        """Rebuild a tagger from the dict produced by _get_state_dict()."""
        model = SimpleSequenceTagger(
            embeddings=state["embeddings"],
            tag_dictionary=state["tag_dictionary"],
            tag_type=state["tag_type"],
        )
        model.load_state_dict(state["state_dict"])
        return model
    def predict(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size=32,
        all_tag_prob: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
    ):
        """
        Predict sequence tags for Named Entity Recognition task
        :param sentences: a Sentence or a List of Sentence
        :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
        up to a point when it has no more effect.
        :param all_tag_prob: True to compute the score for each tag on each token,
        otherwise only the score of the best tag is returned
        :param verbose: set to True to display a progress bar
        :param return_loss: set to True to return loss
        :param label_name: set this to change the name of the label type that is predicted
        :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
        you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
        'gpu' to store embeddings in GPU memory.
        """
        if label_name is None:
            label_name = self.tag_type
        with torch.no_grad():
            if not sentences:
                return sentences
            if isinstance(sentences, Sentence):
                sentences = [sentences]
            # reverse sort all sequences by their length
            rev_order_len_index = sorted(
                range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True
            )
            reordered_sentences: List[Union[Sentence, str]] = [
                sentences[index] for index in rev_order_len_index
            ]
            dataloader = DataLoader(
                dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size
            )
            # progress bar for verbosity
            if verbose:
                dataloader = tqdm(dataloader)
            overall_loss = 0
            batch_no = 0
            for batch in dataloader:
                batch_no += 1
                if verbose:
                    dataloader.set_description(f"Inferencing on batch {batch_no}")
                batch = self._filter_empty_sentences(batch)
                # stop if all sentences are empty
                if not batch:
                    continue
                feature = self.forward(batch)
                if return_loss:
                    overall_loss += self._calculate_loss(feature, batch)
                tags, all_tags = self._obtain_labels(
                    feature=feature,
                    batch_sentences=batch,
                    get_all_tags=all_tag_prob,
                )
                # Labels are attached to the Sentence objects in place, so the
                # earlier reordering does not affect what the caller sees.
                for (sentence, sent_tags) in zip(batch, tags):
                    for (token, tag) in zip(sentence.tokens, sent_tags):
                        token.add_tag_label(label_name, tag)
                # all_tags will be empty if all_tag_prob is set to False, so the for loop will be avoided
                for (sentence, sent_all_tags) in zip(batch, all_tags):
                    for (token, token_all_tags) in zip(sentence.tokens, sent_all_tags):
                        token.add_tags_proba_dist(label_name, token_all_tags)
                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)
            if return_loss:
                return overall_loss / batch_no
    def forward(self, sentences: List[Sentence]):
        """Embed the sentences and return per-token tag scores of shape
        (batch, longest_sentence, tagset_size)."""
        self.embeddings.embed(sentences)
        names = self.embeddings.get_names()
        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        longest_token_sequence_in_batch: int = max(lengths)
        # Reusable zero block, sliced below to pad shorter sentences.
        pre_allocated_zero_tensor = torch.zeros(
            self.embeddings.embedding_length * longest_token_sequence_in_batch,
            dtype=torch.float,
            device=flair.device,
        )
        all_embs = list()
        for sentence in sentences:
            all_embs += [
                emb for token in sentence for emb in token.get_each_embedding(names)
            ]
            nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)
            if nb_padding_tokens > 0:
                t = pre_allocated_zero_tensor[
                    : self.embeddings.embedding_length * nb_padding_tokens
                ]
                all_embs.append(t)
        sentence_tensor = torch.cat(all_embs).view(
            [
                len(sentences),
                longest_token_sequence_in_batch,
                self.embeddings.embedding_length,
            ]
        )
        features = self.linear(sentence_tensor)
        return features
    def _calculate_loss(
        self, features: torch.Tensor, sentences: List[Sentence]
    ) -> torch.Tensor:
        """Average per-sentence cross-entropy between token scores and gold tags."""
        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        tag_list: List = []
        for s_id, sentence in enumerate(sentences):
            # get the tags in this sentence
            tag_idx: List[int] = [
                self.tag_dictionary.get_idx_for_item(token.get_tag(self.tag_type).value)
                for token in sentence
            ]
            # add tags as tensor
            tag = torch.tensor(tag_idx, device=flair.device)
            tag_list.append(tag)
        score = 0
        for sentence_feats, sentence_tags, sentence_length in zip(
            features, tag_list, lengths
        ):
            # Only score real tokens, not the zero padding added in forward().
            sentence_feats = sentence_feats[:sentence_length]
            score += torch.nn.functional.cross_entropy(
                sentence_feats, sentence_tags
            )
        score /= len(features)
        return score
    def _obtain_labels(
        self,
        feature: torch.Tensor,
        batch_sentences: List[Sentence],
        get_all_tags: bool,
    ) -> (List[List[Label]], List[List[List[Label]]]):
        """
        Returns a tuple of two lists:
         - The first list corresponds to the most likely `Label` per token in each sentence.
         - The second list contains a probability distribution over all `Labels` for each token
           in a sentence for all sentences.
        """
        lengths: List[int] = [len(sentence.tokens) for sentence in batch_sentences]
        tags = []
        all_tags = []
        feature = feature.cpu()
        # Zero the padded positions so they cannot influence the softmax.
        for index, length in enumerate(lengths):
            feature[index, length:] = 0
        softmax_batch = F.softmax(feature, dim=2).cpu()
        scores_batch, prediction_batch = torch.max(softmax_batch, dim=2)
        feature = zip(softmax_batch, scores_batch, prediction_batch)
        for feats, length in zip(feature, lengths):
            softmax, score, prediction = feats
            confidences = score[:length].tolist()
            tag_seq = prediction[:length].tolist()
            scores = softmax[:length].tolist()
            tags.append(
                [
                    Label(self.tag_dictionary.get_item_for_index(tag), conf)
                    for conf, tag in zip(confidences, tag_seq)
                ]
            )
            if get_all_tags:
                all_tags.append(
                    [
                        [
                            Label(
                                self.tag_dictionary.get_item_for_index(score_id), score
                            )
                            for score_id, score in enumerate(score_dist)
                        ]
                        for score_dist in scores
                    ]
                )
        return tags, all_tags
    @staticmethod
    def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
        """Drop token-less sentences (they cannot be embedded or scored)."""
        filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
        if len(sentences) != len(filtered_sentences):
            log.warning(
                f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens."
            )
        return filtered_sentences
    @property
    def label_type(self):
        """The tag type this tagger predicts (flair.nn.Classifier contract)."""
        return self.tag_type
import requests
from bs4 import BeautifulSoup
def spider_xiaohuar_content(url, headers):
    """Scrape the xiaohuar.com front page and dump it as a Markdown file.

    Fetches `url`, extracts every 'all_lanmu' section (its title plus the
    list of entries with image, school and like-count), and writes the
    result to '爬虫校花.md' in the current directory.

    :param url: site root; also used to absolutize relative image links
    :param headers: HTTP headers (User-Agent) for the request
    """
    response = requests.get(url=url, headers=headers)
    print(response.status_code)
    if response.status_code != 200:
        # Non-200: nothing to parse, do not touch the output file.
        return
    response.encoding = 'utf-8'
    html = response.content
    # Arguments: page content, parser name.
    soup = BeautifulSoup(html, 'html5lib')
    div_list = soup.find_all('div', attrs={'class': 'all_lanmu'})
    text = ''
    for div in div_list:
        title_div = div.find('div', attrs={'class': 'title1000'})
        title = title_div.find('a').string
        text += '<style>img[src*="headimg-style"]{width:100px;height:100px}</style>\n\n## 标题:'+title+'\n\n'
        ul = div.find('ul')
        li_list = ul.find_all('li')
        for li in li_list:
            img_src = li.find('img').attrs['lazysrc']
            img_title = li.find('span').string
            school = li.find('b', attrs={'class': 'b1'}).string
            fav = li.find('b', attrs={'class': 'b2'}).string
            # Absolutize relative image URLs.
            if url not in img_src:
                img_src = url+img_src
            text += '> ' + img_title+'\n\n'
            text += ''+'\n\n'
            text += '- 学校:'+school+'\n\n'
            text += '- 点赞人数:'+fav+'\n\n'
    # `with` guarantees the file is flushed and closed; the original called
    # `file.close` without parentheses, which never actually closed it.
    with open('爬虫校花.md', 'w', encoding='utf-8') as file:
        file.write(text)
# Entry point: crawl the site root with a desktop Chrome User-Agent.
url = 'http://xiaohuar.com/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
spider_xiaohuar_content(url, headers)
#尝试直接读取文件夹内所有csv,记得看看列表,是不是读对了
import glob
import os

import numpy as np
import pandas as pd
# Rank-average ensemble: read every CSV in the working directory, replace each
# class-probability column by its normalized rank, then average across files.
io = glob.glob(r"*.csv")
len_io = len(io)
print('总共输入表的数量为:', len_io)
if not io:
    # Guard the empty case -- previously this fell through to a confusing
    # NameError on `sub_1` below.
    raise SystemExit('no *.csv files found in the current directory')
prob_list = []
for table_idx in range(len_io):
    sub_1 = pd.read_csv(io[table_idx])
    denominator = len(sub_1)
    for my_classes in ['healthy', 'multiple_diseases', 'rust', 'scab']:
        sub_label_1 = sub_1.loc[:, my_classes].values
        sort_1 = np.argsort(sub_label_1)
        # Replace each probability by its normalized rank (rank / n_rows).
        # (Renamed the inner loop variable, which previously shadowed the
        # outer file index `i`.)
        for rank, temp_sort in enumerate(sort_1):
            sub_label_1[temp_sort] = rank / denominator
        sub_1.loc[:, my_classes] = sub_label_1
    prob_list.append(sub_1.loc[:, 'healthy':].values)
# Average the rank-transformed tables, reusing the last frame for output.
sub_1.loc[:, 'healthy':] = np.mean(prob_list, axis=0)
# Ensure the output directory exists (previously this crashed if 'out/' was
# missing).
os.makedirs('out', exist_ok=True)
sub_1.to_csv('out/submission.csv', index=False)
print(sub_1.head())
# Copyright 2016 NTT Data.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Masakari common internal object model"""
import datetime
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import fields as obj_fields
from masakari import objects
def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    return f"_obj_{name}"
class MasakariObjectRegistry(ovoo_base.VersionedObjectRegistry):
    """Registry keeping masakari.objects.<Name> at the highest version."""

    notification_classes = []

    def registration_hook(self, cls, index):
        """Expose *cls* as masakari.objects.<name>, preferring higher VERSIONs.

        Called for every registered object; maintains masakari.objects.$OBJECT
        as the highest-versioned implementation of a given object.
        """
        name = cls.obj_name()
        new_version = versionutils.convert_version_to_tuple(cls.VERSION)
        if not hasattr(objects, name):
            setattr(objects, name, cls)
            return
        current = getattr(objects, name)
        cur_version = versionutils.convert_version_to_tuple(current.VERSION)
        if new_version >= cur_version:
            setattr(objects, name, cls)

    @classmethod
    def register_notification(cls, notification_cls):
        """Track *notification_cls* without registering it as a normal ovo.

        Use only to register concrete notification or payload classes,
        do not register base classes intended for inheritance only.
        """
        cls.register_if(False)(notification_cls)
        cls.notification_classes.append(notification_cls)
        return notification_cls

    @classmethod
    def register_notification_objects(cls):
        """Register all tracked notifications as normal ovos.

        This is not intended for production use but only for testing and
        document generation purposes.
        """
        for tracked_cls in cls.notification_classes:
            cls.register(tracked_cls)
# Convenience re-exports so masakari code can use these decorators without
# importing oslo.versionedobjects directly.
remotable_classmethod = ovoo_base.remotable_classmethod
remotable = ovoo_base.remotable
class MasakariObject(ovoo_base.VersionedObject):
    """Base class and object factory.
    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """
    # Namespaces used when (de)serializing object primitives over RPC.
    OBJ_SERIAL_NAMESPACE = 'masakari_object'
    OBJ_PROJECT_NAMESPACE = 'masakari'
    def masakari_obj_get_changes(self):
        """Returns a dict of changed fields with tz unaware datetimes.
        Any timezone aware datetime field will be converted to UTC timezone
        and returned as timezone unaware datetime.
        This will allow us to pass these fields directly to a db update
        method as they can't have timezone information.
        """
        # Get dirtied/changed fields
        changes = self.obj_get_changes()
        # Look for datetime objects that contain timezone information
        for k, v in changes.items():
            if isinstance(v, datetime.datetime) and v.tzinfo:
                # Remove timezone information and adjust the time according to
                # the timezone information's offset.
                changes[k] = v.replace(tzinfo=None) - v.utcoffset()
        # Return modified dict
        return changes
    def obj_reset_changes(self, fields=None, recursive=False):
        """Reset the list of fields that have been changed.
        .. note::
          - This is NOT "revert to previous values"
          - Specifying fields on recursive resets will only be honored at the
            top level. Everything below the top will reset all.
        :param fields: List of fields to reset, or "all" if None.
        :param recursive: Call obj_reset_changes(recursive=True) on
                          any sub-objects within the list of fields
                          being reset.
        """
        if recursive:
            for field in self.obj_get_changes():
                # Ignore fields not in requested set (if applicable)
                if fields and field not in fields:
                    continue
                # Skip any fields that are unset
                if not self.obj_attr_is_set(field):
                    continue
                value = getattr(self, field)
                # Don't reset nulled fields
                if value is None:
                    continue
                # Reset straight Object and ListOfObjects fields
                if isinstance(self.fields[field], obj_fields.ObjectField):
                    value.obj_reset_changes(recursive=True)
                elif isinstance(self.fields[field],
                                obj_fields.ListOfObjectsField):
                    for thing in value:
                        thing.obj_reset_changes(recursive=True)
        # Clear this object's own dirty-field bookkeeping last, so the
        # recursive pass above can still read obj_get_changes().
        if fields:
            self._changed_fields -= set(fields)
        else:
            self._changed_fields.clear()
class MasakariObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
    """Dict-style access mixin whose iteration skips unset fields."""

    def __iter__(self):
        """Yield the names of set (or extra) fields only."""
        for field_name in self.obj_fields:
            is_visible = (self.obj_attr_is_set(field_name)
                          or field_name in self.obj_extra_fields)
            if is_visible:
                yield field_name

    def keys(self):
        """Return the visible field names as a list."""
        return [field_name for field_name in self]
class MasakariTimestampObject(object):
    """Mixin class for db backed objects with timestamp fields.
    Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
    these fields and the corresponding objects will benefit from this mixin.
    """
    # created_at/updated_at mirror the columns added by oslo_db TimestampMixin.
    fields = {
        'created_at': obj_fields.DateTimeField(nullable=True),
        'updated_at': obj_fields.DateTimeField(nullable=True),
    }
class MasakariPersistentObject(object):
    """Mixin class for Persistent objects.
    This adds the fields that we use in common for most persistent objects.
    """
    # Timestamps plus soft-delete bookkeeping shared by persistent objects.
    fields = {
        'created_at': obj_fields.DateTimeField(nullable=True),
        'updated_at': obj_fields.DateTimeField(nullable=True),
        'deleted_at': obj_fields.DateTimeField(nullable=True),
        'deleted': obj_fields.BooleanField(default=False),
    }
class ObjectListBase(ovoo_base.ObjectListBase):
    """List-of-objects base using the masakari serialization namespace."""

    @classmethod
    def _obj_primitive_key(cls, field):
        """Return the namespaced primitive key for *field*."""
        return 'masakari_object.' + field

    @classmethod
    def _obj_primitive_field(cls, primitive, field,
                             default=obj_fields.UnspecifiedDefault):
        """Look up *field* in *primitive*; KeyError unless a default is given."""
        key = cls._obj_primitive_key(field)
        if default == obj_fields.UnspecifiedDefault:
            return primitive[key]
        return primitive.get(key, default)
class MasakariObjectSerializer(ovoo_base.VersionedObjectSerializer):
    """A Masakari Object Serializer.

    This implements the Oslo Serializer interface and provides
    the ability to serialize and deserialize MasakariObject entities.
    Any service that needs to accept or return MasakariObjects
    as arguments or result values should pass this to its RPCClient
    and RPCServer objects.
    """
    # Only MasakariObject-rooted classes are (de)serialized by this class.
    OBJ_BASE_CLASS = MasakariObject

    # NOTE: the previous no-op __init__ (which only called super().__init__())
    # was removed; the inherited constructor is used directly.
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
    """Fill *list_obj* with objects built from a list of primitives.

    Each element of *db_list* is converted via item_cls._from_db_object()
    and collected into list_obj.objects.

    :param context: Request context
    :param list_obj: An ObjectListBase object
    :param item_cls: The MasakariObject class of the objects within the list
    :param db_list: The list of primitives to convert to objects
    :param extra_args: Extra arguments to pass to _from_db_object()
    :returns: list_obj
    """
    list_obj.objects = [
        item_cls._from_db_object(context, item_cls(), db_item, **extra_args)
        for db_item in db_list
    ]
    list_obj._context = context
    list_obj.obj_reset_changes()
    return list_obj
def obj_to_primitive(obj):
    """Recursively turn an object into a python primitive.

    A MasakariObject becomes a dict, anything implementing ObjectListBase
    becomes a list, and everything else is returned unchanged.
    """
    if isinstance(obj, ObjectListBase):
        return [obj_to_primitive(item) for item in obj]
    if isinstance(obj, MasakariObject):
        return {
            key: obj_to_primitive(getattr(obj, key))
            for key in obj.obj_fields
            if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields
        }
    return obj
def obj_equal_prims(obj_1, obj_2, ignore=None):
    """Compare two objects' primitives for equivalence, ignoring some keys.

    The 'masakari_object.changes' bookkeeping key is always dropped before
    comparison; *ignore* may list additional keys to drop.

    :param obj_1: The first object in the comparison
    :param obj_2: The second object in the comparison
    :param ignore: A list of fields to ignore
    :returns: True if the primitives are equal ignoring changes
              and specified fields, otherwise False.
    """
    def _scrub(prim, drop_keys):
        # Recursively remove drop_keys from every dict in the primitive tree.
        if isinstance(prim, dict):
            for key in drop_keys:
                prim.pop(key, None)
            for child in prim.values():
                _scrub(child, drop_keys)
        if isinstance(prim, list):
            for child in prim:
                _scrub(child, drop_keys)
        return prim

    drop_keys = ['masakari_object.changes']
    if ignore is not None:
        drop_keys = drop_keys + ignore
    return (_scrub(obj_1.obj_to_primitive(), drop_keys) ==
            _scrub(obj_2.obj_to_primitive(), drop_keys))
| 10,468 | 2,843 |
# Link texts in Malay ("click to read", "click for information").
news = ['klik untuk membaca', 'klik untuk maklumat']
| 53 | 24 |
from sonosscripts import stop, play_pause, previous, next, change_bass, change_volume, mute_volume
# Dispatch table mapping command names to their sonosscripts handlers.
modules = {
    "stop": stop,
    "play_pause": play_pause,
    "previous": previous,
    "next": next,
    "change_bass": change_bass,
    "change_volume": change_volume,
    "mute_volume": mute_volume
}
| 306 | 110 |
"""
Class SaleBot
It is initialised by nlp model (bag-of-word, tf-idf, word2vec)
It returns response with a question as the input
"""
from gensim.corpora import Dictionary
#from gensim.models import FastText
from gensim.models import Word2Vec , WordEmbeddingSimilarityIndex
from gensim.similarities import SoftCosineSimilarity, SparseTermSimilarityMatrix
from gensim.models import TfidfModel
from multiprocessing import cpu_count
from nlp_helper import preprocessing
class AskeBayBot:
    """
    - Using tf-idf and word2vec to build vector matrix from the corpus
    - Using soft-cosine similarity to calculate the similarity between query and matrix
    """
    """
    References:
    - https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/soft_cosine_tutorial.ipynb
    """
    def __init__(self, questions, responses, model_type="word2vec"):
        # questions/responses are aligned by index: responses[i] answers
        # questions[i]. `questions` is expected tokenized (gensim Dictionary
        # is built from it directly) -- presumably a list of token lists;
        # confirm against callers.
        self.questions = questions
        self.responses = responses
        self.model_type = model_type
        self.docsim_index = []
        self.dictionary = []
        self.tfidf = []
        # Eagerly build dictionary, tf-idf model and similarity index.
        self.compute_sim_matrix()
    def compute_sim_matrix(self):
        '''
        if(self.model_type.lower() == "fasttext"):
            model = FastText(self.questions)
        else:
            model = Word2Vec(self.questions)
        '''
        # Build the tf-idf-weighted soft-cosine index over the questions.
        self.dictionary = Dictionary(self.questions)
        self.tfidf = TfidfModel(dictionary = self.dictionary)
        word2vec_model = Word2Vec(self.questions
                        , workers=cpu_count()
                        , min_count=5
                        , size=300
                        , seed=12345)
        sim_index = WordEmbeddingSimilarityIndex(word2vec_model.wv)
        sim_matrix = SparseTermSimilarityMatrix(sim_index
                        , self.dictionary
                        , self.tfidf
                        , nonzero_limit=100)
        bow_corpus = [self.dictionary.doc2bow(document) for document in self.questions]
        tfidf_corpus = [self.tfidf[bow] for bow in bow_corpus]
        # num_best=10: queries return at most the 10 closest questions.
        self.docsim_index = SoftCosineSimilarity(tfidf_corpus, sim_matrix, num_best=10)
    def get_similarities(self, question):
        '''
        @return indices of anwsers whose questions are similar to the input question
        '''
        vectorizer = self.dictionary.doc2bow(preprocessing(question))
        tfidf_vectorizer = self.tfidf[vectorizer]
        similarities = self.docsim_index[tfidf_vectorizer]
        return similarities
    def get_response(self, question):
        # Best single match for the question.
        similarities = self.get_similarities(question)
        return self.get_sim(similarities, 1)
    def get_all_responses(self, question):
        # Up to the 10 best matches (num_best in compute_sim_matrix).
        similarities = self.get_similarities(question)
        return self.get_sim(similarities, 10)
    def get_sim(self, similarities, n_top=1):
        """
        @return a tuple of similar question and best response in similarity matrix

        NOTE(review): when no match is found this returns a plain fallback
        string instead of a tuple -- callers must handle both shapes.
        """
        sim_questions = []
        sim_responses = []
        sim_scores = []
        if (len(similarities) > 0):
            for (idx, score) in similarities:
                if (idx < len(self.responses)):
                    sim_questions.append(self.questions[idx])
                    sim_responses.append(self.responses[idx])
                    sim_scores.append(score)
                # return self.questions[idx], self.responses[idx], score
        else:
            return "Just a moment, someone will contact you"
        if (n_top == 1):
            return sim_questions[0], sim_responses[0], sim_scores[0]
        else:
            return sim_questions, sim_responses, sim_scores
if __name__ == "__main__":
    # Smoke message when run directly; real use is via AskeBayBot(...) above.
    print("I'm a bot")
# -*- coding: utf-8 -*-
"""Demo123_Convolution_Visualization.ipynb
# **Spit some [tensor] flow**
We need to learn the intricacies of tensorflow to master deep learning
`Let's get this over with`
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
"""## Reference MachineLearningMastery.com"""
from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from glob import glob
import sys, os
import cv2
# NOTE: the "!wget" / "!ls" lines below are IPython shell magics -- this file
# only runs as a notebook/Colab cell, not as a plain Python script.
!wget https://www.theluxecafe.com/wp-content/uploads/2014/07/ferrari-spider-indian-theluxecafe.jpg
!ls
# Load the downloaded image; convert OpenCV's BGR ordering to RGB for display.
X = cv2.imread('ferrari-spider-indian-theluxecafe.jpg')
X = cv2.cvtColor(X, cv2.COLOR_BGR2RGB)
plt.imshow(X)
print(X.shape)
IMAGE_SIZE = X.shape
# Add a leading batch dimension: (H, W, C) -> (1, H, W, C).
X = np.expand_dims(X, axis=0)
print(X.shape)
# NOTE(review): np.ndarray([1]) allocates an UNINITIALIZED one-element array,
# so the training label is arbitrary memory; np.zeros(1) or np.ones(1) was
# probably intended -- confirm.
y = np.ndarray([1])
print(y.shape)
# One conv layer (8 filters, 3x3, same padding) + dense sigmoid head.
i_layer = Input(shape = IMAGE_SIZE)
h_layer = Conv2D(8, (3,3), strides = 1, activation='relu', padding='same')(i_layer)
h_layer = Flatten()(h_layer)
o_layer = Dense(1, activation='sigmoid')(h_layer)
model = Model(i_layer, o_layer)
model.summary()
model.compile(
  optimizer='adam',
  loss='binary_crossentropy',
  metrics=['accuracy'])
report = model.fit(X, y, epochs = 10)
model.layers
conv_layer = model.layers[1]
print(conv_layer)
# Pull out the learned conv filters for visualization.
filters, biases = conv_layer.get_weights()
print(conv_layer.name, filters.shape)
# Min-max normalize the filter weights to [0, 1] for display.
f_min, f_max = filters.min(), filters.max()
filters = (filters - f_min) / (f_max - f_min)
plt.figure(figsize=(20,10))
n_filters, idx = 8, 1
for i in range(n_filters):
    # get filter
    f = filters[:, :, :, i]
    # One subplot per input channel of the filter.
    for j in range(3):
        ax = plt.subplot(n_filters, 3, idx)
        ax.set_xticks([])
        ax.set_yticks([])
        plt.imshow(f[:, :, j], cmap='gray')
        idx += 1
plt.show()
# Truncated model whose output is the conv layer's activation maps.
model_visual = Model(inputs=model.inputs, outputs=conv_layer.output)
model_visual.summary()
maps = model_visual(X)
print(maps.shape)
plt.figure(figsize=(20,10))
square = 4
idx = 1
# Show the first (square * 2) feature maps in a grid.
for _ in range(square):
    for _ in range(square):
        if (idx > square * 2):
            break
        # specify subplot and turn of axis
        ax = plt.subplot(square, square, idx)
        ax.set_xticks([])
        ax.set_yticks([])
        plt.imshow(maps[0, :, :, idx-1], cmap='gray')
        idx += 1
plt.show()
maps.shape[3]
# Show every feature map individually.
for i in range(maps.shape[3]):
    ax = plt.subplot()
    plt.imshow(maps[0, :, :, i], cmap='gray')
    ax.set_xticks([])
    ax.set_yticks([])
    plt.show()
import argparse as ap
import hail
from pprint import pprint
import time
from hail_scripts.v01.utils.vds_utils import write_vds

# Command-line interface: column names identifying the variant fields,
# plus one or more .tsv paths to convert to .vds.
p = ap.ArgumentParser(description="Convert a tsv table to a .vds")
p.add_argument("-c", "--chrom-column", required=True)
p.add_argument("-p", "--pos-column", required=True)
p.add_argument("-r", "--ref-column", required=True)
p.add_argument("-a", "--alt-column", required=True)
# BUG FIX: args.no_header, args.delimiter and args.missing_value were read
# below but never declared, which crashed with AttributeError at runtime.
p.add_argument("--no-header", action="store_true",
               help="set if the table has no header row")
p.add_argument("--delimiter", default="\t",
               help="field delimiter (default: tab)")
p.add_argument("--missing-value", default="NA",
               help="token to treat as a missing value (default: NA)")
p.add_argument("table_path", nargs="+")
args = p.parse_args()
# BUG FIX: was args.vcf_path, which does not exist (the positional
# argument is named table_path).
print(", ".join(args.table_path))
hc = hail.HailContext(log="./hail_{}.log".format(time.strftime("%y%m%d_%H%M%S")))
for table_path in args.table_path:
    print("\n")
    print("==> import_table: %s" % table_path)
    # Derive the output path by stripping the .tsv/.gz/.bgz suffixes.
    output_path = table_path.replace(".tsv", "").replace(".gz", "").replace(".bgz", "") + ".vds"
    print("==> output: %s" % output_path)
    kt = hc.import_table(table_path, impute=True, no_header=args.no_header, delimiter=args.delimiter, missing=args.missing_value, min_partitions=1000)
    # Drop rows where ref == alt (not real variants), then key the table
    # by a Variant built from the four configured columns.
    kt = kt.filter("%(ref_column)s == %(alt_column)s" % args.__dict__, keep=False)
    kt = kt.annotate("variant=Variant(%(chrom_column)s, %(pos_column)s, %(ref_column)s, %(alt_column)s)" % args.__dict__)
    kt = kt.key_by('variant')
    kt = kt.drop([args.chrom_column, args.pos_column, args.ref_column, args.alt_column])
    vds = hail.VariantDataset.from_table(kt)
    pprint(vds.variant_schema)
    write_vds(vds, output_path)
| 1,482 | 591 |
#!/usr/bin/env python
# Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Dec 22, 2013
@author: paepcke
'''
import os
import re
import sys
from edxTrackLogJSONParser import EdXTrackLogJSONParser
from modulestoreImporter import ModulestoreImporter
from unidecode import unidecode
# Pulls the first quoted field (the row _id) out of a CSV line shaped
# like: "<id>","<seqID>",...
idExtractPat = re.compile(r'^"([^"]*)')
# Pulls the second quoted field (the sequence ID) out of the same line.
seqIDExtractPat = re.compile(r'","([^"]*)')
# Maps OpenEdX resource hashes to human-readable display names; the
# modulestore JSON dump is cached on disk so repeated runs start fast.
hashLookup = ModulestoreImporter(os.path.join(os.path.dirname(__file__),'data/modulestore_latest.json'),
                                 useCache=True)
def makeInsertSafe(unsafeStr):
    '''
    Makes the given string safe for use as a value in a MySQL INSERT
    statement. Looks for embedded CR or LFs, and turns them into
    semicolons. Single quotes are escaped, and backslashes are removed
    entirely. Non-ASCII characters (ordinal > 128) are transliterated
    to close ASCII approximations via unidecode().
    @param unsafeStr: string that possibly contains unsafe chars
    @type unsafeStr: String
    @return: same string, with unsafe chars properly replaced or escaped
    @rtype: String
    '''
    # Non-strings (including None) and empty strings normalize to ''.
    if unsafeStr is None or not isinstance(unsafeStr, basestring) or len(unsafeStr) == 0:
        return ''
    # Check for chars > 128 (illegal for standard ASCII):
    for oneChar in unsafeStr:
        if ord(oneChar) > 128:
            # unidecode() replaces unicode with approximations.
            # I tried all sorts of escapes, and nothing worked
            # for all cases, except this:
            unsafeStr = unidecode(unicode(unsafeStr))
            break
    return unsafeStr.replace('\n', "; ").replace('\r', "; ").replace('\\', '').replace("'", r"\'")
def fixSequencIDs():
    '''
    Reads "<_id>","<sequenceID>" CSV lines from a hard-wired input file,
    resolves each sequence ID's resource hash to a human-readable display
    name via hashLookup, and writes one bulk
    INSERT ... ON DUPLICATE KEY UPDATE statement to a hard-wired .sql file.
    Rows whose sequence ID contains no recognizable hash are skipped.
    '''
    counter = 0
    with open('/home/paepcke/tmp/sequenceIDs.sql','w') as outfd:
        outfd.write("USE Edx;\nINSERT INTO EdxTrackEvent(_id,resource_display_name)\n")
        with open('/home/paepcke/tmp/sequenceIDs.csv','r') as fd:
            for idSeqID in fd:
                sqlid = idExtractPat.search(idSeqID).group(1)
                seqID = seqIDExtractPat.search(idSeqID).group(1)
                resourceNameMatch = EdXTrackLogJSONParser.findHashPattern.search(seqID)
                if resourceNameMatch is not None:
                    resourceName = makeInsertSafe(hashLookup.getDisplayName(resourceNameMatch.group(1)))
                    # First VALUES tuple has no leading comma; later ones do.
                    if counter == 0:
                        outfd.write('("%s","%s")' % (sqlid,resourceName))
                    else:
                        outfd.write(',\n("%s","%s")' % (sqlid,resourceName))
                else:
                    continue
                counter += 1
                #if counter > 10:
                #    break
        outfd.write("\nON DUPLICATE KEY UPDATE resource_display_name = VALUES(resource_display_name);\n")
    print("Created %d corrections." % counter)
# Script entry point: generate the corrections .sql file.
if __name__ == '__main__':
    fixSequencIDs()
# Example of the generated statement shape:
#INSERT INTO EdxTrackEvent (_id,long_answer) VALUES ('fbcefe06_fb7c_48aa_a12e_d85e6988dbda','first answer'),('bbd3ddf3_8ed0_4eee_8ff7_f5791b9e4a7e','second answer') ON DUPLICATE KEY UPDATE long_answer=VALUES(long_answer);
| 4,649 | 1,486 |
#########################################################################################
# -*- coding: utf-8 -*-
#
# This file is part of the SKALogger project
#
#
#
#########################################################################################
"""Contain the tests for the SKALogger."""
import re
import pytest
from tango import DevState
from tango.test_context import MultiDeviceTestContext
from ska_tango_base.base import ReferenceBaseComponentManager
from ska_tango_base.logger_device import SKALogger
from ska_tango_base.subarray import SKASubarray
import tango
# PROTECTED REGION ID(SKALogger.test_additional_imports) ENABLED START #
from ska_tango_base.control_model import (
AdminMode,
ControlMode,
HealthState,
LoggingLevel,
SimulationMode,
TestMode,
)
# PROTECTED REGION END # // SKALogger.test_additional_imports
# PROTECTED REGION ID(SKALogger.test_SKALogger_decorators) ENABLED START #
@pytest.mark.usefixtures("tango_context", "initialize_device")
# PROTECTED REGION END # // SKALogger.test_SKALogger_decorators
class TestSKALogger(object):
    """
    Test class for tests of the SKALogger device class.
    Each test exercises one attribute or command of a freshly
    initialised device supplied by the tango_context fixture.
    """
    @pytest.fixture(scope="class")
    def device_test_config(self, device_properties):
        """
        Fixture that specifies the device to be tested, along with its
        properties and memorized attributes.
        """
        return {
            "device": SKALogger,
            "component_manager_patch": lambda self: ReferenceBaseComponentManager(
                self.op_state_model, logger=self.logger
            ),
            "properties": device_properties,
            "memorized": {"adminMode": str(AdminMode.ONLINE.value)},
        }
    @pytest.mark.skip("Not implemented")
    def test_properties(self, tango_context):
        """Placeholder for device-property tests (not yet implemented)."""
        # test the properties
        # PROTECTED REGION ID(SKALogger.test_properties) ENABLED START #
        # PROTECTED REGION END # // SKALogger.test_properties
        pass
    # PROTECTED REGION ID(SKALogger.test_State_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_State_decorators
    def test_State(self, tango_context):
        """Test for State: a freshly initialised device reports OFF."""
        # PROTECTED REGION ID(SKALogger.test_State) ENABLED START #
        assert tango_context.device.State() == DevState.OFF
        # PROTECTED REGION END # // SKALogger.test_State
    # PROTECTED REGION ID(SKALogger.test_Status_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_Status_decorators
    def test_Status(self, tango_context):
        """Test for Status: the status string matches the OFF state."""
        # PROTECTED REGION ID(SKALogger.test_Status) ENABLED START #
        assert tango_context.device.Status() == "The device is in OFF state."
        # PROTECTED REGION END # // SKALogger.test_Status
    # PROTECTED REGION ID(SKALogger.test_GetVersionInfo_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_GetVersionInfo_decorators
    def test_GetVersionInfo(self, tango_context):
        """Test for GetVersionInfo: class, package, semver, description."""
        # PROTECTED REGION ID(SKALogger.test_GetVersionInfo) ENABLED START #
        versionPattern = re.compile(
            f"{tango_context.device.info().dev_class}, ska_tango_base, [0-9]+.[0-9]+.[0-9]+, "
            "A set of generic base devices for SKA Telescope."
        )
        versionInfo = tango_context.device.GetVersionInfo()
        assert (re.match(versionPattern, versionInfo[0])) is not None
        # PROTECTED REGION END # // SKALogger.test_GetVersionInfo
    # PROTECTED REGION ID(SKALogger.test_buildState_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_buildState_decorators
    def test_buildState(self, tango_context):
        """Test for buildState: package name, semver and description."""
        # PROTECTED REGION ID(SKALogger.test_buildState) ENABLED START #
        buildPattern = re.compile(
            r"ska_tango_base, [0-9]+.[0-9]+.[0-9]+, "
            r"A set of generic base devices for SKA Telescope"
        )
        assert (re.match(buildPattern, tango_context.device.buildState)) is not None
        # PROTECTED REGION END # // SKALogger.test_buildState
    # PROTECTED REGION ID(SKALogger.test_versionId_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_versionId_decorators
    def test_versionId(self, tango_context):
        """Test for versionId: must look like a semantic version number."""
        # PROTECTED REGION ID(SKALogger.test_versionId) ENABLED START #
        versionIdPattern = re.compile(r"[0-9]+.[0-9]+.[0-9]+")
        assert (re.match(versionIdPattern, tango_context.device.versionId)) is not None
        # PROTECTED REGION END # // SKALogger.test_versionId
    # PROTECTED REGION ID(SKALogger.test_loggingLevel_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_loggingLevel_decorators
    def test_loggingLevel(self, tango_context):
        """Test for loggingLevel: defaults to INFO."""
        # PROTECTED REGION ID(SKALogger.test_loggingLevel) ENABLED START #
        assert tango_context.device.loggingLevel == LoggingLevel.INFO
        # PROTECTED REGION END # // SKALogger.test_loggingLevel
    # PROTECTED REGION ID(SKALogger.test_healthState_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_healthState_decorators
    def test_healthState(self, tango_context):
        """Test for healthState: defaults to OK."""
        # PROTECTED REGION ID(SKALogger.test_healthState) ENABLED START #
        assert tango_context.device.healthState == HealthState.OK
        # PROTECTED REGION END # // SKALogger.test_healthState
    # PROTECTED REGION ID(SKALogger.test_adminMode_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_adminMode_decorators
    def test_adminMode(self, tango_context):
        """Test for adminMode: the memorized value is ONLINE."""
        # PROTECTED REGION ID(SKALogger.test_adminMode) ENABLED START #
        assert tango_context.device.adminMode == AdminMode.ONLINE
        # PROTECTED REGION END # // SKALogger.test_adminMode
    # PROTECTED REGION ID(SKALogger.test_controlMode_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_controlMode_decorators
    def test_controlMode(self, tango_context):
        """Test for controlMode: defaults to REMOTE."""
        # PROTECTED REGION ID(SKALogger.test_controlMode) ENABLED START #
        assert tango_context.device.controlMode == ControlMode.REMOTE
        # PROTECTED REGION END # // SKALogger.test_controlMode
    # PROTECTED REGION ID(SKALogger.test_simulationMode_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_simulationMode_decorators
    def test_simulationMode(self, tango_context):
        """Test for simulationMode: defaults to FALSE."""
        # PROTECTED REGION ID(SKALogger.test_simulationMode) ENABLED START #
        assert tango_context.device.simulationMode == SimulationMode.FALSE
        # PROTECTED REGION END # // SKALogger.test_simulationMode
    # PROTECTED REGION ID(SKALogger.test_testMode_decorators) ENABLED START #
    # PROTECTED REGION END # // SKALogger.test_testMode_decorators
    def test_testMode(self, tango_context):
        """Test for testMode: defaults to NONE."""
        # PROTECTED REGION ID(SKALogger.test_testMode) ENABLED START #
        assert tango_context.device.testMode == TestMode.NONE
        # PROTECTED REGION END # // SKALogger.test_testMode
@pytest.mark.forked
def test_SetLoggingLevel():
    """Test for SetLoggingLevel.

    Spins up a SKALogger and a SKASubarray in one multi-device test
    context, asks the logger to change the subarray's loggingLevel, and
    verifies the change took effect on the target device.
    """
    logging_level = int(tango.LogLevel.LOG_ERROR)
    logging_target = "logger/target/1"
    logger_device = "logger/device/1"
    devices_info = (
        {"class": SKALogger, "devices": [{"name": logger_device}]},
        {"class": SKASubarray, "devices": [{"name": logging_target}]},
    )
    with MultiDeviceTestContext(devices_info, process=False) as multi_context:
        dev_proxy = multi_context.get_device(logging_target)
        dev_proxy.Init()
        # Start from a different level so the change below is observable.
        dev_proxy.loggingLevel = int(tango.LogLevel.LOG_FATAL)
        assert dev_proxy.loggingLevel != logging_level
        # SetLoggingLevel takes parallel lists: [levels] and [target FQDNs].
        levels = []
        levels.append(logging_level)
        targets = []
        targets.append(multi_context.get_device_access(logging_target))
        device_details = []
        device_details.append(levels)
        device_details.append(targets)
        multi_context.get_device(logger_device).SetLoggingLevel(device_details)
        assert dev_proxy.loggingLevel == logging_level
| 8,433 | 2,803 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import urlparse
def process_buffer(buffer):
    """Decide whether a group of candidate-pair lines should be emitted.

    ``buffer`` holds consecutive raw (byte) lines that share the same
    stripped URL. Each line carries four tab-separated fields:
    stripped_url, original_url, stripped_language, detected_language.
    Groups that look like genuine multilingual page pairs are copied to
    stdout, malformed groups go to stderr, everything else is dropped.
    """
    # A pair requires at least two lines.
    if not buffer or len(buffer) < 2:
        return
    decoded = [raw.decode('utf-8', 'ignore') for raw in buffer]
    rows = [text.strip().lower().split("\t") for text in decoded]
    # Every line must have exactly the four expected columns.
    if list(set(map(len, rows))) != [4]:
        for text in decoded:
            sys.stderr.write(text.encode('utf-8'))
        return
    original_urls = [row[1] for row in rows]
    stripped_languages = [row[2] for row in rows]
    detected_languages = [row[3] for row in rows]
    # Require genuine variation in URLs and in both language columns.
    if len(set(original_urls)) < 2:
        return
    if len(set(stripped_languages)) < 2:
        return
    if len(set(detected_languages)) < 2:
        return
    # Emit the whole group as soon as any stripped language matches any
    # detected language ("chineset" is normalized to "chinese" first).
    for language in stripped_languages:
        for detected in detected_languages:
            if language in detected.replace("chineset", "chinese").split('/'):
                for text in decoded:
                    sys.stdout.write(text.encode("utf-8"))
                return
if __name__ == "__main__":
    # Group consecutive stdin lines by their first tab-separated field
    # (the stripped URL) and hand each completed group to process_buffer.
    # NOTE: an unused argparse.ArgumentParser was removed here — it was
    # constructed but parse_args() was never called.
    buffer = []
    buffer_url = None
    for line in sys.stdin:
        url = line.split("\t", 1)[0]
        if url != buffer_url:
            # URL changed: flush the previous group, start a new one.
            process_buffer(buffer)
            buffer = [line]
            buffer_url = url
        else:
            buffer.append(line)
    # Flush the final group.
    process_buffer(buffer)
| 2,053 | 590 |
import pytest
from graphviz import jupyter_integration
def test_get_jupyter_format_mimetype_invalid_raises_unknown():
    """An unrecognized format name must raise a ValueError mentioning 'unknown'."""
    bogus_format = 'Brian!'
    with pytest.raises(ValueError, match=r'unknown'):
        jupyter_integration.get_jupyter_format_mimetype(bogus_format)
def test_get_jupyter_mimetype_format_normalizes():
    """Round-tripping 'jpg' through the mimetype lookup normalizes it to 'jpeg'."""
    mimetype = jupyter_integration.get_jupyter_format_mimetype('jpg')
    assert jupyter_integration.get_jupyter_mimetype_format(mimetype) == 'jpeg'
def test_get_jupyter_mimetype_format_raises_unsupported():
    """A mimetype outside the supported set raises ValueError('unsupported ...')."""
    bogus_mimetype = 'A boy called Brian!'
    with pytest.raises(ValueError, match='unsupported'):
        jupyter_integration.get_jupyter_mimetype_format(bogus_mimetype)
| 625 | 219 |
import numpy as np
def main():
    """Print the sample mean of 100 standard-normal draws."""
    sample_mean = np.random.randn(100).mean()
    print(sample_mean)
# Script entry point.
if __name__ == '__main__':
    main()
import mountaincar as mc
import numpy as np
from collections import namedtuple
from collections import defaultdict
import matplotlib.pylab as plb
import matplotlib.pyplot as plt
from time import time
# Immutable mountain-car state: position ``x`` and velocity ``v``.
State = namedtuple('State', 'x v')
class SarsaMountainCar(object):
    """Scaffold for a SARSA learner on the mountain-car task."""

    def __init__(self, learning_rate=0.1, reward_factor=0.95, eligibility_decay=0.7):
        """Store the learning hyper-parameters.

        learning_rate: step size for value updates.
        reward_factor: discount factor applied to future rewards.
        eligibility_decay: decay rate of the eligibility traces.
        """
        self.learning_rate = learning_rate
        self.reward_factor = reward_factor
        self.eligibility_decay = eligibility_decay

    def _vizualize(self):
        """Placeholder for a future visualization hook (not implemented)."""
        pass
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
"""
The following functions are used to create an annotated heatmap and they were copied from:
https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html#using-the-helper-function-code-style
"""
def heatmap(data, row_labels, col_labels, ax=None,
            **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.
    Parameters
    ----------
    data
        A 2D numpy array of shape (N, M).
    row_labels
        A list or array of length N with the labels for the rows.
    col_labels
        A list or array of length M with the labels for the columns.
    ax
        A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
        not provided, use current axes or create a new one. Optional.
    **kwargs
        All other arguments are forwarded to `imshow`.

    Returns
    -------
    The `matplotlib.image.AxesImage` produced by `imshow`.
    """
    if not ax:
        ax = plt.gca()
    # Plot the heatmap
    im = ax.imshow(data, **kwargs)
    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)
    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")
    # Turn spines off and create white grid.
    # ax.spines[:].set_visible(False)
    ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)
    return im
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=("black", "white"),
                     threshold=None, **textkw):
    """
    A function to annotate a heatmap.
    Parameters
    ----------
    im
        The AxesImage to be labeled.
    data
        Data used to annotate. If None, the image's data is used. Optional.
    valfmt
        The format of the annotations inside the heatmap. This should either
        use the string format method, e.g. "$ {x:.2f}", or be a
        `matplotlib.ticker.Formatter`. Optional.
    textcolors
        A pair of colors. The first is used for values below a threshold,
        the second for those above. Optional.
    threshold
        Value in data units according to which the colors from textcolors are
        applied. If None (the default) uses the middle of the colormap as
        separation. Optional.
    **textkw
        All other arguments are forwarded to each call to `text` used to create
        the text labels.

    Returns
    -------
    The list of `matplotlib.text.Text` objects created, one per cell.
    """
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()
    # Normalize the threshold to the images color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max())/2.
    # Set default alignment to center, but allow it to be
    # overwritten by textkw.
    kw = dict(horizontalalignment="center",
              verticalalignment="center")
    kw.update(textkw)
    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
    # Loop over the data and create a `Text` for each "pixel".
    # Change the text's color depending on the data.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Pick black or white text depending on cell brightness.
            kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
            text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)
    return texts
"""
The following functions are used to get the top pairs from a correlation matrix and they were copied from:
https://stackoverflow.com/a/41453817
"""
def get_redundant_pairs(df):
    """Return the (col_i, col_j) label pairs on or below the diagonal.

    In a symmetric correlation matrix these pairs duplicate the upper
    triangle (or are trivial self-correlations), so they should be
    dropped before ranking correlations.
    """
    cols = df.columns
    return {(cols[i], cols[j])
            for i in range(df.shape[1])
            for j in range(i + 1)}
def get_top_abs_correlations(df, min_val=0.6):
    """Return the strongest absolute pairwise correlations in *df*.

    The result is a one-column DataFrame ('Score') indexed by the
    (column, column) pairs, sorted descending, keeping only scores that
    are at least *min_val*.
    """
    abs_corr = df.corr().abs().unstack()
    abs_corr = abs_corr.drop(labels=get_redundant_pairs(df))
    abs_corr = abs_corr.sort_values(ascending=False)
    scores = pd.DataFrame(abs_corr, columns=['Score'])
    return scores.where(scores['Score'] >= min_val, np.nan).dropna()
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""auto generate rank table and export envs"""
import sys
import subprocess
import os
import socket
import json
from argparse import ArgumentParser, REMAINDER
def parse_args():
    """
    Build the launcher's command-line parser and parse sys.argv.

    Returns the argparse Namespace; everything after the training script
    path is captured verbatim in ``training_script_args`` and forwarded
    to the spawned training processes.
    """
    parser = ArgumentParser(description="mindspore distributed training launch "
                                        "helper utilty that will spawn up "
                                        "multiple distributed processes")
    parser.add_argument("--nproc_per_node", type=int, default=1,
                        help="The number of processes to launch on each node, "
                             "for D training, this is recommended to be set "
                             "to the number of D in your system so that "
                             "each process can be bound to a single D.")
    parser.add_argument("--visible_devices", type=str, default="0,1,2,3,4,5,6,7",
                        help="will use the visible devices sequentially")
    parser.add_argument("--env_sh", type=str, default="",
                        help="env for 1p")
    parser.add_argument("--server_id", type=str, default="",
                        help="server ip")
    # positional
    parser.add_argument("training_script", type=str,
                        help="The full path to the single D training "
                             "program/script to be launched in parallel, "
                             "followed by all the arguments for the "
                             "training script")
    # device mode
    parser.add_argument("--device", type=str, default="A+K")
    # task_set, to impove cpu utilization for multi-npu(e.g., 8P) training
    parser.add_argument("--task_set", type=bool, default=False)
    parser.add_argument("--task_set_core", type=int, default=24)
    # ranktable file
    parser.add_argument("--table_fn", type=str, default="",
                        help="The ranktable file path, if not set, "
                             "we will auto-generate a ranktable for user")
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """
    Generate an HCCN rank table (unless one is supplied) and spawn one
    background training process per visible device, each with its
    RANK/DEVICE environment configured.
    """
    args = parse_args()
    print('args:{}'.format(args))
    visible_devices = args.visible_devices.split(',')
    assert len(visible_devices) >= args.nproc_per_node
    print('visible_devices:{}'.format(visible_devices))
    if args.server_id == '':
        # BUG FIX: the message had a typo ("pleaser") and the script
        # exited with status 0 even though this is an error condition.
        print('please input server ip!!!')
        sys.exit(1)
    print('server_id:{}'.format(args.server_id))
    # Map device id -> device ip from the host HCCN configuration.
    # BUG FIX: the file handle was leaked; use a context manager.
    with open('/etc/hccn.conf', 'r') as hccn_fp:
        hccn_configs = hccn_fp.readlines()
    device_ips = {}
    for hccn_item in hccn_configs:
        hccn_item = hccn_item.strip()
        if hccn_item.startswith('address_'):
            device_id, device_ip = hccn_item.split('=')
            device_id = device_id.split('_')[1]
            device_ips[device_id] = device_ip
            print('device_id:{}, device_ip:{}'.format(device_id, device_ip))
    # Assemble the rank table describing every participating device.
    hccn_table = {}
    if args.device == 'A+K':
        hccn_table['board_id'] = '0x002f'
    else:
        hccn_table['board_id'] = '0x0000'
    hccn_table['chip_info'] = '910'
    hccn_table['deploy_mode'] = 'lab'
    hccn_table['group_count'] = '1'
    hccn_table['group_list'] = []
    instance_list = []
    usable_dev = ''
    for instance_id in range(args.nproc_per_node):
        instance = {}
        instance['devices'] = []
        device_id = visible_devices[instance_id]
        device_ip = device_ips[device_id]
        usable_dev += str(device_id)
        instance['devices'].append({
            'device_id': device_id,
            'device_ip': device_ip,
        })
        instance['rank_id'] = str(instance_id)
        instance['server_id'] = args.server_id
        instance_list.append(instance)
    hccn_table['group_list'].append({
        'device_num': str(args.nproc_per_node),
        'server_num': '1',
        'group_name': '',
        'instance_count': str(args.nproc_per_node),
        'instance_list': instance_list,
    })
    hccn_table['para_plane_nic_location'] = 'device'
    hccn_table['para_plane_nic_name'] = []
    for instance_id in range(args.nproc_per_node):
        eth_id = visible_devices[instance_id]
        hccn_table['para_plane_nic_name'].append('eth{}'.format(eth_id))
    hccn_table['para_plane_nic_num'] = str(args.nproc_per_node)
    hccn_table['status'] = 'completed'
    # BUG FIX: `args.table_fn is ""` compared identity against a string
    # literal, which only worked through CPython interning; use equality.
    if args.table_fn == "":
        table_fn = os.path.join(os.getcwd(), 'rank_table_{}p_{}_{}.json'.format(args.nproc_per_node, usable_dev, args.server_id))
        with open(table_fn, 'w') as table_fp:
            json.dump(hccn_table, table_fp, indent=4)
    else:
        table_fn = args.table_fn
    # Launch one shell command per rank: export the env, prepare a clean
    # per-device scratch directory, and start the script in the background.
    # (An unused `dist_group_size` variable was removed here.)
    for rank in range(0, args.nproc_per_node):
        rank_id = rank
        device_id = visible_devices[rank]
        device_root_fn = os.path.join(os.getcwd(), 'device{}'.format(device_id))
        rank_process = ''
        if args.nproc_per_node > 1:
            rank_process += 'export RANK_TABLE_FILE={} && '.format(table_fn)
        if args.task_set:
            # Pin each rank to its own contiguous range of CPU cores.
            left = int(device_id) * args.task_set_core
            right = left + args.task_set_core - 1
            rank_process += 'export RANK_SIZE={} && source {} && export RANK_ID={} && export DEVICE_ID={} && rm -rf {} && mkdir {} && cd {} && taskset -c {}-{} python {} '.format(args.nproc_per_node, args.env_sh, rank_id, device_id, device_root_fn, device_root_fn, device_root_fn, left, right, args.training_script)
        else:
            rank_process += 'export RANK_SIZE={} && source {} && export RANK_ID={} && export DEVICE_ID={} && rm -rf {} && mkdir {} && cd {} && python {} '.format(args.nproc_per_node, args.env_sh, rank_id, device_id, device_root_fn, device_root_fn, device_root_fn, args.training_script)
        rank_process += ' '.join(args.training_script_args) + ' >log{}.log 2>&1 &'.format(rank_id)
        os.system(rank_process)
# Script entry point.
if __name__ == "__main__":
    main()
| 6,675 | 2,113 |
import logging
import keyring
SERVICE_NAME = "Orange3 - {}"
log = logging.getLogger(__name__)
class CredentialManager:
    """
    Class for storage of passwords in the system keyring service.
    All attributes of this class are safely stored.
    Args:
        service_name (str): service name used for storing in keyring.
    Examples:
        >>> cm = CredentialManager('Widget Name')
        >>> cm.some_secret = 'api-key-1234'
        >>> cm.some_secret
        'api-key-1234'
        >>> del cm.some_secret
        >>> cm.some_secret
    """
    def __init__(self, service_name):
        # Write straight into the instance dict: normal attribute
        # assignment would invoke __setattr__ below and try to store the
        # service name itself as a keyring secret.
        self.__dict__["__service_name"] = SERVICE_NAME.format(service_name)
    @property
    def service_name(self):
        # Read back via __dict__ for symmetry with __init__; __getattr__
        # is only invoked for *missing* attributes, so no recursion occurs.
        return self.__dict__["__service_name"]
    def __setattr__(self, key, value):
        """Store `value` under `key` in the system keyring (best effort)."""
        try:
            keyring.set_password(self.service_name, key, value)
        except Exception:
            # Deliberate best-effort: keyring failures are logged, not raised.
            log.exception("Failed to set secret '%s' of '%r'.", key, self.service_name)
    def __getattr__(self, item):
        """Return the secret stored under `item`, or None when unavailable."""
        try:
            return keyring.get_password(self.service_name, item)
        except Exception:
            log.exception("Failed to get secret '%s' of '%r'.", item, self.service_name)
    def __delattr__(self, item):
        """Delete the secret stored under `item` from the keyring (best effort)."""
        try:
            keyring.delete_password(self.service_name, item)
        except Exception:
            log.exception(
                "Failed to delete secret '%s' of '%r'.", item, self.service_name
            )
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
"""
# standard library
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
# third party
from django.test import TestCase
# local
import platforms.jira.handlers as jira_module
from responder.actions.models import Action
from tests.fixture_manager import get_fixtures
class ActionsBaseTestCase(TestCase):
    """
    Base class for testing Actions. Loads the actions and dispatches
    fixtures and exposes a ready-made Action as self.action.
    """
    fixtures = get_fixtures(['actions', 'dispatches'])
    def setUp(self):
        """Fetch the fixture Action with primary key 1 for each test."""
        self.action = Action.objects.get(pk=1)
class ActionTestCase(ActionsBaseTestCase):
    """
    Tests the Action class.
    """
    def test_str(self):
        """
        Tests the string representation of an Action.
        """
        self.assertEqual(str(self.action), 'Jira IssueAPI')
    def test_get_module(self):
        """
        Tests the _get_module method for getting the module for an
        Action's Destination.
        """
        self.assertEqual(self.action._get_module(), jira_module)
    def test_create_request_handler(self):
        """
        Tests the create_request_handler method for getting a request
        handler for an Action.
        """
        mock_user = Mock()
        mock_handler = Mock()
        with patch('platforms.jira.handlers.IssueAPI',
                   return_value=mock_handler) as mock_api:
            kwargs = {
                'user': mock_user,
            }
            result = self.action.create_request_handler(**kwargs)
            mock_api.assert_called_once_with(endpoint=self.action,
                                             user=mock_user)
            self.assertEqual(result, mock_handler)
    def test_save_w_no_descr(self):
        """
        Tests the save method of an Action when the Action has no
        description; saving defaults it to the Action's name.
        """
        self.assertEqual(self.action.description, None)
        self.action.save()
        self.assertEqual(self.action.description, 'Jira IssueAPI')
    def test_save_w_descr(self):
        """
        Tests the save method of an Action when the Action already has a
        description; saving leaves it untouched.
        """
        self.action.description = 'Create a JIRA Issue'
        self.action.save()
        self.assertEqual(self.action.description, 'Create a JIRA Issue')
    def test_get_dispatch(self):
        """
        Tests the get_dispatch method of an Action.
        """
        mock_alert = Mock()
        mock_user = Mock()
        mock_record = Mock()
        mock_handler = Mock()
        mock_handler.run = Mock(return_value=mock_record)
        mock_handler.record = mock_record
        with patch('platforms.jira.handlers.IssueAPI',
                   return_value=mock_handler) as mock_api:
            kwargs = {
                'alert': mock_alert,
                'user': mock_user,
            }
            result = self.action.get_dispatch(**kwargs)
            mock_api.assert_called_once_with(endpoint=self.action,
                                             user=mock_user)
            mock_handler.run.assert_called_once_with(mock_alert)
            self.assertEqual(result, mock_record)
| 3,799 | 1,090 |
import json
import requests
from collections import defaultdict
from fuzzywuzzy import process
from random import sample
# Constants
"""
Constants for default responses that do not need any further computation.
"""
DEFAULT_STOP_RESPONSE = 'All right. See you next time!'
DEFAULT_ERROR_MESSAGE = "I'm sorry. I don't know how to do that yet."
DEFAULT_HELP_MESSAGE = "Try asking me about prediction markets. Ask me to look up midterm elections."
# Canned replies for the built-in Amazon intents, keyed by intent name.
PREDEFINED_RESPONSES = {
    'AMAZON.FallbackIntent': "I couldn't understand what you were asking. Why don't you ask me about elections?",
    'AMAZON.CancelIntent': DEFAULT_STOP_RESPONSE,
    'AMAZON.HelpIntent': DEFAULT_HELP_MESSAGE,
    'AMAZON.StopIntent': DEFAULT_STOP_RESPONSE,
    'AMAZON.NavigateHomeIntent': DEFAULT_STOP_RESPONSE,
}
"""
To be considered as a match, any other title would have to be within this percentage of the score of the best match.
"""
PERCENTAGE_THRESHOLD = 0.1
# API Helpers
def get_all_markets():
    """
    Query the PredictIt API and return a dict that maps each available
    market's name to its numeric ID.
    """
    response = requests.request(
        'GET', 'https://www.predictit.org/api/marketdata/all/')
    payload = json.loads(response.content)
    return {market['name']: market['id'] for market in payload['markets']}
def get_market(id):
    """
    Query the PredictIt API for the details of the market with the given ID.
    """
    url = "https://www.predictit.org/api/marketdata/markets/%d" % id
    response = requests.request('GET', url)
    return json.loads(response.content)
# "UI" Helpers
def market_message(market):
    """
    Build a spoken summary for one market as returned by `get_market`.
    Multi-contract markets are reported as too complicated; a
    single-contract market reports its last trade price as a whole
    percentage.
    """
    contracts = market['contracts']
    if len(contracts) > 1:
        return "%s is too complicated." % market['name']
    price_percent = contracts[0]['lastTradePrice'] * 100
    return "%s is trading at %d percent." % (market['name'], price_percent)
def response_from_message(message):
    """
    Wrap a plain-text message in the minimal Alexa response envelope.
    """
    speech = {
        'type': 'PlainText',
        'text': message,
    }
    return {'version': '1.0', 'response': {'outputSpeech': speech}}
def can_fulfill(intent):
    """
    Answer Alexa's CanFulfillIntentRequest: YES only for a Query intent
    carrying a non-empty Market slot value, NO otherwise.

    Uses .get() throughout so a request missing 'slots', 'Market', or
    'value' answers NO instead of raising KeyError (the original indexed
    these keys directly and could crash on malformed requests).
    """
    slots = intent.get('slots') or {}
    market_slot = slots.get('Market') or {}
    if intent.get('name') == 'Query' and market_slot.get('value'):
        return {
            'version': '1.0',
            'response': {
                'canFulfillIntent': {
                    'canFulfill': 'YES',
                    'slots': {
                        'Market': {
                            'canUnderstand': 'YES',
                            'canFulfill': 'YES'
                        },
                    }
                }
            }
        }
    return {
        'version': '1.0',
        'response': {
            'canFulfillIntent': {
                'canFulfill': 'NO',
            }
        }
    }
# Main function
def main(event, context):
    """
    Entry point for the Alexa action.

    `event` is the Alexa request envelope; `context` is the (unused) Lambda
    context. Returns an Alexa response dict, or None for SessionEndedRequest.
    """
    request_type = event['request']['type']
    if request_type != 'IntentRequest':
        if request_type == 'LaunchRequest':
            return response_from_message(DEFAULT_HELP_MESSAGE)
        elif request_type == 'CanFulfillIntentRequest':
            return can_fulfill(event['request']['intent'])
        elif request_type == 'SessionEndedRequest':
            return
        # NOTE(review): any other non-intent request type falls through to
        # the intent handling below and will KeyError on 'intent' — confirm
        # whether Alexa can send such requests to this skill.
    intent = event['request']['intent']
    intent_type = intent['name']
    # Get the canned responses out of the way before we do any heavy lifting
    # with external API calls.
    if intent_type in PREDEFINED_RESPONSES:
        return response_from_message(PREDEFINED_RESPONSES[intent_type])
    # Sanity check.
    if intent_type != 'Query' or 'Market' not in intent['slots']:
        return response_from_message(DEFAULT_ERROR_MESSAGE)
    keyword = intent['slots']['Market']['value']
    markets = get_all_markets()
    # Only take the ones that are within percentage threshold of the first
    # result. Bucket them by score.
    # fuzzywuzzy's process.extract returns (name, score) pairs sorted by
    # score, best first.
    likely_markets = process.extract(keyword, markets.keys(), limit=100)
    (_, best_score) = likely_markets[0]
    result_markets = defaultdict(list)  # Multimap score -> id's
    for (name, score) in likely_markets:
        if best_score - score <= PERCENTAGE_THRESHOLD * best_score:
            result_markets[score].append(markets[name])
    # Pick one random market per score bucket, then fetch each one.
    # List of market JSON response's.
    result_markets = [get_market(id) for id in sum(
        [sample(ids, 1) for (_, ids) in result_markets.items()], [])]
    return response_from_message(' '.join(market_message(market) for market in result_markets))
| 4,805 | 1,458 |
import torch
import numpy as np
import logging, yaml, os, sys, argparse, time
from tqdm import tqdm
from collections import defaultdict
from Logger import Logger
import matplotlib
matplotlib.use('agg')
matplotlib.rcParams['agg.path.chunksize'] = 10000
import matplotlib.pyplot as plt
from scipy.io import wavfile
from random import sample
from sklearn.manifold import TSNE
from Modules import GE2E, GE2E_Loss
from Datasets import Dataset, Collater, Inference_Collater
from Noam_Scheduler import Modified_Noam_Scheduler
from Radam import RAdam
from Arg_Parser import Recursive_Parse
# Load the hyper-parameter tree once at import time; `hp` is used as a
# module-level configuration object by Trainer below.
# Fix: the original left the YAML file handle open; use a context manager.
with open('Hyper_Parameters.yaml', encoding='utf-8') as _hp_file:
    hp = Recursive_Parse(yaml.load(_hp_file, Loader=yaml.Loader))

if hp.Device is not None:
    # Pin the visible GPU(s) before torch initializes CUDA.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(hp.Device)

if not torch.cuda.is_available():
    device = torch.device('cpu')
else:
    device = torch.device('cuda:0')
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(0)

logging.basicConfig(
    level=logging.INFO, stream=sys.stdout,
    format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    )

if hp.Use_Mixed_Precision:
    try:
        from apex import amp
    except ImportError:  # was a bare except; only an import failure is expected
        # logging.warn is the deprecated alias of logging.warning
        logging.warning('There is no apex modules in the environment. Mixed precision does not work.')
        hp.Use_Mixed_Precision = False
class Trainer:
    """GE2E speaker-embedding trainer.

    Owns the data pipeline, model/optimizer/scheduler, the train /
    evaluation / inference loops, tensorboard-style logging, and
    checkpointing. Configuration comes from the module-level `hp` object.
    """
    def __init__(self, steps= 0):
        self.steps = steps
        self.epochs = 0
        self.Datset_Generate()   # NOTE: method name typo kept for compatibility
        self.Model_Generate()

        # Running sums of losses, flushed to the writers at logging intervals.
        self.scalar_Dict = {
            'Train': defaultdict(float),
            'Evaluation': defaultdict(float),
            }
        self.writer_Dict = {
            'Train': Logger(os.path.join(hp.Log_Path, 'Train')),
            'Evaluation': Logger(os.path.join(hp.Log_Path, 'Evaluation')),
            }

        self.Load_Checkpoint()

    def Datset_Generate(self):
        """Build the train/dev/inference datasets and their DataLoaders."""
        train_Dataset = Dataset(
            pattern_path= hp.Train.Train_Pattern.Path,
            metadata_file= hp.Train.Train_Pattern.Metadata_File,
            pattern_per_speaker= hp.Train.Batch.Train.Pattern_per_Speaker,
            use_cache= hp.Train.Use_Pattern_Cache
            )
        dev_Dataset = Dataset(
            pattern_path= hp.Train.Eval_Pattern.Path,
            metadata_file= hp.Train.Eval_Pattern.Metadata_File,
            pattern_per_speaker= hp.Train.Batch.Eval.Pattern_per_Speaker,
            use_cache= hp.Train.Use_Pattern_Cache
            )
        inference_Dataset = Dataset(
            pattern_path= hp.Train.Eval_Pattern.Path,
            metadata_file= hp.Train.Eval_Pattern.Metadata_File,
            pattern_per_speaker= hp.Train.Batch.Eval.Pattern_per_Speaker,
            num_speakers= 50,   #Maximum number by tensorboard.
            use_cache= hp.Train.Use_Pattern_Cache
            )
        logging.info('The number of train speakers = {}.'.format(len(train_Dataset)))
        logging.info('The number of development speakers = {}.'.format(len(dev_Dataset)))

        collater = Collater(
            min_frame_length= hp.Train.Frame_Length.Min,
            max_frame_length= hp.Train.Frame_Length.Max
            )
        inference_Collater = Inference_Collater(
            samples= hp.Train.Inference.Samples,
            frame_length= hp.Train.Inference.Frame_Length,
            overlap_length= hp.Train.Inference.Overlap_Length
            )

        self.dataLoader_Dict = {}
        self.dataLoader_Dict['Train'] = torch.utils.data.DataLoader(
            dataset= train_Dataset,
            shuffle= True,
            collate_fn= collater,
            batch_size= hp.Train.Batch.Train.Speaker,
            num_workers= hp.Train.Num_Workers,
            pin_memory= True
            )
        self.dataLoader_Dict['Dev'] = torch.utils.data.DataLoader(
            dataset= dev_Dataset,
            shuffle= True,
            collate_fn= collater,
            batch_size= hp.Train.Batch.Eval.Speaker,
            num_workers= hp.Train.Num_Workers,
            pin_memory= True
            )
        self.dataLoader_Dict['Inference'] = torch.utils.data.DataLoader(
            dataset= inference_Dataset,
            shuffle= True,
            collate_fn= inference_Collater,
            batch_size= hp.Train.Batch.Eval.Speaker,
            num_workers= hp.Train.Num_Workers,
            pin_memory= True
            )

    def Model_Generate(self):
        """Instantiate model, loss, optimizer, LR scheduler (and AMP)."""
        self.model = GE2E(
            mel_dims= hp.Sound.Mel_Dim,
            lstm_size= hp.GE2E.LSTM.Sizes,
            lstm_stacks= hp.GE2E.LSTM.Stacks,
            embedding_size= hp.GE2E.Embedding_Size,
            ).to(device)
        self.criterion = GE2E_Loss().to(device)
        self.optimizer = RAdam(
            params= self.model.parameters(),
            lr= hp.Train.Learning_Rate.Initial,
            betas= (hp.Train.ADAM.Beta1, hp.Train.ADAM.Beta2),
            eps= hp.Train.ADAM.Epsilon,
            weight_decay= hp.Train.Weight_Decay
            )
        self.scheduler = Modified_Noam_Scheduler(
            optimizer= self.optimizer,
            base= hp.Train.Learning_Rate.Base,
            )
        if hp.Use_Mixed_Precision:
            self.model, self.optimizer = amp.initialize(
                models= self.model,
                optimizers=self.optimizer
                )
        logging.info(self.model)

    def Train_Step(self, mels):
        """One optimization step over a training batch of mel patterns."""
        loss_Dict = {}
        mels = mels.to(device, non_blocking=True)
        embeddings = self.model(mels)
        loss_Dict['Embedding'] = self.criterion(embeddings, hp.Train.Batch.Train.Pattern_per_Speaker)
        self.optimizer.zero_grad()
        if hp.Use_Mixed_Precision:
            with amp.scale_loss(loss_Dict['Embedding'], self.optimizer) as scaled_loss:
                scaled_loss.backward()
            torch.nn.utils.clip_grad_norm_(
                parameters= amp.master_params(self.optimizer),
                max_norm= hp.Train.Gradient_Norm
                )
        else:
            loss_Dict['Embedding'].backward()
            torch.nn.utils.clip_grad_norm_(
                parameters= self.model.parameters(),
                max_norm= hp.Train.Gradient_Norm
                )
        self.optimizer.step()
        self.scheduler.step()
        self.steps += 1
        self.tqdm.update(1)

        for tag, loss in loss_Dict.items():
            # Fix: accumulate each loss under its own tag; the original always
            # added loss_Dict['Embedding'] regardless of the tag in the loop.
            self.scalar_Dict['Train']['Loss/{}'.format(tag)] += loss

    def Train_Epoch(self):
        """Run one pass over the training loader, handling periodic
        checkpointing, logging, evaluation, and inference."""
        for mels in self.dataLoader_Dict['Train']:
            self.Train_Step(mels)
            if self.steps % hp.Train.Checkpoint_Save_Interval == 0:
                self.Save_Checkpoint()
            if self.steps % hp.Train.Logging_Interval == 0:
                self.scalar_Dict['Train'] = {
                    tag: loss / hp.Train.Logging_Interval
                    for tag, loss in self.scalar_Dict['Train'].items()
                    }
                # NOTE(review): get_last_lr() returns a list; confirm the
                # Logger accepts that for a scalar entry.
                self.scalar_Dict['Train']['Learning_Rate'] = self.scheduler.get_last_lr()
                self.writer_Dict['Train'].add_scalar_dict(self.scalar_Dict['Train'], self.steps)
                self.scalar_Dict['Train'] = defaultdict(float)
            if self.steps % hp.Train.Evaluation_Interval == 0:
                self.Evaluation_Epoch()
            if self.steps % hp.Train.Inference_Interval == 0:
                self.Inference_Epoch()
            if self.steps >= hp.Train.Max_Step:
                return
        self.epochs += 1

    @torch.no_grad()
    def Evaluation_Step(self, mels):
        """Accumulate the evaluation loss for one dev batch (no gradients)."""
        loss_Dict = {}
        mels = mels.to(device, non_blocking=True)
        embeddings = self.model(mels)
        loss_Dict['Embedding'] = self.criterion(embeddings, hp.Train.Batch.Eval.Pattern_per_Speaker)
        for tag, loss in loss_Dict.items():
            self.scalar_Dict['Evaluation']['Loss/{}'.format(tag)] += loss

    def Evaluation_Epoch(self):
        """Evaluate on the full dev loader and flush averaged losses."""
        logging.info('(Steps: {}) Start evaluation.'.format(self.steps))
        self.model.eval()
        step = 0    # guard against an empty dev loader (was a NameError)
        for step, mels in tqdm(enumerate(self.dataLoader_Dict['Dev'], 1), desc='[Evaluation]'):
            self.Evaluation_Step(mels)
        self.scalar_Dict['Evaluation'] = {
            tag: loss / max(step, 1)
            for tag, loss in self.scalar_Dict['Evaluation'].items()
            }
        self.writer_Dict['Evaluation'].add_scalar_dict(self.scalar_Dict['Evaluation'], self.steps)
        self.writer_Dict['Evaluation'].add_histogram_model(self.model, self.steps, delete_keywords=['layer_Dict', 'layer'])
        self.scalar_Dict['Evaluation'] = defaultdict(float)
        self.model.train()

    @torch.no_grad()
    def Inference_Step(self, mels):
        """Return embeddings for one inference batch (no gradients)."""
        return self.model(
            mels= mels.to(device, non_blocking=True),
            samples= hp.Train.Inference.Samples
            )

    def Inference_Epoch(self):
        """Embed the inference set and log an embedding projection."""
        logging.info('(Steps: {}) Start inference.'.format(self.steps))
        self.model.eval()
        embeddings, speakers = zip(*[
            (self.Inference_Step(mels), speakers)
            for mels, speakers in tqdm(self.dataLoader_Dict['Inference'], desc='[Inference]')
            ])
        embeddings = torch.cat(embeddings, dim= 0).cpu().numpy()
        speakers = [speaker for speaker_List in speakers for speaker in speaker_List]
        self.writer_Dict['Evaluation'].add_embedding(
            embeddings,
            metadata= speakers,
            global_step= self.steps,
            tag= 'Embeddings'
            )
        self.model.train()

    def Load_Checkpoint(self):
        """Restore the latest checkpoint (steps == 0) or a specific one."""
        if self.steps == 0:
            paths = [
                os.path.join(root, file).replace('\\', '/')
                for root, _, files in os.walk(hp.Checkpoint_Path)
                for file in files
                if os.path.splitext(file)[1] == '.pt'
                ]
            if len(paths) > 0:
                path = max(paths, key = os.path.getctime)
            else:
                return  # Initial training
        else:
            # Fix: the original joined onto an undefined `path` variable,
            # raising NameError whenever a specific step was requested.
            path = os.path.join(hp.Checkpoint_Path, 'S_{}.pt'.format(self.steps)).replace('\\', '/')

        state_Dict = torch.load(path, map_location= 'cpu')
        self.model.load_state_dict(state_Dict['Model'])
        self.optimizer.load_state_dict(state_Dict['Optimizer'])
        self.scheduler.load_state_dict(state_Dict['Scheduler'])
        self.steps = state_Dict['Steps']
        self.epochs = state_Dict['Epochs']
        if hp.Use_Mixed_Precision:
            if not 'AMP' in state_Dict.keys():
                logging.warning('No AMP state dict is in the checkpoint. Model regards this checkpoint is trained without mixed precision.')
            else:
                amp.load_state_dict(state_Dict['AMP'])
        logging.info('Checkpoint loaded at {} steps.'.format(self.steps))

    def Save_Checkpoint(self):
        """Serialize model/optimizer/scheduler (and AMP) state to disk."""
        os.makedirs(hp.Checkpoint_Path, exist_ok= True)
        state_Dict = {
            'Model': self.model.state_dict(),
            'Optimizer': self.optimizer.state_dict(),
            'Scheduler': self.scheduler.state_dict(),
            'Steps': self.steps,
            'Epochs': self.epochs,
            }
        if hp.Use_Mixed_Precision:
            state_Dict['AMP'] = amp.state_dict()
        torch.save(
            state_Dict,
            os.path.join(hp.Checkpoint_Path, 'S_{}.pt'.format(self.steps)).replace('\\', '/')
            )
        logging.info('Checkpoint saved at {} steps.'.format(self.steps))

    def Train(self):
        """Main loop: train until hp.Train.Max_Step, saving on interrupt."""
        hp_Path = os.path.join(hp.Checkpoint_Path, 'Hyper_Parameters.yaml').replace('\\', '/')
        if not os.path.exists(hp_Path):
            from shutil import copyfile
            os.makedirs(hp.Checkpoint_Path, exist_ok= True)
            copyfile('Hyper_Parameters.yaml', hp_Path)
        if self.steps == 0:
            self.Evaluation_Epoch()
        if hp.Train.Initial_Inference:
            self.Inference_Epoch()
        self.tqdm = tqdm(
            initial= self.steps,
            total= hp.Train.Max_Step,
            desc='[Training]'
            )
        while self.steps < hp.Train.Max_Step:
            try:
                self.Train_Epoch()
            except KeyboardInterrupt:
                self.Save_Checkpoint()
                exit(1)
        self.tqdm.close()
        logging.info('Finished training.')
# Command-line entry point: optionally resume from a given step count.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--steps', default=0, type=int)
    arguments = parser.parse_args()
    trainer = Trainer(steps=arguments.steps)
    trainer.Train()
import logging
import asyncio
import aiohttp
from .defaults import *
from .app import App
from .featured import FeaturedList
# Module-level logger shared by this package.
log = logging.getLogger(__name__)
class Client:
    """Main class for the Steam API.

    Wraps an aiohttp session and exposes coroutines for the storefront
    endpoints (`appdetails`, `featured`).
    """
    def __init__(self, *, loop=None, **opts):
        # NOTE(review): **opts is accepted but currently unused — confirm intent.
        self.loop = asyncio.get_event_loop() if loop is None else loop
        self.ready = False
        self.http = None

    def __cleanup(self):
        """Schedule the HTTP session to close on the event loop."""
        loop = self.loop
        if self.http:
            asyncio.ensure_future(self.http.close(), loop=loop)

    def stop(self):
        """Public shutdown hook; releases the HTTP session."""
        self.__cleanup()

    def __build_api(self, path:str, qs:dict=None, **args):
        """Build a storefront URL from an endpoint path and query params."""
        url = base_api + path
        if qs:
            url += "?" + '&'.join([name + "=" + str(value) for name, value in qs.items()])
        return url

    async def __get_json(self, req, *args, **kwargs) -> dict:
        """Decode a response body as JSON; returns None on failure."""
        try:
            return await req.json()
        except Exception:
            # Was a bare except with print("Error"); log with traceback instead.
            log.exception("Failed to decode JSON response")
            return None  # TODO: propagate a proper error to callers

    async def start(self, *args, **kwargs):
        """Create the HTTP session and mark the client ready."""
        # ClientSession.__aenter__ just returns the session; construct directly.
        self.http = aiohttp.ClientSession()
        self.ready = True

    def run(self, *args, **kwargs):
        """Start the client and run the event loop until interrupted."""
        loop = self.loop

        async def runner():
            try:
                await self.start(*args, **kwargs)
            finally:
                pass  # TODO: Handle startup failure / cleanup

        asyncio.ensure_future(runner(), loop=loop)
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            self.__cleanup()

    async def get_app_from_id(self, appid:int, currency_code:str="us", language_code:str="en"):
        """Fetch store details for `appid`; returns an App, or None on failure."""
        req = await self.http.get(self.__build_api("appdetails", {"appids":appid, "cc":currency_code, "l":language_code}))
        json_resp = await self.__get_json(req)
        if not json_resp:
            return None  # JSON decoding failed (was an uncaught TypeError)
        for item in json_resp:
            data = json_resp[item]
            return App(data=data["data"]) if data["success"] else None  # TODO: Handle This

    async def get_featured(self, *args, **kwargs):
        """Fetch the storefront's featured items as a FeaturedList."""
        req = await self.http.get(self.__build_api("featured"))
        json_resp = await self.__get_json(req)
        return FeaturedList(data=json_resp)
| 2,247 | 700 |
import sys
import numpy as np
import matplotlib.pyplot as plt

# Plot per-snapshot histograms of a population dump. The input file's first
# line is the population size; each subsequent '['-prefixed line is a
# bracketed comma-separated list of integers in [-16, 16).
# Fix: the file handle is now closed via a context manager.
with open(sys.argv[1], 'r') as f:
    lines = f.readlines()

pop_size = int(lines.pop(0))
pops = [l.strip() for l in lines if l[0] == '[']

for j, raw in enumerate(pops):
    # Count occurrences of every value in the fixed domain so empty bins
    # still appear in the bar chart.
    counts = {i: 0 for i in range(-16, 16)}
    for token in raw[1:-1].split(','):
        counts[int(token)] += 1
    xs = list(counts.keys())
    ys = list(counts.values())
    axes = plt.gca()
    axes.set_xlim([-17, 16])
    axes.set_ylim([0, pop_size+1])
    # plt.scatter(xs, ys, s=5, c=[(0,0,0)], alpha=0.5)
    plt.bar(xs, ys, 1, color='blue')
    plt.title('Population {:03d}'.format(j))
    plt.xlabel('x')
    plt.ylabel('qnt')
    name = 'pop{:03d}.png'.format(j)
    plt.savefig(name)
    print('saving {}'.format(name))
    plt.clf()
| 881 | 398 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
analyses for bVAE entanglement, etc
"""
import torch
import sys
sys.path.append("..") # Adds higher directory to python modules path.
import matplotlib.pyplot as plt
import numpy as np
from data.dspritesb import dSpriteBackgroundDataset
from torchvision import transforms
# Shared dSprites-with-background dataset (32x32, circle sprites) used by the
# sweep/plot helpers below.
ds = dSpriteBackgroundDataset(transform=transforms.Resize((32,32)),shapetype = 'circle')
# Build sweeps through model ...
def sweepCircleLatents(model,latents=np.linspace(0,1,16),def_latents=None):
    """sweepCircleLatents(model,latents,def_latents):
    generates input images that sweep through each latent variable,
    and evaluates them on given model
    model = loaded model, e.g., vae = staticVAE32(n_latent = 4)
    latents = latents to sweep through. defaults to
              np.linspace(0,1,16)
    def_latents = 'default latents': defines the non-swept latents.
                  defaults to [0.5,0.5,0.5,0.5] if None
    ---e.g.,---
    yhat, x = sweepCircleLatents(vae)
    """
    # Initialization
    nsweep = len(latents)
    # encoderBVAE_like exposes its latent width through the final fc layer
    # and is itself the encoder; other models expose n_latent and encode().
    if type(model).__name__ == 'encoderBVAE_like':
        n_latent = model.fc.out_features
        encoder = model
    else:
        n_latent = model.n_latent
        encoder = model.encode
    if def_latents is None:
        def_latents = 0.5*np.ones(n_latent)
    # Generate stimulus sweeps
    # NOTE(review): the loop below hard-codes 4 latent dimensions even though
    # n_latent is computed above — confirm n_latent == 4 for all models used.
    x = torch.zeros((n_latent,nsweep,1,32,32))
    for i in np.arange(0,nsweep):
        x[0,i,:,:,:] = ds.arbitraryCircle(latents[i],def_latents[1],def_latents[2],def_latents[3])
        x[1,i,:,:,:] = ds.arbitraryCircle(def_latents[0],latents[i],def_latents[2],def_latents[3])
        x[2,i,:,:,:] = ds.arbitraryCircle(def_latents[0],def_latents[1],latents[i],def_latents[3])
        x[3,i,:,:,:] = ds.arbitraryCircle(def_latents[0],def_latents[1],def_latents[2],latents[i])
    # ... and evaulate them all at once
    yhat = encoder(x)
    # VAE-style models return a tuple; keep only the first element (mu).
    if not (type(model).__name__ == 'encoderBVAE_like' or type(model).__name__ == 'dynamicAE32'):
        yhat = yhat[0]
    return yhat,x
# Plot sweeps through model
def plotCircleSweep(x=None,nimgs=5):
    """plotCircleSweep(x,nimgs):
    plots a subset of stimuli,
    generated from sweepCircleLatents()
    ---e.g.,---
    yhat, x = sweepCircleLatents(vae)
    plotCircleSweep(x)
    alternatively,
    plotCircleSweep(sweepCircleLatents(vae))
    """
    # Initialization
    # BUG FIX: the original tested `x is None and type(nimgs) is tuple` and
    # then read an undefined name `yhat` (NameError). Accept the (yhat, x)
    # tuple returned by sweepCircleLatents() directly, per the docstring.
    if isinstance(x, tuple):
        x = x[1]
    # Start a-plottin'
    fig, ax = plt.subplots(nimgs,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k')
    for latentdim in range(4):
        cnt = -1
        for img in np.linspace(0,15,nimgs).astype(int):
            cnt+=1
            plt.sca(ax[cnt,latentdim])
            plt.set_cmap('gray')
            ax[cnt,latentdim].imshow(
                x[latentdim*16+img,:,:,:].squeeze(), vmin=0, vmax=1)
            plt.axis('off')
    return fig, ax
def plotLatentsSweep(yhat,nmodels=1):
    """plotLatentsSweep(yhat):
    plots model latents and a subset of the corresponding stimuli,
    generated from sweepCircleLatents()
    ---e.g.,---
    yhat, x = sweepCircleLatents(vae)
    plotCircleSweep(yhat,x)
    alternatively,
    plotLatentsSweep(sweepCircleLatents(vae))
    """
    # Initialization
    # Accept the (yhat, x) tuple returned by sweepCircleLatents() directly.
    if type(yhat) is tuple:
        yhat = yhat[0]
    # Start a-plottin'
    # One column per latent dimension; one row per model (yhat is indexed as
    # yhat[imodel] when nmodels > 1).
    fig, ax = plt.subplots(nmodels,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k', sharey='row',sharex='col')
    for latentdim in range(4):
        if nmodels > 1:
            for imodel in range(nmodels):
                plt.sca(ax[imodel,latentdim])
                # Rows latentdim*16 .. latentdim*16+15 hold the 16-step sweep
                # of this latent dimension.
                plt.plot(yhat[imodel][latentdim*16+np.arange(0,16),:].detach().numpy())
                # ax[imodel,latentdim].set_aspect(1./ax[imodel,latentdim].get_data_ratio())
                ax[imodel,latentdim].spines['top'].set_visible(False)
                ax[imodel,latentdim].spines['right'].set_visible(False)
                if latentdim>0:
                    ax[imodel,latentdim].spines['left'].set_visible(False)
                    # ax[imodel,latentdim].set_yticklabels([])
                    ax[imodel,latentdim].tick_params(axis='y', length=0)
                # if imodel<nmodels-1 or latentdim>0:
                ax[imodel,latentdim].spines['bottom'].set_visible(False)
                ax[imodel,latentdim].set_xticklabels([])
                ax[imodel,latentdim].tick_params(axis='x', length=0)
        else:
            # Single-model case: ax is a 1-D array of axes.
            imodel=0
            plt.sca(ax[latentdim])
            plt.plot(yhat[latentdim*16+np.arange(0,16),:].detach().numpy())
            ax[latentdim].set_aspect(1./ax[latentdim].get_data_ratio())
            ax[latentdim].spines['top'].set_visible(False)
            ax[latentdim].spines['right'].set_visible(False)
            if latentdim>0:
                ax[latentdim].spines['left'].set_visible(False)
                ax[latentdim].tick_params(axis='y', length=0)
            # if imodel<nmodels-1 or latentdim>0:
            ax[latentdim].spines['bottom'].set_visible(False)
            ax[latentdim].set_xticklabels([])
            ax[latentdim].tick_params(axis='x', length=0)
    return fig, ax
def colorAxisNormalize(colorbar):
    """colorAxisNormalize(colorbar):
    normalizes a color axis so it is centered on zero.
    useful for diverging colormaps
    (e.g., cmap='bwr': blue=negative, red=positive, white=0)
    input is already initialized colorbar object from a plot
    ---e.g.,---
    corr_vae = np.corrcoef(yhat_vae.detach().numpy().T)
    plt.set_cmap('bwr')
    plt.imshow(corr_vae)
    cb = plt.colorbar()
    colorAxisNormalize(cb)
    ---or---
    colorAxisNormalize(plt.colorbar())
    """
    # Make the limits symmetric about zero using the larger magnitude.
    lower, upper = colorbar.get_clim()
    bound = max(abs(lower), abs(upper))
    colorbar.set_clim(-bound, bound)
def showReconstructionsAndErrors(model):
    """showReconstructionsAndErrors(model):
    generates random inputs, runs them through a specified model
    to generate their reconstructions. plots the inputs,
    reconstructions, and their difference
    ---e.g.---
    from staticvae.models import staticVAE32
    vae = staticVAE32(n_latent = 4)
    vae.eval()
    checkpoint = torch.load('../staticvae/trained/staticvae32_dsprites_circle_last_500K',map_location='cpu')
    vae.load_state_dict(checkpoint['model_states']['net'])
    showReconstructionsAndErrors(model)
    """
    fig=plt.figure(figsize=(18, 16), dpi= 80, facecolor='w',
                   edgecolor='k')
    cnt = 0
    # 12 random samples; each occupies 3 consecutive subplot cells in a
    # 6x6 grid: true image, reconstruction, and signed difference.
    for ii in range(12):
        x,label = ds[np.random.randint(1000)]
        x = x[np.newaxis, :, :]
        mu,logvar = model.encode(x.float())
        # Decode the latent mean only (no sampling) for a deterministic recon.
        recon = model.decode(mu).detach()
        diff = x - recon
        cnt += 1
        ax = plt.subplot(6,6,cnt)
        plt.set_cmap('gray')
        ax.imshow(x.squeeze(), vmin=0, vmax=1)
        plt.title('true')
        plt.axis('off')
        cnt += 1
        ax = plt.subplot(6,6,cnt)
        ax.imshow(recon.squeeze(), vmin=0, vmax=1)
        plt.title('recon')
        plt.axis('off')
        cnt += 1
        ax = plt.subplot(6,6,cnt)
        # Diverging colormap for the error; center the color axis on zero.
        plt.set_cmap('bwr')
        img = ax.imshow(diff.numpy().squeeze())
        colorAxisNormalize(fig.colorbar(img))
        plt.title('diff')
        plt.axis('off')
| 7,486 | 2,695 |
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##                          Centering & Scaling
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% Standard scaling
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[ 1000, 0.01, 300],
              [ 1200, 0.06, 350],
              [ 1500, 0.1, 320]])

scaler = StandardScaler().fit(X)  # computes mean & std column-wise
X_scaled = scaler.transform(X)  # transform using computed mean and std

# check mean = 0 and variance = 1 for every variable/column after scaling
print(X_scaled.mean(axis=0))  # return 1D array of size(3,1)
print(X_scaled.std(axis=0))  # return 1D array of size(3,1)

# access mean and variance via object properties
print(scaler.mean_)  # return 1D array of size(3,1)
print(scaler.var_)  # return 1D array of size(3,1)

#%% Normalization
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()  # create object
X_scaled = scaler.fit_transform(X)  # fit & transform

# check min = 0 and max = 1 for every variable/column after scaling
print(X_scaled.min(axis=0))
print(X_scaled.max(axis=0))

# access min and max via object properties
print(scaler.data_min_)
print(scaler.data_max_)

##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##                     Robust Centering & Scaling
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% Generate oulier-infested data
X = np.random.normal(40, 1, (1500,1))
X[200:300] = X[200:300] +8; X[1000:1150] = X[1000:1150] + 8

# plot
import matplotlib.pyplot as plt
plt.plot(X, '.-')
plt.xlabel('sample #'), plt.ylabel('variable measurement')
plt.title('Raw measurements')

#%% Transform via standard scaling
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)

# mean and std
print('Estimated mean = ', scaler.mean_[0])
print('Estimated standard deviation = ', np.sqrt(scaler.var_[0]))

# plot
plt.figure()
plt.plot(X_scaled, '.-')
plt.xlabel('sample #'), plt.ylabel('scaled variable measurement')
plt.xlim((0,1500))
plt.title('Standard scaling')

#%% Transform via robust MAD scaling
# compute median and MAD
from scipy import stats
median = np.median(X)
# FIX: scipy.stats.median_absolute_deviation was deprecated and removed in
# SciPy 1.9. median_abs_deviation with scale='normal' applies the same
# 1.4826 consistency factor, so values match the old function.
MAD = stats.median_abs_deviation(X, scale='normal')

# scale
X_scaled = (X - median)/MAD[0]

# median and MAD
print('Estimated robust location = ', median)
print('Estimated robust spread = ', MAD)

# plot
plt.figure()
plt.plot(X_scaled, '.-')
plt.xlabel('sample #'), plt.ylabel('scaled variable measurement')
plt.xlim((0,1500))
plt.title('Robust MAD scaling')
from pbtaskrunner import db
from pbtaskrunner import app
from datetime import datetime
def date_time_now():
return datetime.now
class TestTask(db.Model):
    """Database representation of a Task test"""
    __tablename__ = 'test_task'
    request_id = db.Column(db.Integer, primary_key=True)
    # Username of the person who requested the test run.
    requester = db.Column('requester', db.String(30))
    # date_time_now() evaluates to the datetime.now callable, which
    # SQLAlchemy invokes at insert time to stamp each new row.
    created = db.Column(db.DateTime, default=date_time_now())
    # NOTE(review): stored as an Integer, presumably the environment number —
    # confirm whether this should be a FK to test_environment.
    test_environment = db.Column('test_environment', db.Integer)
    template = db.Column('template', db.String(256))
    status = db.Column('status', db.String(15))
    output = db.Column('output', db.Text)
    # Identifier of the background task executing this request.
    task_id = db.Column('task_id', db.String(40))
class TestEnvironment(db.Model):
    """Database representation of a test environment"""
    __tablename__ = 'test_environment'
    id = db.Column(db.Integer, primary_key=True)
    # Human-facing environment number (distinct from the surrogate id).
    env_number = db.Column(db.Integer)
    # True while a test task currently occupies this environment.
    in_use = db.Column(db.Boolean, default=False)
| 943 | 306 |
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp import tools
class MassMailingReport(osv.Model):
    # Read-only reporting model backed by a SQL view (_auto = False), so no
    # table is created; init() below (re)builds the view.
    _name = 'mail.statistics.report'
    _auto = False
    _description = 'Mass Mailing Statistics'

    _columns = {
        'scheduled_date': fields.datetime('Scheduled Date', readonly=True),
        'name': fields.char('Mass Mail', readonly=True),
        'campaign': fields.char('Mass Mail Campaign', readonly=True),
        'sent': fields.integer('Sent', readonly=True),
        'delivered': fields.integer('Delivered', readonly=True),
        'opened': fields.integer('Opened', readonly=True),
        'bounced': fields.integer('Bounced', readonly=True),
        'replied': fields.integer('Replied', readonly=True),
        'state': fields.selection(
            [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')],
            string='Status', readonly=True,
            ),
        'email_from': fields.char('From', readonly=True),
    }

    def init(self, cr):
        """Mass Mail Statistical Report: based on mail.mail.statistics that models the various
        statistics collected for each mailing, and mail.mass_mailing model that models the
        various mailing performed. """
        tools.drop_view_if_exists(cr, 'mail_statistics_report')
        # Aggregates one row per (schedule, mailing, campaign, state, sender);
        # 'delivered' is computed as sent minus bounced.
        cr.execute("""
            CREATE OR REPLACE VIEW mail_statistics_report AS (
                SELECT
                    min(ms.id) as id,
                    ms.scheduled as scheduled_date,
                    mm.name as name,
                    mc.name as campaign,
                    count(ms.bounced) as bounced,
                    count(ms.sent) as sent,
                    (count(ms.sent) - count(ms.bounced)) as delivered,
                    count(ms.opened) as opened,
                    count(ms.replied) as replied,
                    mm.state,
                    mm.email_from
                FROM
                    mail_mail_statistics as ms
                    left join mail_mass_mailing as mm ON (ms.mass_mailing_id=mm.id)
                    left join mail_mass_mailing_campaign as mc ON (ms.mass_mailing_campaign_id=mc.id)
                GROUP BY ms.scheduled, mm.name, mc.name, mm.state, mm.email_from
            )""")
| 2,244 | 648 |
import app
# Entry point: aggregate the raw input file into a daily summary CSV.
if __name__ == "__main__":
    app.daily_summary("data/Input.txt", "data/Output.csv")
import urllib, re
# Python 2 code: urllib.FancyURLopener and urllib._urlopener do not exist in
# Python 3.
class FakeUseragentURLopener(urllib.FancyURLopener):
    # Spoof a browser user agent so SpringerLink serves the normal pages.
    version = "Mozilla/5.0 (Ubuntu; X11; Linux i686; rv:9.0.1) Gecko/20100101 Firefox/9.0.1"
# Install the spoofing opener as urllib's module-global default.
urllib._urlopener = FakeUseragentURLopener()
# Scraping patterns for the PDF download link and the ASP.NET hidden form
# fields needed to post the citation-export form.
download_pdf_regex = re.compile('.*<li class="pdf"><a class="sprite pdf-resource-sprite" href="([^"]*)" title="Download PDF.*')
viewstate_regex = re.compile('.*<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="([^"]*)" />.*')
eventvalidation_regex = re.compile('.*<input type="hidden" name="__EVENTVALIDATION" id="__EVENTVALIDATION" value="([^"]*)" />.*')
def download_pdf(url, filename):
    """Scrape a SpringerLink article page for its PDF link and download it.

    Returns False when no PDF link is found; otherwise the truthiness of
    urlretrieve's result. (Python 2: urllib.urlopen/urlretrieve.)
    """
    page = urllib.urlopen(url).read()
    result = download_pdf_regex.search(page)
    if result is None:
        return False
    # The scraped href is site-relative; prefix the host.
    fulltext_url = "http://www.springerlink.com" + result.group(1)
    return urllib.urlretrieve(fulltext_url, filename) is not None
def download_bib(url, filename):
    """Download the BibTeX citation for a SpringerLink item.

    Fetches the export-citation form, extracts the ASP.NET __VIEWSTATE and
    __EVENTVALIDATION tokens, then POSTs the form requesting BibTex output.
    Returns False if the tokens cannot be scraped.
    """
    url += 'export-citation/'
    form = urllib.urlopen(url).read()
    viewstate = viewstate_regex.search(form)
    eventvalidation = eventvalidation_regex.search(form)
    if viewstate is None or eventvalidation is None:
        return False
    viewstate = viewstate.group(1)
    eventvalidation = eventvalidation.group(1)
    # Replay the export form with only the citation + BibTex options set.
    data = urllib.urlencode([
        ('__VIEWSTATE', viewstate),
        ('ctl00$ctl14$cultureList', 'en-us'),
        ('ctl00$ctl14$SearchControl$BasicSearchForTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicAuthorOrEditorTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicPublicationTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicVolumeTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicIssueTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicPageTextBox', ''),
        ('ctl00$ContentPrimary$ctl00$ctl00$Export', 'CitationOnlyRadioButton'),
        ('ctl00$ContentPrimary$ctl00$ctl00$CitationManagerDropDownList', 'BibTex'),
        ('ctl00$ContentPrimary$ctl00$ctl00$ExportCitationButton', 'Export+Citation'),
        ('__EVENTVALIDATION', eventvalidation)])
    # Passing data makes urlretrieve issue a POST.
    return urllib.urlretrieve(url, filename, data=data) is not None
def download_pdf_chapter(url, filename):
    """Download the PDF for a link.springer.com chapter page.

    Chapter PDFs live under /content/pdf/<id>.pdf, so rewrite the chapter
    URL accordingly and fetch it. (Python 2: urllib.urlretrieve.)
    """
    pdf_url = url.replace('/chapter/', '/content/pdf/', 1) + '.pdf'
    return urllib.urlretrieve(pdf_url, filename) is not None
import base
# Register the downloaders with the dispatcher, keyed by URL pattern.
# Old-style springerlink.com content pages support both PDF and BibTeX.
base.register_module('http://www\.springerlink\.com/content/.*',
                     {'name': 'springerlink',
                      'download_pdf': download_pdf,
                      'download_bib': download_bib,
                      })
# New-style link.springer.com chapter pages: PDF only.
base.register_module('http://link\.springer\.com/chapter/.*',
                     {'name': 'springerlink_chapter',
                      'download_pdf': download_pdf_chapter,
                      })
| 2,750 | 882 |
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from config import basedir, ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD, MAIL_SECURE
# Application factory-less setup: single module-level Flask app configured
# from config.py, with SQLAlchemy, Flask-Login, and OpenID attached.
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
# models must be imported after db exists (circular-import workaround).
from app import models
lm = LoginManager()
lm.init_app(app)
# Endpoint Flask-Login redirects anonymous users to.
lm.login_view = 'login'
# OpenID store kept on disk under tmp/.
oid = OpenID(app, os.path.join(basedir, 'tmp'))
@lm.user_loader
def load_user(id):
    """Flask-Login callback: look up a User by its (string) session id."""
    user_id = int(id)
    return models.User.query.get(user_id)
from app import views
# Error handling
# Only attach the heavyweight handlers outside debug mode.
if not app.debug:
    import logging
    from logging.handlers import SMTPHandler, RotatingFileHandler

    # SMTP based handler configuration
    credentials = None
    secure = None
    if MAIL_USERNAME or MAIL_PASSWORD:
        credentials = (MAIL_USERNAME, MAIL_PASSWORD)
        if MAIL_SECURE:
            secure = MAIL_SECURE
    # Emails ERROR-level records to the site admins.
    mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT), MAIL_USERNAME, ADMINS, 'Microblog failure', credentials, secure)
    mail_handler.setLevel(logging.ERROR)

    # File based handler
    # Rotating log: up to 10 files of 1 MiB each under tmp/.
    file_handler = RotatingFileHandler('tmp/microblog.log', 'a', 1 * 1024 * 1024, 10)
    file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    file_handler.setLevel(logging.INFO)

    # Set handlers
    app.logger.setLevel(logging.INFO)
    app.logger.addHandler(mail_handler)
    app.logger.addHandler(file_handler)
    app.logger.info('Microblog startup')
# Generated by Django 3.0.8 on 2020-09-03 17:04
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``wallpaper_representation`` field from Blog.Post."""

    dependencies = [
        ('Blog', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='post',
            name='wallpaper_representation',
        ),
    ]
| 330 | 114 |
from threading import Lock
import discord
from discord.ext import commands
from loguru import logger
from local_types import Snowflake
from modules import is_bot_admin
class Colors(commands.Cog):
    """Cog letting members pick a name color via roles named ``color- <name>``."""
    bot: discord.ext.commands.Bot
    # guild id -> {color name -> role Snowflake}; class-level, so shared by
    # all instances of the cog.
    colorRoles = {}
    mutex = Lock()

    def __init__(self, bot):
        self.bot = bot
        self.reload()

    def reload(self):
        """Rebuild the per-guild color-role lookup from each guild's roles."""
        self.mutex.acquire()
        try:
            for g in self.bot.guilds:
                try:
                    self.colorRoles[g.id].clear()
                except Exception:
                    pass  # first scan of this guild: nothing to clear
                d = {}
                for r in g.roles:
                    if r.name.lower().startswith("color- "):
                        color_name = r.name.lower().split("color- ")[1]
                        d[color_name] = Snowflake(r.id)
                # logger.debug(f"color roles: {d}")
                self.colorRoles[g.id] = d
        finally:
            # Always release, even if role enumeration raises.
            self.mutex.release()

    @commands.command(name='reload_colors', hidden=True)
    @commands.check_any(is_bot_admin(), commands.has_permissions(manage_roles=True), commands.is_owner())
    @commands.max_concurrency(1, wait=True)
    @commands.guild_only()
    async def reload_colors(self, ctx):
        # BUG FIX: reload() is synchronous; `await self.reload()` raised
        # TypeError (None is not awaitable).
        self.reload()

    async def print_colors(self, ctx: discord.ext.commands.Context):
        """Send the list of available color names to the channel."""
        g: discord.Guild = ctx.guild
        d: dict = self.colorRoles[g.id]
        roles = list(d.keys())
        await ctx.send(f"```{', '.join(roles)}```")

    # do not use outside of color command function
    async def remove_roles(self, ctx: discord.ext.commands.Context):
        """Strip every color role the invoking member currently has."""
        g: discord.Guild = ctx.guild
        member: discord.member.Member = g.get_member(ctx.author.id)
        d: dict = self.colorRoles[g.id]
        to_remove = []
        # NOTE(review): assumes Snowflake values expose .id like Role objects
        # and are accepted by remove_roles — confirm Snowflake's interface.
        for r in d.values():
            for mr in member.roles:
                if r.id == mr.id:
                    to_remove.append(r)
        await member.remove_roles(*to_remove, reason="Color Command", atomic=True)

    @commands.command(name='color', help="Choose your name color")
    @commands.cooldown(type=commands.BucketType.user, rate=1, per=3)
    @commands.guild_only()
    async def color(self, ctx: discord.ext.commands.Context, color: str):
        """Assign the requested color role (or list colors with 'list')."""
        self.mutex.acquire()
        try:
            g: discord.Guild = ctx.guild
            member: discord.member.Member = g.get_member(ctx.author.id)
            color = color.lower()
            if color == "list":
                await self.print_colors(ctx)
            else:
                d: dict = self.colorRoles[g.id]
                if d is None:
                    await ctx.send(f"{ctx.author.mention} could not find any color roles in this server!")
                else:
                    try:
                        r = d[color]
                        await self.remove_roles(ctx)
                        await member.add_roles(r)
                        await ctx.send(f"{ctx.author.mention} successfully changed your color to {color}")
                    except KeyError:
                        await ctx.send(
                            f"{ctx.author.mention} could not find any such color!\n ```{self.bot.command_prefix}{ctx.command.name} list``` to view available colors")
        finally:
            # FIX: the original left the lock held if any await above raised,
            # deadlocking every later color command.
            self.mutex.release()

    @color.error
    async def color_error(self, ctx, error):
        """Report cooldown violations to the user; log anything else."""
        if isinstance(error, discord.ext.commands.errors.CommandOnCooldown):
            await ctx.send(f"{ctx.author.mention} {error}")
        else:
            logger.error(f"color error: {error}")
| 3,460 | 1,076 |
import sys
# Make the generated projection modules and the pywinrt build output importable.
sys.path.append("./generated")
sys.path.append("../../package/pywinrt/projection/pywinrt")
import _winrt
# Initialize the COM apartment as multi-threaded before any WinRT calls.
_winrt.init_apartment(_winrt.MTA)
def import_ns(ns):
    """Load the WinRT projection module for namespace *ns*.

    Each namespace is exposed as a separately named extension module that
    lives inside the same binary as ``_winrt`` itself, so we point an
    ExtensionFileLoader at ``_winrt.__file__`` under the derived name.
    """
    import importlib.machinery
    import importlib.util
    mod_name = "_winrt_" + ns.replace('.', '_')
    ext_loader = importlib.machinery.ExtensionFileLoader(mod_name, _winrt.__file__)
    mod_spec = importlib.util.spec_from_loader(mod_name, ext_loader)
    ns_module = importlib.util.module_from_spec(mod_spec)
    ext_loader.exec_module(ns_module)
    return ns_module
| 526 | 181 |
#!/usr/bin/env python
import sys
import subprocess
import evalBedFile
# Delly file format (when only del summaries in file - cat *.del.txt | grep Deletion)
# The summary line contains the chromosome, the estimated start and end of the structural variant,
# the size of the variant, the number of supporting pairs, the average mapping quality and a unique structural variant id.
# 2 3666033 3666250 217 2 1.5 >Deletion_JCVICHR2SIM_00000053<
# Evaluate Delly deletion calls against a truth BED file, sweeping score
# thresholds (or a single threshold with --printHits / --printBed).
delly_filename = sys.argv[1]
truth_filename = sys.argv[2]
score_values = []
print_hits = False
print_bed = False
if len(sys.argv) == 5 and sys.argv[3] == "--printHits":
    threshold = float(sys.argv[4])
    score_values.append(threshold)
    print_hits = True
elif len(sys.argv) == 5 and sys.argv[3] == "--printBed":
    threshold = float(sys.argv[4])
    score_values.append(threshold)
    print_bed = True
else:
    # No explicit threshold: sweep every distinct score seen in the file.
    with open(delly_filename, "r") as delly_file:
        for line in delly_file:
            if line.startswith("#"):
                continue
            fields = line.split("\t")
            # use num pairs as score for now
            score_values.append(float(fields[4]))
unique_score_values = sorted(set(score_values))
if not print_hits and not print_bed:
    print("\t".join(["Thresh", "Calls", "TP", "WrongType", "Short", "TPR"]))
for v in unique_score_values:
    calls_gte_threshold = []
    non_del_calls = 0
    # with-block so each sweep pass closes its handle (the original opened
    # the file once per threshold and never closed it).
    with open(delly_filename, "r") as delly_file:
        for line in delly_file:
            if line.startswith("#"):
                continue
            fields = line.split("\t")
            if float(fields[4]) >= v:
                chrom = fields[0]
                ostart = fields[1]
                oend = fields[2]
                calls_gte_threshold.append("\t".join([chrom, ostart, oend]))
    if print_bed:
        print("\n".join(calls_gte_threshold))
        continue
    (qualified_calls, matches, short_calls) = evalBedFile.eval_bed_deletions(truth_filename, calls_gte_threshold, print_hits)
    # Guard the TPR division: zero qualified calls previously crashed with
    # ZeroDivisionError.
    tpr = float(matches) / qualified_calls if qualified_calls else 0.0
    if not print_hits:
        print("\t".join(map(str, [v, qualified_calls, matches, non_del_calls, short_calls, tpr])))
| 2,247 | 781 |
import ctypes
import struct
import time
#
# A small example how to use basic_dsp in a different language.
#
class VecResult(ctypes.Structure):
    # Mirrors basic_dsp's FFI result struct: a status code (0 == success)
    # plus an opaque pointer to the possibly-reallocated vector.
    _fields_ = [("resultCode", ctypes.c_int),
                ("result", ctypes.c_void_p)]
lib = ctypes.WinDLL('basic_dsp.dll')
# new64(is_complex, domain, init_value, length, delta) -> opaque vector handle.
new64Proto = ctypes.WINFUNCTYPE (
    ctypes.c_void_p, # Return type.
    ctypes.c_int,
    ctypes.c_int,
    ctypes.c_double,
    ctypes.c_ulong,
    ctypes.c_double)
new64 = new64Proto (("new64", lib))
# get_value64(vector, index) -> sample value.
getValue64Proto = ctypes.WINFUNCTYPE (
    ctypes.c_double, # Return type.
    ctypes.c_void_p,
    ctypes.c_ulong)
getValue64 = getValue64Proto (("get_value64", lib))
# real_offset64(vector, offset) -> VecResult; the returned handle replaces
# the one passed in.
offset64Proto = ctypes.WINFUNCTYPE (
    VecResult, # Return type.
    ctypes.c_void_p,
    ctypes.c_double)
offset64 = offset64Proto (("real_offset64", lib))
vec = new64(
    ctypes.c_int(0),
    ctypes.c_int(0),
    ctypes.c_double(0.0),
    ctypes.c_ulong(100000),
    ctypes.c_double(1.0))
val = getValue64(vec, ctypes.c_ulong(0))
print('At the start: vec[0] = {}'.format(val))
# time.clock() was removed in Python 3.8; perf_counter() is the recommended
# high-resolution replacement for benchmarking elapsed time.
start = time.perf_counter()
iterations = 100000
toNs = 1e9 / iterations
increment = 5.0
for x in range(0, iterations):
    vecRes = offset64(vec, ctypes.c_double(increment))
    vec = vecRes.result
end = time.perf_counter()
print('{} ns per iteration, each iteration has {} samples'.format((end - start) * toNs, iterations))
print('Result code: {} (0 means no error)'.format(vecRes.resultCode))
vecRes = offset64(vec, ctypes.c_double(5.0))
vec = vecRes.result
val = getValue64(vec, ctypes.c_ulong(0))
print('After {} iterations of increment by {}: vec[0] = {}'.format(iterations + 1, increment, val))
| 1,645 | 651 |
#!/usr/bin/env python
# coding: utf-8
import pygame
import operator
from mino import *
from random import *
from pygame.locals import *
from ui import *
from screeninfo import get_monitors
from pygame.surface import Surface
import sys
from function import *
# Screen sizing: 70% of the last monitor enumerated by screeninfo.
screen_width = 0
screen_height = 0
for m in get_monitors():
    screen_width = int(m.width*0.7)
    screen_height = int(m.height*0.7)
# Define
block_size = 25
width = 10 # Board width
height = 20 # Board height
framerate = 30 # Bigger -> Slower
framerate_n = 30
pygame.init()
size = [screen_width, screen_height]
clock = pygame.time.Clock()
screen = pygame.display.set_mode(size)
# NOTE(review): the second set_timer call overwrites the first (same
# USEREVENT id); both intervals are 300 ms here, so only one timer is armed.
pygame.time.set_timer(pygame.USEREVENT, framerate * 10)
pygame.time.set_timer(pygame.USEREVENT, framerate_n * 10)
pygame.display.set_caption("ACOTRIS™")
background_file = '../assets/images/backgroundimage.png'
# draw single board
def draw_single_board(next, hold, score, level, goal, matrix):
    """Render one single-player frame: background, next/hold previews,
    score/level/goal texts, the ACO picture and the playing field.

    ``next``/``hold`` are 1-based mino ids (``hold == -1`` means nothing is
    held yet); ``matrix`` is the width x (height+1) board grid.
    """
    screen.fill(ui_variables.black)
    background_image_alpha(screen, background_file,screen_width, screen_height)
    # Draw next mino
    grid_n = tetrimino.mino_map[next - 1][0]
    for i in range(4):
        for j in range(4):
            dx = screen_width*0.692 + block_size * j
            dy = screen_height*0.22 + block_size * i
            if grid_n[i][j] != 0:
                pygame.draw.rect(
                    screen,
                    ui_variables.t_color[grid_n[i][j]],
                    Rect(dx, dy, block_size*0.9, block_size*0.9)
                )
    # Draw hold mino. Use the `hold` parameter instead of the global
    # `hold_mino` the original consulted, and only index mino_map once we
    # know something is held (hold == -1 silently read mino_map[-2]).
    if hold != -1:
        grid_h = tetrimino.mino_map[hold - 1][0]
        for i in range(4):
            for j in range(4):
                dx = screen_width*0.252 + block_size * j
                dy = screen_height*0.22 + block_size * i
                if grid_h[i][j] != 0:
                    pygame.draw.rect(
                        screen,
                        ui_variables.t_color[grid_h[i][j]],
                        Rect(dx, dy, block_size*0.9, block_size*0.9)
                    )
    # Set max score
    if score > 999999:
        score = 999999
    # Draw texts
    text_hold = ui_variables.DG_v_small.render("HOLD", 1, ui_variables.white)
    text_next = ui_variables.DG_v_small.render("NEXT", 1, ui_variables.white)
    text_score = ui_variables.DG_v_small.render("SCORE", 1, ui_variables.white)
    score_value = ui_variables.DG_v_small.render(str(score), 1, ui_variables.white)
    text_level = ui_variables.DG_v_small.render("LEVEL", 1, ui_variables.white)
    level_value = ui_variables.DG_v_small.render(str(level), 1, ui_variables.white)
    text_goal = ui_variables.DG_v_small.render("GOAL", 1, ui_variables.white)
    goal_value = ui_variables.DG_v_small.render(str(goal), 1, ui_variables.white)
    aco = ui_variables.DG_v_small.render("ACO level", 1, ui_variables.white)
    screen.blit(text_hold, (screen_width*0.25, screen_height*0.15))
    screen.blit(text_level, (screen_width*0.25, screen_height*0.35))
    screen.blit(level_value, (screen_width*0.25, screen_height*0.4))
    screen.blit(text_goal, (screen_width*0.25, screen_height*0.65))
    screen.blit(goal_value, (screen_width*0.25, screen_height*0.7))
    screen.blit(text_next, (screen_width*0.69, screen_height*0.15))
    screen.blit(aco, (screen_width*0.69, screen_height*0.35))
    screen.blit(text_score, (screen_width*0.69, screen_height*0.65))
    # (removed a stray '\' line continuation that joined the next comment
    # onto this statement)
    screen.blit(score_value, (screen_width*0.69, screen_height*0.7))
    # ACO picture on the play screen
    aco_level(level, int(screen_width*0.68), int(screen_height*0.41))
    # Draw board
    for x in range(width):
        for y in range(height):
            dx = screen_width*0.4 + block_size * x
            dy = screen_height*0.1 + block_size * y
            draw_block(screen,dx, dy, ui_variables.t_color[matrix[x][y + 1]], block_size)
def draw_multi_board_1(next, hold_n, score, level, goal, matrix_n):
    """Render player 1's half of the multiplayer frame (left board,
    previews and HUD texts).

    ``next``/``hold_n`` are 1-based mino ids (``hold_n == -1`` means nothing
    held); ``matrix_n`` is player 1's board grid.
    """
    # Draw next mino_player1
    grid_n = tetrimino.mino_map[next - 1][0]
    for x in range(4):
        for y in range(4):
            dx = screen_width*0.39 + block_size * 0.72 * y
            dy = screen_height*0.23 + block_size * 0.72 * x
            if grid_n[x][y] != 0:
                pygame.draw.rect(
                    screen,
                    ui_variables.t_color[grid_n[x][y]],
                    Rect(dx, dy, block_size * 0.7, block_size * 0.7)
                )
    # Draw hold mino_player1. Use the `hold_n` parameter instead of the
    # global `hold_mino_n`, and only index mino_map when something is held
    # (hold_n == -1 silently read mino_map[-2] before).
    if hold_n != -1:
        grid_h = tetrimino.mino_map[hold_n - 1][0]
        for x in range(4):
            for y in range(4):
                dx = screen_width*0.095 + block_size * 0.72 * y
                dy = screen_height*0.23 + block_size * 0.72 * x
                if grid_h[x][y] != 0:
                    pygame.draw.rect(
                        screen,
                        ui_variables.t_color[grid_h[x][y]],
                        Rect(dx, dy, block_size * 0.7, block_size * 0.7)
                    )
    # Set max score
    if score > 999999:
        score = 999999
    # Draw texts
    text_hold = ui_variables.DG_v_small.render("HOLD", 1, ui_variables.white)
    text_next = ui_variables.DG_v_small.render("NEXT", 1, ui_variables.white)
    text_score = ui_variables.DG_v_small.render("SCORE", 1, ui_variables.white)
    score_value = ui_variables.DG_v_small.render(str(score), 1, ui_variables.white)
    text_level = ui_variables.DG_v_small.render("LEVEL", 1, ui_variables.white)
    level_value = ui_variables.DG_v_small.render(str(level), 1, ui_variables.white)
    text_goal = ui_variables.DG_v_small.render("GOAL", 1, ui_variables.white)
    goal_value = ui_variables.DG_v_small.render(str(goal), 1, ui_variables.white)
    # Place texts for player1
    screen.blit(text_hold, (screen_width*0.091, screen_height*0.15))
    screen.blit(text_level, (screen_width*0.083, screen_height*0.43))
    screen.blit(level_value, (screen_width*0.11, screen_height*0.48))
    screen.blit(text_goal, (screen_width*0.092, screen_height*0.7))
    screen.blit(goal_value, (screen_width*0.115, screen_height*0.75))
    screen.blit(text_next, (screen_width*0.389, screen_height*0.15))
    screen.blit(text_score, (screen_width*0.388, screen_height*0.7))
    screen.blit(score_value, (screen_width*0.393, screen_height*0.75))
    aco = ui_variables.DG_v_small.render("ACO level", 1, ui_variables.white)
    screen.blit(aco, (screen_width*0.39, screen_height*0.43))
    # NOTE(review): uses the module-level `level1` rather than the `level`
    # parameter — kept as-is to preserve existing behavior; verify intent.
    aco_level(level1, screen_width*0.38, screen_height*0.48)
    # Draw board - player1
    for x in range(width):
        for y in range(height):
            dx = screen_width*0.15 + block_size * x
            dy = screen_height*0.1 + block_size * y
            draw_block(screen, dx, dy, ui_variables.t_color[matrix_n[x][y + 1]], block_size)
# Draw multi board
def draw_multi_board_2(next, hold, score, level, goal, matrix):
    """Render player 2's half of the multiplayer frame (right board,
    previews and HUD texts).

    ``next``/``hold`` are 1-based mino ids (``hold == -1`` means nothing
    held); ``matrix`` is player 2's board grid.
    """
    # Draw next mino_player2
    grid_m = tetrimino.mino_map[next - 1][0]
    for x in range(4):
        for y in range(4):
            dx = screen_width*0.84 + block_size * 0.72 * y
            dy = screen_height*0.23 + block_size * 0.72 * x
            if grid_m[x][y] != 0:
                pygame.draw.rect(
                    screen,
                    ui_variables.t_color[grid_m[x][y]],
                    Rect(dx,dy, block_size*0.7, block_size*0.7)
                )
    # Draw hold mino_player2. Use the `hold` parameter instead of the global
    # `hold_mino`, and only index mino_map when something is held
    # (hold == -1 silently read mino_map[-2] before).
    if hold != -1:
        grid_i = tetrimino.mino_map[hold - 1][0]
        for x in range(4):
            for y in range(4):
                dx = screen_width*0.55 + block_size * 0.72 * y
                dy = screen_height*0.23 + block_size * 0.72 * x
                if grid_i[x][y] != 0:
                    pygame.draw.rect(
                        screen,
                        ui_variables.t_color[grid_i[x][y]],
                        Rect(dx, dy, block_size * 0.7, block_size * 0.7)
                    )
    # Set max score
    if score > 999999:
        score = 999999
    # Draw texts
    text_hold = ui_variables.DG_v_small.render("HOLD", 1, ui_variables.white)
    text_next = ui_variables.DG_v_small.render("NEXT", 1, ui_variables.white)
    text_score = ui_variables.DG_v_small.render("SCORE", 1, ui_variables.white)
    score_value = ui_variables.DG_v_small.render(str(score), 1, ui_variables.white)
    text_level = ui_variables.DG_v_small.render("LEVEL", 1, ui_variables.white)
    level_value = ui_variables.DG_v_small.render(str(level), 1, ui_variables.white)
    text_goal = ui_variables.DG_v_small.render("GOAL", 1, ui_variables.white)
    goal_value = ui_variables.DG_v_small.render(str(goal), 1, ui_variables.white)
    # Place texts for player2
    screen.blit(text_hold, (screen_width*0.546, screen_height*0.15))
    screen.blit(text_level, (screen_width*0.54, screen_height*0.43))
    screen.blit(level_value, (screen_width*0.546, screen_height*0.48))
    screen.blit(text_goal, (screen_width*0.54, screen_height*0.7))
    screen.blit(goal_value, (screen_width*0.562, screen_height*0.75))
    screen.blit(text_next, (screen_width*0.84, screen_height*0.15))
    screen.blit(text_score, (screen_width*0.845, screen_height*0.7))
    screen.blit(score_value, (screen_width*0.85, screen_height*0.75))
    aco = ui_variables.DG_v_small.render("ACO level", 1, ui_variables.white)
    screen.blit(aco, (screen_width*0.845, screen_height*0.43))
    aco_level(level, screen_width*0.84, screen_height*0.48)
    # Draw board - player2
    for i in range(width):
        for j in range(height):
            di = screen_width*0.6 + block_size * i
            dj = screen_height*0.1 + block_size * j
            draw_block(screen, di, dj, ui_variables.t_color[matrix[i][j + 1]], block_size)
#background image
def background_image(filename, width, height, blit_pos):
    """Scale the image at *filename* to width x height and blit it at (0, blit_pos)."""
    raw = pygame.image.load(filename)
    scaled = pygame.transform.scale(raw, (width, height))
    screen.blit(scaled, (0, blit_pos))
def aco_level(level, x, y):
    """Blit the ACO mascot matching the game mode (module-level ``type``)
    and the current *level* at screen position (x, y)."""
    # ACO picture on the play screen: in mode 1 the base picture is drawn
    # first, then overdrawn by an evolved one once the level is high enough.
    if type == 1:
        screen.blit(rect_aco1, (x, y))
        if 5 <= level <= 9:
            screen.blit(rect_aco2, (x, y))
        elif level >= 10:
            screen.blit(rect_aco3, (x, y))
    elif type == 2:
        screen.blit(rect_aco2, (x, y))
        if level >= 10:
            screen.blit(rect_aco3, (x, y))
    elif type == 3:
        screen.blit(rect_aco3, (x, y))
# insert image: (x, y) is the top-left position, r the width and c the height in pixels
def insert_image(image, x, y, r, c):
    """Scale *image* to (r, c) pixels and blit it at (x, y)."""
    scaled = pygame.transform.scale(image, (r, c))
    screen.blit(scaled, (x, y))
# image
image_aco1 = pygame.image.load('../assets/images/aco1.png')
image_aco2 = pygame.image.load('../assets/images/aco2.png')
image_aco3 = pygame.image.load('../assets/images/aco3.png')
image_manual = pygame.image.load('../assets/images/manual.png')
image_winner = pygame.image.load('../assets/images/winner1.png')
image_trophy = pygame.image.load('../assets/images/trophy.png')
# Pre-converted/scaled mascot surfaces used by aco_level().
rect_aco1b = pygame.image.load('../assets/images/aco1.png').convert()
rect_aco2b = pygame.image.load('../assets/images/aco2.png').convert()
rect_aco3b = pygame.image.load('../assets/images/aco3.png').convert()
rect_aco1 = pygame.transform.scale(rect_aco1b, (int(screen_width*0.12), int(screen_height*0.13)))
rect_aco2 = pygame.transform.scale(rect_aco2b, (int(screen_width*0.13), int(screen_height*0.16)))
rect_aco3 = pygame.transform.scale(rect_aco3b, (int(screen_width*0.14), int(screen_height*0.18)))
# Initial values
blink = False
start_single = False # single mode
start_multi = False # multi mode
pause = False
done = False
game_over = False
multi_over = False
show_score = False
show_manual = False
screen_Start = True
game_mode = False
score = 0
score_n = 0
level = 1
level_n = 1
goal = 1
goal_n = 1
bottom_count = 0
bottom_count_n = 0
hard_drop = False
hard_drop_n = False
player = 0
dx, dy = 3, 0 # Minos location status
dp, dq = 3, 0
rotation = 0 # Minos rotation status
rotation_n = 0
mino = randint(1, 7) # Current mino
mino_n = randint(1,7)
next_mino = randint(1, 7) # Next mino
next_mino_n = randint(1,7)
hold = False # Hold status
hold_n=False
hold_mino = -1 # Holded mino
hold_mino_n = -1
name_location = 0
name = [65, 65, 65]
# Mode selector for the per-mode ACO picture (NOTE: shadows the builtin `type`)
type = 0
level1 = 0
level2 = 0
# Load the leaderboard once. The original re-opened the file a second time
# (and never closed that handle) just to strip trailing newlines.
with open('leaderboard.txt') as f:
    lines = [line.rstrip('\n') for line in f]
leaders = {}
# Each line is "<name> <score>"; later duplicates of a name overwrite earlier ones.
for i in lines:
    leaders[i.split(' ')[0]] = int(i.split(' ')[1])
leaders = sorted(leaders.items(), key=operator.itemgetter(1), reverse=True)
matrix= [[0 for y in range(height + 1)] for x in range(width)] # Board matrix
matrix_n = [[0 for k in range(height + 1)] for p in range(width)]
###########################################################
# Loop Start
###########################################################
while not done:
# Pause screen
if pause:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
if start_single == True:
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
elif start_multi == True:
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
#pause시 화면 불투명하게
pause_surface = screen.convert_alpha()
pause_surface.fill((0, 0, 0, 0))
pygame.draw.rect(pause_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)])
screen.blit(pause_surface, (0, 0))
pause_text = ui_variables.DG_70.render("PAUSED", 1, ui_variables.white)
pause_start = ui_variables.DG_small.render("Press esc to continue", 1, ui_variables.white)
screen.blit(pause_text, (screen_width*0.415, screen_height*0.35))
if blink:
screen.blit(pause_start, (screen_width*0.36, screen_height*0.6))
blink = False
else:
blink = True
pygame.display.update()
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation, matrix)
erase_mino(dp, dq, mino_n, rotation_n, matrix_n)
if event.key == K_ESCAPE:
pause = False
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_q:
done = True
# Game screen
# Start_single screen
elif start_single:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
# Set speed
if not game_over:
keys_pressed = pygame.key.get_pressed()
if keys_pressed[K_DOWN]:
pygame.time.set_timer(pygame.USEREVENT, framerate * 1)
else:
pygame.time.set_timer(pygame.USEREVENT, framerate * 10)
# Draw a mino
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Erase a mino
if not game_over:
erase_mino(dx, dy, mino, rotation, matrix)
# Move mino down
if not is_bottom(dx, dy, mino, rotation, matrix):
dy += 1
# Create new mino
else:
if hard_drop or bottom_count == 6:
hard_drop = False
bottom_count = 0
score += 10 * level
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
if is_stackable(next_mino, matrix):
mino = next_mino
next_mino = randint(1, 7)
dx, dy = 3, 0
rotation = 0
hold = False
else:
start_single = False
game_over = True
single = True
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count += 1
# Erase line
erase_count = 0
for j in range(21):
is_full = True
for i in range(10):
if matrix[i][j] == 0:
is_full = False
if is_full:
erase_count += 1
k = j
while k > 0:
for i in range(10):
matrix[i][k] = matrix[i][k - 1]
k -= 1
if erase_count == 1:
score += 50 * level
elif erase_count == 2:
score += 150 * level
elif erase_count == 3:
score += 350 * level
elif erase_count == 4:
score += 1000 * level
# Increase level
goal -= erase_count
if goal < 1 and level < 15:
level += 1
goal += level * 5
framerate = int(framerate * 0.8)
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation, matrix)
if event.key == K_ESCAPE:
pause = True
#Q누르면 창 나가짐
elif event.key == K_q:
done = True
# Hard drop
elif event.key == K_SPACE:
while not is_bottom(dx, dy, mino, rotation, matrix):
dy += 1
hard_drop = True
pygame.time.set_timer(pygame.USEREVENT, 1)
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Hold
elif event.key == K_LSHIFT:
if hold == False:
if hold_mino == -1:
hold_mino = mino
mino = next_mino
next_mino = randint(1, 7)
else:
hold_mino, mino = mino, hold_mino
dx, dy = 3, 0
rotation = 0
hold = True
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Turn right
elif event.key == K_UP:
if is_turnable_r(dx, dy, mino, rotation, matrix):
rotation += 1
# Kick
elif is_turnable_r(dx, dy - 1, mino, rotation, matrix):
dy -= 1
rotation += 1
elif is_turnable_r(dx + 1, dy, mino, rotation, matrix):
dx += 1
rotation += 1
elif is_turnable_r(dx - 1, dy, mino, rotation, matrix):
dx -= 1
rotation += 1
elif is_turnable_r(dx, dy - 2, mino, rotation, matrix):
dy -= 2
rotation += 1
elif is_turnable_r(dx + 2, dy, mino, rotation, matrix):
dx += 2
rotation += 1
elif is_turnable_r(dx - 2, dy, mino, rotation, matrix):
dx -= 2
rotation += 1
if rotation == 4:
rotation = 0
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Turn left
elif event.key == K_z or event.key == K_LCTRL:
if is_turnable_l(dx, dy, mino, rotation, matrix):
rotation -= 1
# Kick
elif is_turnable_l(dx, dy - 1, mino, rotation, matrix):
dy -= 1
rotation -= 1
elif is_turnable_l(dx + 1, dy, mino, rotation, matrix):
dx += 1
rotation -= 1
elif is_turnable_l(dx - 1, dy, mino, rotation, matrix):
dx -= 1
rotation -= 1
elif is_turnable_l(dx, dy - 2, mino, rotation, matrix):
dy -= 2
rotation += 1
elif is_turnable_l(dx + 2, dy, mino, rotation, matrix):
dx += 2
rotation += 1
elif is_turnable_l(dx - 2, dy, mino, rotation, matrix):
dx -= 2
if rotation == -1:
rotation = 3
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Move left
elif event.key == K_LEFT:
if not is_leftedge(dx, dy, mino, rotation, matrix):
dx -= 1
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Move right
elif event.key == K_RIGHT:
if not is_rightedge(dx, dy, mino, rotation, matrix):
dx += 1
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
pygame.display.update()
# Start_multi screen
elif start_multi:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
screen.fill(ui_variables.black)
background_image_alpha(screen, background_file, screen_width, screen_height)
if not multi_over:
keys_pressed = pygame.key.get_pressed()
if keys_pressed[K_DOWN]:
pygame.time.set_timer(pygame.USEREVENT, framerate*1)
else:
pygame.time.set_timer(pygame.USEREVENT, framerate*10)
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
# Erase a mino
if not multi_over:
erase_mino(dx, dy, mino, rotation, matrix)
# Move mino down
if not is_bottom(dx, dy, mino, rotation, matrix):
dy += 1
# Create new mino
else:
if hard_drop or bottom_count == 6:
hard_drop = False
bottom_count = 0
score += 10 * level
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
if is_stackable(next_mino, matrix):
mino = next_mino
next_mino = randint(1, 7)
dx, dy = 3, 0
rotation = 0
hold = False
else:
start_multi = False
multi_over = True
player = 1
single = False
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count += 1
# Erase line
erase_count = 0
for j in range(21):
is_full = True
for i in range(10):
if matrix[i][j] == 0:
is_full = False
if is_full:
erase_count += 1
k = j
while k > 0:
for i in range(10):
matrix[i][k] = matrix[i][k - 1]
k -= 1
if erase_count == 1:
score += 50 * level
elif erase_count == 2:
score += 150 * level
elif erase_count == 3:
score += 350 * level
elif erase_count == 4:
score += 1000 * level
# Increase level
goal -= erase_count
if goal < 1 and level < 15:
level += 1
goal += level * 5
framerate = int(framerate * 0.8)
level_2 = level
draw_mino(dp, dq, mino_n, rotation_n ,matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
if not multi_over:
erase_mino(dp, dq, mino_n, rotation_n, matrix_n)
# Move mino down
if not is_bottom(dp, dq, mino_n, rotation_n, matrix_n):
dq += 1
else:
if hard_drop_n or bottom_count_n == 6:
hard_drop_n = False
bottom_count_n = 0
score_n+=10*level_n
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
if is_stackable(next_mino_n, matrix_n):
mino_n = next_mino_n
next_mino_n = randint(1,7)
dp, dq = 3, 0
rotation_n = 0
hold_n = False
else:
start_multi = False
multi_over= True
player = 2
single = False
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count_n += 1
erase_count_n = 0
for j in range(21):
is_full_n = True
for i in range(10):
if matrix_n[i][j] == 0:
is_full_n = False
if is_full_n:
erase_count_n += 1
k = j
while k > 0:
for i in range(10):
matrix_n[i][k] = matrix_n[i][k-1]
k -= 1
if erase_count_n == 1:
score_n += 50 * level_n
elif erase_count_n == 2:
score_n += 150 * level_n
elif erase_count_n == 3:
score_n += 350 * level_n
elif erase_count_n == 4:
score_n += 1000 * level_n
# Increase level
goal_n -= erase_count_n
if goal_n < 1 and level_n < 15:
level_n += 1
goal_n += level_n * 5
framerate_n = int(framerate_n * 0.8)
level1 = level_n
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation, matrix)
erase_mino(dp, dq, mino_n, rotation_n, matrix_n)
if event.key == K_ESCAPE:
pause = True
#Q누르면 창 나가짐
elif event.key == K_q:
done = True
# Hard drop
elif event.key == K_SPACE:
while not is_bottom(dx, dy, mino, rotation, matrix):
dy += 1
hard_drop = True
pygame.time.set_timer(pygame.USEREVENT, framerate)
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_LCTRL:
while not is_bottom(dp, dq, mino_n, rotation_n, matrix_n):
dq += 1
hard_drop_n = True
pygame.time.set_timer(pygame.USEREVENT, framerate_n)
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
# Hold
elif event.key == K_RSHIFT:
if hold == False:
if hold_mino == -1:
hold_mino = mino
mino = next_mino
next_mino = randint(1, 7)
else:
hold_mino, mino = mino, hold_mino
dx, dy = 3, 0
rotation = 0
hold = True
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_LSHIFT:
if hold_n == False:
if hold_mino_n == -1:
hold_mino_n = mino_n
mino_n = next_mino_n
next_mino_n = randint(1,7)
else:
hold_mino_n, mino_n = mino_n, hold_mino_n
dp, dq = 3, 0
rotation_n = 0
hold_n = True
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
# Turn right
elif event.key == K_UP :
if is_turnable_r(dx, dy, mino, rotation, matrix):
rotation += 1
# Kick
elif is_turnable_r(dx, dy - 1, mino, rotation, matrix):
dy -= 1
rotation += 1
elif is_turnable_r(dx + 1, dy, mino, rotation, matrix):
dx += 1
rotation += 1
elif is_turnable_r(dx - 1, dy, mino, rotation, matrix):
dx -= 1
rotation += 1
elif is_turnable_r(dx, dy - 2, mino, rotation, matrix):
dy -= 2
rotation += 1
elif is_turnable_r(dx + 2, dy, mino, rotation, matrix):
dx += 2
rotation += 1
elif is_turnable_r(dx - 2, dy, mino, rotation, matrix):
dx -= 2
rotation += 1
if rotation == 4:
rotation = 0
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_w:
if is_turnable_r(dp, dq, mino_n, rotation_n, matrix_n):
rotation_n += 1
# Kick
elif is_turnable_r(dp, dq - 1, mino_n, rotation_n, matrix_n):
dq -= 1
rotation_n += 1
elif is_turnable_r(dp + 1, dq, mino_n,rotation_n, matrix_n):
dp += 1
rotation_n += 1
elif is_turnable_r(dp - 1, dq, mino_n, rotation_n, matrix_n):
dp -= 1
rotation_n += 1
elif is_turnable_r(dp, dq - 2, mino_n, rotation_n, matrix_n):
dq -= 2
rotation_n+= 1
elif is_turnable_r(dp + 2, dq, mino_n,rotation_n, matrix_n):
dp += 2
rotation_n+= 1
elif is_turnable_r(dp - 2, dq, mino_n, rotation_n, matrix_n):
dp -= 2
rotation_n += 1
if rotation_n == 4:
rotation_n = 0
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
# Move left
elif event.key == K_LEFT:
if not is_leftedge(dx, dy, mino, rotation, matrix):
dx -= 1
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_a:
if not is_leftedge(dp, dq, mino_n, rotation_n, matrix_n):
dp -= 1
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
# Move right
elif event.key == K_RIGHT:
if not is_rightedge(dx, dy, mino, rotation, matrix):
dx += 1
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_d:
if not is_rightedge(dp, dq, mino_n, rotation_n, matrix_n):
dp += 1
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
pygame.display.update()
# Game over screen
elif game_over:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
over_text_1 = ui_variables.DG_70.render("GAME OVER", 1, ui_variables.white)
over_start = ui_variables.DG_v_small.render("Press return to continue", 1, ui_variables.white)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
#pause시 화면 불투명하게
over_surface = screen.convert_alpha()
over_surface.fill((0, 0, 0, 0))
pygame.draw.rect(over_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)])
screen.blit(over_surface, (0, 0))
name_1 = ui_variables.DGM40.render(chr(name[0]), 1, ui_variables.white)
name_2 = ui_variables.DGM40.render(chr(name[1]), 1, ui_variables.white)
name_3 = ui_variables.DGM40.render(chr(name[2]), 1, ui_variables.white)
underbar_1 = ui_variables.DGM40.render("_", 1, ui_variables.white)
underbar_2 = ui_variables.DGM40.render("_", 1, ui_variables.white)
underbar_3 = ui_variables.DGM40.render("_", 1, ui_variables.white)
screen.blit(over_text_1, (int(screen_width*0.37), int(screen_height*0.2)))
screen.blit(name_1, (int(screen_width*0.4), int(screen_height*0.5)))
screen.blit(name_2, (int(screen_width*0.5), int(screen_height*0.5)))
screen.blit(name_3, (int(screen_width*0.6), int(screen_height*0.5)))
if blink:
screen.blit(over_start, (int(screen_width*0.38), int(screen_height*0.7)))
blink = False
else:
if name_location == 0:
screen.blit(underbar_1, (int(screen_width*0.4), int(screen_height*0.52)))
elif name_location == 1:
screen.blit(underbar_2, (int(screen_width*0.5), int(screen_height*0.52)))
elif name_location == 2:
screen.blit(underbar_3, (int(screen_width*0.6), int(screen_height*0.52)))
blink = True
pygame.display.update()
elif event.type == KEYDOWN:
if event.key == K_RETURN:
outfile = open('leaderboard.txt','a')
outfile.write(chr(name[0]) + chr(name[1]) + chr(name[2]) + ' ' + str(score) + '\n')
outfile.close()
pygame.time.set_timer(pygame.USEREVENT, 1)
sys.exit()
game_over = False
hold = False
dx, dy = 3, 0
dp, dq = 3, 0
rotation = 0
rotation_n =0
mino = randint(1, 7)
mino_n = randint(1,7)
next_mino = randint(1, 7)
next_mino_n = randint(1,7)
hold_mino = -1
hold_mino_n = -1
framerate = 30
framerate_n = 30
score = 0
score_n = 0
level = 1
level_n = 1
goal = level * 5
goal_n = level_n*5
bottom_count = 0
bottom_count_n = 0
hard_drop = False
hard_drop_n = False
if event.key == K_RIGHT:
if name_location != 2:
name_location += 1
else:
name_location = 0
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_LEFT:
if name_location != 0:
name_location -= 1
else:
name_location = 2
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_UP:
if name[name_location] != 90:
name[name_location] += 1
else:
name[name_location] = 65
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_DOWN:
if name[name_location] != 65:
name[name_location] -= 1
else:
name[name_location] = 90
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_q:
done = True
elif multi_over:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
title = "ACOTRIS"
winner_text = "{}P win".format(player)
title_text_1 = ui_variables.DG_big.render(title, 1, ui_variables.white)
over_text_1 = ui_variables.DG_70.render(winner_text, 1, ui_variables.white)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
#pause시 화면 불투명하게
over_surface = screen.convert_alpha()
over_surface.fill((0, 0, 0, 0))
pygame.draw.rect(over_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)])
screen.blit(over_surface, (0, 0))
screen.blit(title_text_1,(int(screen_width*0.35), int(screen_height*0.1)))
screen.blit(over_text_1, (int(screen_width*0.39), int(screen_height*0.75)))
insert_image(image_winner, screen_width*0.25, screen_height*0.12, int(screen_width*0.55), int(screen_height*0.65))
insert_image(image_trophy, screen_width*0.21, screen_height*0.13, int(screen_width*0.1), int(screen_height*0.18))
insert_image(image_trophy, screen_width*0.7, screen_height*0.13, int(screen_width*0.1), int(screen_height*0.18))
pygame.display.update()
if event.type == KEYDOWN:
if event.key == K_q:
done = True
elif event.key == K_RETURN:
done = True
elif game_mode:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
keys = pygame.key.get_pressed()
# Q누르면 창 나가짐
if event.key == K_q:
done = True
elif keys[pygame.K_s] and keys[pygame.K_e]:
start_single = True
level = 1
goal = level * 5
type = 1
elif keys[pygame.K_s] and keys[pygame.K_r]:
level = 5
start_single = True
goal = level * 5
type = 2
elif keys[pygame.K_s] and keys[pygame.K_t]:
level = 10
start_single = True
goal = level * 5
type = 3
elif keys[pygame.K_m] and keys[pygame.K_e]:
level = 1
goal = level * 5
level_n = 1
goal_n = level_n*5
start_multi= True
type = 1
elif keys[pygame.K_m] and keys[pygame.K_r]:
level = 5
goal = level * 5
level_n = 5
goal_n = level_n*5
start_multi = True
type = 2
elif keys[pygame.K_m] and keys[pygame.K_t]:
level = 10
start_multi = True
goal = level * 5
level_n = 10
goal_n = level_n*5
type = 3
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
screen.fill(ui_variables.black)
background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2))
game_mode_title = ui_variables.DG_small.render("게임옵션설정(두개의 키를 동시에 눌러주세요!)", 1, ui_variables.white)
game_mode_choice = ui_variables.DG_v_small.render("게임모드설정", 1, ui_variables.white)
game_mode_speed = ui_variables.DG_v_small.render("게임속도설정", 1, ui_variables.white)
game_mode_single = ui_variables.DG_v_small.render("● Single 모드 (S키)", 1, ui_variables.white)
game_mode_single_des = ui_variables.DG_v_small.render("혼자서 재미있게 하기!!", 1, ui_variables.white)
game_mode_multi = ui_variables.DG_v_small.render("● Multi 모드 (M키)", 1, ui_variables.white)
game_mode_multi_des = ui_variables.DG_v_small.render("둘이서 재미있게 하기!!", 1, ui_variables.white)
game_speed_easy = ui_variables.DG_v_small.render("● 아코 모드(E키)", 1, ui_variables.white)
game_speed_normal = ui_variables.DG_v_small.render("● 엉아코 모드(R키)", 1, ui_variables.white)
game_speed_hard = ui_variables.DG_v_small.render("● 졸업코 모드(T키)", 1, ui_variables.white)
game_speed_easy_des = ui_variables.DG_v_small.render("EASY 모드!", 1, ui_variables.white)
game_speed_normal_des = ui_variables.DG_v_small.render("NORMAL 모드!!", 1, ui_variables.white)
game_speed_hard_des = ui_variables.DG_v_small.render("HARD 모드!!!", 1, ui_variables.white)
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.055)],
[screen_width,int(screen_height*0.055)],2)
screen.blit(game_mode_title, (int(screen_width*0.1)+int(int(screen_width*0.3)*0.4), int(screen_height*0.065)))
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.125)],
[screen_width,int(screen_height*0.125)],2)
pygame.draw.rect(screen, ui_variables.white, [int(screen_width*0.175), int(screen_height*0.2), int(screen_width*0.2), int(screen_height*0.075)], 2)
pygame.draw.rect(screen, ui_variables.white, [int(screen_width*0.625), int(screen_height*0.2), int(screen_width*0.2), int(screen_height*0.075)], 2)
screen.blit(game_mode_choice, (int(screen_width*0.198), int(screen_height*0.215)))
screen.blit(game_mode_speed, (int(screen_width*0.655), int(screen_height*0.215)))
screen.blit(game_mode_single, (int(screen_width*0.15), int(screen_height*0.35)))
screen.blit(game_mode_multi, (int(screen_width*0.15), int(screen_height*0.55)))
screen.blit(game_mode_single_des, (int(screen_width*0.179), int(screen_height*0.4)))
screen.blit(game_mode_multi_des, (int(screen_width*0.179), int(screen_height*0.6)))
screen.blit(game_speed_easy, (int(screen_width*0.6), int(screen_height*0.3)))
screen.blit(game_speed_normal, (int(screen_width*0.6), int(screen_height*0.45)))
screen.blit(game_speed_hard, (int(screen_width*0.6), int(screen_height*0.6)))
screen.blit(game_speed_easy_des, (int(screen_width*0.65), int(screen_height*0.35)))
screen.blit(game_speed_normal_des, (int(screen_width*0.65), int(screen_height*0.5)))
screen.blit(game_speed_hard_des, (int(screen_width*0.65), int(screen_height*0.65)))
insert_image(image_aco1, int(screen_width*0.79), int(screen_height*0.295), int(screen_width*0.1), int(screen_height*0.1))
insert_image(image_aco2, int(screen_width*0.8), int(screen_height*0.445), int(screen_width*0.1), int(screen_height*0.1))
insert_image(image_aco3, int(screen_width*0.8), int(screen_height*0.595), int(screen_width*0.1), int(screen_height*0.1))
pygame.display.update()
# Manual screen
elif show_manual:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
if event.key == K_SPACE:
game_mode = True
elif event.key == K_q:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
screen.fill(ui_variables.black)
background_image('../assets/images/manual.png', screen_width, screen_height, 0)
show_score_manual = ui_variables.DG_small.render("Manual", 1, ui_variables.white)
show_desc1_manual = ui_variables.DGM23.render("Pytris는 테트리스 게임으로 총 7가지 모양의 블록이 위에서 아래로", 1, ui_variables.white)
show_desc2_manual = ui_variables.DGM23.render("떨어질 때 블록을 회전, 이동, 낙하 시켜 빈 곳으로 블록을 끼워 넣어", 1, ui_variables.white)
show_desc3_manual = ui_variables.DGM23.render("한 라인을 채우면 라인이 제거되면서 점수를 얻는 방식입니다.", 1, ui_variables.white)
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.055)],
[screen_width,int(screen_height*0.055)],2)
screen.blit(show_score_manual, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.5), int(screen_height*0.06)))
screen.blit(show_desc1_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.15)))
screen.blit(show_desc2_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.2)))
screen.blit(show_desc3_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.25)))
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.125)],
[screen_width,int(screen_height*0.125)],2)
title_start = ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white)
screen.blit(title_start, (screen_width*0.37, screen_height*0.75))
pygame.display.update()
# Show score
elif show_score:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
# Q누르면 창 나가짐
if event.key == K_q:
done = True
#space누르면 매뉴얼 창으로
elif event.key == K_SPACE:
show_manual = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
screen.fill(ui_variables.black)
background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2))
show_score_list = list()
i = 0
try:
while i<10:
j=0
temp = ui_variables.DG_small.render('%2d' % ((i+1))+'등 '+'{:>6s}'.format(leaders[i][j]) + ' ' + '{:<8s}'.format(str(leaders[i][j+1])), 1, ui_variables.white)
show_score_list.append(temp)
i+=1
except:
show_manual = True
show_name_y = int(screen_height*0.17)
prop = (show_name_y*0.3)
for element in show_score_list:
screen.blit(element, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.25), show_name_y))
show_name_y += prop
show_button_right = ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white)
show_score_title = ui_variables.DG_small.render("Ranking", 1, ui_variables.white)
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.055)],
[screen_width,int(screen_height*0.055)],2)
screen.blit(show_score_title, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.5), int(screen_height*0.065)))
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.125)],
[screen_width,int(screen_height*0.125)],2)
screen.blit(show_button_right, (int(screen_width*0.33)+int(int(screen_width*0.33)*0.2), show_name_y+prop))
pygame.display.flip()
# Start screen
else:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
if event.key == K_SPACE:
show_score=True
#Q 누르면 창 나가짐
elif event.key == K_q:
done = True
screen.fill(ui_variables.white)
background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2))
insert_image(image_aco1, screen_width*0.52, screen_height*0.29, 150, 130)
insert_image(image_aco2, screen_width*0.65, screen_height*0.22, 180, 180)
insert_image(image_aco3, screen_width*0.8, screen_height*0.18, 210, 210)
title = ui_variables.DG_big.render("ACOTRIS", 1, ui_variables.black)
title_uni = ui_variables.DG_small.render("in DGU", 1, ui_variables.black)
title_start = ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white)
title_info = ui_variables.DGM13.render("Copyright (c) 2017 Jason Kim All Rights Reserved.", 1, ui_variables.white)
if blink:
screen.blit(title_start, (91, 195))
blink = False
else:
blink = True
screen.blit(title, (screen_width*0.028, screen_height*0.3))
screen.blit(title_uni, (screen_width*0.37, screen_height*0.3))
screen.blit(title_start, (screen_width*0.37, screen_height*0.55))
screen.blit(title_info, (screen_width*0.35, screen_height*0.93))
if not show_score:
pygame.display.update()
clock.tick(3)
pygame.quit()
| 53,171 | 17,681 |
"""
Created on Sep 1, 2011
@author: guillaume
"""
from scipy import zeros
from chemex.bases.two_states.fast import R_IXY, DR_IXY, DW, KAB, KBA
def compute_liouvillians(pb=0.0, kex=0.0, dw=0.0,
                         r_ixy=5.0, dr_ixy=0.0):
    """
    Compute the exchange matrix (Liouvillian).

    The function assumes a 2-site (A <-> B) exchanging system.
    Here the thermal equilibrium is assumed to be 0. This is justified because of
    the +/- phase cycling of the first 90 degree pulse at the beginning of the
    cpmg block.

    NOTE(review): companion functions in this module index 4-element
    magnetization vectors, so the basis appears to be {Ix, Iy}{a,b} — confirm
    against chemex.bases.two_states.fast.

    Parameters
    ----------
    pb : float
        Fractional population of state B.
        0.0 for 0%, 1.0 for 100%.
    kex : float
        Exchange rate between state A and B in /s.
    dw : float
        Chemical shift difference between states A and B in rad/s.
    r_ixy : float
        Transverse relaxation rate of state a in /s.
    dr_ixy : float
        Transverse relaxation rate difference between states a and b in /s.

    Returns
    -------
    out: numpy.matrix
        Liouvillian describing free precession of one
        isolated spin in presence of two-site exchange.
    """
    # Forward (A->B) and backward (B->A) rates from the total exchange rate
    # and the population of state B.
    kab = kex * pb
    kba = kex - kab
    # The Liouvillian is a linear combination of the precomputed basis matrices.
    l_free = R_IXY * r_ixy
    l_free += DR_IXY * dr_ixy
    l_free += DW * dw
    l_free += KAB * kab
    l_free += KBA * kba
    return l_free
def compute_iy_eq(pb):
    """
    Build the equilibrium magnetization vector.

    Parameters
    ----------
    pb : float
        Fractional population of state B.
        0.0 for 0%, 1.0 for 100%.

    Returns
    -------
    out: numpy.matrix
        Magnetization vector at equilibrium: state A carries (1 - pb) and
        state B carries pb, each on its y component.
    """
    magnetization = zeros((4, 1))
    magnetization[1, 0] = 1.0 - pb
    magnetization[3, 0] = pb
    return magnetization
def get_iy(mag):
    """
    Extract the y magnetization of both exchanging states.

    Parameters
    ----------
    mag : ndarray
        Magnetization vector.

    Returns
    -------
    magy_a, magy_b : float
        Amount of magnetization of state a and b along y.
    """
    return mag[1, 0], mag[3, 0]
| 2,305 | 829 |
import tempfile
import shutil
import os
import pandas
import numpy as np
import datetime
import pkg_resources
from unittest import TestCase
from dfs.nba.featurizers import feature_generators
from dfs.nba.featurizers import fantasy_points_fzr, last5games_fzr, nf_stats_fzr, vegas_fzr, \
opp_ffpg_fzr, salary_fzr
class FeaturizersTest(TestCase):
    """Regression tests pinning the exact output of each NBA featurizer
    against hand-checked reference tables.

    NOTE(review): uses Python 2-only names (``basestring``,
    ``TestCase.assertItemsEqual``) — this module targets Python 2 and will
    not run unmodified on Python 3.
    """

    def setUp(self):
        # A little test data from the past few years, useful for testing BREF data
        testfn = pkg_resources.resource_filename(__name__, 'test.pickle')
        self.data = pandas.read_pickle(testfn)
        # More recent test data -- necessary for testing external data
        recentfn = pkg_resources.resource_filename(__name__, 'recent.pickle')
        self.recentdata = pandas.read_pickle(recentfn)

    def testDataIntegrity(self):
        # Guard against fixture drift: row counts and first/last player ids
        # must match what the reference tables below were computed from.
        assert len(self.data) == 10
        assert self.data.iloc[0]['bref_id'] == 'gallola01'
        assert self.data.iloc[9]['bref_id'] == 'dunlemi02'
        assert len(self.recentdata) == 10
        assert self.recentdata.iloc[0]['bref_id'] == 'barnema02'
        assert self.recentdata.iloc[9]['bref_id'] == 'lawsoty01'

    def testDecorator(self):
        # Make sure the decorator is properly wrapping functions and turning their list outputs into pandas.Series
        for func_name in feature_generators:
            assert isinstance(func_name, basestring)
            wrapper, columns, live = feature_generators[func_name]
            output = wrapper(self.data.iloc[0])
            self.assertTrue(isinstance(output, pandas.Series))
            self.assertItemsEqual(columns, output.index)

    def applyFeaturizer(self, fzr_function, expected_output, use_recent=False):
        # Helper: run fzr_function over every row of the chosen fixture and
        # compare each output element to the reference table to 3 places.
        # Two NaNs count as equal (np.isnan check before assertAlmostEqual).
        data = self.recentdata if use_recent else self.data
        for integer_index, (_, row) in enumerate(data.iterrows()):
            actual_output = fzr_function(row)
            for i in range(len(expected_output[integer_index])):
                # First check if they're both NaN
                if np.isnan(expected_output[integer_index][i]) and np.isnan(actual_output.iloc[i]):
                    continue
                self.assertAlmostEqual(expected_output[integer_index][i],
                                       actual_output.iloc[i],
                                       places=3,
                                       msg="Error in row %d item %d of %s. Reference %s, actual output %s." % (
                                           integer_index,
                                           i,
                                           'recentdata' if use_recent else 'data',
                                           expected_output[integer_index][i],
                                           actual_output.iloc[i]
                                       ))

    def test_fantasy_points_fzr(self):
        # One fantasy-point value per fixture row.
        self.applyFeaturizer(fantasy_points_fzr, [[20.1],
                                                  [4.0],
                                                  [17.3],
                                                  [4.2],
                                                  [22.5],
                                                  [36.3],
                                                  [27.9],
                                                  [31.3],
                                                  [17.8],
                                                  [11.7]])

    def test_last5games_fzr(self):
        # Rolling average over the last five games, one value per row.
        self.applyFeaturizer(last5games_fzr, [[25.1],
                                              [6.78],
                                              [18.78],
                                              [6.26],
                                              [19.24],
                                              [29.56],
                                              [30.74],
                                              [31.36],
                                              [13.94],
                                              [23.72]])

    def test_nf_stats_fzr(self):
        # Nine external stats per row; the all-NaN row covers a player with
        # no external data. Requires the recent fixture.
        self.applyFeaturizer(nf_stats_fzr,
                             [[23.76,6.0,2.7,1.4,0.6,0.2,0.8,1.9,12.14],
                              [35.97,19.0,6.1,4.0,1.1,0.2,2.1,2.9,32.82],
                              [23.58,12.9,2.7,1.7,0.7,0.2,1.2,2.4,19.29],
                              [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan],
                              [27.23,10.4,4.4,2.9,0.6,0.3,1.8,2.3,20.03],
                              [23.39,7.10,3.0,1.0,0.5,0.3,0.6,2.1,13.2],
                              [24.62,8.1,4.2,1.6,0.6,0.2,1.4,2.4,15.74],
                              [18.26,9.2,3.0,1.1,0.5,0.4,0.7,1.4,15.55],
                              [23.38,8.1,3.5,0.9,0.6,0.2,0.8,1.7,14.45],
                              [35.62,18.8,4,7.5,1.5,0.1,2.8,2.4,35.25]],
                             use_recent=True)

    def test_vegas_fzr(self):
        # Two values per row (spread, over/under); checked on both fixtures.
        self.applyFeaturizer(vegas_fzr,
                             [[10.5, 189.5],
                              [6.5, 199.5],
                              [9.5, 194.5],
                              [4.5, 194.0],
                              [8.5, 195.5],
                              [-1, 190.5],
                              [-5, 198],
                              [2.5, 196.5],
                              [-19, 200.0],
                              [-9, 181.0]
                              ])
        self.applyFeaturizer(vegas_fzr,
                             [[9.0, 204.5],
                              [-6.0, 200.5],
                              [4.5, 217.5],
                              [-5.5, 202.5],
                              [-5.5, 202.5],
                              [2.0, 195],
                              [13.0, 195],
                              [-4.0, 203.5],
                              [-6.0, 200.5],
                              [4.5, 217.5]],
                             use_recent=True)

    def test_opp_ffpg_fzr(self):
        # Five opponent fantasy-points-per-game features per row.
        self.applyFeaturizer(opp_ffpg_fzr,
                             [[18.389285714285712, 48.0, 0.85816666666666663, 1.1187666671058538, 20.0],
                              [17.040909090909093, 67.2, 0.76771331058020498, 0.76122548332443785, 2.0055710306406684],
                              [20.261666666666667, 42.4, 0.85140009104385328, 0.80628334990429773, 1.5840597758405979],
                              [15.684848484848485, 35.3, 0.71887224832758501, 0.67037347774416234, 1.3499043977055449],
                              [20.426530612244896, 52.4, 0.83409491798497215, 0.81556700238463165, 1.9865319865319866],
                              [17.885365853658534, 51.8, 0.7638541666666665, 0.69248549436529994, 1.3061224489795917],
                              [18.26969696969697, 66.2, 0.83735141954375503, 0.89284459636178026, 10.105263157894738],
                              [19.694339622641515, 54.6, 0.86982125248260445, 0.80132994567677285, 1.7091633466135459],
                              [17.863636363636363, 46.4, 0.81874052383653018, 0.80001770931620431, 1.5218658892128281],
                              [16.608974358974361, 56.2, 0.77021403091557705, 0.7193626173392953, 1.3805774278215222]],
                             use_recent=False)

    def test_salary_fzr(self):
        # One salary value per row; NaN covers a player with no listed salary.
        self.applyFeaturizer(salary_fzr, [[3500],
                                          [8200],
                                          [3700],
                                          [np.nan],
                                          [4100],
                                          [3500],
                                          [3500],
                                          [4000],
                                          [3700],
                                          [7100]],
                             use_recent=True)
from blocksync._consts import ByteSizes
from blocksync._status import Blocks
def test_initialize_status(fake_status):
    """A freshly built status splits the source size evenly among workers."""
    expected_chunk = fake_status.src_size // fake_status.workers
    assert fake_status.chunk_size == expected_chunk
def test_add_block(fake_status):
    """Each added block is tallied per kind and counted toward the done total."""
    for kind in ("same", "same", "diff"):
        fake_status.add_block(kind)
    assert fake_status.blocks == Blocks(same=2, diff=1, done=3)
def test_get_rate(fake_status):
    """rate starts at 0, tracks completed blocks, and never exceeds 100."""
    # Nothing processed yet.
    assert fake_status.rate == 0.00
    fake_status.block_size = ByteSizes.MiB
    fake_status.src_size = fake_status.dest_size = ByteSizes.MiB * 10
    # Half the blocks done -> 50%.
    for kind in ("same", "same", "same", "diff", "diff"):
        fake_status.add_block(kind)
    assert fake_status.rate == 50.00
    # All blocks done -> 100%.
    for kind in ("same", "same", "same", "diff", "diff"):
        fake_status.add_block(kind)
    assert fake_status.rate == 100.00
    # Overshooting the total size still reports 100%.
    fake_status.add_block("diff")
    assert fake_status.rate == 100.00
| 1,346 | 485 |
#!/usr/bin/python3
import os
def readFixture(sdk):
    """Return the lines of tests/fixtures/<sdk> with newlines stripped.

    Uses a context manager so the file handle is always closed — the
    original leaked the handle if readlines() raised.
    """
    with open(os.path.join('tests', 'fixtures', sdk), 'r') as f:
        return [line.strip('\n') for line in f]
def valuesFromDisplay(display):
    """Shift every 1-based display digit down to its 0-based value."""
    return [entry - 1 for entry in display]
| 275 | 98 |
# General Utility Libraries
import sys
import os
import warnings
# PyQt5, GUI Library
from PyQt5 import QtCore, QtGui, QtWidgets
# Serial and Midi Port Library
import rtmidi
import serial
import serial.tools.list_ports
# SKORE Library
from lib_skore import read_config, update_config
import globals
#-------------------------------------------------------------------------------
# Classes
class ArduinoComboBox(QtWidgets.QComboBox):
    """
    Combo box that rescans for connected Arduinos every time the user
    opens its dropdown.
    """

    def avaliable_arduino_com(self):
        """
        Return the device names of all currently available COM ports
        as a list of strings.
        """
        detected = serial.tools.list_ports.comports(include_links=False)
        return [str(entry.device) for entry in detected]

    def showPopup(self):
        """
        Refresh the item list with the current COM ports before showing
        the dropdown via the base-class implementation.
        """
        self.clear()
        for device_name in self.avaliable_arduino_com():
            self.addItem(device_name)
        super(ArduinoComboBox, self).showPopup()
        return None
class PianoComboBox(QtWidgets.QComboBox):
    """
    Combo box that rescans for connected MIDI pianos every time the user
    opens its dropdown.
    """

    def avaliable_piano_port(self):
        """
        Return the names of all currently available MIDI input ports
        as a list of strings.
        """
        probe = rtmidi.MidiIn()
        return [str(port_name) for port_name in probe.get_ports()]

    def showPopup(self):
        """
        Refresh the item list with the current MIDI ports before showing
        the dropdown via the base-class implementation.
        """
        self.clear()
        for midi_port in self.avaliable_piano_port():
            self.addItem(midi_port)
        super(PianoComboBox, self).showPopup()
        return None
class ConfigDialog(QtWidgets.QDialog):
"""
This class is the settings dialog that provides the user the capability
of changing the settings of the SKORE application.
"""
finish_apply_signal = QtCore.pyqtSignal()
def __init__(self):
"""
This function sets the settings dialog by changing the title, size, icon,
and placing the widgets.
"""
super(QtWidgets.QDialog, self).__init__()
self.setObjectName("Dialog")
self.resize(530 * globals.S_W_R, 679 * globals.S_H_R)
self.setWindowTitle("SKORE - General Configuration")
self.setWindowIcon(QtGui.QIcon('.\images\skore_icon.png'))
self.setup_ui()
self.setup_func()
self.read_all_settings()
self.update_settings()
return None
def setup_ui(self):
"""
This function places all the widgets in the settings dialog.
"""
self.apply_close_buttonBox = QtWidgets.QDialogButtonBox(self)
self.apply_close_buttonBox.setGeometry(QtCore.QRect(310 * globals.S_W_R, 640 * globals.S_H_R, 201 * globals.S_W_R, 32 * globals.S_H_R))
self.apply_close_buttonBox.setLayoutDirection(QtCore.Qt.RightToLeft)
self.apply_close_buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.apply_close_buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Apply|QtWidgets.QDialogButtonBox.Close)
self.apply_close_buttonBox.setObjectName("apply_cancel_buttonBox")
#-----------------------------------------------------------------------
# Tab Widget
self.tabWidget = QtWidgets.QTabWidget(self)
self.tabWidget.setGeometry(QtCore.QRect(10 * globals.S_W_R, 10 * globals.S_H_R, 511 * globals.S_W_R, 621 * globals.S_H_R))
self.tabWidget.setLayoutDirection(QtCore.Qt.LeftToRight)
self.tabWidget.setObjectName("tabWidget")
#-----------------------------------------------------------------------#
# Tab Widget -> path_and_comm_tab
self.path_and_comm_tab = QtWidgets.QWidget()
self.path_and_comm_tab.setObjectName("path_and_comm_tab")
#-----------------------------------------------------------------------
# Tab Widget -> path_and_comm_tab -> path section
self.configure_path_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.configure_path_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 5 * globals.S_H_R, 231 * globals.S_W_R, 16 * globals.S_H_R))
self.configure_path_label.setObjectName("configure_path_label")
self.path_line = QtWidgets.QFrame(self.path_and_comm_tab)
self.path_line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 20 * globals.S_H_R, 481 * globals.S_W_R, 20 * globals.S_H_R))
self.path_line.setFrameShape(QtWidgets.QFrame.HLine)
self.path_line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.path_line.setObjectName("path_line")
self.audiveris_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab)
self.audiveris_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 60 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R))
self.audiveris_pushButton.setObjectName("audiveris_pushButton")
self.audiveris_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.audiveris_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 40 * globals.S_H_R, 101 * globals.S_W_R, 16 * globals.S_H_R))
self.audiveris_label.setObjectName("audiveris_label")
self.audiveris_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab)
self.audiveris_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 60 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R))
self.audiveris_lineEdit.setObjectName("audiveris_lineEdit")
self.amazingmidi_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab)
self.amazingmidi_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 120 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R))
self.amazingmidi_lineEdit.setObjectName("amazingmidi_lineEdit")
self.amazingmidi_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.amazingmidi_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 100 * globals.S_H_R, 121 * globals.S_W_R, 16 * globals.S_H_R))
self.amazingmidi_label.setObjectName("amazingmidi_label")
self.amazingmidi_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab)
self.amazingmidi_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 120 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R))
self.amazingmidi_pushButton.setObjectName("amazingmidi_pushButton")
self.anthemscore_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab)
self.anthemscore_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 180 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R))
self.anthemscore_pushButton.setObjectName("anthemscore_pushButton")
self.anthemscore_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab)
self.anthemscore_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 180 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R))
self.anthemscore_lineEdit.setObjectName("anthemscore_lineEdit")
self.anthemscore_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.anthemscore_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 160 * globals.S_H_R, 191 * globals.S_W_R, 16 * globals.S_H_R))
self.anthemscore_label.setObjectName("anthemscore_label")
self.muse_score_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab)
self.muse_score_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 240 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R))
self.muse_score_pushButton.setObjectName("muse_score_pushButton")
self.muse_score_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab)
self.muse_score_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 240 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R))
self.muse_score_lineEdit.setObjectName("muse_score_linedEdit")
self.muse_score_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.muse_score_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 220 * globals.S_H_R, 191 * globals.S_W_R, 16 * globals.S_H_R))
self.muse_score_label.setObjectName("muse_score_label")
self.mp3_to_midi_converter_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.mp3_to_midi_converter_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 280 * globals.S_H_R, 141 * globals.S_W_R, 16 * globals.S_H_R))
self.mp3_to_midi_converter_label.setObjectName("mp3_to_midi_converter_label")
self.open_source_radioButton = QtWidgets.QRadioButton(self.path_and_comm_tab)
self.open_source_radioButton.setGeometry(QtCore.QRect(240 * globals.S_W_R, 280 * globals.S_H_R, 111 * globals.S_W_R, 20 * globals.S_H_R))
self.open_source_radioButton.setObjectName("open_source_radioButton")
self.close_source_radioButton = QtWidgets.QRadioButton(self.path_and_comm_tab)
self.close_source_radioButton.setGeometry(QtCore.QRect(380 * globals.S_W_R, 280 * globals.S_H_R, 111 * globals.S_W_R, 20 * globals.S_H_R))
self.close_source_radioButton.setObjectName("close_source_radioButton")
#-----------------------------------------------------------------------
# Tab Widget -> path_and_comm_tab -> comm section
self.comm_line = QtWidgets.QFrame(self.path_and_comm_tab)
self.comm_line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 300 * globals.S_H_R, 481 * globals.S_W_R, 20 * globals.S_H_R))
self.comm_line.setFrameShape(QtWidgets.QFrame.HLine)
self.comm_line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.comm_line.setObjectName("comm_line")
self.portsettings_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.portsettings_label.setGeometry(QtCore.QRect(210 * globals.S_W_R, 320 * globals.S_H_R, 81* globals.S_W_R, 20 * globals.S_H_R))
self.portsettings_label.setObjectName("portsettings_label")
self.piano_port_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.piano_port_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 340 * globals.S_H_R, 71 * globals.S_W_R, 16 * globals.S_H_R))
self.piano_port_label.setObjectName("pianoport_label")
self.piano_port_comboBox = PianoComboBox(self.path_and_comm_tab)
self.piano_port_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 360 * globals.S_H_R, 481 * globals.S_W_R, 31 * globals.S_H_R))
self.piano_port_comboBox.setObjectName("pianoport_comboBox")
self.piano_size_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.piano_size_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 400 * globals.S_H_R, 71* globals.S_W_R, 16* globals.S_H_R))
self.piano_size_label.setObjectName("pianosize_label")
self.piano_size_comboBox = QtWidgets.QComboBox(self.path_and_comm_tab)
self.piano_size_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 420 * globals.S_H_R, 481 * globals.S_W_R, 31 * globals.S_H_R))
self.piano_size_comboBox.setObjectName("pianosize_comboBox")
self.arduinoport_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.arduinoport_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 460 * globals.S_H_R, 81 * globals.S_W_R, 16* globals.S_H_R))
self.arduinoport_label.setObjectName("arduinoport_label")
self.arduino_port_comboBox = ArduinoComboBox(self.path_and_comm_tab)
self.arduino_port_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 480 * globals.S_H_R, 481 * globals.S_W_R, 31 * globals.S_H_R))
self.arduino_port_comboBox.setObjectName("arduinoport_comboBox")
self.arduino_baud_rate_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.arduino_baud_rate_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 520 * globals.S_H_R, 200 * globals.S_W_R, 20* globals.S_H_R))
self.arduino_baud_rate_label.setText("Arduino Baud Rate")
self.arduino_baud_rate_comboBox = QtWidgets.QComboBox(self.path_and_comm_tab)
self.arduino_baud_rate_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 540 * globals.S_H_R, 481* globals.S_W_R, 31 * globals.S_H_R))
self.tabWidget.addTab(self.path_and_comm_tab, "")
#-----------------------------------------------------------------------
# Tab Widget -> Lighting and Color Tab
self.color_tab = QtWidgets.QWidget()
self.color_tab.setObjectName("color_tab")
#-----------------------------------------------------------------------
# Tab Widget -> Tutoring Tab -> Timing Section
self.timingsettings_label = QtWidgets.QLabel(self.color_tab)
self.timingsettings_label.setGeometry(QtCore.QRect(200 * globals.S_W_R, 10 * globals.S_H_R, 151 * globals.S_W_R, 20 * globals.S_H_R))
self.timingsettings_label.setObjectName("timingsettings_label")
self.chord_tick_tolerance_label = QtWidgets.QLabel(self.color_tab)
self.chord_tick_tolerance_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 40* globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R))
self.chord_tick_tolerance_label.setText("Chord Tick Tolerance:")
self.chord_tick_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab)
self.chord_tick_tolerance_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 40 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R))
self.chord_sum_tolerance_label = QtWidgets.QLabel(self.color_tab)
self.chord_sum_tolerance_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 80 * globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R))
self.chord_sum_tolerance_label.setText("Chord Sum Tolerance:")
self.chord_sum_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab)
self.chord_sum_tolerance_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 80 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R))
self.record_chord_tolerance_label = QtWidgets.QLabel(self.color_tab)
self.record_chord_tolerance_label.setGeometry(QtCore.QRect(20* globals.S_W_R, 120 * globals.S_H_R, 200* globals.S_W_R, 20 * globals.S_H_R))
self.record_chord_tolerance_label.setText("Record Chord Tolerance:")
self.record_chord_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab)
self.record_chord_tolerance_lineEdit.setGeometry(QtCore.QRect(200* globals.S_W_R, 120 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R))
self.arduino_handshake_timeout_label = QtWidgets.QLabel(self.color_tab)
self.arduino_handshake_timeout_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 160* globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R))
self.arduino_handshake_timeout_label.setText("Arduino Handshake Timeout:")
self.arduino_handshake_timeout_lineEdit = QtWidgets.QLineEdit(self.color_tab)
self.arduino_handshake_timeout_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 160 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R))
self.line = QtWidgets.QFrame(self.color_tab)
self.line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 230 * globals.S_H_R, 481 * globals.S_W_R, 16 * globals.S_H_R))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
#-----------------------------------------------------------------------
# Tab Widget -> Tutoring Tab -> Color Section
self.colorsettings_label = QtWidgets.QLabel(self.color_tab)
self.colorsettings_label.setGeometry(QtCore.QRect(210 * globals.S_W_R, 250 * globals.S_H_R, 81 * globals.S_W_R, 20 * globals.S_H_R))
self.colorsettings_label.setObjectName("colorsettings_label_2")
bw_y = ( 250 + 40 ) * globals.S_H_R
space = 20 * globals.S_H_R
self.black_key_label = QtWidgets.QLabel(self.color_tab)
self.black_key_label.setGeometry(QtCore.QRect(80 * globals.S_W_R, bw_y, 61 * globals.S_W_R, 16 * globals.S_H_R))
self.black_key_label.setObjectName("black_key_label")
self.black_key_pushButton = QtWidgets.QPushButton(self.color_tab)
self.black_key_pushButton.setGeometry(QtCore.QRect(40 * globals.S_W_R, bw_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R))
self.black_key_pushButton.setText("")
self.black_key_pushButton.setObjectName("black_key_pushButton")
self.white_key_label = QtWidgets.QLabel(self.color_tab)
self.white_key_label.setGeometry(QtCore.QRect(360 * globals.S_W_R, bw_y, 71 * globals.S_W_R, 16 * globals.S_H_R))
self.white_key_label.setObjectName("white_key_label")
self.white_key_pushButton = QtWidgets.QPushButton(self.color_tab)
self.white_key_pushButton.setGeometry(QtCore.QRect(320 * globals.S_W_R, bw_y + space, 141 * globals.S_W_R, 61 * globals.S_W_R))
self.white_key_pushButton.setText("")
self.white_key_pushButton.setObjectName("white_key_pushButton")
wu_y = ( 390 + 40 ) * globals.S_H_R
self.wrong_label = QtWidgets.QLabel(self.color_tab)
self.wrong_label.setGeometry(QtCore.QRect(75 * globals.S_W_R, wu_y, 71 * globals.S_W_R, 16 * globals.S_H_R))
self.wrong_label.setObjectName("wrong_label")
self.wrong_pushButton = QtWidgets.QPushButton(self.color_tab)
self.wrong_pushButton.setGeometry(QtCore.QRect(40 * globals.S_W_R, wu_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R))
self.wrong_pushButton.setText("")
self.wrong_pushButton.setObjectName("wrong_pushButton")
self.upcoming_label = QtWidgets.QLabel(self.color_tab)
self.upcoming_label.setGeometry(QtCore.QRect(350 * globals.S_W_R, wu_y, 91 * globals.S_W_R, 16 * globals.S_H_R))
self.upcoming_label.setObjectName("upcoming_label")
self.upcoming_pushButton = QtWidgets.QPushButton(self.color_tab)
self.upcoming_pushButton.setGeometry(QtCore.QRect(320 * globals.S_W_R, wu_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R))
self.upcoming_pushButton.setText("")
self.upcoming_pushButton.setObjectName("upcoming_pushButton")
self.tabWidget.addTab(self.color_tab, "")
self.retranslate_ui()
self.tabWidget.setCurrentIndex(0)
self.apply_close_buttonBox.accepted.connect(self.accept)
self.apply_close_buttonBox.rejected.connect(self.close)
QtCore.QMetaObject.connectSlotsByName(self)
def setup_func(self):
"""
This function places all the slot and signals for the widgets of the
settings dialog.
"""
self.browse_button_group = QtWidgets.QButtonGroup()
self.browse_button_group.addButton(self.audiveris_pushButton)
self.browse_button_group.addButton(self.amazingmidi_pushButton)
self.browse_button_group.addButton(self.anthemscore_pushButton)
self.browse_button_group.addButton(self.muse_score_pushButton)
self.browse_button_group.buttonClicked.connect(self.upload_exe_file)
self.browse_button_dict = {self.audiveris_pushButton: ['', self.audiveris_lineEdit, 'audiveris'], self.amazingmidi_pushButton: ['',self.amazingmidi_lineEdit, 'amazing_midi'],
self.anthemscore_pushButton: ['', self.anthemscore_lineEdit,'anthemscore'], self.muse_score_pushButton: ['', self.muse_score_lineEdit, 'muse_score']}
self.port_dict = {self.piano_port_comboBox: ['','piano'], self.piano_size_comboBox: ['','piano_size'],
self.arduino_port_comboBox: ['','arduino'], self.arduino_baud_rate_comboBox: ['', 'arduino baud rate']}
self.piano_size_comboBox.addItem('76 Key Piano')
self.piano_size_comboBox.addItem('88 Key Piano')
self.arduino_baud_rate_comboBox.addItem('300')
self.arduino_baud_rate_comboBox.addItem('600')
self.arduino_baud_rate_comboBox.addItem('1200')
self.arduino_baud_rate_comboBox.addItem('4800')
self.arduino_baud_rate_comboBox.addItem('9600')
self.arduino_baud_rate_comboBox.addItem('14400')
self.arduino_baud_rate_comboBox.addItem('19200')
self.arduino_baud_rate_comboBox.addItem('28800')
self.arduino_baud_rate_comboBox.addItem('38400')
self.arduino_baud_rate_comboBox.addItem('57600')
self.arduino_baud_rate_comboBox.addItem('115200')
self.arduino_baud_rate_comboBox.addItem('230400')
self.timing_button_dict = {self.chord_tick_tolerance_lineEdit: ['', 'chord tick tolerance'], self.chord_sum_tolerance_lineEdit: ['','chord sum tolerance'],
self.record_chord_tolerance_lineEdit: ['', 'record chord tolerance'], self.arduino_handshake_timeout_lineEdit: ['', 'count timeout']
}
self.color_button_group = QtWidgets.QButtonGroup()
self.color_button_group.addButton(self.black_key_pushButton)
self.color_button_group.addButton(self.white_key_pushButton)
self.color_button_group.addButton(self.wrong_pushButton)
self.color_button_group.addButton(self.upcoming_pushButton)
self.color_button_group.buttonClicked.connect(self.color_picker)
self.color_button_dict = {self.black_key_pushButton: ['','black'], self.white_key_pushButton: ['','white'],
self.wrong_pushButton: ['','wrong'], self.upcoming_pushButton: ['','upcoming']
}
self.apply_close_buttonBox.button(QtWidgets.QDialogButtonBox.Apply).clicked.connect(self.apply_changes)
return None
#---------------------------------------------------------------------------
# Path Section Functions
def open_file_name_dialog_exe_file(self):
"""
This file dialog is used to obtain the file location of the .exe file.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Select .exe/.bat File", "", "Executiable Files (*.exe);; Batch Files (*.bat)", options=options)
if fileName:
file_dialog_output = str(fileName)
else:
return ""
file_dialog_output = file_dialog_output.replace('/' , '\\' )
return file_dialog_output
def open_directory_name_dialog_exe_path(self):
"""
This file dialog is used to obtain the folder directory of the desired
exe folder location.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.ShowDirsOnly
options |= QtWidgets.QFileDialog.DontUseNativeDialog
directory = QtWidgets.QFileDialog.getExistingDirectory(self, caption = 'Select a folder', options = options)
if directory:
file_dialog_output = str(directory)
else:
return ""
file_dialog_output = file_dialog_output.replace('/' , '\\' )
return file_dialog_output
    def upload_exe_file(self, button):
        """
        Slot for every button in ``browse_button_group``.

        Opens the .exe/.bat file dialog and, if the user picked a file,
        stores the chosen path for *button* and refreshes the widgets.

        NOTE(review): the original docstring claimed audiveris uses the
        directory dialog, but this method always calls the exe-*file*
        dialog; ``open_directory_name_dialog_exe_path`` is never used here.
        Confirm which behavior is intended.
        """
        upload_exe_path = self.open_file_name_dialog_exe_file()
        if upload_exe_path != '':
            self.browse_button_dict[button][0] = upload_exe_path
            self.update_settings()
        return None
#---------------------------------------------------------------------------
# Color
def color_picker(self, button):
"""
This function creates a QColorDialog when the user clicks the color
wheel color. Once the user selects a color, it will display the RGB
colors in the lineedits.
"""
color = QtWidgets.QColorDialog.getColor()
if color.isValid():
# Converting Hexadecimal to RGB values
value = color.name()
value = value.lstrip('#')
rgb = tuple(int(value[i:i+2], 16) for i in (0, 2, 4))
rgb = str(rgb)[1:-1].replace(" ","")
self.color_button_dict[button][0] = rgb
button.setStyleSheet('background-color:rgb({})'.format(rgb))
return None
#---------------------------------------------------------------------------
# Reading Settings
def read_all_settings(self):
"""
This function reads all the settings in the config.yml and stores them
in dictionaries that correlate the settings to the widgets.
"""
cfg = read_config()
# Path Settings
for key in self.browse_button_dict.keys():
self.browse_button_dict[key][0] = cfg['app_path'][self.browse_button_dict[key][2]]
# Mp3 to midi Settings
self.mp3_to_midi_setting = cfg['app_path']['open_close_source']
# Port Settings
for key in self.port_dict.keys():
self.port_dict[key][0] = cfg['port'][self.port_dict[key][1]]
# Timing Settings
for key in self.timing_button_dict.keys():
self.timing_button_dict[key][0] = cfg['timing'][self.timing_button_dict[key][1]]
# Color Settings
for key in self.color_button_dict.keys():
self.color_button_dict[key][0] = cfg['color'][self.color_button_dict[key][1]]
return None
def update_settings(self):
"""
This function places the information of the settings into the widgets,
such as placing the value or color to the widget.
"""
# Path Settings
for button in self.browse_button_dict:
self.browse_button_dict[button][1].setText(self.browse_button_dict[button][0])
# Mp3 to midi Settings
if self.mp3_to_midi_setting == 'open_source':
self.open_source_radioButton.setChecked(True)
self.close_source_radioButton.setChecked(False)
elif self.mp3_to_midi_setting == 'close_source':
self.close_source_radioButton.setChecked(True)
self.open_source_radioButton.setChecked(False)
# Port Settings
for key in self.port_dict.keys():
if self.port_dict[key][1] == 'piano_size':
key.setCurrentText(str(self.port_dict[key][0]) + ' Key Piano')
elif key == self.arduino_baud_rate_comboBox:
key.setCurrentText(str(self.port_dict[key][0]))
else:
key.addItem(str(self.port_dict[key][0]))
key.setCurrentText(str(self.port_dict[key][0]))
# Timing Settings
for key in self.timing_button_dict.keys():
key.setText(str(self.timing_button_dict[key][0]))
# Color Settings
for key in self.color_button_dict.keys():
rgb = self.color_button_dict[key][0]
key.setStyleSheet('background-color:rgb({})'.format(rgb))
return None
    def apply_changes(self):
        """
        Write the user's current widget values back to config.yml.

        Collects path, converter, color, timing and port settings from the
        widgets / lookup tables, persists them via ``update_config`` and
        emits ``finish_apply_signal`` so listeners can react.
        """
        cfg = read_config()
        # Apply Path
        for button in self.browse_button_dict:
            text = self.browse_button_dict[button][1].text()
            cfg['app_path'][self.browse_button_dict[button][2]] = text
        # Mp3 to midi Settings
        if self.open_source_radioButton.isChecked():
            cfg['app_path']['open_close_source'] = 'open_source'
        elif self.close_source_radioButton.isChecked():
            cfg['app_path']['open_close_source'] = 'close_source'
        # Color Settings
        for key in self.color_button_dict.keys():
            rgb = self.color_button_dict[key][0]
            cfg['color'][self.color_button_dict[key][1]] = rgb
        # Timing settings come straight from the line edits, as integers.
        for key in self.timing_button_dict.keys():
            cfg['timing'][self.timing_button_dict[key][1]] = int(key.text())
        # Port Settings
        for key in self.port_dict.keys():
            index = key.currentIndex()
            if index == -1:
                # Combo box has no selection; keep the existing config value.
                continue
            if key == self.piano_port_comboBox or key == self.arduino_port_comboBox:
                cfg['port'][self.port_dict[key][1]] = key.currentText()
            elif key == self.piano_size_comboBox:
                # Keep only the leading digits of e.g. "88 Key Piano".
                cfg['port'][self.port_dict[key][1]] = key.currentText()[:2]
            elif key == self.arduino_baud_rate_comboBox:
                cfg['port'][self.port_dict[key][1]] = int(key.currentText())
        update_config(cfg)
        print("Applied Changes")
        self.finish_apply_signal.emit()
        return None
#---------------------------------------------------------------------------
# Misc Functions
    def retranslate_ui(self):
        """
        This function places all the text content in the configuration dialog
        widgets, then scales the dialog font by the global width ratio so the
        text matches the screen resolution.
        """
        _translate = QtCore.QCoreApplication.translate
        # Path and communication tab.
        self.anthemscore_pushButton.setText(_translate("Dialog", "Browse"))
        self.anthemscore_label.setText(_translate("Dialog", "AnthemScore [.exe] (Optional)"))
        self.audiveris_pushButton.setText(_translate("Dialog", "Browse"))
        self.audiveris_label.setText(_translate("Dialog", "Audiveris [folder]"))
        self.amazingmidi_pushButton.setText(_translate("Dialog", "Browse"))
        self.amazingmidi_label.setText(_translate("Dialog", "AmazingMIDI [.exe]"))
        self.muse_score_label.setText(_translate("Dialog", "MuseScore [.exe]"))
        self.muse_score_pushButton.setText(_translate("Dialog", "Browse"))
        self.configure_path_label.setText(_translate("Dialog", "Configure the path for each program."))
        self.mp3_to_midi_converter_label.setText(_translate("Dialog", "MP3 to MIDI Converter:"))
        self.open_source_radioButton.setText(_translate("Dialog", "Open-Source"))
        self.close_source_radioButton.setText(_translate("Dialog", "Close-Source"))
        self.piano_port_label.setText(_translate("Dialog", "Piano Port"))
        self.piano_size_label.setText(_translate("Dialog", "Piano Size"))
        self.portsettings_label.setText(_translate("Dialog", "Port Settings"))
        self.arduinoport_label.setText(_translate("Dialog", "Arduino Port"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.path_and_comm_tab), _translate("Dialog", "Path and Communication Settings"))
        # Tutoring (timing/color) tab.
        self.timingsettings_label.setText(_translate("Dialog", "Timing Settings"))
        self.colorsettings_label.setText(_translate("Dialog", "Color Settings"))
        self.black_key_label.setText(_translate("Dialog", "Black Keys"))
        self.white_key_label.setText(_translate("Dialog", "White Keys"))
        self.wrong_label.setText(_translate("Dialog", "Wrong Note"))
        self.upcoming_label.setText(_translate("Dialog", "Upcoming Note"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.color_tab), _translate("Dialog", "Tutoring Settings"))
        #-----------------------------------------------------------------------
        # Text Scaling: start from a 13px base and scale by the width ratio.
        font = self.anthemscore_label.font()
        font.setPixelSize(13)
        print("Prescaling Font Pixel Size: ", font.pixelSize())
        font.setPixelSize(font.pixelSize() * globals.S_W_R)
        print("Postscaling Font Pixel Size: ", font.pixelSize())
        # Every widget whose font must follow the scaled size.
        text_group = [self.anthemscore_pushButton, self.anthemscore_label, self.anthemscore_lineEdit,
                      self.audiveris_pushButton, self.audiveris_label, self.audiveris_lineEdit,
                      self.amazingmidi_pushButton, self.amazingmidi_label, self.amazingmidi_lineEdit,
                      self.muse_score_pushButton, self.muse_score_label, self.muse_score_lineEdit,
                      self.configure_path_label, self. mp3_to_midi_converter_label,
                      self.piano_port_label, self.piano_size_label, self.piano_size_comboBox,
                      self.portsettings_label, self.arduinoport_label, self.piano_port_comboBox,
                      self.arduino_port_comboBox, self.timingsettings_label, self.colorsettings_label,
                      self.black_key_label, self.white_key_label, self.wrong_label, self.upcoming_label,
                      self.arduino_baud_rate_comboBox, self.open_source_radioButton,
                      self.close_source_radioButton, self.chord_tick_tolerance_label,
                      self.chord_tick_tolerance_lineEdit, self.chord_sum_tolerance_label,
                      self.chord_sum_tolerance_lineEdit, self.record_chord_tolerance_label,
                      self.record_chord_tolerance_lineEdit, self.arduino_handshake_timeout_label,
                      self.arduino_handshake_timeout_lineEdit, self.apply_close_buttonBox,
                      self.tabWidget]
        for element in text_group:
            element.setFont(font)
#-------------------------------------------------------------------------------
# Main Code
if __name__ == "__main__":
    # Manual test entry point: show the settings dialog on its own.
    app = QtWidgets.QApplication(sys.argv)
    config_dialog = ConfigDialog()
    config_dialog.show()
    sys.exit(app.exec_())
| 33,787 | 11,948 |
import requests
from .values import ROUTES
from .values import LOCALES
from .values import REGIONS
from .values import ENDPOINTS
def value_check(*args):
    """Validate that every argument is a known route, locale or region.

    Args:
        *args: Values checked against ROUTES + LOCALES + REGIONS.

    Returns:
        bool: True when all arguments are valid.

    Raises:
        ValueError: If any argument is not a recognized value.
    """
    KEYS = ROUTES + LOCALES + REGIONS
    # Bug fix: the original returned True after inspecting only the first
    # argument; now every argument is validated before returning.
    for arg in args:
        if arg not in KEYS:
            raise ValueError(f"invalid API value: {arg!r}")
    return True
class WebCaller(object):
    """Thin wrapper around the Riot web API using a persistent session."""

    def __init__(self, token: str, locale: str, region: str, route: str):
        """
        Args:
            token: Riot API token, sent via the X-Riot-Token header.
            locale: Locale code appended to every request as a query param.
            region: Platform routing value used by default in URLs.
            route: Regional routing value used when ``route=True`` in call().

        Raises:
            ValueError: If locale, region or route is not a known value.
        """
        self.base = "https://{root}.api.riotgames.com/"
        self.eps = ENDPOINTS["web"]
        self.sess = requests.Session()
        self.sess.params.update({"locale": locale})
        self.sess.headers.update(
            {
                "Accept-Charset": "application/x-www-form-urlencoded; charset=UTF-8",
                "User-Agent": "Mozilla/5.0",
                "X-Riot-Token": token,
            }
        )
        # value_check raises on bad input, so the assignments only run for
        # validated values.
        if value_check(locale, region, route):
            self.locale = locale
            self.region = region
            self.route = route

    def call(self, m: str, ep: str, params=None, route=False, **kw):
        """Perform one API request and return the decoded JSON body.

        Args:
            m: HTTP method, e.g. "GET".
            ep: Endpoint key into ENDPOINTS["web"].
            params: Optional extra query parameters.
            route: When True, build the URL with the regional route instead
                of the platform region.
            **kw: Values interpolated into the endpoint path template.

        Raises:
            ValueError: If ``ep`` is not a known endpoint key.
            requests.HTTPError: If the response status indicates an error.
        """
        # Idiom fix: direct dict membership instead of list(self.eps.keys()),
        # and a ValueError that says what was wrong.
        if ep not in self.eps:
            raise ValueError(f"unknown endpoint: {ep!r}")
        prefix = self.base.format(root=self.route if route else self.region)
        url = prefix + self.eps[ep].format(**kw)
        r = self.sess.request(m, url, params=params)
        r.raise_for_status()
        return r.json()
class ClientCaller(object):
    """Session holder for player-data (pd) endpoints of the client API."""

    def __init__(self, token: str):
        self.base = "https://pd.{code}.a.pvp.net/"
        self.token = token
        self.sess = requests.Session()
        # All client calls authenticate with the bearer token and the
        # entitlements JWT header.
        default_headers = {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
            "X-Riot-Entitlements-JWT": "riot_entitlement",
        }
        self.sess.headers.update(default_headers)
| 1,814 | 562 |
for i in range(30):
print("a_", end="")
print()
for i in range(30):
print("b_", end="")
print()
for i in range(30):
print("c_", end="")
| 153 | 66 |
import os
import pytest
from typing import Any, Callable, Dict, List
import LearnSubtitles as ls
def prepare(language: str) -> List:
    """Create LearnSubtitles objects for every subtitle in folder *language*."""
    test_dir = "testfiles/" + language
    subs = []
    for entry in os.listdir(test_dir):
        full_path = os.path.abspath(os.path.join(test_dir, entry))
        subs.append(ls.LearnSubtitles(full_path, language))
    return subs
languages = ["de", "en", "pt"]  # supported languages: German, English, Portuguese
def test_LearnSubtitles_parsing():
    """Every supported language's subtitle files must parse to non-empty text."""
    for language in languages:
        for sub in prepare(language):
            assert len(sub.text) != 0
def test_LearnSubtitles_bad_file():
    """A missing path raises FileNotFoundError; an unparsable subtitle file
    raises LearnSubtitlesError."""
    with pytest.raises(FileNotFoundError):
        ls.LearnSubtitles(os.path.abspath("testfiles/fail/fail.srt"), "en")
    with pytest.raises(ls.LearnSubtitlesError):
        ls.LearnSubtitles(os.path.abspath("testfiles/fail/bad_file.srt"), "en")
def test_LearnSubtitles_level():
    """Estimated film difficulty must strictly decrease from A1 to B1 material."""
    subs = []
    for level in ["A1", "A2", "B1"]:
        path = "testfiles/de/Nicos Weg – " + level + " – Ganzer Film - German.srt"
        subs.append(ls.LearnSubtitles(path, "de"))
    assert subs[0].film_level > subs[1].film_level
    assert subs[1].film_level > subs[2].film_level
| 1,256 | 446 |
import pandas as pd
import numpy as np
from time import time
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
# Load train/test sheets and split into feature matrix / label vector.
train = pd.read_excel('stats.xls', sheet_name='train')
test = pd.read_excel('stats.xls', sheet_name='test')
array_train = train.values
array_test = test.values
X = array_train[0:, 1:11]          # columns 1..10 hold the features
y = np.asarray(train['状态'], dtype="|S6")  # label column
X_test = array_test[0:, 1:11]
# Build a forest and compute the feature importances.
# Bug fixes: the original print referenced an undefined `n_jobs`, and
# max_features=128 exceeds the 10 available features (sklearn raises at
# fit time); max_features=None uses every feature.
print("Fitting ExtraTreesClassifier on the stats data...")
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
                              max_features=None,
                              random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
# Plot feature importances as a bar chart. (The original reshaped via an
# undefined `data.images` — a leftover from the faces example — and crashed.)
plt.bar(np.arange(importances.size), importances)
plt.title("Feature importances with forests of trees")
plt.show()
#
# X_indices = np.arange(X.shape[-1])
#
# # #############################################################################
# # Univariate feature selection with F-test for feature scoring
# # We use the default selection function: the 10% most significant features
# selector = SelectPercentile(f_classif, percentile=10)
# selector.fit(X, y)
# scores = -np.log10(selector.pvalues_)
# scores /= scores.max()
# plt.bar(X_indices - .45, scores, width=.2,
# label=r'Univariate score ($-Log(p_{value})$)', color='darkorange',
# edgecolor='black')
#
# # #############################################################################
# # Compare to the weights of an SVM
# clf = svm.SVC(kernel='linear')
# clf.fit(X, y)
#
# svm_weights = (clf.coef_ ** 2).sum(axis=0)
# svm_weights /= svm_weights.max()
#
# plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
# color='navy', edgecolor='black')
#
# clf_selected = svm.SVC(kernel='linear')
# clf_selected.fit(selector.transform(X), y)
#
# svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
# svm_weights_selected /= svm_weights_selected.max()
#
# plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
# width=.2, label='SVM weights after selection', color='c',
# edgecolor='black')
#
#
# plt.title("Comparing feature selection")
# plt.xlabel('Feature number')
# plt.yticks(())
# plt.axis('tight')
# plt.legend(loc='upper right')
# plt.show() | 2,455 | 877 |
#-*- coding: utf-8 -*-
from logpot.admin.base import AuthenticateView
from logpot.utils import ImageUtil
from flask import flash, redirect
from flask_admin import expose
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin.babel import gettext
import os
import os.path as op
from operator import itemgetter
from datetime import datetime
class EntryFileView(AuthenticateView, FileAdmin):
    """Read-only file browser for entry images in the admin UI.

    Bug fix: the permission flags were previously assigned inside
    ``__init__`` as *local variables*, so they never overrode FileAdmin's
    class-level defaults. They are now proper class attributes.
    """
    can_delete = False
    can_upload = False
    can_mkdir = False
    allowed_extensions = ImageUtil.ALLOWED_EXTENSIONS

    def __init__(self, dirpath, **kwargs):
        super().__init__(dirpath, **kwargs)
| 618 | 181 |
from .emailutil import * | 24 | 7 |
from __future__ import annotations
import multiprocessing
import os
import re
import sys
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from urllib.parse import urlparse
import click
import requests
from requests.models import HTTPError
from rich.progress import (
BarColumn,
DownloadColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
@dataclass
class DownloadFile:
    """One download job: candidate mirror URLs plus a destination folder.

    Attributes:
        urls: Candidate URLs, tried in order until one succeeds.
        dest: Destination directory; defaults to the current working
            directory at *instance creation* time.
        filename: Output file name; derived from the first URL but may be
            overridden afterwards (e.g. by an aria2 ``out=`` directive).
    """

    urls: list[str]
    # Bug fix: `dest: Path = Path.cwd()` evaluated cwd once at class
    # definition (import) time; default_factory evaluates per instance.
    dest: Path = field(default_factory=Path.cwd)
    filename: str = field(init=False)

    def __post_init__(self):
        # Default output name: last path component of the first URL.
        self.filename = Path(self.urls[0]).name

    @property
    def filepath(self) -> Path:
        """Full destination path for the downloaded file."""
        return self.dest / self.filename
# Chunk size (bytes) used when streaming response bodies to disk.
BUFFER_SIZE = 32768
# Shared rich progress renderer: one task per in-flight download, showing
# file name, bar, percentage, downloaded size, transfer speed and ETA.
progress = Progress(
    TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
    BarColumn(bar_width=None),
    "[progress.percentage]{task.percentage:>3.1f}%",
    "•",
    DownloadColumn(),
    "•",
    TransferSpeedColumn(),
    "•",
    TimeRemainingColumn(),
)
def parse_aria2(data: list[str], destination: Path):
    """Parse aria2-style input lines into DownloadFile entries.

    URL lines (tab-separated mirrors) start a new entry; an indented
    ``out=NAME`` line renames the most recent entry. Comment and blank
    lines are skipped.
    """
    out_re = re.compile(r"^\s+out=(?P<out>.*)$")
    files = []
    for line in data:
        if not line or line.startswith("#"):
            continue
        if line.startswith("http"):
            files.append(DownloadFile(line.split("\t"), destination))
            continue
        matched = out_re.match(line)
        if matched:
            files[-1].filename = matched["out"]
    return files
def get_inputs(inputs: list[str], destination: Path, aria2_compatibility: bool):
    """Read every input file and collect its DownloadFile entries.

    With aria2 compatibility the aria2 list format is parsed; otherwise
    each http(s) line is treated as a single-URL download.
    """
    paths = []
    for input_file in inputs:
        lines = Path(input_file).read_text().splitlines(keepends=False)
        if aria2_compatibility:
            parsed = parse_aria2(lines, destination)
        else:
            parsed = [
                DownloadFile([url], destination)
                for url in lines
                if url.startswith("http")
            ]
        paths.extend(parsed)
    return paths
def downloader(downloadfile: DownloadFile, buffer_size: int, quiet: bool):
    """Download one file, trying each mirror URL until one responds.

    Streams the body to ``downloadfile.filepath`` in ``buffer_size`` chunks
    and, unless *quiet*, drives a task on the shared ``progress`` display.
    Prints a message when every URL fails.
    """
    if not quiet:
        task_id = progress.add_task(
            "download",
            filename=downloadfile.filename,
        )
    iterator = iter(downloadfile.urls)
    response = None
    try:
        while not response:
            url = next(iterator)  # raises StopIteration once URLs run out
            try:
                response = requests.get(url, allow_redirects=True, stream=True)
                response.raise_for_status()
            except HTTPError:
                response = None
        if not quiet:
            # Bug fix: the content-length header may be absent (e.g. chunked
            # encoding); int(None) used to raise TypeError here. A None
            # total renders the task as indeterminate.
            content_length = response.headers.get("content-length")
            size = int(content_length) if content_length is not None else None
            progress.update(task_id, total=size)
        with open(downloadfile.filepath, "wb") as handler:
            if not quiet:
                progress.start_task(task_id)
            for data in response.iter_content(chunk_size=buffer_size):
                handler.write(data)
                if not quiet:
                    progress.update(task_id, advance=len(data))
    except StopIteration:
        print("Urls are not available")
def executor(threads, downloadfiles, buffer_size, quiet):
    """Fan the downloads out over a thread pool, longest filenames first."""
    ordered = sorted(downloadfiles, key=lambda df: len(df.filename), reverse=True)
    with ThreadPoolExecutor(max_workers=threads) as pool:
        for downloadfile in ordered:
            try:
                # Sanity-check every mirror URL before queueing the job.
                for url in downloadfile.urls:
                    urlparse(url)
            except ValueError:
                print(f"An url in {downloadfile.urls} is not valid!", file=sys.stderr)
                continue
            pool.submit(downloader, downloadfile, buffer_size, quiet)
@click.command()
@click.option(
    "-t",
    "--threads",
    default=lambda: multiprocessing.cpu_count(),
    type=click.IntRange(min=1, max=1000, clamp=True),
    help="thread number",
)
@click.option(
    "-i",
    "--input",
    "inputs",
    multiple=True,
    type=click.Path(exists=True, file_okay=True),
    help="input file",
)
@click.option("-q", "--quiet", is_flag=True)
@click.option(
    "-d",
    "--destination",
    type=click.Path(dir_okay=True, allow_dash=True),
    default=Path(os.getcwd()),
)
@click.option("--aria2-compatibility", is_flag=True)
@click.option(
    "--buffer-size", type=click.IntRange(min=1, clamp=True), default=BUFFER_SIZE
)
@click.argument("urls", nargs=-1, type=click.Path())
def fast_downloader(
    threads, inputs, quiet, destination, buffer_size, aria2_compatibility, urls
):
    # CLI entry point: combine direct command-line URLs with entries parsed
    # from every --input file, then run them through the thread-pool
    # executor. (Comment rather than docstring: click would surface a
    # docstring as --help text.)
    download_urls = (DownloadFile([url], Path(destination)) for url in urls)
    download_files = list(
        chain(download_urls, get_inputs(inputs, Path(destination), aria2_compatibility))
    )
    if quiet:
        executor(threads, download_files, buffer_size, quiet)
    else:
        # The live display must be started/stopped around the downloads so
        # it renders and cleans up correctly.
        with progress:
            executor(threads, download_files, buffer_size, quiet)
if __name__ == "__main__":
    # Script entry point; click parses sys.argv.
    fast_downloader()
| 4,888 | 1,446 |
import os
import time
from hashlib import sha256
import requests
from dotenv import load_dotenv
from fastapi.security import OAuth2PasswordBearer
# Resolve the .env file relative to this module so it loads from any CWD.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
load_dotenv(os.path.join(BASE_DIR, "../.env"))
# FastAPI dependency extracting the bearer token; tokens are issued by the
# activate-login-code endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="api/v1/activate-login-code")
def create_access_code(email: str, expires_in: int) -> tuple:
    """Generate a one-time login code and its expiry timestamp.

    Args:
        email: Address the code is issued for.
        expires_in: Lifetime of the code in seconds.

    Returns:
        tuple: (sha256 hex-digest login code, unix expiry timestamp).
    """
    # Bug fix: time.time() was called twice, so the expiry was based on a
    # slightly later instant than the code itself; sample the clock once.
    link_create_time = time.time()
    link_expire_time = link_create_time + expires_in
    user_data = f"{email}{link_create_time}"
    login_code = sha256(user_data.encode("utf-8")).hexdigest()
    return (login_code, link_expire_time)
def verified_email(email: str) -> bool:
    """Check *email* against the external real-email verification API.

    Returns False only when the API reports status "invalid".
    """
    response = requests.get(
        os.environ.get("REAL_EMAIL_API_LINK"),
        params={"email": email},
        headers={"Authorization": "Bearer " + os.environ.get("REAL_EMAIL_API_KEY")},
    )
    status = response.json()["status"]
    return status != "invalid"
def cookie_is_none(auth_token: str) -> bool:
    """Return True when no auth-token cookie is present (empty or None)."""
    return not auth_token
| 1,060 | 372 |
"""Correlate based on geograpgic information."""
from alert_manager import AlertManager
from utility import Utility
class GeoCorrelator(object):
    """Geographic correlator.

    Compares incoming GPS scans against the first scan ever seen (the
    anchor) and against the local clock, producing alerts for position or
    time drift beyond the configured thresholds.
    """

    def __init__(self, device_id):
        """Initialize the Geographic Correlator.

        Args:
            device_id: Device identifier forwarded to AlertManager when
                building alerts.
        """
        # Anchor scan; empty until the first geo event arrives, then fixed.
        self.geo_anchor = {}
        # Distance alert threshold, in km (see geo_drift_check).
        self.threshold = 100
        # Time drift alert threshold, in minutes (see time_drift_check).
        self.time_threshold = 10
        self.device_id = device_id

    def correlate(self, scan_bolus):
        """Correlate one geo event.

        The first time we get a geo event, we set the state and print a
        message to stdout to that effect. Every subsequent message is
        compared against the geo_anchor. Once the anchor is set, it does not
        change for the life of the instance. Correlation of subsequent
        events causes the distance between the anchor and current event to
        be determined; if the distance threshold (``self.threshold``, in km)
        is exceeded, an alert is returned. Time drift is also checked for
        every event after the first.

        Args:
            scan_bolus (tuple): Two-item tuple. Position 0 contains the scan
                type, which is not checked. We should only ever have geo
                events coming through this method. Position 1 is expected to
                contain geo json.

        Returns:
            list: List of alerts. If no alerts are fired, the list returned
                is zero-length.
        """
        scan_body = scan_bolus[1]
        if self.geo_anchor == {}:
            self.geo_anchor = scan_body
            print("GeoCorrelator: Setting anchor to %s" % str(scan_body))
            alerts = []
        else:
            alerts = GeoCorrelator.geo_drift_check(self.geo_anchor, scan_body,
                                                   self.threshold,
                                                   self.device_id)
            for alert in GeoCorrelator.time_drift_check(scan_body,
                                                        self.time_threshold,
                                                        self.device_id):
                alerts.append(alert)
        # Stamp every alert with the originating site/sensor metadata.
        for alert in alerts:
            alert[1]["site_name"] = scan_body["site_name"]
            alert[1]["sensor_name"] = scan_body["sensor_name"]
            alert[1]["sensor_id"] = scan_body["sensor_id"]
        return alerts

    @classmethod
    def geo_drift_check(cls, geo_anchor, gps_scan, threshold, device_id):
        """Fire alarm if distance between points exceeds threshold.

        Args:
            geo_anchor (dict): Geographic anchor point, usually stored in an
                instance variable and passed in via the `correlate()` method.
            gps_scan (dict): Current scan; expects the same format
                as `geo_anchor`.
            threshold (int): Alerting threshold in km.
            device_id: Device identifier used when building the alert.

        Returns:
            list: list of alerts (usually just one) or an empty list if there
                are no alerts.
        """
        # GeoJSON stores coordinates as [longitude, latitude].
        lat_1 = geo_anchor["location"]["coordinates"][1]
        lon_1 = geo_anchor["location"]["coordinates"][0]
        lat_2 = gps_scan["location"]["coordinates"][1]
        lon_2 = gps_scan["location"]["coordinates"][0]
        current_distance = Utility.calculate_distance(lon_1, lat_1,
                                                      lon_2, lat_2)
        if current_distance < threshold:
            return []
        else:
            message = "Possible GPS spoofing attack! %d delta from anchor at %s / %s %s !" % (current_distance, gps_scan["site_name"], gps_scan["sensor_name"], Utility.create_gmaps_link(lat_1, lon_1))  # NOQA
            alert = AlertManager(device_id).build_alert(300, message,
                                                        gps_scan["location"])
            return[alert]

    @classmethod
    def time_drift_check(cls, gps_scan, threshold_mins, device_id):
        """Checks drift value, alarms if beyond threshold.

        Args:
            gps_scan (dict): Scan containing a "time_drift" value (minutes).
            threshold_mins (int): Alerting threshold in minutes.
            device_id: Device identifier used when building the alert.

        Returns:
            list: One alert when drift meets or exceeds the threshold,
                otherwise an empty list.
        """
        current_delta = gps_scan["time_drift"]
        if current_delta < threshold_mins:
            return []
        else:
            message = "Possible GPS time spoofing attack! %d delta from system at %s / %s" % (current_delta, gps_scan["site_name"], gps_scan["sensor_name"])  # NOQA
            alert = AlertManager(device_id).build_alert(310, message,
                                                        gps_scan["location"])
            return[alert]
| 4,362 | 1,192 |
import numpy as np
import matplotlib.pyplot as plt
# Grid-search axes used when the result files were generated.
N = [20, 40, 50, 75, 100, 150, 200]
scale = [0.0001, 0.001, 0.005, 0.01, 0.1, 1, 10]
mem = [0.001, 0.01, 0.1, 0.13, 0.25, 0.5, 1]
sigexp = [0.01, 0.1, 0.5, 1, 2, 5, 10]
# Map "N_scale_mem_sigexp" parameter keys to their F1 score.
val_key = {}
with open("./grid_search_results_v1/F1_report.txt") as f:
    for i, line in enumerate(f):
        lineval = line.split()[0]
        print("line {0} = {1}".format(i, lineval))
        # Lines look like "<prefix><params>.txt:<score>"; the first 7 chars
        # are the report prefix, the rest up to ".txt:" is the parameter key.
        val_key[lineval.split(".txt:")[0][7:]] = float(lineval.split(".txt:")[1])
# Bug fix: np.float was deprecated and removed in NumPy 1.24; use the
# builtin float dtype instead.
F1_matrix = np.zeros((len(scale), len(mem)), dtype=float)
N_i = str(200)
sigexp_i = str(0.1)
for i in range(len(scale)):
    scale_i = str(scale[i])
    for j in range(len(mem)):
        mem_i = str(mem[j])
        key_i = N_i + "_" + scale_i + "_" + mem_i + "_" + sigexp_i
        F1_matrix[i, j] = val_key[key_i]
# Heatmap of F1 over the (scale, mem) slice at N=200, sigexp=0.1.
fig, ax = plt.subplots()
im = ax.imshow(F1_matrix)
ax.set_title("Grid search F1 opt")
ax.set_xticks(np.arange(len(mem)))
ax.set_yticks(np.arange(len(scale)))
ax.set_xticklabels(mem)
ax.set_yticklabels(scale)
ax.set_xlabel('mem')
ax.set_ylabel('scale')
cbar = ax.figure.colorbar(im, ax=ax)
# Annotate each cell with its F1 value.
for i in range(len(scale)):
    for j in range(len(mem)):
        text = ax.text(j, i, F1_matrix[i, j],
                       ha="center", va="center", color="w")
| 1,697 | 647 |
#!/usr/bin/env python
# Minimal demo entry point for the packaged module.
print("hey there, this is my first pip package")
| 72 | 24 |
from .django_q import AstToDjangoQVisitor
from .django_q_ext import *
from .shorthand import apply_odata_query
| 111 | 38 |
# scrape articles from RAND site, see https://vashu11.livejournal.com/20523.html
import re
import requests
from bs4 import BeautifulSoup
import os
# Index pages for RAND "papers": the landing page plus pages 2..107.
content = ['https://www.rand.org/pubs/papers.html'] + ['https://www.rand.org/pubs/papers.{}.html'.format(i) for i in range(2, 108)]
def get_articles(page):
    """Return absolute article URLs listed on one papers index page."""
    response = requests.get(page)
    soup = BeautifulSoup(response.content, 'html.parser')
    anchors = soup.findAll('a', attrs={'href': re.compile("/pubs/papers/.*")})
    return [('https://www.rand.org' + anchor.get('href')) for anchor in anchors]
def get_pdfs(link):
    """Return a set of (article title, absolute pdf URL) pairs for one article page."""
    page = requests.get(link)
    soup = BeautifulSoup(page.content, 'html.parser')
    name = soup.findAll('h1', attrs={'id': 'RANDTitleHeadingId'})[0].text
    pairs = set()
    for anchor in soup.findAll('a', attrs={'href': re.compile(".*\.pdf")}):
        href = anchor.get('href')
        # Relative links need the site domain prepended.
        prefix = '' if 'http' in href else 'https://www.rand.org'
        pairs.add((name, prefix + href))
    return pairs
# Download every pdf of every article listed on the remaining index pages.
# Bug fix: os.mkdir crashed with FileExistsError on re-runs.
os.makedirs('pdfs', exist_ok=True)
for page in content[11:]:
    print('PAGE', page)
    for article in get_articles(page):
        print('ARTICLE', article)
        for index, (name, link) in enumerate(get_pdfs(article)):
            # Disambiguate multiple pdfs of one article: name, name_1, ...
            if index > 0:
                name += '_{}'.format(index)
            print('NAME', name)
            response = requests.get(link)
            print('LEN', len(response.content))
            # Replace characters that are unsafe in filenames.
            safe_name = re.sub(r'[^\w\-_\. ]', '_', name)
            with open('./pdfs/' + safe_name + '.pdf', 'wb') as f:
                f.write(response.content)
| 1,457 | 518 |
from dbnd._core.commands.metrics import log_snowflake_table
from dbnd_snowflake.snowflake_resources import log_snowflake_resource_usage
# Public API of this package.
__all__ = [
    "log_snowflake_resource_usage",
    "log_snowflake_table",
]
| 215 | 90 |
# A parser for multiple FINO2 .dat files in a directory.
import os
import pathlib
import pandas as pd
import numpy as np
import glob
import sys
class fino2_dats:
    """FINO2 data class.

    Parses FINO2 offshore-mast .dat files from a directory into pandas
    time series, one file per measurement height.
    """
    def __init__(self, info, conf):
        """
        Args:
            info: Mapping with 'path' (data directory, relative to the
                parent of the CWD), 'var' (source column name) and
                'target_var' (output column name).
            conf: Configuration mapping; currently unused (the height list
                lookup is commented out below).
        """
        # Data directory resolved relative to the parent of the CWD.
        self.path = os.path.join(
            (pathlib.Path(os.getcwd()).parent), str(info['path'])
        )
        self.var = info['var']
        # self.lev = conf['levels']['height_agl']
        self.target_var = info['target_var']
    def get_ts(self, lev):
        """The directory can contain multiple FINO2 files, and each file
        contains data at one height level.
        The function only reads in the one data file whose name contains
        the requested height (e.g. "92m").

        Args:
            lev: Height level in metres; selects the matching .dat file.

        Returns:
            pandas.DataFrame with a datetime index and a single float
            column named ``self.target_var``; implicitly None when no
            file for *lev* is found.
        """
        file_list = glob.glob(os.path.join(self.path, '*.dat'))
        for file in file_list:
            if str(lev)+'m' in file:
                df_all = pd.read_csv(file)
                # Get variable name and column names from the header rows
                var_name = df_all.iloc[0][0].split(': ', 1)[1]
                col_names = df_all.iloc[3][0].split('\t')[1:]
                df = pd.read_csv(file, skiprows=6, sep='\s+')
                # Turn column names into 1st row
                df = pd.DataFrame(np.vstack([df.columns, df]))
                # Combine 2 time columns (date + time), hard coded
                df['t'] = df[0].map(str)+' '+df[1]
                # Drop duplicating columns
                df.pop(0)
                df.pop(1)
                # Reassign column names
                for i in range(len(col_names)):
                    df[col_names[i]] = df[i+2]
                    df.pop(i+2)
                df = df.set_index('t').sort_index()
                df.index = pd.to_datetime(df.index)
                # FINO data are averages centered at each 10-minute period
                # Data between 10:30 and 10:40 are averaged and labelled as
                # 10:35
                # Apply correction to label data at the end of each period
                # Hence data between 10:30 and 10:40 are averaged and labelled
                # as 10:40
                df.index = df.index+pd.Timedelta('5minutes')
                # Extract only 1 column of data
                out_df = df.loc[:, [self.var]]
                out_df.rename(
                    columns={self.var: self.target_var}, inplace=True
                )
                out_df = out_df.astype(float)
                return out_df
| 2,422 | 741 |
import subprocess as sp
import os
import time
import platform
from os.path import exists
# ANSI terminal color escape sequences used for console output.
class color:
    lightblue='\033[1;34m' #light blue
    lightred='\033[1;31m' #light red
    lightgreen='\033[1;32m' #lightgreen
    red='\033[0;31m' #red
    yellow='\033[1;33m' #yellow
    none='\033[0m' #reset / no color
    purple='\033[1;35m' #purple
    cyan='\033[0;36m' #cyan
    green='\033[0;32m' #green
def permissions():
    """Abort unless the script runs as root (directly or via sudo)."""
    if not os.environ.get("SUDO_UID") and os.geteuid() != 0:
        print(color.lightred + "You need to run this script with sudo or as root.")
        time.sleep(0.3)
        quit()


permissions()
def getos():
    """Exit with a message on anything that is not Linux."""
    if platform.system() != "Linux":
        print(color.lightred + "This program only runs on Linux operating systems.")
        time.sleep(2)
        quit()


getos()
def check_file():
    """Remove the dependency flag file if it exists.

    Bug fix: ``exists()`` returns a bool, but the original compared it to
    the *string* ``'True'`` — a condition that can never hold — so the flag
    file was never removed and the code always slept instead.
    """
    if exists("tmp/flag.txt"):
        os.system("rm -rf tmp/flag.txt")
    else:
        time.sleep(0.5)


check_file()
#dependencies
class dependencies:
dependencie1 = 'mdk3'
dependencie2 = 'aircrack-ng'
dependencie3 = 'xterm'
dependencie4 = 'macchanger'
def check_mdk3():
check_d1 = sp.getoutput("bash etc/dpkg-check/dpkg-check-mdk3.sh")
if check_d1 == '0':
mdk3 = 'null'
else:
mdk3 = 'inst'
return mdk3
def check_aircrack():
    """Return 'inst' when aircrack-ng is installed, 'null' otherwise."""
    status = sp.getoutput("bash etc/dpkg-check/dpkg-check-aircrack-ng.sh")
    return 'null' if status == '0' else 'inst'
def check_xterm():
    """Return 'inst' when xterm is installed, 'null' otherwise."""
    status = sp.getoutput("bash etc/dpkg-check/dpkg-check-xterm.sh")
    return 'null' if status == '0' else 'inst'
def check_macchanger():
    """Return 'inst' when macchanger is installed, 'null' otherwise."""
    status = sp.getoutput("bash etc/dpkg-check/dpkg-check-macchanger.sh")
    return 'null' if status == '0' else 'inst'
def export():
    """Write tmp/flag.txt when any dependency is missing; otherwise clean up.

    Bug fix: the original never assigned ``flag`` when every dependency was
    installed, so the later ``if flag == 'null'`` raised NameError.
    """
    missing = (
        check_mdk3() == 'null'
        or check_aircrack() == 'null'
        or check_xterm() == 'null'
        or check_macchanger() == 'null'
    )
    if missing:
        flag = 'null'
    else:
        flag = 'inst'
        time.sleep(1)
    if flag == 'null':
        # signal missing dependencies to the rest of the tool via the flag file
        os.system("echo " + flag + " > tmp/flag.txt")
    else:
        check_file()
| 2,481 | 957 |
#! /usr/bin/env python
'''
This script calculates total heterozygosity.
#Example input:
CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8
chr_1 1 A W N N A N N N N
chr_1 2 C Y Y N C C N C N
chr_1 3 C N C N C C C C C
chr_1 4 T T T N T T T T T
chr_2 1 A A A N A A A A A
chr_2 2 C C C N C C C C C
chr_2 3 C N N N N N N N N
chr_2 4 C C T C C C C C C
chr_2 5 T T C T Y T Y T T
chr_3 1 G G N N G N N N N
chr_3 2 C S C N C C N C N
chr_3 3 N N N N N N N N N
chr_3 4 N T T N T T T T N
chr_3 5 G - N N G G G C G
#Example input2:
CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8
chr_1 1 A/A A/T ./. ./. A/A ./. ./. ./. ./.
chr_1 2 C/C T/C T/C ./. C/C C/C ./. C/C ./.
chr_1 3 C/C ./. C/C ./. C/C C/C C/C C/C C/C
chr_1 4 T/T T/T T/T ./. T/T T/T T/T T/T T/T
chr_2 1 A/A A/A A/A ./. A/A A/A A/A A/A A/A
chr_2 2 C/C C/C C/C ./. C/C C/C C/C C/C C/C
chr_2 3 C/C ./. ./. ./. ./. ./. ./. ./. ./.
chr_2 4 C/C C/C T/T C/C C/C C/C C/C C/C C/C
chr_2 5 T/T T/T C/C T/T T/C T/T T/C T/T T/T
chr_3 1 G/G G/G ./. ./. G/G ./. ./. ./. ./.
chr_3 2 C/C G/C C/C ./. C/C C/C ./. C/C ./.
chr_3 3 ./. ./. ./. ./. ./. ./. ./. ./. ./.
chr_3 4 ./. T/T T/T ./. T/T T/T T/T T/T ./.
chr_3 5 G/G -/- ./. ./. G/G G/G G/G C/C G/G
#Example output:
test.tab 0.1125
#command:
$ python calculate_Total-Hetero.py -i input.tab -o output.tab -s "sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8"
#contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
'''
############################# modules #############################
import calls # my custom module
import numpy as np
############################# options #############################
# command-line interface (parser class comes from the project's `calls` module)
parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help = 'name of the input file', type=str, required=True)
parser.add_argument('-o', '--output', help = 'name of the output file', type=str, required=True)
parser.add_argument('-s', '--samples', help = 'column names of the samples to process (optional)', type=str, required=False)
args = parser.parse_args()
# check if samples names are given and if all sample names are present in a header
sampleNames = calls.checkSampleNames(args.samples, args.input)
############################# functions #############################
############################# program #############################
print('Opening the file...')
counter = 0
with open(args.input) as datafile:
    # first line is the header: CHROM POS REF sample1 ... sampleN
    header_words = datafile.readline().split()
    # column indices of the requested samples
    sampCol = calls.indexSamples(sampleNames, header_words)
    # number of samples processed per position
    nSample = len(sampleNames)

    ############################## perform counting ####################
    print('Counting heterozygots ...')
    Hcount = []
    for line in datafile:
        words = line.split()
        # select the requested sample columns
        sample_charaters = calls.selectSamples(sampCol, words)
        # convert two-character genotypes ("A/T") to one-character IUPAC code
        if any("/" in gt for gt in sample_charaters):
            sample_charaters = calls.twoToOne(sample_charaters)
        # count heterozygotes per position, excluding missing ('N') calls
        Nmising = calls.countPerPosition(sample_charaters, 'N')
        nHeter = calls.countHeteroPerPosition(sample_charaters)
        nTotal = float(nSample - Nmising)
        if nTotal != 0:
            Hcount.append(nHeter / nTotal)
        # track progress
        # (bug fix: the original used a Python 2 `print` statement here,
        # which is a SyntaxError under Python 3)
        counter += 1
        if counter % 1000000 == 0:
            print('%s lines processed' % counter)

# write the mean heterozygosity for the whole file
heteroT = round(np.mean(Hcount), 4)
with open(args.output, 'w') as outputFile:
    outputFile.write("%s\t%s\n" % (args.input, heteroT))
print('Done!')
| 3,880 | 1,594 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 27 17:24:58 2018
@author: Mauro
"""
#==============================================================================
# Imports
#==============================================================================
import struct
#==============================================================================
# Helpers
#==============================================================================
def as_bytes(dtype, data):
    """Pack *data* into its binary form using struct format code *dtype* (e.g. 'I', 'd', 'c')."""
    return struct.pack(dtype, data)
#==============================================================================
# Constants
#==============================================================================
# struct format code -> size in bytes, for the field types this module supports
type_to_size = {
    'I': 4,  # unsigned int
    'd': 8,  # double
    'c': 1,  # single char/byte
}
#==============================================================================
# Binary file class
#==============================================================================
class BinaryFile:
    '''Reads/writes typed values on a file object with a cumulative offset.'''

    def __init__(self, fobj, co = 0):
        '''
        fobj is a binary file object; co is the cumulative offset at which
        the next read/write operation starts.
        '''
        self.file = fobj
        self.co = co

    def write(self, dtype, data):
        ''' Writes one value packed as struct code dtype and moves the offset.'''
        self.file.seek(self.co)
        b = struct.pack(dtype, data)
        self.file.write(b)
        self.co += len(b)

    def read(self, dtype):
        '''
        Reads one value of struct code dtype, moves the offset and returns
        the unpacked value.
        '''
        self.file.seek(self.co)
        # struct.calcsize replaces the hand-maintained size table
        size_read = struct.calcsize(dtype)
        b = self.file.read(size_read)
        self.co += size_read
        return struct.unpack(dtype, b)[0]

    def write_string(self, string):
        '''
        Writes a UTF-8 string prefixed with its encoded length in bytes.

        Bug fix: the original wrote the number of *characters* while encoding
        in UTF-8, so any multi-byte character desynchronised read_string.
        '''
        encoded = string.encode("utf-8")
        # length prefix counts encoded bytes, not characters
        self.write("I", len(encoded))
        self.file.seek(self.co)
        self.file.write(encoded)
        self.co += len(encoded)

    def read_string(self):
        '''
        Reads a length-prefixed UTF-8 string written by write_string.

        Bug fix: the original decoded as ASCII although strings are written
        UTF-8 encoded, breaking round-trips of non-ASCII text.
        '''
        # read() advances self.co past the 4-byte length prefix
        strlen = self.read("I")
        self.file.seek(self.co)
        b = self.file.read(strlen)
        self.co += strlen
        return b.decode("utf-8")
| 2,748 | 759 |
#!/usr/bin/env python
import pandas as pd
from scipy import stats
import numpy as np
#import seaborn as sns
#import matplotlib.pyplot as plt
import math
from Bio import SeqIO
import io
import re
import pysam
from functools import reduce
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--bam_file", metavar="<BAM>", dest="bam", help="enter the path to the alignment.bam file. By default 'aln_F4.bam' will be used",
type=str, default="aln_F4.bam")
parser.add_argument("--reads_fasta", metavar="<FASTA>", dest="fasta", help="enter the path to the original fasta file being analysed. By default 'reads.fasta' will be used",
type=str, default="reads.fasta")
parser.add_argument("--ident", metavar="<IDENT>", dest="ident", help="enter the int value for minimum identity. By default 80 will be used",
type=int, default= 80)
parser.add_argument("--cov_length", metavar="<COV>", dest="cov", help="enter the int value for minimum coverage length. By default 95 will be used",
type=int, default= 95)
parser.add_argument("--folder_out", metavar="<OUT>", dest="out", help="enter name for output files. By default 'arg_results' will be used",
type=str, default="../out_dir/")
parser.add_argument("--aro_idx", metavar="<IDX>", dest="idx", help="enter the path to the aro_index.csv file. By default 'aro_index.tsv' will be used",
type=str, default="aro_index.tsv")
# print help message for user
parser.print_help()
# get command line arguments
args = parser.parse_args()
# read files from path
bam = args.bam
fasta = args.fasta
ident = args.ident
covlen = args.cov
folder = args.out
idx = args.idx
#read list of cigar tuples and get number of matches (0), insertions (1) or deletions (2)
#auxiliary function in parse_bam()
def read_cigar(lof_tup, idnum):
    """Sum the lengths of all CIGAR tuples whose operation id equals *idnum*.

    CIGAR tuples are (operation, length) pairs; id 0 = match, 1 = insertion,
    2 = deletion.
    """
    return sum(length for op, length in lof_tup if op == idnum)
#Joins information from BAM file in pandas dataframe
#query sequence: query_name, query_length
#reference sequence: reference_name (gives one string, is split into ARO, ID, gene name and NCBI reference id), reference_start, reference_length
#alignment: query_alignment_length, number of mismatches and gaps (tag 'NM)
#calculates sequence identity % (identity(A,B)=100*(identical nucleotides / min(length(A),length(B)))), with identical nucleotides = query_alignment_length - NM
#calculates cover length % (query_alignment_length*100 / reference_length)
# silence pandas chained-assignment warnings triggered by the iloc backfill below
pd.options.mode.chained_assignment = None
def parse_bam(bam_path):
    """Read a BAM file and return one DataFrame row per alignment.

    Columns include query/reference metrics, the reference name split on
    ':' / '|' into ARO, ID, gene name and NCBI id, plus derived columns:
    matches/insertions/deletions (from CIGAR), cov_length (%) and
    cov_identity (%).
    """
    aln_file = pysam.AlignmentFile(bam_path, "rb")
    lst = []
    # loop over alignments, get values per contig and store in list of lists (lst)
    for index, aln in enumerate(aln_file.fetch(until_eof = True)): #index = int(0 ... n), aln = all information on read
        substr = [aln.query_name, aln.query_length, aln.query_alignment_length, aln.get_tag('NM'), aln.reference_length, aln.reference_start, aln.cigartuples]
        # divide information in reference_name: substrings between ':' and '|'
        # NOTE(review): assumes reference names follow the CARD naming scheme
        # with exactly four ':'-prefixed fields — confirm against the index.
        string = str(aln.reference_name)
        start=[]
        stop=[]
        for i, c in enumerate(string):
            if ((c==':')):
                start.append(i+1)
            elif (c=='|'):
                stop.append(i)
            else:
                continue
        stop.append(len(string))
        for i in range(0, len(start)):
            #substr = []
            substr.append(string[start[i]:stop[i]])
        lst.append(substr)
        #print(lst[0:10])
    df = pd.DataFrame(lst, columns=('contig_name', 'contig_length', 'aln_length', 'aln_nm', 'ref_length', 'ref_start', 'c_tuples', 'ref_ARO', 'ref_ID', 'ref_genename', 'ref_NCBI'))
    # get number of matches / insertions / deletions from the cigar tuples
    df['matches'] = df['c_tuples'].apply(lambda x: read_cigar(x, 0))
    df['insertions'] = df['c_tuples'].apply(lambda x: read_cigar(x, 1))
    df['deletions'] = df['c_tuples'].apply(lambda x: read_cigar(x, 2))
    # infer contig_length in repetitions of same contig_name (otherwise the value is 0)
    # NOTE(review): relies on pandas chained assignment via .iloc — works only
    # because the warning is silenced above; order of rows matters here.
    for i in range(1, df.shape[0]-1):
        if (df['contig_name'].iloc[i+1]==df['contig_name'].iloc[i]):
            df['contig_length'].iloc[i+1] = df['contig_length'].iloc[i]
    # coverage length as % of the reference covered by the alignment
    df['cov_length'] = df['aln_length']*100/df['ref_length']
    # Sequence identity is the amount of characters which match exactly between two different sequences.
    # identity(A,B) = 100 * (num identical nucleotides / min(length(A),length(B)))
    df['cov_identity'] = 100*df['matches']/(df.loc[:,['aln_length','ref_length']].min(axis=1))
    return df
#Filter df for highest identity and coverlength rates
def filter_best(df, ident, cov_l):
return df[(df['cov_identity']>=ident) & (df['cov_length']>=cov_l)]
#Filter assembly fasta for contigs of interest (data) and save to out_name.fasta
#for taxonomic analysis
def arg_contigs(data, fasta, out_name):
    """Write the contigs named in *data* from FASTA file *fasta* to *out_name*.

    Used to extract contigs carrying antibiotic resistance genes for
    downstream taxonomic analysis.

    Fixes: the inner loop variable no longer shadows the *fasta* parameter,
    membership is now an O(1) set lookup instead of a nested loop, and both
    files are closed deterministically.
    """
    # contig names that carry antibiotic resistance genes
    wanted = set(data['contig_name'].drop_duplicates().to_list())
    with open(fasta) as fasta_handle, open(out_name, 'w') as out_file:
        for record in SeqIO.parse(fasta_handle, 'fasta'):
            if record.id in wanted:
                out_file.write('>' + record.id + '\n' + str(record.seq) + '\n')
#check for and eliminate less significant (lower cover identity) overlaps
#generate list of index numbers of non-overlapping hits from df sorted by coverage identity (highest first)
#in case of overlaps, keep the hit with the highest coverage identity
def overlaps(df_in):
    """Drop overlapping hits per contig, keeping the highest cov_identity ones.

    Returns the rows of *df_in* (reindexed) whose hits do not overlap a
    better-scoring hit on the same contig.

    NOTE(review): adjacent-but-touching hits (e1 == s2) count as overlapping
    because the comparison uses strict '<'; also `iloc[i]` is reused after
    `readdf` is reassigned inside the loop — looks intentional for the first
    pass but verify for i > 0. `reduce` raises TypeError if `keep` is empty.
    """
    df = df_in.reset_index()
    #list of contig_names
    reads = df['contig_name'].unique()
    #list of indices to keep
    keep = []
    #check overlaps for one contig_name at a time
    for read in reads:
        #create dataframe for each contig_name, sorted by cov_identity, highest value first
        readdf = df[df['contig_name']==read].sort_values(by='cov_identity', ascending=False)
        #list of indices to keep for each read
        k=[]
        #iterate over each enty for one read
        for i in range(0, readdf.shape[0]-1):
            #append first entry of sorted readdf (highest cov_identity) to list of indices to keep for this contig_name
            k.append(readdf['index'].iloc[0])
            #list for indices of contigs not overlapping with first entry
            lst=[]
            #compare first entry with all other entries
            for j in range (i+1, readdf.shape[0]):
                #get start s and end e position of two resistance gene hits
                s1, e1 = readdf['ref_start'].iloc[i], readdf['ref_start'].iloc[i] + readdf['ref_length'].iloc[i]
                s2, e2 = readdf['ref_start'].iloc[j], readdf['ref_start'].iloc[j] + readdf['ref_length'].iloc[j]
                #if there is no overlap, add the entry index to lst
                if (e1<s2 or e2<s1):
                    lst.append(readdf['index'].iloc[j])
            #update readdf, only keep entries with index in lst
            readdf = readdf[readdf['index'].isin(lst)]
            #if updated readdf only contains one entry, add index to k and pass on to next read
            if (readdf.shape[0]==1):
                k.append(readdf['index'].iloc[0])
                break
            #if updated readdf is empty, pass on to next read
            if(readdf.shape[0]==0):
                break
        #append indices for each read to lst keep
        keep.append(k)
    #flatten list of lists (keep)
    keep = reduce(lambda x,y: x+y,keep)
    return(df[df['index'].isin(keep)])
if __name__ == "__main__":
    #extract data of interest from bam file, filter best hits and eliminate overlaps
    result_df = overlaps(filter_best(parse_bam(bam), ident, covlen))
    #add corresponding drug class from CARD aro_index.tsv to result_df
    rgdrug_dict = pd.read_csv(idx, sep='\t').set_index('ARO Name').to_dict()['Drug Class']
    result_df['drug_class'] = result_df['ref_genename'].map(rgdrug_dict)
    #save result_df as tsv (written to the current working directory)
    result_df.to_csv("argHitsDf.tsv", sep='\t')
    #save reads/contigs of hits in result_df in 'argHits.fasta' for further analysis with PlasFlow or Blast/Diamond
    arg_contigs(result_df, fasta, "argHits.fasta")
| 8,726 | 2,817 |
import datetime
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Max
from . import models, forms
from address.biz import geocode
from utils import common
from utils.django_base import BaseAdmin
# Register your models here.
class ParkingPositionInline(admin.TabularInline):
    """Inline editor for parking positions on the parking-lot admin page."""
    model = models.ParkingPosition
    extra = 0
class ParkingLotDocInline(admin.TabularInline):
    """Inline editor for documents attached to a parking lot."""
    model = models.ParkingLotDoc
    form = forms.ParkingLotDocForm
    extra = 0
class ParkingLotImageInline(admin.TabularInline):
    """Inline editor for images attached to a parking lot."""
    model = models.ParkingLotImage
    extra = 0
class ParkingLotCommentInline(admin.TabularInline):
    """Inline editor for free-form comments on a parking lot."""
    model = models.ParkingLotComment
    extra = 0
class ParkingLotKeyInline(admin.TabularInline):
    """Inline editor for keys belonging to a parking lot."""
    model = models.ParkingLotKey
    extra = 0
class ParkingLotStaffHistoryInline(admin.TabularInline):
    """Read-only-add inline listing past staff assignments of a parking lot.

    History rows are created automatically by ParkingLotAdmin.save_model,
    so manual addition is disabled here.
    """
    model = models.ParkingLotStaffHistory
    extra = 0
    def has_add_permission(self, request):
        return False
    # def has_delete_permission(self, request, obj=None):
    #     return False
class ParkingPositionKeyInline(admin.TabularInline):
    """Inline editor for keys belonging to a single parking position."""
    model = models.ParkingPositionKey
    extra = 0
class ManagementCompanyStaffInline(admin.TabularInline):
    """Inline editor for staff members of a management company."""
    model = models.ManagementCompanyStaff
    extra = 0
@admin.register(models.ParkingLotType)
class ParkingLotTypeAdmin(BaseAdmin):
    """Admin list of parking-lot types (code + name)."""
    list_display = ('code', 'name')
    list_display_links = ('code', 'name')
# @admin.register(models.LeaseManagementCompany)
# class LeaseManagementCompanyAdmin(BaseAdmin):
# list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
#
#
# @admin.register(models.BuildingManagementCompany)
# class BuildingManagementCompanyAdmin(BaseAdmin):
# list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
@admin.register(models.ManagementCompany)
class ManagementCompanyAdmin(BaseAdmin):
    """Admin screen for management companies with their staff inline."""
    list_display = ('name', 'address', 'tel', 'email')
    inlines = (ManagementCompanyStaffInline,)
@admin.register(models.TryPuttingOperator)
class TryPuttingOperatorAdmin(BaseAdmin):
    """Default admin for try-putting operators (no customization)."""
    pass
@admin.register(models.ParkingLot)
class ParkingLotAdmin(BaseAdmin):
    """Admin screen for parking lots with comments/docs/images/keys inlines."""
    form = forms.ParkingLotForm
    icon = '<i class="material-icons">local_parking</i>'
    list_display = ('code', 'name', 'category', 'address', 'subscription_list_send_type')
    search_fields = ('code', 'name',)
    inlines = (ParkingLotCommentInline, ParkingLotStaffHistoryInline, ParkingLotDocInline, ParkingLotImageInline,
               ParkingLotKeyInline)
    def save_model(self, request, obj, form, change):
        """Re-geocode on address change and record staff changes to history.

        Raises nothing itself; a missing previous record is swallowed via
        ObjectDoesNotExist so first saves work.
        """
        if change is False or (
                'pref_name' in form.changed_data or
                'city_name' in form.changed_data or
                'town_name' in form.changed_data or
                'aza_name' in form.changed_data or
                'other_name' in form.changed_data
        ):
            # For new records, or when any address component changed,
            # re-fetch the coordinates (and postal code) from the geocoder.
            coordinate = geocode(obj.address)
            if coordinate.get('lng', None):
                obj.lng = coordinate.get('lng', None)
            if coordinate.get('lat', None):
                obj.lat = coordinate.get('lat', None)
            if coordinate.get('post_code', None):
                obj.post_code = coordinate.get('post_code', None)
        # When the staff member changed, append a staff-history record
        # covering the previous assignment period.
        if change and 'staff' in form.changed_data:
            queryset = models.ParkingLotStaffHistory.objects.public_filter(parking_lot=obj)
            try:
                # previous values are read from the DB, not the (changed) form
                last_staff = models.ParkingLot.objects.get(pk=obj.pk).staff
                last_start_date = models.ParkingLot.objects.get(pk=obj.pk).staff_start_date
                history_end_date = queryset.aggregate(Max('end_date')).get('end_date__max', None)
                if (history_end_date is None or history_end_date < obj.staff_start_date) and last_start_date != obj.staff_start_date:
                    # close the previous period one day before the new start
                    models.ParkingLotStaffHistory.objects.create(
                        parking_lot=obj,
                        member=last_staff,
                        start_date=last_start_date,
                        end_date=(obj.staff_start_date + datetime.timedelta(days=-1))
                    )
            except ObjectDoesNotExist:
                pass
        super(ParkingLotAdmin, self).save_model(request, obj, form, change)
@admin.register(models.ParkingPosition)
class ParkingPosition(BaseAdmin):
    """Admin screen for individual parking positions within a lot."""
    form = forms.ParkingPositionForm
    list_display = ('parking_lot', 'name', 'length', 'width', 'height', 'weight')
    list_display_links = ('parking_lot', 'name',)
    search_fields = ('parking_lot__code', 'parking_lot__name')
    fieldsets = (
        (None, {
            'fields': (
                'parking_lot',
                'name', 'category', 'cost',
            )
        }),
        ("賃料", {
            'classes': ('collapse',),
            'fields': (
                ('price_recruitment_no_tax', 'price_recruitment'),
                ('price_homepage_no_tax', 'price_homepage'),
                ('price_handbill_no_tax', 'price_handbill'),
            )
        }),
        ("サイズ", {
            'classes': ('collapse',),
            'fields': (
                ('length', 'width', 'height', 'weight'),
                ('tyre_width', 'tyre_width_ap', 'min_height', 'min_height_ap'),
                ('f_value', 'r_value',),
            )
        }),
        ('備考', {
            'fields': (
                'comment',
            )
        }),
    )
    inlines = (ParkingPositionKeyInline,)
    save_as = True
    def save_model(self, request, obj, form, change):
        """Expand a multi-position name into several rows on creation.

        The name field may encode a range (via common.get_continued_positions)
        or a comma-separated list; each expanded name becomes its own record,
        skipping names that already exist for the lot.
        """
        continued_positions = common.get_continued_positions(obj.name)
        if continued_positions:
            split_positions = []
        else:
            split_positions = [s for s in obj.name.split(',') if s]
        continued_positions.extend(split_positions)
        if not change and continued_positions:
            # Adding multiple parking positions at once: clone the object
            # once per expanded name (pk=None forces an INSERT each time).
            for name in continued_positions:
                if models.ParkingPosition.objects.public_filter(parking_lot=obj.parking_lot, name=name).count() == 0:
                    obj.pk = None
                    obj.name = name
                    obj.save()
        else:
            super(ParkingPosition, self).save_model(request, obj, form, change)
| 6,315 | 1,997 |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Plot position/velocity/phase-space for three integrators (one per column).
# Each .dat file holds three whitespace-separated rows: position, velocity, time.
filenames = ["euler.dat", "rk4.dat", "leapfrog.dat"]

fig, axs = plt.subplots(nrows=3, ncols=3)
# column titles, one integrator per column
for col, title in enumerate(('Euler', 'RK4', 'Leap_frog')):
    axs[0][col].set_title(title)

for i in range(3):
    # bug fix: the files were opened but never closed (resource leak)
    with open(filenames[i], "r") as f:
        s = list(map(float, f.readline().split()))      # position series
        s1 = list(map(float, f.readline().split()))     # velocity series
        time = list(map(float, f.readline().split()))   # time axis
    # row 0: position vs time
    ax = axs[0][i]
    ax.set_xlabel("time")
    ax.set_ylabel("position")  # bug fix: label typo "posistion"
    ax.plot(time, s)
    ax.set_ylim(-1.5, 1.5)
    ax.set_xlim(0, 15)
    # row 1: velocity vs time
    ax = axs[1][i]
    ax.plot(time, s1)
    ax.set_ylim(-1.5, 1.5)
    ax.set_xlim(0, 15)
    ax.set_xlabel("time")
    ax.set_ylabel("velocity")
    # row 2: phase-space portrait
    ax = axs[2][i]
    ax.plot(s, s1)
    ax.set_ylim(-2.0, 2.0)
    ax.set_xlim(-2.0, 2.0)
    ax.set_xlabel("position")
    ax.set_ylabel("velocity")

fig.subplots_adjust(hspace=1, wspace=1)
plt.savefig('graficas.png')
plt.show()
| 965 | 470 |
import requests
from bs4 import BeautifulSoup
from time import sleep
# Scrape the ZIPNET missing-person browse page and dump field/detail pairs.
url = "http://zipnet.in/index.php?page=missing_person_search&criteria=browse_all&Page_No=1"

r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')

# walk every table and print the first two cells of each data row
for table in soup.findAll('table'):
    print('--- table ---')
    for row in table.findAll('tr'):
        all_cols = row.findAll('td')
        if len(all_cols) > 1:
            fields = all_cols[0].string
            details = all_cols[1].string
            # bug fix: a stray trailing "S" after this call was a SyntaxError
            print(fields, details)
| 565 | 191 |
import mailpile.plugins
from mailpile.commands import Command
from mailpile.mailutils import Email, ExtractEmails
from mailpile.util import *
class VCard(Command):
    """Add/remove/list/edit vcards"""
    # NOTE(review): this is Python 2 era code (dict.has_key, bare except);
    # kept as-is to match the surrounding mailpile codebase.
    ORDER = ('Internals', 6)
    KIND = ''
    SYNOPSIS = '<nickname>'
    def command(self, save=True):
        """Look up and return the vcards for each address in self.args."""
        session, config = self.session, self.session.config
        vcards = []
        for email in self.args:
            vcard = config.get_vcard(email)
            if vcard:
                vcards.append(vcard)
            else:
                session.ui.warning('No such contact: %s' % email)
        return vcards
    def _fparse(self, fromdata):
        """Split a From:-style string into (email, display name or email)."""
        email = ExtractEmails(fromdata)[0]
        name = fromdata.replace(email, '').replace('<>', '').strip()
        return email, (name or email)
    def _prepare_new_vcard(self, vcard):
        """Hook for subclasses to initialise a freshly created vcard."""
        pass
    def _valid_vcard_handle(self, vc_handle):
        """A handle is valid when it contains '@' past the first character."""
        return (vc_handle and '@' in vc_handle[1:])
    def _add_from_messages(self):
        """Build (email, name) pairs from the From: headers of chosen messages."""
        pairs, idx = [], self._idx()
        for email in [Email(idx, i) for i in self._choose_messages(self.args)]:
            pairs.append(self._fparse(email.get_msg_info(idx.MSG_FROM)))
        return pairs
    def _pre_delete_vcard(self, vcard):
        """Hook for subclasses to clean up before a vcard is deleted."""
        pass
    def add_vcards(self):
        """Create vcards from args ('<email> = <name>'), POST data or messages."""
        session, config, idx = self.session, self.session.config, self._idx()
        if (len(self.args) > 2
                and self.args[1] == '='
                and self._valid_vcard_handle(self.args[0])):
            pairs = [(self.args[0], ' '.join(self.args[2:]))]
        elif self.data:
            # POST data: single contact or parallel name/email lists
            if self.data.has_key("@contactname") and self.data.has_key("@contactemail"):
                pairs = [(self.data["@contactemail"], self.data["@contactname"])]
            elif self.data.has_key("contactnames") and self.data.has_key("contactemails"):
                pairs = zip(self.data["contactemails"], self.data["contactnames"])
        else:
            pairs = self._add_from_messages()
        if pairs:
            vcards = []
            for handle, name in pairs:
                if handle.lower() not in config.vcards:
                    vcard = config.add_vcard(handle, name, self.KIND)
                    self._prepare_new_vcard(vcard)
                    vcards.append(vcard)
                else:
                    session.ui.warning('Already exists: %s' % handle)
        else:
            return self._error('Nothing to do!')
        return {"contacts": [x.as_mpCard() for x in vcards]}
    def _format_values(self, key, vals):
        """Wrap values for list attributes; MEMBER values become mailto: URIs."""
        if key.upper() in ('MEMBER', ):
            return [['mailto:%s' % e, []] for e in vals]
        else:
            return [[e, []] for e in vals]
    def set_vcard(self):
        """Set (or delete, when value is empty) one attribute of a vcard."""
        session, config = self.session, self.session.config
        handle, var = self.args[0], self.args[1]
        # optional '=' between attribute and value
        if self.args[2] == '=':
            val = ' '.join(self.args[3:])
        else:
            val = ' '.join(self.args[2:])
        try:
            vcard = config.get_vcard(handle)
            if not vcard:
                return self._error('Contact not found')
            # de-index while mutating, re-index after saving
            config.deindex_vcard(vcard)
            if val:
                if ',' in val:
                    vcard[var] = self._format_values(var, val.split(','))
                else:
                    vcard[var] = val
            else:
                del vcard[var]
            vcard.save()
            config.index_vcard(vcard)
            session.ui.display_vcard(vcard, compact=False)
            return True
        except:
            self._ignore_exception()
            return self._error('Error setting %s = %s' % (var, val))
    def rm_vcards(self):
        """Delete the vcards named in self.args."""
        session, config = self.session, self.session.config
        for handle in self.args:
            vcard = config.get_vcard(handle)
            if vcard:
                self._pre_delete_vcard(vcard)
                config.del_vcard(handle)
            else:
                session.ui.error('No such contact: %s' % handle)
        return True
    def find_vcards(self):
        """Search vcards by terms; '--full' as first arg disables compact view."""
        session, config = self.session, self.session.config
        if self.args and self.args[0] == '--full':
            self.args.pop(0)
            compact = False
        else:
            compact = True
        kinds = self.KIND and [self.KIND] or []
        vcards = config.find_vcards(self.args, kinds=kinds)
        #for vcard in vcards:
        #    session.ui.display_vcard(vcard, compact=compact)
        ctx = {}
        ctx["contacts"] = [x.as_mpCard() for x in vcards]
        ctx["query"] = " ".join(self.args)
        ctx["total"] = len(vcards)
        ctx["start"] = 1
        ctx["end"] = len(vcards)
        ctx["count"] = len(vcards)
        return ctx
    # subcommand name -> (handler, synopsis)
    SUBCOMMANDS = {
        'add': (add_vcards, '<msgs>|<email> = <name>'),
        'set': (set_vcard, '<email> <attr> <value>'),
        'list': (find_vcards, '[--full] [<terms>]'),
        'delete': (rm_vcards, '<email>'),
    }
class Contact(VCard):
    """Add/remove/list/edit contacts"""
    # Same behaviour as VCard, restricted to KIND='individual'.
    KIND = 'individual'
    ORDER = ('Tagging', 3)
    SYNOPSIS = '<email>'
    TEMPLATE_IDS = ['contact']
TEMPLATE_IDS = ['contact']
mailpile.plugins.register_command('C:', 'contact=', Contact)
mailpile.plugins.register_command('_vcard', 'vcard=', VCard)
| 4,654 | 1,662 |
import numpy as np
from pylab import *
# D runs are averaged per logged step; arrays are reshaped (D, steps) first.
D = 10
acc1 = np.load('res/small/acc.npy').reshape(D, -1).mean(axis=0)
loss1 = np.load('res/small/loss.npy').reshape(D, -1).mean(axis=0)
acc2 = np.load('res/large/acc.npy').reshape(D, -1).mean(axis=0)
loss2 = np.load('res/large/loss.npy').reshape(D, -1).mean(axis=0)
# only plot the first 40% of the training run
cut = int(acc1.shape[0] / 10 * 4)
print(' 1: %.2f %.6f'%(100*acc1[:cut].max(), loss1[:cut].min()))
print(' 2: %.2f %.6f'%(100*acc2[:cut].max(), loss2[:cut].min()))
# x axis in raw iterations (one logged point every D iterations)
iter_ = np.arange(acc1.shape[0]) * D
print(acc1.shape, iter_.shape[0])
# loss curves
figure()
p = subplot(111)
p.plot(iter_[:cut], loss1[:cut], '-', label='Original CNN')
p.plot(iter_[:cut], loss2[:cut], '-', label='Designed CNN')
p.set_ylim((0, .4))
p.set_xlabel(r'# of Iterations')
p.set_ylabel(r'Loss')
p.legend(loc='upper right')
tight_layout()
savefig("loss.pdf")
# accuracy curves
figure()
p = subplot(111)
p.plot(iter_[:cut], acc1[:cut], '-', label='Original CNN')
p.plot(iter_[:cut], acc2[:cut], '-', label='Designed CNN')
p.set_ylim((.9, 1))
p.set_xlabel(r'# of Iterations')
p.set_ylabel(r'Accuracy')
p.legend(loc='lower right')
tight_layout()
savefig("acc.pdf")
# 1: 23:24:44.414 Testing, total mean loss 0.019417, total acc 0.863300 - 23:24:33.131
# 2s: 20:20:39.807 Testing, total mean loss 0.003224, total acc 0.967700 - 20:18:21.597
# 2r: 20:48:01.448 Testing, total mean loss 0.002306, total acc 0.981300 - 20:45:16.709
#-2r: 20:38:47.940 Testing, total mean loss 0.002271, total acc 0.981500 - 20:35:59.910
# 3s: 00:38:10.865 Testing, total mean loss 0.001759, total acc 0.980098 - 00:33:01.622
# 3r: 21:24:04.253 Testing, total mean loss 0.001675, total acc 0.980588 - 21:19:28.262 | 1,663 | 875 |
from flask_restful import reqparse
def send_api_response(response_code, response_message, http_status, response_data=None):
    """Build a (body, status, headers) triple for a Flask-RESTful endpoint.

    The ``data`` payload is attached only for 200/201 responses; error
    responses carry just the code and message. CORS header is always set.

    Fix: the mutable default argument ``{}`` was replaced with ``None``
    (shared-state pitfall), and the duplicated dict construction was merged.
    """
    body = {
        'responseCode': response_code,
        'responseMessage': response_message,
    }
    if http_status in (200, 201):
        # attach the payload only on success responses
        body['data'] = {} if response_data is None else response_data
    headers = {"Access-Control-Allow-Origin": "*"}
    return body, int(http_status), headers
# shared request parser: a single optional email_address field
parser = reqparse.RequestParser()
parser.add_argument('email_address', help='field cannot be blank.')
| 696 | 186 |
"""
A fake DB-API 2 driver.
"""
# DB names used to trigger certain behaviours.
INVALID_DB = 'invalid-db'          # connect() raises OperationalError
INVALID_CURSOR = 'invalid-cursor'  # cursor creation raises OperationalError
HAPPY_OUT = 'happy-out'            # everything works
# required DB-API 2 module globals (PEP 249)
apilevel = '2.0'
threadsafety = 2
paramstyle = 'qmark'
def connect(database):
    """DB-API 2 module-level connect(): return a fake Connection for *database*."""
    return Connection(database)
class Connection(object):
    """
    A fake connection.

    Records every operation in ``self.session`` so tests can assert on the
    call sequence; ``executed`` counts statements run via its cursors.
    """
    def __init__(self, database):
        super(Connection, self).__init__()
        self.database = database
        self.session = []      # ordered log of operations performed
        self.cursors = set()   # open cursors, closed along with the connection
        self.executed = 0      # number of statements executed via cursors
        if database == INVALID_DB:
            # note: 'valid' is set False *before* raising, so close() fails too
            self.valid = False
            raise OperationalError()
        self.valid = True
    def close(self):
        """Close the connection and all of its cursors (once only)."""
        if not self.valid:
            raise ProgrammingError("Cannot close a closed connection.")
        self.valid = False
        for cursor in self.cursors:
            cursor.close()
        self.session.append('close')
        if self.database == INVALID_DB:
            raise OperationalError()
    def commit(self):
        """Record a commit (no real transaction exists)."""
        self.session.append('commit')
    def rollback(self):
        """Record a rollback (no real transaction exists)."""
        self.session.append('rollback')
    def cursor(self):
        """Return a new fake Cursor; fails on a closed connection."""
        self.session.append('cursor')
        if not self.valid:
            raise InterfaceError()
        return Cursor(self)
class Cursor(object):
    """
    A fake cursor.

    Stashes the last statement's args in ``self.result`` so fetchone()
    can return them once, mimicking a single-row result set.
    """
    def __init__(self, connection):
        self.connection = connection
        self.result = None
        if connection.database == INVALID_CURSOR:
            self.valid = False
            raise OperationalError("You've tripped INVALID_CURSOR!")
        connection.cursors.add(self)
        self.valid = True
        self.rowcount = -1
    def close(self):
        """Record the close and detach from the parent connection."""
        self.connection.session.append('cursor-close')
        if not self.valid:
            raise InterfaceError("Cursor is closed")
        self.connection.cursors.remove(self)
        self.valid = False
    def execute(self, stmt, args=()):
        """Pretend to execute *stmt*, logging its type on the connection.

        Bug fixes: ``args is ()`` compared identity against a literal tuple
        (implementation-defined, SyntaxWarning on CPython 3.8+) and the
        single-target unpacking of ``split(' ', 1)`` raised ValueError for
        any statement containing a space.
        """
        if not self.valid or not self.connection.valid:
            raise InterfaceError()
        stmt = stmt.lstrip().lower()
        # It's the ping!
        if stmt == 'select 1':
            return self
        stmt_type = stmt.split(' ', 1)[0]
        if stmt_type in ('select', 'update', 'insert', 'delete'):
            # empty args -> no pretend result row
            self.result = args if args else None
            self.connection.session.append(stmt_type)
            self.connection.executed += 1
        else:
            self.result = None
            raise ProgrammingError()
    def callproc(self, procname, args=()):
        """Pretend to call a stored procedure, logging 'proc:<name>'."""
        if not self.valid or not self.connection.valid:
            raise InterfaceError()
        self.result = args if len(args) else None
        self.connection.session.append('proc:' + procname)
        self.connection.executed += 1
    def fetchone(self):
        """Return the stashed result once, then None on later calls."""
        if not self.valid:
            raise InterfaceError("Cursor is closed")
        result = self.result
        self.result = None
        return result
    def fetchall(self):
        """Always empty for this fake."""
        return ()
class Warning(Exception):
    """DB-API 2 Warning (intentionally shadows the builtin, per PEP 249)."""

class Error(Exception):
    """Base class of all DB-API 2 errors raised by this fake driver."""

class InterfaceError(Error):
    """Misuse of the driver interface (e.g. a closed cursor or connection)."""

class DatabaseError(Error):
    """Base class for errors related to the (fake) database itself."""

class DataError(DatabaseError):
    """Problems with the processed data."""

class OperationalError(DatabaseError):
    """Errors related to the database's operation."""

class IntegrityError(DatabaseError):
    """Relational-integrity violations."""

class InternalError(DatabaseError):
    """Internal database errors."""

class ProgrammingError(DatabaseError):
    """SQL programming errors (bad statements, wrong usage)."""

class NotSupportedError(DatabaseError):
    """An unsupported API method or feature was used."""
| 3,435 | 985 |
'''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## TEXT_FEATURES.PY ##
================================================
extract all text features:
nltk_features()
spacy_features()
gensim_features()
'''
import transcribe as ts
import sounddevice as sd
import soundfile as sf
import nltk_features as nf
import spacy_features as spf
import gensim_features as gf
import numpy as np
import os, json
def sync_record(filename, duration, fs, channels):
    """Record audio from the default input device and write it to a file.

    Blocks until the full `duration` (seconds) has been captured, then
    saves the samples to `filename` at sample rate `fs`.
    """
    print('recording')
    frames = int(duration * fs)
    recording = sd.rec(frames, samplerate=fs, channels=channels)
    sd.wait()  # block until the capture buffer is full
    sf.write(filename, recording, fs)
    print('done recording')
def text_featurize(filename, jsondump):
    """Transcribe an audio file and extract NLTK, spaCy and gensim features.

    Parameters
    ----------
    filename : str
        Path to the .wav file to transcribe and featurize.
    jsondump : bool
        If truthy, dump the resulting feature dict to '<filename stem>.json'.

    Returns
    -------
    dict
        Keys: 'transcript', 'transcript type', 'nltk', 'spacy', 'gensim'.
    """
    # transcribe with sphinx
    # BUG FIX: previously this always transcribed the hard-coded 'test.wav'
    # instead of the file passed in as `filename`.
    transcript = ts.transcribe_sphinx(filename)
    # now put transcript through various feature engines
    nltk_featureset, nltk_labels = nf.nltk_featurize(transcript)
    spacy_featureset, spacy_labels = spf.spacy_featurize(transcript)
    # make gensim embedding on alice and wonderland text
    # (or any text corpus you'd like); train it once and reuse thereafter
    modelname = 'alice.pickle'
    if modelname not in os.listdir():
        with open('alice.txt') as corpus:
            text = corpus.read()
        gf.w2v_train(text, 100, modelname)
    gensim_featureset = gf.sentence_embedding(transcript, 100, modelname)
    data = {
        'transcript': transcript,
        'transcript type': 'sphinx',
        'nltk': np.array(nltk_featureset).tolist(),
        'spacy': np.array(spacy_featureset).tolist(),
        'gensim': np.array(gensim_featureset).tolist(),
    }
    if jsondump:
        # strip the '.wav' extension and write alongside the audio file
        jsonfilename = filename[0:-4] + '.json'
        with open(jsonfilename, 'w') as jsonfile:
            json.dump(data, jsonfile)
    return data
# # record and get transcript
# if 'test.wav' not in os.listdir():
# sync_record('test.wav', 10, 44100, 2)
# # now extract all text features
# data=text_featurize('test.wav', True)
| 4,081 | 1,217 |
from typing import List, Tuple, Union
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from ..utils._checks import (
_check_participant,
_check_participants,
_check_type,
)
from ..utils._docs import fill_doc
@fill_doc
def boxplot_scores_evolution(
    csv,
    participant: Union[int, list, tuple],
    scores: int = 10,
    swarmplot: bool = False,
    figsize: Tuple[float, float] = (10.0, 5.0),
):
    """Plot the NFB scores as boxplots.
    X: Session
    Y: Score
    Hue: Score ID (0 to 10)
    The NFB scores displayed are logged in a .csv file with the syntax:
    [participant, session, model_idx, online_idx, transfer, scores [...]]
    The evolution of the NFB score during the 15 sessions is plotted for the
    given participant with boxplots. Scores from different part of the NFB runs
    can be displayed by providing the argument scores. By default, the last
    score corresponding to the total score obtained on a given run is used.
    Parameters
    ----------
    csv : path-like
        Path to the 'scores_logs.csv' file to read.
    %(participant)s
    scores : int | list of int
        ID of the non-regulation/regulation cycle score to include, or list
        of the IDs to include. Each cycle is displayed as a separate boxplot.
        Must be between 1 and 10 included.
    swarmplot : bool, optional
        If True, plots the datapoints on top of the boxes with a swarmplot.
    %(plt.figsize)s
    Returns
    -------
    f : Figure
    ax : Axes
    """
    # Validate every argument before touching the file.
    _check_participant(participant)
    scores = _check_scores_idx(scores)
    _check_type(swarmplot, (bool,), item_name="swarmplot")
    _check_type(figsize, (tuple,), item_name="figsize")
    # Select data
    df = pd.read_csv(csv)
    df = df.loc[df["Participant"] == int(participant)]
    # Wide -> long reshape: one row per (Session, Score ID) observation so
    # seaborn can group boxes by session and hue by score ID.
    df = pd.melt(
        df,
        id_vars="Session",
        value_vars=[f"Score {k}" for k in scores],
        var_name="Score ID",
        value_name="Score",
    )
    # Plot
    f, ax = plt.subplots(1, 1, figsize=tuple(figsize))
    sns.boxplot(
        x="Session", y="Score", hue="Score ID", data=df, palette="muted", ax=ax
    )
    if swarmplot:
        sns.swarmplot(
            x="Session",
            y="Score",
            hue="Score ID",
            data=df,
            size=3,
            color="black",
            ax=ax,
        )
    # The swarmplot adds a second set of legend entries; keep only the first
    # len(scores) handles so each Score ID appears exactly once.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[: len(scores)], labels=labels[: len(scores)])
    return f, ax
@fill_doc
def boxplot_scores_between_participants(
    csv,
    participants: Union[int, list, tuple],
    scores: int = 10,
    swarmplot: bool = False,
    figsize: Tuple[float, float] = (10.0, 5.0),
):
    """Plot the NFB scores as boxplots.
    X: Participant
    Y: Score
    Hue: Score ID (0 to 10)
    The NFB scores displayed are logged in a .csv file with the syntax:
    [participant, session, model_idx, online_idx, transfer, scores [...]]
    The scores obtained during the 15 sessions are plotted in a single
    boxplot for each participant.
    Parameters
    ----------
    csv : path-like
        Path to the 'scores_logs.csv' file to read.
    %(participant)s
    scores : int | list of int
        ID of the non-regulation/regulation cycle score to include, or list
        of the IDs to include. Each cycle is displayed as a separate boxplot.
        Must be between 1 and 10 included.
    swarmplot : bool, optional
        If True, plots the datapoints on top of the boxes with a swarmplot.
    %(plt.figsize)s
    Returns
    -------
    f : Figure
    ax : Axes
    """
    # Validate every argument before touching the file.
    participants = _check_participants(participants)
    scores = _check_scores_idx(scores)
    _check_type(swarmplot, (bool,), item_name="swarmplot")
    _check_type(figsize, (tuple,), item_name="figsize")
    # Select data
    df = pd.read_csv(csv)
    # Wide -> long reshape: one row per (Participant, Score ID) observation.
    df = pd.melt(
        df,
        id_vars="Participant",
        value_vars=[f"Score {k}" for k in scores],
        var_name="Score ID",
        value_name="Score",
    )
    # Filter after melting so the participant column is still present.
    df = df[df["Participant"].isin(participants)]
    # Plot
    f, ax = plt.subplots(1, 1, figsize=tuple(figsize))
    sns.boxplot(
        x="Participant",
        y="Score",
        hue="Score ID",
        data=df,
        palette="muted",
        ax=ax,
    )
    if swarmplot:
        sns.swarmplot(
            x="Participant",
            y="Score",
            hue="Score ID",
            data=df,
            size=3,
            color="black",
            ax=ax,
        )
    # The swarmplot adds a second set of legend entries; keep only the first
    # len(scores) handles so each Score ID appears exactly once.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[: len(scores)], labels=labels[: len(scores)])
    return f, ax
def _check_scores_idx(scores: Union[int, list, tuple]) -> List[int]:
    """Check that the scores passed are valid.

    Parameters
    ----------
    scores : int | list | tuple
        Score cycle ID or collection of IDs; each must lie in [1, 10].

    Returns
    -------
    list of int
        The validated IDs as a list.

    Raises
    ------
    ValueError
        If any ID falls outside the [1, 10] range.
    """
    _check_type(scores, ("int", list, tuple), item_name="scores")
    if isinstance(scores, int):
        scores = [scores]
    elif isinstance(scores, tuple):
        scores = list(scores)
    for score in scores:
        _check_type(score, ("int",), item_name="score")
    # Explicit raise instead of a bare assert: asserts are stripped when
    # Python runs with -O, silently disabling the range validation.
    if not all(1 <= score <= 10 for score in scores):
        raise ValueError("scores must be between 1 and 10 included.")
    return scores
| 5,178 | 1,712 |
class Everland:
    """A hotel that tracks its rooms' consumption, billing and occupancy."""

    def __init__(self):
        # Rooms currently occupied; evicted rooms are removed in pay().
        self.rooms = []

    def add_room(self, room):
        """Register a room in the hotel."""
        self.rooms.append(room)

    def get_monthly_consumptions(self):
        """Return the total monthly cost (expenses + room cost) of all rooms."""
        total_consumption = sum(r.expenses + r.room_cost for r in self.rooms)
        return f"Monthly consumption: {total_consumption:.2f}$."

    def pay(self):
        """Charge every room its monthly total; evict rooms that cannot pay.

        Returns a newline-joined report with one line per room.
        """
        result = []
        # BUG FIX: iterate over a snapshot of the list. Removing a room from
        # self.rooms while iterating it skipped the room that came right
        # after each evicted one, so some rooms were never billed.
        for room in list(self.rooms):
            total_cost = room.expenses + room.room_cost
            if room.budget >= total_cost:
                room.budget -= total_cost
                result.append(f"{room.family_name} paid {total_cost:.2f}$ and"
                              f" have {room.budget:.2f}$ left.")
            else:
                self.rooms.remove(room)
                result.append(f"{room.family_name} does not have enough"
                              f" budget and must leave the hotel.")
        return "\n".join(result)

    def status(self):
        """Return a multi-line report: total population, then per-room details."""
        result = ""
        result += f"Total population: {sum([r.members_count for r in self.rooms])}\n"
        for r in self.rooms:
            result += f"{r.family_name} with {r.members_count} members. Budget: {r.budget:.2f}$, " \
                      f"Expenses: {r.expenses:.2f}$\n"
            if r.children:
                # Children are billed per day; report a 30-day month.
                for idx, c in enumerate(r.children, start=1):
                    result += f"--- Child {idx} monthly cost: {c.cost * 30:.2f}$\n"
            if hasattr(r, "appliances"):
                # Only some room types own appliances.
                total_expenses = sum(a.get_monthly_expense() for a in r.appliances)
                result += f"--- Appliances monthly cost: {total_expenses:.2f}$\n"
        return result
| 1,755 | 525 |
# Generated by Django 3.1.6 on 2021-02-25 05:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``core`` app.

    Creates the course catalogue (CourseCategory, Course, Section, Session,
    Lesson plus typed lesson subclasses), the assessment models (Assessment,
    Question/Answer plus typed subclasses, AssessmentResult), the user
    profile tables (Teacher, Student, Admin), and the join tables
    (SessionSection, SectionTeacher, Enrolment, AssessmentQuestion).
    Auto-generated by Django 3.1.6 — do not hand-edit operations.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        # --- Base tables ---
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mark', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Assessment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('contribution', models.FloatField()),
                ('start_date', models.DateTimeField()),
                ('end_date', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('code', models.CharField(max_length=10)),
                ('description', models.TextField()),
                ('curriculum', models.TextField(blank=True, null=True)),
                ('image', models.ImageField(upload_to='course_images/')),
                ('visible', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='CourseCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Lesson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mark', models.FloatField()),
                # polymorphic_ctype + the options below match the pattern
                # used by django-polymorphic — confirm against models.py.
                ('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_core.question_set+', to='contenttypes.contenttype')),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('start_date', models.DateTimeField()),
                ('end_date', models.DateTimeField()),
                ('visible', models.BooleanField()),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.course')),
            ],
        ),
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
                ('serial', models.IntegerField()),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.course')),
            ],
        ),
        migrations.CreateModel(
            name='Testimonial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('testimonial', models.CharField(max_length=4000)),
                ('name', models.CharField(max_length=64)),
                ('identity', models.CharField(max_length=128)),
                ('serial', models.IntegerField()),
            ],
        ),
        # --- Multi-table-inheritance children (one-to-one parent links) ---
        migrations.CreateModel(
            name='AudioLesson',
            fields=[
                ('lesson_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.lesson')),
                ('embed', models.TextField()),
                ('audio_type', models.CharField(choices=[('SOUNDCLOUD', 'SoundCloud')], default='SOUNDCLOUD', max_length=32)),
            ],
            bases=('core.lesson',),
        ),
        migrations.CreateModel(
            name='BroadAnswer',
            fields=[
                ('answer_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.answer')),
                ('answer', models.TextField()),
            ],
            bases=('core.answer',),
        ),
        migrations.CreateModel(
            name='BroadQuestion',
            fields=[
                ('question_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.question')),
                ('question', models.CharField(max_length=512)),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=('core.question',),
        ),
        migrations.CreateModel(
            name='MultipleChoiceAnswer',
            fields=[
                ('answer_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.answer')),
                ('answer', models.CharField(max_length=128)),
            ],
            bases=('core.answer',),
        ),
        migrations.CreateModel(
            name='MultipleChoiceQuestion',
            fields=[
                ('question_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.question')),
                ('question', models.CharField(max_length=512)),
                ('choice1', models.CharField(max_length=128)),
                ('choice2', models.CharField(max_length=128)),
                ('choice3', models.CharField(max_length=128)),
                ('choice4', models.CharField(max_length=128)),
                ('correct_choice', models.CharField(max_length=128)),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=('core.question',),
        ),
        migrations.CreateModel(
            name='NoteLesson',
            fields=[
                ('lesson_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.lesson')),
                ('note', models.FileField(upload_to='course_files/')),
            ],
            bases=('core.lesson',),
        ),
        migrations.CreateModel(
            name='ShortAnswer',
            fields=[
                ('answer_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.answer')),
                ('answer', models.CharField(max_length=2048)),
            ],
            bases=('core.answer',),
        ),
        migrations.CreateModel(
            name='ShortQuestion',
            fields=[
                ('question_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.question')),
                ('question', models.CharField(max_length=512)),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=('core.question',),
        ),
        migrations.CreateModel(
            name='VideoLesson',
            fields=[
                ('lesson_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.lesson')),
                ('link', models.CharField(max_length=512)),
                ('video_type', models.CharField(choices=[('YOUTUBE', 'YouTube')], default='YOUTUBE', max_length=32)),
            ],
            bases=('core.lesson',),
        ),
        # --- User profile tables (one-to-one with the auth user) ---
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('gender', models.CharField(choices=[('MALE', 'Male'), ('FEMALE', 'Female')], max_length=6)),
                ('date_of_birth', models.DateTimeField(null=True)),
                ('biography', models.TextField(null=True)),
                ('profile_picture', models.ImageField(null=True, upload_to='')),
                ('teacher_type', models.CharField(choices=[('MAIN', 'Main'), ('ASSISTANT', 'Assistant')], max_length=32)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('gender', models.CharField(choices=[('MALE', 'Male'), ('FEMALE', 'Female')], max_length=6)),
                ('date_of_birth', models.DateTimeField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # --- Deferred FKs and join tables (added after both sides exist) ---
        migrations.AddField(
            model_name='question',
            name='session',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.session'),
        ),
        migrations.AddField(
            model_name='lesson',
            name='session',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.session'),
        ),
        migrations.AddField(
            model_name='course',
            name='course_category',
            # PROTECT: a category with courses cannot be deleted.
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.coursecategory'),
        ),
        migrations.CreateModel(
            name='AssessmentResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_mark', models.FloatField()),
                ('assessment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.assessment')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.student')),
            ],
        ),
        migrations.AddField(
            model_name='assessment',
            name='section',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.section'),
        ),
        migrations.AddField(
            model_name='answer',
            name='assessment',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.assessment'),
        ),
        migrations.AddField(
            model_name='answer',
            name='student',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.student'),
        ),
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='SessionSection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('visible', models.BooleanField()),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.section')),
                ('session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.session')),
            ],
            options={
                'unique_together': {('section', 'session')},
            },
        ),
        migrations.CreateModel(
            name='SectionTeacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.section')),
                ('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.teacher')),
            ],
            options={
                'unique_together': {('section', 'teacher')},
            },
        ),
        migrations.CreateModel(
            name='Enrolment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('final_grade', models.FloatField()),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.section')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.student')),
            ],
            options={
                'unique_together': {('section', 'student')},
            },
        ),
        migrations.CreateModel(
            name='AssessmentQuestion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('assessment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.assessment')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.question')),
            ],
            options={
                'unique_together': {('assessment', 'question')},
            },
        ),
    ]
| 14,770 | 4,145 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
import sys
from pprint import pformat
# Log levels (ascending severity).
DEBUG = 0
INFO = 1
WARNING = 2
ERROR = 3
# Shell colors (ANSI: 3X foreground, 4X background).
DEFAULT = 0
RED = 1
GREEN = 2
YELLOW = 3
BLUE = 4
PINK = 5
CYAN = 6
GRAY = 7
# Shell style codes. NOTE: DEFAULT is deliberately rebound; both the color
# and style "default" share the value 0, so this is harmless.
DEFAULT = 0
BOLD = 1
UNDERLINED = 4
BLINKING = 5
HIGHLIGHTED = 7


class Log:
    """Minimal leveled logger with optional ANSI color output.

    All state is class-level; use the classmethods directly, e.g.
    ``Log.info("hello")``. Nothing is printed unless ``Log.enable_print``
    is set to True and the message level is >= ``Log.log_level``.
    """

    # Printing is disabled by default.
    enable_print = False
    # TODO: The following static paramaters should be load from ~/.minifoldrc
    # TODO: dark / light colors
    with_color = True
    # Minimum level that gets printed (DEBUG=0 .. ERROR=3).
    log_level = 0

    # Human-readable prefix per message level.
    message_header = {
        DEBUG: "DEBUG",
        INFO: "INFO",
        WARNING: "WARNING",
        ERROR: "ERROR",
    }

    # ANSI foreground color per message level.
    message_color = {
        DEBUG: CYAN,
        INFO: GREEN,
        WARNING: YELLOW,
        ERROR: RED,
    }

    @staticmethod
    def start_style(
        fg_color: int = None,
        bg_color: int = None,
        styles: list = None
    ) -> str:
        """Build the ANSI escape sequence enabling the given styling.

        Args:
            fg_color: Foreground color code (e.g. RED), or None.
            bg_color: Background color code, or None.
            styles: Optional list of style codes (e.g. [BOLD, UNDERLINED]).

        Returns:
            The escape sequence, or "" when no styling was requested.
        """
        # None default instead of a mutable default argument.
        styling = list()
        if fg_color is not None:
            styling.append("3%d" % fg_color)
        if bg_color is not None:
            styling.append("4%d" % bg_color)
        if styles:
            # BUG FIX: style codes are ints (BOLD, ...); str.join() would
            # raise TypeError on them, so stringify each code first.
            styling += [str(style) for style in styles]
        return "\033[%sm" % ";".join(styling) if styling else ""

    @staticmethod
    def default_style() -> str:
        """Return the ANSI sequence that resets all styling."""
        return "\033[0m"

    @classmethod
    def print(cls, message_type: int, message: str, file=sys.stderr):
        """Print `message` with its level header and color, if enabled."""
        if cls.enable_print and message_type >= cls.log_level:
            color = cls.message_color[message_type]
            header = cls.message_header[message_type]
            print(
                "%(start_style)s%(message)s%(end_style)s" % {
                    "start_style": cls.start_style(fg_color=color),
                    # Non-string payloads are pretty-printed.
                    "message": " ".join([header, message if isinstance(message, str) else pformat(message)]),
                    "end_style": cls.default_style()
                },
                file=file
            )

    @classmethod
    def debug(cls, s): cls.print(DEBUG, s)

    @classmethod
    def info(cls, s): cls.print(INFO, s)

    @classmethod
    def warning(cls, s): cls.print(WARNING, s)

    @classmethod
    def error(cls, s): cls.print(ERROR, s)
| 2,448 | 901 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-18 11:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``cardgame_channels_app``.

    Creates Card, Game and Player, linked many-to-many through the
    CardGamePlayer join table which also carries the card's in-game status.
    Auto-generated by Django 1.11.7 — do not hand-edit operations.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Card',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('type', models.CharField(blank=True, max_length=255, null=True)),
                ('text', models.TextField(blank=True, null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name_plural': 'cards',
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='CardGamePlayer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Tracks where the card currently is (default: a player's hand).
                ('status', models.CharField(db_index=True, default='hand', max_length=30)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
                ('card', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cardgame_channels_app.Card')),
            ],
            options={
                'verbose_name': 'Card Game Player',
                'verbose_name_plural': 'Card Game Players',
                'ordering': ['date_created'],
            },
        ),
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(db_index=True, max_length=255, unique=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
                ('cards', models.ManyToManyField(through='cardgame_channels_app.CardGamePlayer', to='cardgame_channels_app.Card')),
            ],
            options={
                'verbose_name_plural': 'games',
                'ordering': ['code'],
            },
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('status', models.CharField(default='waiting', max_length=20)),
                ('score', models.IntegerField(default=0)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
                ('cards', models.ManyToManyField(through='cardgame_channels_app.CardGamePlayer', to='cardgame_channels_app.Card')),
                ('game', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='players', to='cardgame_channels_app.Game')),
            ],
            options={
                'verbose_name_plural': 'players',
                'ordering': ['name'],
            },
        ),
        # Remaining FKs/M2Ms on the join table, added once all sides exist.
        migrations.AddField(
            model_name='cardgameplayer',
            name='game',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cardgame_channels_app.Game'),
        ),
        migrations.AddField(
            model_name='cardgameplayer',
            name='player',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cardgame_channels_app.Player'),
        ),
        migrations.AddField(
            model_name='card',
            name='games',
            field=models.ManyToManyField(through='cardgame_channels_app.CardGamePlayer', to='cardgame_channels_app.Game'),
        ),
        migrations.AddField(
            model_name='card',
            name='players',
            field=models.ManyToManyField(through='cardgame_channels_app.CardGamePlayer', to='cardgame_channels_app.Player'),
        ),
        # A given card may appear at most once per game.
        migrations.AlterUniqueTogether(
            name='cardgameplayer',
            unique_together=set([('card', 'game')]),
        ),
    ]
| 4,515 | 1,276 |
from bs4 import BeautifulSoup
import re
import urllib
import pickle as pkl
def cleanhtml(raw_html):
    """Strip HTML tags and scrub unicode/escape artifacts from scraped text.

    The input is built from str()-ed BeautifulSoup nodes, some of which were
    ascii-encoded with errors='backslashreplace'. Two kinds of junk appear:
    regex-level escapes (the pattern '\\xa0' matches an actual NBSP char) and
    literal backslash sequences (the two characters '\\' + 'xa0' in the text).
    Both are handled below, in the same order as the original pipeline.
    """
    # Drop anything that looks like an HTML tag.
    text = re.sub('<.*?>', '', raw_html)
    # Regex-level escapes: these patterns match real unicode characters.
    for pattern in ('\\xa0', '\\u2019s'):   # NBSP, right-quote + 's'
        text = re.sub(pattern, '', text)
    text = re.sub('\\u2019ll', ' ', text)   # right-quote + 'll'
    # Literal backslash sequences left by backslashreplace encoding.
    for junk in ('\\xa0', '\\u2014', '\\u201c', '\\u201d', '\\u2013'):
        text = text.replace(junk, ' ')
    return text
# Load the raw scraped data: {course_number: {attribute: bs4 nodes/strings}}.
unclean_dat = pkl.load(open('omscs_website_data.p', 'rb'))
clean_dat = {}
for course_number in unclean_dat.keys():
    curr_unclean_dat = unclean_dat[course_number]
    curr_clean_dat = {}
    for attribute in curr_unclean_dat.keys():
        if attribute == 'Instructor':
            # Instructor is stored as a sequence; skip the course's
            # attribute entirely if it is missing or empty.
            try:
                instructor_name = str(curr_unclean_dat[attribute][0])
            except:
                continue
            curr_clean_dat[attribute] = instructor_name
        elif attribute == 'Name':
            try:
                class_name = str(curr_unclean_dat[attribute])
            except:
                continue
            curr_clean_dat[attribute] = class_name
        elif attribute in ['Overview', 'Prerequisites', 'Grading', 'Technical', 'Reading']:
            # These attributes are lists of bs4 nodes; the first entry is
            # dropped (presumably a section header — TODO confirm).
            final_string= ''
            unclean_list = curr_unclean_dat[attribute]
            unclean_list.pop(0)
            for item in unclean_list:
                try:
                    # NavigableString: ascii-encode so non-ascii chars become
                    # backslash escapes; Tag: take its first child node.
                    if str(type(item)) == "<class 'bs4.element.NavigableString'>":
                        item = item.encode('ascii', errors='backslashreplace')
                        if str(item) == '\n':
                            continue
                        final_string = final_string+ ' ' + str(item)
                    elif str(type(item)) == "<class 'bs4.element.Tag'>":
                        if item.next == '\n':
                            continue
                        final_string = final_string+ ' '+ str(item.next)
                except UnicodeEncodeError:
                    # Fallback path: same backslashreplace treatment.
                    item = item.encode('ascii', errors='backslashreplace')
                    if str(item) == '\n':
                        continue
                    final_string = final_string+ ' ' + str(item)
            # Strip tags and escape artifacts from the flattened text.
            html_cleaned_string = cleanhtml(final_string)
            curr_clean_dat[attribute] = html_cleaned_string
            continue
    clean_dat[course_number] = curr_clean_dat
# Persist the cleaned {course_number: {attribute: text}} mapping.
pkl.dump(clean_dat, open('omscs_cleaned_data.p', 'wb'))
| 2,820 | 867 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import utils
import yaml
class TestHypervisorRunC(utils.Base):
    """Testinfra checks for a runC-based hypervisor node."""

    def test_slaves_are_running(self, host):
        # `runc list -q` outputs one container id per line; empty (falsy)
        # output means no slave containers are running.
        assert host.check_output("runc list -q")

    def test_slaves_are_isolated(self, host):
        # Unless insecure slaves were explicitly enabled in the deployment
        # group vars, containers must not reach the managesf internal URL.
        group_vars = yaml.safe_load(open(
            "/var/lib/software-factory/ansible/group_vars/all.yaml"))
        if group_vars.get("enable_insecure_slaves") is not True:
            # Make sure managesf internal url access fails
            # (curl exit code 7 = failed to connect, 28 = timeout)
            assert host.run("curl --connect-timeout 3 %s" % group_vars[
                "managesf_internal_url"]).rc in (7, 28)
| 1,116 | 344 |
from django.urls import path
from .views import index, create, delete, update
# CRUD routes; `pk` selects the record for delete/update.
urlpatterns = [
    path('', index, name='index'),
    path('create/', create, name='create'),
    path('delete/<int:pk>', delete, name='delete'),
    path('update/<int:pk>', update, name='update'),
]
"""
Adapted from https://realpython.com/python-web-scraping-practical-introduction/
for the purpose of scraping https://publications.parliament.uk/pa/ld/ldjudgmt.HTML
to create an expanded HOLJ+ corpus
"""
import requests
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
class Scrape:
    """Small helper around `requests` for fetching HTML pages safely."""

    def simple_get(self, url):
        """
        Attempts to get the content at `url` by making an HTTP GET request.
        If the content-type of response is some kind of HTML/XML, return the
        text content, otherwise return None
        """
        try:
            with closing(get(url, stream=True)) as resp:
                if self.is_good_response(resp):
                    return resp.content
                return None
        except RequestException as e:
            self.log_error('Error during requests to {0} : {1}'.format(url, str(e)))
            return None

    def is_good_response(self, resp):
        """
        Returns true if the response seems to be HTML, false otherwise
        """
        # BUG FIX: use .get() with a default — servers may omit the
        # Content-Type header entirely, which previously raised KeyError.
        # (The old `is not None` check was dead code: .lower() had already
        # been called, so the value could never be None at that point.)
        content_type = resp.headers.get('Content-Type', '').lower()
        return (resp.status_code == 200
                and 'html' in content_type)

    def log_error(self, e):
        """
        It is always a good idea to log errors.
        This function just prints them, but you can
        make it do anything.
        """
        print(e)
if __name__ == "__main__":
    # Smoke test: performs real network requests when run directly —
    # one URL expected to succeed, one expected to fail.
    sc = Scrape()
    print("Testing the scaper:")
    raw_html = sc.simple_get('https://realpython.com/blog/')
    assert (len(raw_html) > 0), "Error, does not get"
    no_html = sc.simple_get("https://doesnotexist.com/thereshouldbenothing/")
    assert (no_html == None), "Error, does get"
    print("Working")
| 1,831 | 529 |
# #!/usr/bin/python
import os
import numpy as np
import pandas as pd
from keras.models import load_model
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers.core import Dense, Activation, Dropout
from keras import optimizers
from matplotlib import pyplot as plt
print('Loading data...')
# Each csv row holds an emotion label plus an image given as a
# space-separated string of pixel intensities (FER2013 format).
data = pd.read_csv('fer2013.csv')
#data = pd.read_csv('testdata.csv')
im = data['pixels']
im_list = []
print('Pre-processing data...')
# Parse each pixel string into a list of ints.
for i in range(len(im)):
    im_list.append(list(map(int,im[i].split())))
X_train = np.asarray(im_list).astype('float32')
# One-hot encode the emotion labels.
y_train = np_utils.to_categorical(np.asarray(data['emotion']))
# Rescale pixel values from [0, 255] to [-1, 1].
X_train *= 2.0/255
X_train -= 1
input_dim = X_train.shape[1]
nb_classes = y_train.shape[1]
# Parameters were chosen from most commonly used and sometimes at random
# Further development of the model may be needed
print('Making model')
# Fully-connected net: widths taper 1000 -> 10, ReLU throughout, light
# dropout after every hidden layer, softmax output over the classes.
model = Sequential()
# Dense define number of nodes
model.add(Dense(1000, input_dim=input_dim))
# Activation defines the output
model.add(Activation('relu'))
# Dropout to avoid overfitting.
model.add(Dropout(0.15))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
print(model.summary())
# SGD with decay + Nesterov momentum; NOTE(review): `lr=` is the legacy
# argument name in older Keras versions — confirm against the pinned version.
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])
print("Training...")
# 10% of the data is held out for validation during training.
model.fit(X_train, y_train, epochs=100, validation_split=0.1, verbose=2)
# NOTE(review): evaluating on the training set measures fit, not
# generalization — a held-out test split would be more informative.
scores = model.evaluate(X_train, y_train, verbose=0)
print(scores)
# save model to HDF5
model.save('model.h5')
print("Saved model to disk")
| 1,881 | 723 |
from socket import socket, gaierror, getservbyport, AF_INET, SOCK_STREAM, setdefaulttimeout
from tqdm import tqdm
from datetime import datetime
def detect_port_services(ip, range_start, range_end):
    """Scan TCP ports [range_start, range_end] on `ip` and identify services.

    For each open port, records the well-known service name and a banner
    grabbed from the peer. Results are appended to the log file via
    log_port_services() and also returned.

    Returns
    -------
    dict: {port: (service_name, banner_text)} for every open port found.
    """
    port_services = {}
    port_detecting_progress = tqdm(range(range_start, range_end + 1))
    try:
        for port in port_detecting_progress:
            port_detecting_progress.set_description('checking port {}'.upper().format(port))
            setdefaulttimeout(2)
            s = socket(AF_INET, SOCK_STREAM)
            try:
                result = s.connect_ex((ip, port))
                if result != 0:
                    # BUG FIX: previously a banner grab was attempted (and a
                    # service recorded) even when the connection had failed.
                    continue
                # trying to get more information about port service
                try:
                    s.send(b'WhoAreYou')
                    banner = s.recv(100)
                except IOError:
                    banner = b''
                # BUG FIX: getservbyport raises OSError for ports with no
                # registered service, which used to abort the whole scan.
                try:
                    service_name = getservbyport(port)
                except OSError:
                    service_name = 'unknown'
                port_services[port] = (service_name, banner.replace(b'\r\n', b'').decode('utf-8'))
            finally:
                s.close()  # always release the socket, open or not
        log_port_services(ip, range_start, range_end, port_services)
    except KeyboardInterrupt:
        print("\ncanceled...".upper())
    except gaierror:
        print("\nHostname Could Not Be Resolved".upper())
    return port_services
def log_port_services(ip, range_start, range_end, port_services):
    """Append a timestamped scan report to results/result_port_services.txt.

    Parameters
    ----------
    ip : str
        The scanned host.
    range_start, range_end : int
        The scanned port range (inclusive), echoed into the report header.
    port_services : dict
        {port: (service_name, banner_text)} as built by detect_port_services.

    Prints an instruction instead of raising if the results directory
    does not exist.
    """
    try:
        with open("results/result_port_services.txt", "a") as file:
            file.write('@ {}'.upper().format(datetime.now()))
            file.write('\nhost {} open ports\' services from {} to {}:'.upper().format(ip, range_start, range_end))
            # Plain loop instead of a list comprehension used only for its
            # side effects.
            for port, (service, banner) in port_services.items():
                detail = '' if not banner else '\n\t\t({})\n'.format(banner)
                file.write('\n    {}:\t{} {}'.format(port, service.upper(), detail))
            if not port_services:
                file.write('\n× no open ports was founded!'.upper())
            file.write('\n----------------------------------------------------\n')
    except FileNotFoundError:
        print('PLEASE CREATE \"/results/result_detect_open_ports.txt\" AND TRY AGAIN.')
if __name__ == '__main__':
    # Interactive entry point: prompts for a target host and a port range,
    # then runs the scan (network I/O).
    detect_port_services(
        ip=input('TARGET IP ADDRESS: '),
        range_start=int(input('START OF RANGE : ')),
        range_end=int(input('END OF RANGE : ')),
    )
| 2,487 | 726 |
# -*- coding: utf-8 -*-
import json
from collections import OrderedDict
from typing import List
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
from dash import dash
from dash.dependencies import Input, Output, State
from zvdata import IntervalLevel
from zvdata.app import app
from zvdata.chart import Drawer
from zvdata.domain import global_providers, get_schemas, get_schema_by_name, get_schema_columns
from zvdata.normal_data import NormalData, IntentType
from zvdata.reader import DataReader
from zvdata.utils.pd_utils import df_is_not_null
from zvdata.utils.time_utils import now_pd_timestamp, TIME_FORMAT_DAY
# Module-level cache of the most recently loaded/joined DataFrame: written by
# update_data_table and read by update_table_and_graph (Dash callbacks share
# state through this global).
current_df = None
# Top-level page layout: selectors (provider / schema / level / columns),
# filters (codes, date range), a load button, the data table, per-column
# chart settings, intent/chart selectors and the chart container.
layout = html.Div(
    [
        html.Div(
            [
                # provider selector
                dcc.Dropdown(
                    id='provider-selector',
                    placeholder='select provider',
                    options=[{'label': provider, 'value': provider} for provider in
                             global_providers]),
                # schema selector
                dcc.Dropdown(id='schema-selector', placeholder='select schema'),
                # level selector
                dcc.Dropdown(id='level-selector', placeholder='select level',
                             options=[{'label': level.value, 'value': level.value} for level in
                                      IntervalLevel],
                             value=IntervalLevel.LEVEL_1DAY.value),
                # column selector
                html.Div(id='schema-column-selector-container', children=None),
                # selected properties; each real value is a JSON-encoded
                # {'provider', 'schema', 'column'} dict (see update_selected_properties)
                dcc.Dropdown(
                    id='properties-selector',
                    options=[
                        {'label': 'undefined', 'value': 'undefined'}
                    ],
                    value='undefined',
                    multi=True
                ),
                # codes filter
                dcc.Input(id='input-code-filter', type='text', placeholder='input codes',
                          style={'width': '400px'}),
                # time range filter
                dcc.DatePickerRange(
                    id='date-picker-range',
                    start_date='2009-01-01',
                    end_date=now_pd_timestamp(),
                    display_format=TIME_FORMAT_DAY
                ),
                # load data for table
                html.Button('load data', id='btn-load-data', n_clicks_timestamp=0),
                # table container
                html.Div(id='data-table-container', children=None),
                # selected properties
                html.Label('setting y_axis and chart type for the columns:'),
                # col setting container
                html.Div(id='col-setting-container', children=dash_table.DataTable(
                    id='col-setting-table',
                    columns=[
                        {'id': 'property', 'name': 'property', 'editable': False},
                        {'id': 'y_axis', 'name': 'y_axis', 'presentation': 'dropdown'},
                        {'id': 'chart', 'name': 'chart', 'presentation': 'dropdown'}
                    ],
                    dropdown={
                        'y_axis': {
                            'options': [
                                {'label': i, 'value': i}
                                for i in ['y1', 'y2', 'y3', 'y4', 'y5']
                            ]
                        },
                        'chart': {
                            'options': [
                                {'label': chart_type.value, 'value': chart_type.value}
                                for chart_type in NormalData.get_charts_by_intent(IntentType.compare_self)
                            ]
                        }
                    },
                    editable=True
                ), ),
                html.Div(id='table-type-label', children=None),
                # intent selector and chart selector, rendered side by side
                html.Div(
                    [
                        html.Div([dcc.Dropdown(id='intent-selector')],
                                 style={'width': '50%', 'display': 'inline-block'}),
                        html.Div([dcc.Dropdown(id='chart-selector')],
                                 style={'width': '50%', 'display': 'inline-block'})
                    ]
                ),
                # chart output container
                html.Div(id='chart-container', children=None)
            ])
    ]
)
@app.callback(
    Output('schema-selector', 'options'),
    [Input('provider-selector', 'value')])
def update_schema_selector(provider):
    """Populate the schema dropdown with the schemas registered for *provider*."""
    if not provider:
        raise dash.exceptions.PreventUpdate()
    schemas = get_schemas(provider=provider)
    return [{'label': schema.__name__, 'value': schema.__name__} for schema in schemas]
@app.callback(
    Output('schema-column-selector-container', 'children'),
    [Input('schema-selector', 'value')],
    state=[State('provider-selector', 'value')])
def update_column_selector(schema_name, provider):
    """Render a multi-select dropdown listing the chosen schema's columns."""
    if not (provider and schema_name):
        raise dash.exceptions.PreventUpdate()
    schema = get_schema_by_name(name=schema_name)
    column_options = [{'label': col, 'value': col} for col in get_schema_columns(schema=schema)]
    return dcc.Dropdown(
        id='schema-column-selector',
        options=column_options,
        # pre-select the schema's important columns
        value=get_schema_by_name(name=schema_name).important_cols(),
        multi=True
    )
@app.callback(
    [Output('properties-selector', 'options'),
     Output('properties-selector', 'value')],
    [Input('schema-column-selector', 'value')],
    state=[State('provider-selector', 'value'),
           State('schema-selector', 'value'),
           State('properties-selector', 'options'),
           State('properties-selector', 'value')])
def update_selected_properties(selected_cols, provider, schema_name, options, value):
    """Append the newly selected columns (JSON-encoded with their provider and
    schema) to the properties selector, dropping the 'undefined' placeholder
    on the first real selection."""
    if not (selected_cols and provider and schema_name):
        raise dash.exceptions.PreventUpdate()
    encoded_values = [
        json.dumps({
            'provider': provider,
            'schema': schema_name,
            'column': col
        })
        for col in selected_cols
    ]
    new_options = [{'label': col, 'value': enc}
                   for col, enc in zip(selected_cols, encoded_values)]
    if 'undefined' in value:
        # first real selection replaces the placeholder entry entirely
        return new_options, encoded_values
    return options + new_options, value + encoded_values
def properties_to_readers(properties, level, codes, start_date, end_date) -> List[DataReader]:
    """Group the selected properties by (provider, schema) and build one
    DataReader per group, each reading all columns chosen for that schema."""
    grouped_columns = {}
    for prop in properties:
        group_key = (prop['provider'], prop['schema'])
        grouped_columns.setdefault(group_key, []).append(prop['column'])
    readers = []
    for (provider, schema_name), columns in grouped_columns.items():
        schema = get_schema_by_name(schema_name)
        readers.append(DataReader(data_schema=schema, provider=provider, codes=codes, level=level,
                                  columns=columns, start_timestamp=start_date, end_timestamp=end_date,
                                  time_field=schema.time_field()))
    return readers
@app.callback(
    [Output('data-table-container', 'children'),
     Output('col-setting-table', 'data'),
     Output('table-type-label', 'children'),
     Output('intent-selector', 'options'),
     Output('intent-selector', 'value')],
    [Input('btn-load-data', 'n_clicks')],
    state=[State('properties-selector', 'value'),
           State('level-selector', 'value'),
           State('input-code-filter', 'value'),
           State('date-picker-range', 'start_date'),
           State('date-picker-range', 'end_date')])
def update_data_table(n_clicks, properties, level, codes: str, start_date, end_date):
    # Load the selected properties, join everything into one DataFrame, cache
    # it in the module-level `current_df`, and render the data table, the
    # per-column setting rows and the intent options.
    if n_clicks and properties:
        props = []
        for prop in properties:
            # each property value is a JSON-encoded
            # {'provider': ..., 'schema': ..., 'column': ...} dict
            props.append(json.loads(prop))
        readers = properties_to_readers(properties=props, level=level, codes=codes, start_date=start_date,
                                        end_date=end_date)
        if readers:
            data_df = readers[0].data_df
            for reader in readers[1:]:
                if df_is_not_null(reader.data_df):
                    # outer join keeps rows present in either frame
                    data_df = data_df.join(reader.data_df, how='outer')
            # cache for update_table_and_graph, which filters/pages this frame
            global current_df
            current_df = data_df
            if not df_is_not_null(current_df):
                return 'no data,please reselect!', [], '', [
                    {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self'
            normal_data = NormalData(current_df)
            data_table = Drawer(data=normal_data).draw_data_table(id='data-table-content')
            # generate col setting table (defaults: axis y1, line chart)
            properties = normal_data.data_df.columns.to_list()
            df = pd.DataFrame(OrderedDict([
                ('property', properties),
                ('y_axis', ['y1'] * len(properties)),
                ('chart', ['line'] * len(properties))
            ]))
            # generate intents applicable to the loaded data
            intents = normal_data.get_intents()
            intent_options = [
                {'label': intent.value, 'value': intent.value} for intent in intents
            ]
            intent_value = intents[0].value
            return data_table, df.to_dict('records'), normal_data.get_table_type(), intent_options, intent_value
        else:
            return 'no data,please reselect!', [], '', [
                {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self'
    raise dash.exceptions.PreventUpdate()
@app.callback(
    [Output('chart-selector', 'options'),
     Output('chart-selector', 'value')],
    [Input('intent-selector', 'value')])
def update_chart_selector(intent):
    """Offer the chart types matching the selected intent, defaulting to the first."""
    if not intent:
        raise dash.exceptions.PreventUpdate()
    charts = NormalData.get_charts_by_intent(intent=intent)
    chart_options = [{'label': chart.value, 'value': chart.value} for chart in charts]
    return chart_options, charts[0].value
# Operator tables consumed by split_filter_part. Each entry lists the textual
# form(s) of one operator as it may appear in a filter expression; the first
# element (stripped of its trailing space) is the canonical name returned to
# the caller. Word operators keep a trailing space so they only match as
# whole words. This table matches the Dash DataTable filter_query spelling:
operators_df = [['ge ', '>='],
                ['le ', '<='],
                ['lt ', '<'],
                ['gt ', '>'],
                ['ne ', '!='],
                ['eq ', '='],
                ['contains '],
                ['datestartswith ']]
# Same operators with SQL-style symbolic spelling:
operators_sql = [['>= ', '>='],
                 ['<= ', '<='],
                 ['< ', '<'],
                 ['> ', '>'],
                 ['!= ', '!='],
                 ['== ', '='],
                 ['contains '],
                 ['datestartswith ']]
def split_filter_part(filter_part, operators=None):
    """Parse one filter expression like ``{col} ge 5`` into its parts.

    :param filter_part: a single filter clause, e.g. ``{price} ge 5``
    :param operators: operator table to match against; defaults to
        ``operators_df`` (resolved lazily so the module-level table need not
        exist at definition time)
    :return: ``(column_name, canonical_operator, value)`` where value is a
        float when it parses as one, a de-quoted string otherwise;
        ``(None, None, None)`` when no operator matches
    """
    if operators is None:
        operators = operators_df
    for operator_type in operators:
        for operator in operator_type:
            if operator in filter_part:
                name_part, value_part = filter_part.split(operator, 1)
                # column name is wrapped in braces: {name}
                name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
                value_part = value_part.strip()
                # fix: guard the length so an empty value no longer raises
                # IndexError on value_part[0]
                if (len(value_part) >= 2
                        and value_part[0] == value_part[-1]
                        and value_part[0] in ("'", '"', '`')):
                    quote = value_part[0]
                    value = value_part[1:-1].replace('\\' + quote, quote)
                else:
                    try:
                        value = float(value_part)
                    except ValueError:
                        value = value_part
                # word operators need spaces after them in the filter string,
                # but we don't want these later
                return name, operator_type[0].strip(), value
    # fix: return a tuple for type consistency with the match case above
    return None, None, None
@app.callback(
    [Output('data-table-content', "data"),
     Output('chart-container', "children")],
    [Input('data-table-content', "page_current"),
     Input('data-table-content', "page_size"),
     Input('data-table-content', "sort_by"),
     Input('data-table-content', "filter_query"),
     Input('intent-selector', "value"),
     Input('chart-selector', "value"),
     Input('col-setting-table', 'data'),
     Input('col-setting-table', 'columns')])
def update_table_and_graph(page_current, page_size, sort_by, filter, intent, chart, rows, columns):
    # Apply the table's filter query to the cached `current_df`, page the
    # result for the table, and redraw the chart from the per-column settings.
    if chart:
        # map each column to its user-chosen y axis and chart type
        property_map = {}
        for row in rows:
            property_map[row['property']] = {
                'y_axis': row['y_axis'],
                'chart': row['chart']
            }
        dff = current_df
        if filter:
            # filter clauses are joined by ' && ' in Dash's filter_query syntax
            filtering_expressions = filter.split(' && ')
            for filter_part in filtering_expressions:
                col_name, operator, filter_value = split_filter_part(filter_part)
                if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
                    # these operators match pandas series operator method names
                    dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
                elif operator == 'contains':
                    dff = dff.loc[dff[col_name].str.contains(filter_value)]
                elif operator == 'datestartswith':
                    # this is a simplification of the front-end filtering logic,
                    # only works with complete fields in standard format
                    dff = dff.loc[dff[col_name].str.startswith(filter_value)]
        # if sort_by:
        #     dff = dff.sort_values(
        #         [col['entity_id'] for col in sort_by],
        #         ascending=[
        #             col['direction'] == 'asc'
        #             for col in sort_by
        #         ],
        #         inplace=False
        #     )
        # comparison intents get the dedicated compare drawing path
        if intent in (IntentType.compare_self.value, IntentType.compare_to_other.value):
            graph_data, graph_layout = Drawer(NormalData(dff)).draw_compare(chart=chart, property_map=property_map,
                                                                            render=None, keep_ui_state=False)
        else:
            graph_data, graph_layout = Drawer(NormalData(dff)).draw(chart=chart, property_map=property_map, render=None,
                                                                    keep_ui_state=False)
        # slice out just the rows for the current table page
        table_data = dff.iloc[page_current * page_size: (page_current + 1) * page_size
                     ].to_dict('records')
        return table_data, \
               dcc.Graph(
                   id='chart-content',
                   figure={
                       'data': graph_data,
                       'layout': graph_layout
                   }
               )
    raise dash.exceptions.PreventUpdate()
| 14,829 | 3,950 |
import numpy as np
class CTCCodec(object):
    """Greedy (best-path) CTC decoder: converts network output indices to text."""

    def __init__(self, char_label, top_k):
        # char_label: iterable of every character the network can emit.
        # top_k is stored but not used by the greedy decode below.
        self.top_k = top_k
        self.index = {}
        characters = list(char_label)
        # index 0 is reserved for the CTC 'blank' symbol
        for position, character in enumerate(characters):
            self.index[character] = position + 1
        self.char_label = ['[blank]'] + characters

    def decode(self, predicts):
        """Collapse per-timestep argmax indices into label text (CTC best path).

        predicts is indexed as (W, B, D); the argmax over D picks the most
        probable symbol per timestep, then repeats and blanks are dropped.
        """
        best_path = np.argmax(predicts, 2)                     # WBD -> WB
        flat_path = best_path.transpose(1, 0).reshape(-1)      # WB -> B*W
        decoded_chars = []
        previous = 0
        for symbol in flat_path:
            # keep a symbol only when it is not blank and not a repeat
            if symbol != 0 and symbol != previous:
                decoded_chars.append(self.char_label[symbol])
            previous = symbol
        return [''.join(decoded_chars)]
| 1,139 | 376 |
from common_libs import *
from cublas_functions import *
# Initialise the linalg backend before any linalg.* call below (linalg.inv is
# used by calculate_regression_coeffs_non_batched).
linalg.init()
def cublas_calculate_transpose_non_batched(h, a_gpu):
    """Return the transpose of the 2-D GPU array *a_gpu* as a new GPU array.

    :param h: cuBLAS handle
    :param a_gpu: GPU array of shape (m, k)
    :return: new GPU array of shape (k, m) holding a_gpu transposed
    """
    cublas_transpose = get_single_transpose_function(a_gpu)
    m, k = a_gpu.shape
    at_gpu = gpuarray.empty((k, m), a_gpu.dtype)
    # NOTE(review): this rebinds k to at_gpu.shape[0] (same value) and n is
    # never used afterwards — looks redundant, confirm before removing.
    k, n = at_gpu.shape
    # Calculate transpose
    transa = transb = 't'
    # NOTE(review): geam-style call (C = alpha*op(A) + beta*op(B)) with a_gpu
    # passed for both A and B and beta = 0 — verify against the wrapper
    # returned by get_single_transpose_function.
    cublas_transpose(h, transa, transb, m, k, 1.0, a_gpu.gpudata, k, 0.0, a_gpu.gpudata, k, at_gpu.gpudata, m)
    return at_gpu
# Matrix product, there is a batch equivalent for this function too
# Make sure it has 2 dimensions (use reshape in the case is 1d)
def cublas_matrix_product_gemm_non_batched(handle, a_gpu, b_gpu):
    """
    Compute the matrix product a_gpu @ b_gpu on the GPU via a gemm call.

    :param handle: cuBLAS handle
    :param a_gpu: left operand, 2-D GPU array of shape (n, l). Be careful to pass X here
    :param b_gpu: right operand, 2-D GPU array of shape (k, m). Xt should be here
    :return: GPU array of shape (n, m) holding the product
    :raises ValueError: if either operand is not 2-dimensional
    """
    cublas_dot = get_single_dot_function(b_gpu)
    # fix: the original checked a_gpu's rank twice and never validated b_gpu
    if len(a_gpu.shape) != 2 or len(b_gpu.shape) != 2:
        raise ValueError('Make sure the arrays are 2 dimensional')
    n, l = a_gpu.shape
    k, m = b_gpu.shape
    c_gpu = gpuarray.empty((n, m), b_gpu.dtype)
    # leading dimensions in element units (at least 1 for degenerate strides)
    lda = max(1, a_gpu.strides[0] // a_gpu.dtype.itemsize)
    ldb = max(1, b_gpu.strides[0] // b_gpu.dtype.itemsize)
    ldc = max(1, c_gpu.strides[0] // c_gpu.dtype.itemsize)
    alpha = np.float32(1.0)
    beta = np.float32(0.0)
    transa = transb = 'n'
    # operands are passed swapped (B first) — the usual trick for calling a
    # column-major BLAS on row-major buffers
    cublas_dot(handle, transb, transa, m, n, k, alpha, b_gpu.gpudata, ldb, a_gpu.gpudata, lda, beta, c_gpu.gpudata, ldc)
    return c_gpu
def cublas_matrix_product_gemm_batched(handle, as_gpu, bs_gpu):
    """
    Batched variant of the gemm matrix product.

    :param handle: cuBLAS handle
    :param as_gpu: left operands (2-D GPU array)
    :param bs_gpu: right operands (2-D GPU array)
    :return: GPU array holding the product
    :raises ValueError: if either operand is not 2-dimensional
    """
    cublas_dot = get_batched_dot_function(as_gpu)
    # fix: the original body referenced undefined names a_gpu/b_gpu (copy-paste
    # from the non-batched variant), which raised NameError on first call;
    # every use now refers to the actual parameters as_gpu/bs_gpu.
    if len(as_gpu.shape) != 2 or len(bs_gpu.shape) != 2:
        raise ValueError('Make sure the arrays are 2 dimensional')
    # n, z, l
    n, l = as_gpu.shape
    k, m = bs_gpu.shape
    c_gpu = gpuarray.empty((n, m), bs_gpu.dtype)
    lda = max(1, as_gpu.strides[0] // as_gpu.dtype.itemsize)
    ldb = max(1, bs_gpu.strides[0] // bs_gpu.dtype.itemsize)
    ldc = max(1, c_gpu.strides[0] // c_gpu.dtype.itemsize)
    alpha = np.float32(1.0)
    beta = np.float32(0.0)
    transa = transb = 'n'
    # operands swapped (B first), matching the non-batched variant above
    cublas_dot(handle, transb, transa, m, n, k, alpha, bs_gpu.gpudata, ldb, as_gpu.gpudata, lda, beta, c_gpu.gpudata, ldc)
    return c_gpu
# TODO: Fix this function, like linalg.inv
def cublas_single_matrix_inversion_non_batched(h, a_gpu, overwrite=False, ipiv_gpu=None):
    """
    Invert the square GPU matrix *a_gpu* via cuSOLVER getrf/getrs (LU factorisation).

    :param h: cuSOLVER handle
    :param a_gpu: (n, n) GPU matrix to invert
    :param overwrite: if True, write the inverse back into a_gpu and return it
    :param ipiv_gpu: optional pre-allocated (n, 1) int32 pivot buffer
    :return: GPU array holding the inverse (a_gpu itself when overwrite=True)
    :raises ValueError: on factorisation failure or singular input
    :raises RuntimeError: when the triangular solve fails
    """
    (cublas_getrf, bufsize, cublas_getrs) = get_single_inverse_function(a_gpu)
    data_type = a_gpu.dtype
    n = a_gpu.shape[0]
    if ipiv_gpu is None:
        ipiv_gpu = gpuarray.empty((n, 1), np.int32)
    try:
        in_gpu = a_gpu if overwrite else a_gpu.copy()
        Lwork = bufsize(h, n, n, in_gpu.gpudata, n)
        Work = gpuarray.empty(Lwork, data_type)
        devInfo = gpuarray.empty(1, np.int32)
        cublas_getrf(h, n, n, in_gpu.gpudata, n, Work.gpudata, ipiv_gpu.gpudata, devInfo.gpudata)
    except cusolver.CUSOLVER_ERROR as e:
        raise ValueError("Error while generating inverse of the matrix") from e
    d = devInfo.get()[0]
    if d != 0:
        raise ValueError("Singular matrix or wrong params")
    try:
        # solve A X = I using the LU factors: the identity is the RHS
        b_gpu = linalg.eye(n, data_type)
        cublas_getrs(h, cublas._CUBLAS_OP['n'], n, n,
                     in_gpu.gpudata, n, ipiv_gpu.gpudata, b_gpu.gpudata, n,
                     devInfo.gpudata)
        # Since CUSOLVER's getrs functions save their output in b_gpu, we
        # need to copy it back to the input matrix if overwrite is requested:
        if overwrite:
            a_gpu.set(b_gpu)
            return a_gpu
        else:
            return b_gpu
    except cusolver.CUSOLVER_ERROR as e:
        # fix: the original raised a plain string (TypeError in Python 3) and
        # used the Python-2-only e.message attribute; the dead `return h`
        # after this handler has been removed.
        raise RuntimeError("Error with cusolver {}".format(e)) from e
def calculate_regression_coeffs_non_batched(handle, x_gpu, y_gpu):
    """Solve ordinary least squares on the GPU: b = (X^T X)^-1 X^T y."""
    x_transposed = cublas_calculate_transpose_non_batched(handle, x_gpu)
    gram_matrix = cublas_matrix_product_gemm_non_batched(handle, x_transposed, x_gpu)
    moment_vector = cublas_matrix_product_gemm_non_batched(handle, x_transposed, y_gpu)
    # inversion delegated to cusolver (see cublas_single_matrix_inversion_non_batched's TODO)
    gram_inverse = linalg.inv(gram_matrix, lib="cusolver")
    return cublas_matrix_product_gemm_non_batched(handle, gram_inverse, moment_vector)
def calculate_predictions_from_model_non_batched(handle, x_gpu, b_coefficients_gpu):
    """Predict y = X @ b on the GPU for an already-fitted coefficient vector."""
    predictions = cublas_matrix_product_gemm_non_batched(handle, x_gpu, b_coefficients_gpu)
    return predictions
import os
from time import localtime, strftime
# Build paths to the pretrained weights, the solver config and a timestamped log file.
pwd = os.curdir
root_dir = pwd + './../'
weights_path = '{}data/imagenet_models/VGG16.v2.caffemodel'.format(root_dir)
cfg_path = '{}experiments/cfgs/mask_rcnn_alt_opt.yml'.format(root_dir)
log_file = "{}experiments/logs/mask_rcnn_alt_opt_{}".format(root_dir, strftime("%d-%m-%Y_%H_%M", localtime()))
# bash-only redirection: tee all subsequent output into the log file as well
exec_log_file = "exec &> >(tee -a \"{}\")".format(log_file)
exec_python = "python ../train_mask_rcnn_alt_opt.py --gpu 0 --net_name 'VGG16' --weights {} --imdb 'voc_2012_train' --cfg {}".format(weights_path, cfg_path)
# NOTE(review): the quoting wraps only the bash exec in quotes — confirm the
# composed command behaves as intended before relying on the redirection.
exec_all = "'/bin/bash -c {}' ; {}".format(exec_log_file, exec_python)
# fix: `print exec_all` is Python-2-only syntax (a SyntaxError under Python 3);
# the function-call form below works on both interpreters.
print(exec_all)
os.system(exec_all)
| 793 | 344 |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from toontown.ai import DistributedPhaseEventMgr
class DistributedTrashcanZeroMgr(DistributedPhaseEventMgr.DistributedPhaseEventMgr):
    """Distributed manager for the 'trashcan zero' phased event.

    Re-broadcasts the phase and running state it receives as messenger events
    ('trashcanZeroPhase', 'trashcanZeroIsRunning') for the rest of the client.
    """
    # keep this distributed object alive rather than letting it be disabled
    neverDisable = 1
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTrashcanZeroMgr')

    def __init__(self, cr):
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.__init__(self, cr)
        # register on the repository so other code can reach this manager
        cr.trashcanZeroMgr = self

    def announceGenerate(self):
        """Broadcast the current running state once the object is generated."""
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.announceGenerate(self)
        # NOTE(review): `messenger` is assumed to be the Panda3D global event
        # bus injected into builtins — not imported in this module.
        messenger.send('trashcanZeroIsRunning', [self.isRunning])

    def delete(self):
        """Announce shutdown, then unregister from the repository."""
        self.notify.debug('deleting trashcanzeromgr')
        messenger.send('trashcanZeroIsRunning', [False])
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.delete(self)
        if hasattr(self.cr, 'trashcanZeroMgr'):
            del self.cr.trashcanZeroMgr

    def setCurPhase(self, newPhase):
        """Forward a phase change from the base class to messenger listeners."""
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.setCurPhase(self, newPhase)
        messenger.send('trashcanZeroPhase', [newPhase])

    def setIsRunning(self, isRunning):
        """Forward a running-state change from the base class to messenger listeners."""
        DistributedPhaseEventMgr.DistributedPhaseEventMgr.setIsRunning(self, isRunning)
        messenger.send('trashcanZeroIsRunning', [isRunning])
| 1,325 | 418 |
"""
.. _ft_seeg_example:
=========================================
Apply bipolar montage to depth electrodes
=========================================
This script shows a very simple example of how to create an Interface wrapping
a desired function of a Matlab toolbox (|FieldTrip|).
.. |FieldTrip| raw:: html
<a href="http://www.fieldtriptoolbox.org/" target="_blank">FieldTrip</a>
The **input** data should be a **.mat** file containing a FieldTrip data struct
"""
# Authors: Annalisa Pascarella <a.pascarella@iac.cnr.it>
# License: BSD (3-clause)
import os.path as op
import ephypype
from ephypype.nodes.FT_tools import Reference
from ephypype.datasets import fetch_ieeg_dataset
###############################################################################
# Let us fetch the data first. It is around 675 MB download.
# Fetch the iEEG demo dataset into examples/ next to the installed package.
base_path = op.join(op.dirname(ephypype.__file__), '..', 'examples')
data_path = fetch_ieeg_dataset(base_path)
# NOTE(review): ft_path is machine-specific — point it at the local FieldTrip install.
ft_path = '/usr/local/MATLAB/R2018a/toolbox/MEEG/fieldtrip-20200327/'
refmethod = 'bipolar'
# MATLAB cell-array literal listing the sEEG channel name patterns to re-reference
channels_name = '{\'RAM*\', \'RHH*\', \'RTH*\', \'ROC*\', \'LAM*\',\'LHH*\', \'LTH*\'}'  # noqa
# Now we call the interface Reference to apply a bipolar montage to sEEG data
reference_if = Reference()
reference_if.inputs.data_file = op.join(data_path, 'SubjectUCI29_data.mat')
reference_if.inputs.channels = channels_name
reference_if.inputs.ft_path = ft_path
reference_if.inputs.refmethod = refmethod
reference_if.inputs.script = ''
out = reference_if.run()
print('Rereferenced data saved at {}'.format(out.outputs.data_output))
| 1,575 | 542 |
# Simple tree structure
import numpy as np
import math
class Node:
    '''
    One node of the game tree used for Monte-Carlo tree search.

    A node records the board position of the move that created it, the
    accumulated reward, the visit count, the player to move and its children.
    '''
    def __init__(self, parent, boardposition, current_player):
        self.parent = parent
        self.boardposition = boardposition
        self.reward = 0
        self.visits = 0
        self.current_player = current_player
        self.children = []

    def add_child(self, boardposition):
        '''Append a child for the given board position; the player alternates (1 <-> 2).'''
        next_player = 3 - self.current_player
        self.children.append(Node(self, boardposition, next_player))

    def isExpanded(self):
        '''A node counts as fully expanded once it has children and each was visited.'''
        return self.children and all(child.visits > 0 for child in self.children)

    def getPossibleChildren(self, game_state):
        '''Create one child per empty cell (value 0) of the flattened board.'''
        for cell_index, occupant in enumerate(np.array(game_state).flatten()):
            if occupant == 0:
                self.add_child(cell_index)

    def update(self, result):
        '''Record one visit: +1 reward if this node's player won, -1 if lost, 0 for a tie.'''
        self.visits += 1
        if not result:
            return  # tie (falsy result): reward unchanged
        self.reward += 1 if self.current_player == result else -1

    def UTC_traverse(self, root):
        '''Pick the child with the highest UCT value.'''
        return max(self.children, key=lambda child: child.UCT(root))

    def UCT(self, root):
        '''Upper-confidence bound for trees; unvisited nodes score 0.'''
        if self.visits == 0:
            return 0
        exploitation = self.reward / self.visits
        exploration = math.sqrt(math.log(root.visits) / self.visits)
        return exploitation + exploration

    def print(self, root):
        '''Dump this node's statistics to stdout.'''
        print("Position ", self.boardposition, ", Player ", self.current_player,\
              ", Reward ", self.reward, ", Visits ", self.visits,\
              ", UTC ", round(self.UCT(root), 3), ", Childcount ", len(self.children))
# -*- coding: utf-8 -*-
"""Console script for vibrant_frequencies."""
import logging
import click
from .prototype import visualize
@click.command()
def main():
    """CLI entry point: raise the root log level to WARN, then run the visualizer."""
    logging.getLogger('').setLevel(logging.WARN)
    visualize()

if __name__ == "__main__":
    main()
| 269 | 94 |
from __future__ import annotations
from typing import TYPE_CHECKING
from .packet import (
ConnectionRequest,
ConnectionRequestAccepted,
NewIncomingConnection,
OfflinePing,
OfflinePong,
OnlinePing,
OnlinePong,
OpenConnectionRequest1,
OpenConnectionReply1,
OpenConnectionRequest2,
OpenConnectionReply2,
IncompatibleProtocolVersion,
)
from .protocol_info import ProtocolInfo
from ..utils import InternetAddress
if TYPE_CHECKING:
from ..server import Server
# Public API of this module (a one-element tuple).
__all__ = 'Handler',
class Handler:
    """
    Class containing various handler methods to handle packets.

    Each handler decodes an incoming packet, builds the corresponding reply
    packet, encodes it and returns the raw reply bytes.

    :param server: Server for which handler is intended
    """

    __slots__ = 'server',

    def __init__(self, server: Server):
        self.server = server

    async def handle_connection_request(self, data: bytes, address: InternetAddress, *, server: Server = None) -> bytes:
        """
        Handler to handle `Connection-Request`

        :param data: data of the packet
        :param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
        :return: encoded `Connection-Request-Accepted` reply bytes
        """
        server = server or self.server
        packet: ConnectionRequest = ConnectionRequest(data)
        packet.decode()
        new_packet: ConnectionRequestAccepted = ConnectionRequestAccepted()
        new_packet.client_address = address
        new_packet.system_index = 0
        new_packet.server_guid = server.guid
        # 20 placeholder system addresses, per the RakNet wire format
        new_packet.system_addresses = [InternetAddress("255.255.255.255", 19132)] * 20
        new_packet.request_timestamp = server.get_time_ms()
        new_packet.encode()
        return new_packet.data

    async def handle_connection_request_accepted(self, data: bytes, address: InternetAddress, *, server: Server = None) -> bytes:
        """
        Handler to handle `Connection-Request-Accepted`

        :param data: data of the packet
        :param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
        :return: encoded `New-Incoming-Connection` reply bytes
        """
        server = server or self.server
        packet: ConnectionRequestAccepted = ConnectionRequestAccepted(data)
        packet.decode()
        new_packet: NewIncomingConnection = NewIncomingConnection()
        new_packet.server_address = address
        new_packet.system_addresses = packet.system_addresses
        # echo the peer's accepted timestamp back as the request timestamp
        new_packet.request_timestamp = packet.accepted_timestamp
        new_packet.accepted_timestamp = server.get_time_ms()
        new_packet.encode()
        return new_packet.data

    async def handle_offline_ping(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
        """
        Handler to handle `Offline-Ping`

        :param data: data of the packet
        :param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
        :return: encoded `Offline-Pong` reply bytes
        """
        server = server or self.server
        packet: OfflinePing = OfflinePing(data)
        packet.decode()
        new_packet: OfflinePong = OfflinePong()
        new_packet.client_timestamp = packet.client_timestamp
        new_packet.server_guid = server.guid
        new_packet.magic = ProtocolInfo.MAGIC
        # advertise the server name when the server defines one
        new_packet.server_name = server.name if hasattr(server, "name") else ""
        new_packet.encode()
        return new_packet.data

    async def handle_online_ping(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
        """
        Handler to handle `Online-Ping`

        :param data: data of the packet
        :param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
        :return: encoded `Online-Pong` reply bytes
        """
        server = server or self.server
        packet: OnlinePing = OnlinePing(data)
        packet.decode()
        new_packet: OnlinePong = OnlinePong()
        new_packet.client_timestamp = packet.client_timestamp
        new_packet.server_timestamp = server.get_time_ms()
        new_packet.encode()
        return new_packet.data

    async def handle_open_connection_request_1(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
        """
        Handler to handle `Open-Connection-Request-1`

        :param data: data of the packet
        :param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
        :return: encoded `Open-Connection-Reply-1` bytes on a protocol match,
            otherwise encoded `Incompatible-Protocol-Version` bytes
        """
        server = server or self.server
        packet: OpenConnectionRequest1 = OpenConnectionRequest1(data)
        packet.decode()
        if packet.protocol_version == server.protocol_version:
            new_packet: OpenConnectionReply1 = OpenConnectionReply1()
            new_packet.magic = ProtocolInfo.MAGIC
            new_packet.server_guid = server.guid
            new_packet.use_security = False
            new_packet.mtu_size = packet.mtu_size
        else:
            # protocol mismatch: tell the client which version we expect
            new_packet: IncompatibleProtocolVersion = IncompatibleProtocolVersion()
            new_packet.protocol_version = server.protocol_version
            new_packet.magic = ProtocolInfo.MAGIC
            new_packet.server_guid = server.guid
        new_packet.encode()
        return new_packet.data

    async def handle_open_connection_request_2(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
        """
        Handler to handle `Open-Connection-Request-2`

        Also registers the new connection on the server before returning.

        :param data: data of the packet
        :param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
        :return: encoded `Open-Connection-Reply-2` bytes
        """
        server = server or self.server
        packet: OpenConnectionRequest2 = OpenConnectionRequest2(data)
        packet.decode()
        new_packet: OpenConnectionReply2 = OpenConnectionReply2()
        new_packet.magic = ProtocolInfo.MAGIC
        new_packet.server_guid = server.guid
        new_packet.client_address = address
        new_packet.mtu_size = packet.mtu_size
        new_packet.use_encryption = False
        new_packet.encode()
        await server.add_connection(address, packet.mtu_size)
        return new_packet.data
| 6,568 | 1,803 |