hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
9d68975702955b40381a81581441db52c47f67ab
43
py
Python
src/network/__init__.py
ThomasRanvier/faces_recognition_nn
b9177134169b6e05d9d9b6ea3206628bdb127a5e
[ "MIT" ]
null
null
null
src/network/__init__.py
ThomasRanvier/faces_recognition_nn
b9177134169b6e05d9d9b6ea3206628bdb127a5e
[ "MIT" ]
null
null
null
src/network/__init__.py
ThomasRanvier/faces_recognition_nn
b9177134169b6e05d9d9b6ea3206628bdb127a5e
[ "MIT" ]
null
null
null
from .neural_network import Neural_network
21.5
42
0.883721
6
43
6
0.666667
0.722222
0
0
0
0
0
0
0
0
0
0
0.093023
43
1
43
43
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
9d6b7d2817a9a11d4f368ca09bd16da81be04b5f
1,496
py
Python
rides/forms.py
andrenbrandao/pirauber
d7c5647ec6df698fa3d7397907ff629c74cc76b9
[ "MIT" ]
null
null
null
rides/forms.py
andrenbrandao/pirauber
d7c5647ec6df698fa3d7397907ff629c74cc76b9
[ "MIT" ]
6
2020-06-05T23:27:38.000Z
2022-02-10T08:14:16.000Z
rides/forms.py
andrenbrandao/pirauber
d7c5647ec6df698fa3d7397907ff629c74cc76b9
[ "MIT" ]
null
null
null
from django import forms from crispy_forms.helper import FormHelper from crispy_forms.layout import Submit from django.utils.translation import ugettext_lazy as _ from .models import Ride class RideForm(forms.ModelForm): date = forms.DateField( label=_('Date'), widget=forms.DateInput(format=('%Y-%m-%d'),attrs={ 'class': 'form-control input-group-alternative', 'type': 'date' }) ) time = forms.TimeField( label=_('Time'), required=False, input_formats=['%H:%M'], widget=forms.TimeInput(format=('%H:%M'), attrs={ 'class': 'form-control input-group-alternative', 'type': 'time' }) ) description = forms.CharField( label=_('Description'), required=False, help_text=_('Write here any additional information.'), widget=forms.Textarea(attrs={ 'class': 'form-control input-group-alternative', }) ) class Meta: model = Ride fields = ('date', 'time', 'origin', 'destination', 'seats', 'price', 'description') def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.helper = FormHelper() self.helper.form_method = 'post' self.helper.add_input( Submit('submit', _('Save Ride'), css_class='btn-block')) for visible in self.visible_fields(): visible.field.widget.attrs['class'] = 'input-group-alternative'
31.166667
91
0.592914
158
1,496
5.468354
0.481013
0.046296
0.097222
0.072917
0.155093
0.155093
0.155093
0.106481
0
0
0
0
0.25869
1,496
47
92
31.829787
0.77908
0
0
0.195122
0
0
0.21123
0.061497
0
0
0
0
0
1
0.02439
false
0
0.121951
0
0.268293
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d6dfe9a0fb4cf150a1dbedc9b781a51974ddeed
843
py
Python
tests/testdata/models.py
dtpryce/MLServer
02744b3c770141b0b1d9dad2a0256d243051de61
[ "Apache-2.0" ]
null
null
null
tests/testdata/models.py
dtpryce/MLServer
02744b3c770141b0b1d9dad2a0256d243051de61
[ "Apache-2.0" ]
null
null
null
tests/testdata/models.py
dtpryce/MLServer
02744b3c770141b0b1d9dad2a0256d243051de61
[ "Apache-2.0" ]
null
null
null
import asyncio from mlserver import MLModel from mlserver.codecs import NumpyCodec from mlserver.types import InferenceRequest, InferenceResponse class SumModel(MLModel): async def predict(self, payload: InferenceRequest) -> InferenceResponse: decoded = self.decode(payload.inputs[0]) total = decoded.sum(axis=1, keepdims=True) output = NumpyCodec().encode(name="total", payload=total) return InferenceResponse(id=payload.id, model_name=self.name, outputs=[output]) class SlowModel(MLModel): async def load(self) -> bool: await asyncio.sleep(10) self.ready = True return self.ready async def infer(self, payload: InferenceRequest) -> InferenceResponse: await asyncio.sleep(10) return InferenceResponse(id=payload.id, model_name=self.name, outputs=[])
31.222222
87
0.71293
97
843
6.175258
0.43299
0.0601
0.050083
0.146912
0.193656
0.193656
0.193656
0.193656
0.193656
0.193656
0
0.008759
0.187426
843
26
88
32.423077
0.865693
0
0
0.111111
0
0
0.005931
0
0
0
0
0
0
1
0
false
0
0.222222
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d6f477bb8496ccbe8298b0d502cfaf9b42c5d1c
10,459
py
Python
PERFORMER.py
ShivamRajSharma/Transformer-Architecure_From_Scratch
f7f24cb5146c09e6cf38a41e5e5ef721389803c1
[ "MIT" ]
17
2020-09-13T07:53:41.000Z
2022-03-17T09:58:23.000Z
PERFORMER.py
ShivamRajSharma/Transformer-Architecure_From_Scratch
f7f24cb5146c09e6cf38a41e5e5ef721389803c1
[ "MIT" ]
null
null
null
PERFORMER.py
ShivamRajSharma/Transformer-Architecure_From_Scratch
f7f24cb5146c09e6cf38a41e5e5ef721389803c1
[ "MIT" ]
3
2020-12-15T14:20:47.000Z
2022-01-24T02:26:04.000Z
from time import time import torch import torch.nn as nn class FastAttention(nn.Module): def __init__(self, input_shape, head, n_features): super(FastAttention, self).__init__() self.head = head self.input_shape = input_shape self.depth = int(input_shape // head) self.n_features = n_features self.key_ORF = self.OrthogonalRandomFeature() self.query_ORF = self.OrthogonalRandomFeature() self.query = nn.Linear(self.depth, self.depth) self.key = nn.Linear(self.depth, self.depth) self.value = nn.Linear(self.depth, self.depth) self.fc = nn.Linear(self.depth*head, input_shape) def kernel_function(self, x, flag): ORF = self.query_ORF if flag == 'query' else self.key_ORF normalization_factor = 1/ORF.shape[-1]**0.25 x *= normalization_factor out = torch.einsum('nhsd, fd -> nhsf', x, ORF) kernel_fn = nn.ReLU()(out) + 1e-3 return kernel_fn def OrthogonalRandomFeature(self): n = self.n_features//self.depth remainder = self.n_features%self.depth orthogonal_features = [] for _ in range(n): normal_feature = torch.rand(self.depth, self.depth) orthogonal_feature, _ = torch.qr(normal_feature) orthogonal_features.append(orthogonal_feature) if remainder > 0 : normal_feature = torch.rand(self.depth, self.depth) orthogonal_feature, _ = torch.qr(normal_feature) orthogonal_features.append(orthogonal_feature[0: remainder]) orthogonal_features = torch.cat(orthogonal_features) mutilplier = torch.randn(self.n_features, self.depth).norm(dim=1) final_features = torch.matmul(torch.diag(mutilplier), orthogonal_features) return final_features def causal_attention(self, q, k, v): denominator = 1/torch.einsum('nhqf, nhkf -> nhqf', q, k.cumsum(dim=-2)) x = torch.einsum('nhkf, nhkd -> nhkfd', k, v) x = x.cumsum(dim=-3) out = torch.einsum('nhqfd, nhqf, nhqf -> nhqd', x, q, denominator) return out def bidirectional_attention(self, q, k, v): kt_i = torch.einsum('nhkf -> nhf', k) normalization_factor = 1/(torch.einsum('nhqf, nhf -> nhq', q, kt_i)) k_v = torch.einsum('nhkf, nhkd -> nhfd', k, v) attention = 
torch.einsum('nhfd, nhqf, nhq-> nhqd', k_v, q, normalization_factor) return attention def forward(self, query, key, value, mask=None, casual_mask=False): batch = query.shape[0] query_len, key_len, value_len = query.shape[1], key.shape[1], value.shape[1] query = query.reshape(batch, query_len, self.head, self.depth) key = key.reshape(batch, key_len, self.head, self.depth) value = value.reshape(batch, value_len, self.head, self.depth) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) query = self.query(query) key = self.key(key) value = self.value(value) if mask is not None: key.masked_fill(mask == 0, float("-1e20")) query = self.kernel_function(query, 'query') key = self.kernel_function(key, 'key') if casual_mask: out = self.causal_attention(query, key, value) else: out = self.bidirectional_attention(query, key, value) out = out.permute(0, 2, 1, 3) out = out.reshape(batch, query_len, self.head*self.depth) out = self.fc(out) return out class PerformerBlock(nn.Module): def __init__(self, input_shape, head, n_features, dropout, forward_expansion): super(PerformerBlock, self).__init__() self.attention = FastAttention(input_shape, head, n_features) self.feed_forward = nn.Sequential( nn.Linear(input_shape, input_shape*forward_expansion), nn.GELU(), nn.Linear(input_shape*forward_expansion, input_shape) ) self.layernorm1 = nn.LayerNorm(input_shape) self.layernorm2 = nn.LayerNorm(input_shape) self.dropout = nn.Dropout(dropout) def forward(self, query, key, value, mask): attention = self.attention(query, key, value, mask) add = attention + query regulazation = self.dropout(self.layernorm1(add)) forward = self.feed_forward(regulazation) out = self.dropout(self.layernorm2(forward + regulazation)) return out class Encoder(nn.Module): def __init__( self, vocab_size, embedding_out, num_layers, heads, n_features, forward_expansion, dropout, max_len ): super(Encoder, self).__init__() self.word_embedding = nn.Embedding(vocab_size, 
embedding_out) self.postional_embedding = nn.Parameter(torch.zeros(1, max_len, embedding_out)) self.dropout = nn.Dropout(dropout) self.layers = nn.Sequential( *[ PerformerBlock( embedding_out, heads, n_features, dropout, forward_expansion ) for _ in range(num_layers) ] ) def forward(self, x, mask): word_embedding = self.word_embedding(x) postional_embedding = self.postional_embedding[:, :x.shape[1], :] out = self.dropout(word_embedding + postional_embedding) for layer in self.layers: out = layer(out, out, out, mask) return out class DecoderBlock(nn.Module): def __init__( self, embedding_out, head, n_features, forward_expansion, dropout ): super(DecoderBlock, self).__init__() self.attention = FastAttention(embedding_out, head, n_features) self.Performer_block = PerformerBlock( embedding_out, head, n_features, dropout, forward_expansion ) self.dropout = nn.Dropout(dropout) self.norm = nn.LayerNorm(embedding_out) def forward(self, query, key, value, src_mask): attention = self.attention(query, query, query, src_mask, True) query = self.dropout(self.norm(attention + query)) out = self.Performer_block(query, key, value, src_mask) return out class Decoder(nn.Module): def __init__( self, vocab_size, embedding_out, num_layers, head, n_features, forward_expansion, dropout, max_len ): super(Decoder, self).__init__() self.word_embedding = nn.Embedding(vocab_size, embedding_out) self.positional_embedding = nn.Parameter(torch.zeros(1, max_len, embedding_out)) self.layers = nn.Sequential( *[ DecoderBlock( embedding_out, head, n_features, forward_expansion, dropout ) for _ in range(num_layers) ] ) self.fc = nn.Linear(embedding_out, vocab_size) self.dropout = nn.Dropout(dropout) def forward(self, x, encoder_output, src_mask): x = self.dropout(self.word_embedding(x) + self.positional_embedding[:, :x.shape[1], :]) for layer in self.layers: x = layer( x, encoder_output, encoder_output, src_mask ) out = self.fc(x) return out class Performers(nn.Module): def __init__( self, 
input_vocab_size, output_vocab_size, pad_idx, embedding_out, num_layers, forward_expansion, head, n_features, dropout, max_len ): super(Performers, self).__init__() self.encoder = Encoder( input_vocab_size, embedding_out, num_layers, head, n_features, forward_expansion, dropout, max_len ) self.decoder = Decoder( output_vocab_size, embedding_out, num_layers, head, n_features, forward_expansion, dropout, max_len ) self.pad_idx = pad_idx self.apply(self._init_weights) #From @HuggingFace def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def input_pad_mask(self, inputs): pad_mask = (inputs != self.pad_idx).unsqueeze(1).unsqueeze(3) return pad_mask def output_pad_mask(self, targets): pad_mask = (targets != self.pad_idx).unsqueeze(1).unsqueeze(3) def forward(self, inputs, target): input_pad_mask = self.input_pad_mask(inputs) output_pad_mask = self.output_pad_mask(targets) encoder_output = self.encoder(inputs, input_pad_mask) decoder_out = self.decoder(target, encoder_output, output_pad_mask) return decoder_out if __name__ == "__main__": #Depends on the Tokenizer input_vocab_size = 100 output_vocab_size = 200 #DEFAULT PerFORMERS PARAMETERS:- pad_idx = 0 embedding_out = 512 num_layers = 6 forward_expansion = 4 head = 8 n_features = 256 dropout = 0.1 max_len = 512 inputs = torch.randint(0, 100, (32, 200)) targets = torch.randint(0, 100, (32,100)) model = Performers( input_vocab_size, output_vocab_size, pad_idx, embedding_out, num_layers, forward_expansion, head, n_features, dropout, max_len ) start = time() y = model(inputs, targets) print(f'INFERENCE TIME = {time() - start}sec') x = sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'NUMBER OF PARAMETERS ARE = {x}')
30.852507
95
0.581222
1,207
10,459
4.809445
0.153273
0.031008
0.026873
0.015504
0.418432
0.292334
0.264255
0.230146
0.178467
0.178467
0
0.013485
0.319342
10,459
339
96
30.852507
0.801938
0.006884
0
0.394265
0
0
0.022918
0
0
0
0
0
0
1
0.0681
false
0
0.010753
0
0.139785
0.007168
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d6fa2ce7adb3f0d8fb6ff64a2befb7535e72eca
28,970
py
Python
nogo/gtp_connection.py
douglasrebstock/alpha-zero-general
2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab
[ "MIT" ]
null
null
null
nogo/gtp_connection.py
douglasrebstock/alpha-zero-general
2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab
[ "MIT" ]
null
null
null
nogo/gtp_connection.py
douglasrebstock/alpha-zero-general
2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab
[ "MIT" ]
null
null
null
""" gtp_connection.py Module for playing games of Go using GoTextProtocol Parts of this code were originally based on the gtp module in the Deep-Go project by Isaac Henrion and Amos Storkey at the University of Edinburgh. """ import signal, os import traceback from sys import stdin, stdout, stderr from board_util import GoBoardUtil, BLACK, WHITE, EMPTY, BORDER, PASS, \ MAXSIZE, coord_to_point import numpy as np import re import time import random class GtpConnection(): def __init__(self, go_engine, board, debug_mode = False): """ Manage a GTP connection for a Go-playing engine Parameters ---------- go_engine: a program that can reply to a set of GTP commandsbelow board: Represents the current board state. """ self.totalTime = 0 self.count = 0 self.nodeExp = 0 self.timeLimit = 1 self.to_play = BLACK #H table is a dictionary that stores (state,value) pairs #value = Black win -> 1, White win -1 self.H_table = {} self._winner = '' self._optimal_move = '' self._debug_mode = debug_mode self.go_engine = go_engine self.board = board self.commands = { "protocol_version": self.protocol_version_cmd, "quit": self.quit_cmd, "name": self.name_cmd, "boardsize": self.boardsize_cmd, "showboard": self.showboard_cmd, "clear_board": self.clear_board_cmd, "komi": self.komi_cmd, "version": self.version_cmd, "known_command": self.known_command_cmd, "genmove": self.genmove_cmd, "list_commands": self.list_commands_cmd, "play": self.play_cmd, "legal_moves": self.legal_moves_cmd, "gogui-rules_game_id": self.gogui_rules_game_id_cmd, "gogui-rules_board_size": self.gogui_rules_board_size_cmd, "gogui-rules_legal_moves": self.gogui_rules_legal_moves_cmd, "gogui-rules_side_to_move": self.gogui_rules_side_to_move_cmd, "gogui-rules_board": self.gogui_rules_board_cmd, "gogui-rules_final_result": self.gogui_rules_final_result_cmd, "gogui-analyze_commands": self.gogui_analyze_cmd, "timelimit": self.timelimit_cmd, "solve":self.solve_cmd } # used for argument checking # values: (required number of 
arguments, # error message on argnum failure) self.argmap = { "boardsize": (1, 'Usage: boardsize INT'), "komi": (1, 'Usage: komi FLOAT'), "known_command": (1, 'Usage: known_command CMD_NAME'), "genmove": (1, 'Usage: genmove {w,b}'), "play": (2, 'Usage: play {b,w} MOVE'), "legal_moves": (1, 'Usage: legal_moves {w,b}'), "timelimit": (1, 'Usage: timelimit INT, 1 <= INT <= 100'), } def write(self, data): stdout.write(data) def flush(self): stdout.flush() def start_connection(self): """ Start a GTP connection. This function continuously monitors standard input for commands. """ line = stdin.readline() while line: self.get_cmd(line) line = stdin.readline() def get_cmd(self, command): """ Parse command string and execute it """ if len(command.strip(' \r\t')) == 0: return if command[0] == '#': return # Strip leading numbers from regression tests if command[0].isdigit(): command = re.sub("^\d+", "", command).lstrip() elements = command.split() if not elements: return command_name = elements[0]; args = elements[1:] if self.has_arg_error(command_name, len(args)): return if command_name in self.commands: try: self.commands[command_name](args) except Exception as e: self.debug_msg("Error executing command {}\n".format(str(e))) self.debug_msg("Stack Trace:\n{}\n". format(traceback.format_exc())) raise e else: self.debug_msg("Unknown command: {}\n".format(command_name)) self.error('Unknown command') stdout.flush() def has_arg_error(self, cmd, argnum): """ Verify the number of arguments of cmd. argnum is the number of parsed arguments """ if cmd in self.argmap and self.argmap[cmd][0] != argnum: self.error(self.argmap[cmd][1]) return True return False def debug_msg(self, msg): """ Write msg to the debug stream """ if self._debug_mode: stderr.write(msg) stderr.flush() def error(self, error_msg): """ Send error msg to stdout """ stdout.write('? 
{}\n\n'.format(error_msg)) stdout.flush() def respond(self, response=''): """ Send response to stdout """ stdout.write('= {}\n\n'.format(response)) stdout.flush() def reset(self, size): """ Reset the board to empty board of given size """ self.board.reset(size) def board2d(self): return str(GoBoardUtil.get_twoD_board(self.board)) def protocol_version_cmd(self, args): """ Return the GTP protocol version being used (always 2) """ self.respond('2') def quit_cmd(self, args): """ Quit game and exit the GTP interface """ self.respond() exit() def name_cmd(self, args): """ Return the name of the Go engine """ self.respond(self.go_engine.name) def version_cmd(self, args): """ Return the version of the Go engine """ self.respond(self.go_engine.version) def clear_board_cmd(self, args): """ clear the board """ self.reset(self.board.size) self.respond() def boardsize_cmd(self, args): """ Reset the game with new boardsize args[0] """ self.reset(int(args[0])) self.respond() #newly added def timelimit_cmd(self, args): """ Reset the game with new timelimit args[0] """ self.timeLimit = int(args[0]) self.respond() def showboard_cmd(self, args): self.respond('\n' + self.board2d()) def komi_cmd(self, args): """ Set the engine's komi to args[0] """ self.go_engine.komi = float(args[0]) self.respond() def known_command_cmd(self, args): """ Check if command args[0] is known to the GTP interface """ if args[0] in self.commands: self.respond("true") else: self.respond("false") def list_commands_cmd(self, args): """ list all supported GTP commands """ self.respond(' '.join(list(self.commands.keys()))) def legal_moves_cmd(self, args): """ List legal moves for color args[0] in {'b','w'} """ board_color = args[0].lower() color = color_to_int(board_color) moves = GoBoardUtil.generate_legal_moves(self.board, color) gtp_moves = [] for move in moves: coords = point_to_coord(move, self.board.size) gtp_moves.append(format_point(coords)) sorted_moves = ' '.join(sorted(gtp_moves)) 
self.respond(sorted_moves) def play_cmd(self, args): """ play a move args[1] for given color args[0] in {'b','w'} """ try: board_color = args[0].lower() board_move = args[1] if board_color != "b" and board_color !="w": self.respond("illegal move: \"{}\" wrong color".format(board_color)) return color = color_to_int(board_color) #change turn to the other player self.to_play = GoBoardUtil.opponent(color) if args[1].lower() == 'pass': self.respond("illegal move: \"{} {}\" wrong coordinate".format(args[0], args[1])) return coord = move_to_coord(args[1], self.board.size) if coord: move = coord_to_point(coord[0],coord[1], self.board.size) else: self.error("Error executing move {} converted from {}" .format(move, args[1])) return if not self.board.play_move(move, color): self.respond("illegal move: \"{} {}\" ".format(args[0], board_move)) return else: self.debug_msg("Move: {}\nBoard:\n{}\n". format(board_move, self.board2d())) self.respond() except Exception as e: self.respond('illegal move: \"{} {}\" {}'.format(args[0], args[1], str(e))) def solve_helper(self): winner = 'unknown' #the copy of board can be viewed as a state cp_board = self.board.copy() start = time.time() signal.signal(signal.SIGALRM, handler) signal.alarm(self.timeLimit) try: value,move = self.advanced_search(cp_board,81,-1,1) except Exception as e: value,move = 0,None #print("nodeExp",self.nodeExp) #print("count",self.count) signal.alarm(0) end = time.time() print("time: ",end - start) #print("partial time: ",self.totalTime) if value == 1: winner = 'b' elif value == -1: winner = 'w' if (winner == 'b' and self.to_play !=BLACK) or (winner == 'w' and self.to_play !=WHITE): move = None return winner,move #newly added def solve_cmd(self,args): moveStr = '' winner,move = self.solve_helper() if move: moveStr = ' '+ coord_to_move(move,self.board.size) self.respond(winner+moveStr) #alpha beta pruning, referencing from wikipedia: https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning #color is the player. 
black is max player, white is min player def ab_search(self, color, copy_of_board, depth, alpha, beta): _alpha = alpha _beta = beta bestMove = None #base case, no more legal move #print(GoBoardUtil.generate_legal_moves(copy_of_board, color)) if depth == 0 or (GoBoardUtil.generate_legal_moves(copy_of_board, color) == []): #depth should always be >0 #since NOGO cannot capture nor suiside, if last move is by WHITE/BLACK, it must be a BLACK/WHITE win. if color == WHITE: return 1,None #color == BLACK else: return -1,None #color is black; max player if color == BLACK: value = -1000000 #make a copy of current state allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, color) #print("allmoves:") #print(allmoves) for move in allmoves: child = copy_of_board.copy() child.play_move(move, color) childValue,_ = self.ab_search(WHITE,child,depth-1,_alpha,_beta) value = max(value,childValue) _alpha = max(_alpha,value) bestMove = move #beta cut-off if _alpha >= _beta: break return value,bestMove #color is white; min player else: value = 1000000 allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, color) #print("allmoves:") #print(allmoves) for move in allmoves: child = copy_of_board.copy() child.play_move(move, color) childValue,_ = self.ab_search(BLACK,child,depth-1,_alpha,_beta) value = min(value,childValue) _beta = min(_beta,value) bestMove = move #alpha cut-off if _alpha >= _beta: break return value,bestMove def advanced_search(self,copy_of_board,depth,alpha,beta): _alpha = alpha _beta = beta bestMove = None self.nodeExp += 1 #base case, depth 0 if depth == 0: return 0,None #Start = time.time() allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, copy_of_board.current_player) #End =time.time() #self.totalTime += End-Start #base case, no more legal move if allmoves == []: #since NOGO cannot capture nor suiside, if last move is by WHITE/BLACK, it must be a BLACK/WHITE win. 
if copy_of_board.current_player == WHITE: self.H_table[self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))] = 1 return 1,None #color == BLACK else: self.H_table[self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))] = -1 return -1,None searchedMoves = [] unsearchedMoves = [] unsearched = {} searchedValue = {} isoSet = set() singleMoveIsoSet = set() for move in allmoves: singleMoveIsoSet.clear() child = copy_of_board.copy() child.play_move(move, copy_of_board.current_player) #get all isomorphics of the board, in order to prunning as many as redundent states possible isomorphics = self.get_all_isomorphic(GoBoardUtil.get_twoD_board(child),child.size) found = False for iso in isomorphics: if self.tuple_to_str(iso) in self.H_table: found = True searchedMoves.append(move) searchedValue[move] = self.H_table[self.tuple_to_str(iso)] break if iso in isoSet: found = True break else: isoSet.add(iso) singleMoveIsoSet.add(iso) if not found: ''' the following is the heuristic I created for ordering the moves: (1) eye-filling is the last thing we want to do; (2) the few the number of player's stones with MD 1, the better; (3) the more the number of opponent's stones with MD 1, the better; (4) the more the number of player's stones with MD 2, the better; ''' num_same = 49 dis1 = [move+1,move-1,move+child.size+1,move-child.size-1] dis2 = [move+2,move-2,move+2*(child.size+1),move-2*(child.size+1),move+child.size+2,move-child.size-2,move+child.size,move-child.size] valid1 = [] for point in dis1: x = point%(child.size+1) y = point//(child.size+1) if 1<=x<=child.size and 1<=y<=child.size: valid1.append(point) valid2 = [] for point in dis2: x = point%(child.size+1) y = point//(child.size+1) if 1<=x<=child.size and 1<=y<=child.size: valid2.append(point) if copy_of_board.is_eye(move,copy_of_board.current_player): num_same += 1000 for point in valid1: if 
child.get_color(point)==copy_of_board.current_player: num_same += 100 if child.get_color(point)== BLACK+WHITE-copy_of_board.current_player: num_same -= 10 for point in valid2: if child.get_color(point)==copy_of_board.current_player: num_same -= 1 unsearched[move] = num_same #print("dic:",unsearched) #print("searched:",searchedMoves) #sorting unsearched moves by the heuristic value sorted_x = sorted(unsearched.items(), key=lambda kv: kv[1]) for item in sorted_x: unsearchedMoves.append(item[0]) orderedMoves = searchedMoves + unsearchedMoves self.count += len(allmoves) - len(orderedMoves) state = self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size)) #below is normal alpha-beta search #color is black; max player if copy_of_board.current_player == BLACK: value = -1000000 #make a copy of current state for move in orderedMoves: if move in searchedMoves: childValue = searchedValue[move] else: child = copy_of_board.copy() child.play_move(move, copy_of_board.current_player) childValue,_ = self.advanced_search(child,depth-1,_alpha,_beta) #childValue,_ = self.advanced_search(copy_of_board,depth-1,_alpha,_beta) value = max(value,childValue) _alpha = max(_alpha,value) bestMove = move #beta cut-off if _alpha >= _beta: break self.H_table[state] = value return value,bestMove #color is white; min player else: value = 1000000 for move in orderedMoves: if move in searchedMoves: childValue = searchedValue[move] else: child = copy_of_board.copy() child.play_move(move, copy_of_board.current_player) #childValue,_ = self.advanced_search(copy_of_board,depth-1,_alpha,_beta) childValue,_ = self.advanced_search(child,depth-1,_alpha,_beta) value = min(value,childValue) _beta = min(_beta,value) bestMove = move #alpha cut-off if _alpha >= _beta: break self.H_table[state] = value return value,bestMove def get_all_isomorphic(self, board_2d,size): """ input: matrix of a board output: a set of tuples """ isomorphics = set() #original #print("mat to 
tuple:") #print(self.matrix_to_tuple(board_2d,size)) isomorphics.add(self.matrix_to_tuple(board_2d,size)) #return isomorphics tmp_board = [] #reflectional sym, 2 cases #swap rows cp_board_2dx = board_2d.copy() for i in range(size//2): tmp = cp_board_2dx[i,:].copy() cp_board_2dx[i,:] = cp_board_2dx[size-1-i,:] cp_board_2dx[size-1-i,:]=tmp isomorphics.add(self.matrix_to_tuple(cp_board_2dx,size)) #swap columns cp_board_2dy = board_2d.copy() for j in range(size//2): for i in range(size): tmp = cp_board_2dy[i,j] cp_board_2dy[i,j] = cp_board_2dy[i,size-1-j] cp_board_2dy[i,size-1-j] = tmp isomorphics.add(self.matrix_to_tuple(cp_board_2dy,size)) #rotational sym, 3 cases board_90 = np.rot90(board_2d) #board_90 = self.rotateMatrix(board_2d,size) isomorphics.add(self.matrix_to_tuple(board_90,size)) #reflectional sym of 90 degree, 2 cases #swap rows cp_board_90x = board_90.copy() for i in range(size//2): tmp = cp_board_90x[i,:].copy() cp_board_90x[i,:] = cp_board_90x[size-1-i,:] cp_board_90x[size-1-i,:] = tmp isomorphics.add(self.matrix_to_tuple(cp_board_90x,size)) #swap columns cp_board_90y = board_90.copy() for j in range(size//2): for i in range(size): tmp = cp_board_90y[i,j] cp_board_90y[i,j] = cp_board_90y[i,size-1-j] cp_board_90y[i,size-1-j] = tmp isomorphics.add(self.matrix_to_tuple(cp_board_90y,size)) #print("90",board_90) board_180 = np.rot90(board_90) #print("180",board_180) isomorphics.add(self.matrix_to_tuple(board_180,size)) board_270 = np.rot90(board_180) #print("270",board_270) isomorphics.add(self.matrix_to_tuple(board_270,size)) #board_180 = self.rotateMatrix(board_90,size) #isomorphics.add(self.matrix_to_tuple(board_180,size)) #board_270 = self.rotateMatrix(board_180,size) #isomorphics.add(self.matrix_to_tuple(board_270,size)) return isomorphics def matrix_to_tuple(self,matrix,dim): board1d = np.zeros((dim* dim), dtype = np.int32) for i in range(dim): board1d[i*dim:i*dim+dim] = matrix[i,:] return tuple(board1d) def get_oneD_board(self,goboard): """ Return: 
numpy array a 1-d numpy array with the stones as the goboard. Does not pad with BORDER Rows 1..size of goboard are copied into rows 0..size - 1 of board2d """ size = goboard.size board1d = np.zeros((size* size), dtype = np.int32) for row in range(size): start = goboard.row_start(row + 1) board1d[row*size:row*size+size] = goboard.board[start : start + size] return board1d def tuple_to_str(self,tup): res = '' for i in tup: res += str(int(i)) return res #genemove overrided def genmove_cmd(self, args): """ Generate a move for the color args[0] in {'b', 'w'}, for the game of gomoku. """ board_color = args[0].lower() color = color_to_int(board_color) self.to_play = color winnerStr,optMove = self.solve_helper() winner = EMPTY if winnerStr=='b': winner = BLACK elif winnerStr =='w': winner = WHITE #if current player is winner, we will take bestmove; otherwise we should take a random move if board_color == winner: move = optMove else: move = GoBoardUtil.generate_random_move(self.board, color,False) move_coord = point_to_coord(move, self.board.size) move_as_string = format_point(move_coord) if self.board.is_legal(move, color): self.board.play_move(move, color) self.respond(move_as_string) else: self.respond("resign") def gogui_rules_game_id_cmd(self, args): self.respond("NoGo") def gogui_rules_board_size_cmd(self, args): self.respond(str(self.board.size)) def legal_moves_cmd(self, args): """ List legal moves for color args[0] in {'b','w'} """ board_color = args[0].lower() color = color_to_int(board_color) moves = GoBoardUtil.generate_legal_moves(self.board, color) gtp_moves = [] for move in moves: coords = point_to_coord(move, self.board.size) gtp_moves.append(format_point(coords)) sorted_moves = ' '.join(sorted(gtp_moves)) self.respond(sorted_moves) def gogui_rules_legal_moves_cmd(self, args): empties = self.board.get_empty_points() color = self.board.current_player legal_moves = [] for move in empties: if self.board.is_legal(move, color): legal_moves.append(move) gtp_moves 
= [] for move in legal_moves: coords = point_to_coord(move, self.board.size) gtp_moves.append(format_point(coords)) sorted_moves = ' '.join(sorted(gtp_moves)) self.respond(sorted_moves) def gogui_rules_side_to_move_cmd(self, args): color = "black" if self.board.current_player == BLACK else "white" self.respond(color) def gogui_rules_board_cmd(self, args): size = self.board.size str = '' for row in range(size-1, -1, -1): start = self.board.row_start(row + 1) for i in range(size): point = self.board.board[start + i] if point == BLACK: str += 'X' elif point == WHITE: str += 'O' elif point == EMPTY: str += '.' else: assert False str += '\n' self.respond(str) def gogui_rules_final_result_cmd(self, args): empties = self.board.get_empty_points() color = self.board.current_player legal_moves = [] for move in empties: if self.board.is_legal(move, color): legal_moves.append(move) if not legal_moves: result = "black" if self.board.current_player == WHITE else "white" else: result = "unknown" self.respond(result) def gogui_analyze_cmd(self, args): self.respond("pstring/Legal Moves For ToPlay/gogui-rules_legal_moves\n" "pstring/Side to Play/gogui-rules_side_to_move\n" "pstring/Final Result/gogui-rules_final_result\n" "pstring/Board Size/gogui-rules_board_size\n" "pstring/Rules GameID/gogui-rules_game_id\n" "pstring/Show Board/gogui-rules_board\n" ) def point_to_coord(point, boardsize): """ Transform point given as board array index to (row, col) coordinate representation. Special case: PASS is not transformed """ if point == PASS: return PASS else: NS = boardsize + 1 return divmod(point, NS) def format_point(move): """ Return move coordinates as a string such as 'a1', or 'pass'. 
""" column_letters = "ABCDEFGHJKLMNOPQRSTUVWXYZ" #column_letters = "abcdefghjklmnopqrstuvwxyz" if move == PASS: return "pass" row, col = move if not 0 <= row < MAXSIZE or not 0 <= col < MAXSIZE: raise ValueError return column_letters[col - 1]+ str(row) def move_to_coord(point_str, board_size): """ Convert a string point_str representing a point, as specified by GTP, to a pair of coordinates (row, col) in range 1 .. board_size. Raises ValueError if point_str is invalid """ if not 2 <= board_size <= MAXSIZE: raise ValueError("board_size out of range") s = point_str.lower() if s == "pass": return PASS try: col_c = s[0] if (not "a" <= col_c <= "z") or col_c == "i": raise ValueError col = ord(col_c) - ord("a") if col_c < "i": col += 1 row = int(s[1:]) if row < 1: raise ValueError except (IndexError, ValueError): # e.g. "a0" raise ValueError("wrong coordinate") if not (col <= board_size and row <= board_size): # e.g. "a20" raise ValueError("wrong coordinate") return row, col def coord_to_move(move, board_size): """ Convert a string point_str representing a point, as specified by GTP, to a pair of coordinates (row, col) in range 1 .. board_size. Raises ValueError if point_str is invalid """ if not 2 <= board_size <= MAXSIZE: raise ValueError("board_size out of range") #s = point_str.lower() x = move%(board_size+1) y = move//(board_size+1) col = chr(x-1 + ord("a")) #col = col.upper() return col+str(y) def color_to_int(c): """convert character to the appropriate integer code""" color_to_int = {"b": BLACK , "w": WHITE, "e": EMPTY, "BORDER": BORDER} return color_to_int[c] def handler(signum, frame): print('Signal handler called with signal', signum) raise Exception("Timeout!")
34.736211
150
0.542975
3,482
28,970
4.345204
0.1278
0.013483
0.023265
0.01573
0.429941
0.379577
0.346861
0.320291
0.295704
0.271844
0
0.017755
0.354539
28,970
833
151
34.777911
0.791379
0.15875
0
0.334586
0
0
0.055794
0.013659
0
0
0
0
0.00188
1
0.088346
false
0.015038
0.015038
0.00188
0.167293
0.003759
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d70c2235e5fc849eb97316fd49d7acf1fb36a6a
2,634
py
Python
seamless/highlevel/SubCell.py
sjdv1982/seamless
1b814341e74a56333c163f10e6f6ceab508b7df9
[ "MIT" ]
15
2017-06-07T12:49:12.000Z
2020-07-25T18:06:04.000Z
seamless/highlevel/SubCell.py
sjdv1982/seamless
1b814341e74a56333c163f10e6f6ceab508b7df9
[ "MIT" ]
110
2016-06-21T23:20:44.000Z
2022-02-24T16:15:22.000Z
seamless/highlevel/SubCell.py
sjdv1982/seamless
1b814341e74a56333c163f10e6f6ceab508b7df9
[ "MIT" ]
6
2016-06-21T11:19:22.000Z
2019-01-21T13:45:39.000Z
import weakref from .Cell import Cell class SubCell(Cell): def __init__(self, parent, cell, subpath, readonly): assert isinstance(cell, Cell) assert not isinstance(cell, SubCell) fullpath = cell._path + subpath super().__init__(parent=parent, path=fullpath) self._cell = weakref.ref(cell) self._readonly = readonly self._subpath = subpath def _get_hcell(self): return self._cell()._get_hcell() def _get_cell_subpath(self, cell, subpath): return cell def __setattr__(self, attr, value): if attr.startswith("_"): return object.__setattr__(self, attr, value) from .assign import assign_to_subcell parent = self._parent() path = self._subpath + (attr,) assign_to_subcell(self._cell(), path, value) def __getattr__(self, attr): if attr.startswith("_"): return super().__getattribute__(attr) if attr in type(self).__dict__ or attr in self.__dict__: return super().__getattribute__(attr) parent = self._parent() readonly = self._readonly return SubCell(self._parent(), self._cell(), self._subpath + (attr,), readonly=readonly) @property def authoritative(self): #TODO: determine if the subcell didn't get any inbound connections # If it did, you can't get another inbound connection, nor a link return True #stub @property def links(self): #TODO: return the other partner of all Link objects with self in it return [] #stub @property def value(self): cell = self._cell() cellvalue = cell.value if cellvalue.unsilk is None: raise ValueError for attr in self._subpath: if isinstance(attr, int): cellvalue = cellvalue[attr] else: cellvalue = getattr(cellvalue, attr) return cellvalue def set(self, value): assert not self._readonly cell = self._cell() attr = self._subpath[-1] if len(self._subpath) == 1: return setattr(cell, attr, value) else: parent_subcell = SubCell(self._parent(), cell, self._subpath[:-1], False) return setattr(parent_subcell, attr, value) @property def _virtual_path(self): cell = self._cell() p = cell._virtual_path if p is None: return None return p + self._subpath def 
_set_observers(self): pass def __str__(self): return "Seamless SubCell: %s" % ".".join(self._path)
30.988235
96
0.602885
306
2,634
4.911765
0.277778
0.053227
0.023952
0.026613
0
0
0
0
0
0
0
0.001629
0.300683
2,634
84
97
31.357143
0.814332
0.077069
0
0.220588
0
0
0.009481
0
0
0
0
0.011905
0.044118
1
0.176471
false
0.014706
0.044118
0.073529
0.441176
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
9d70ca280f4f08aef01023da8fb208958fa5803b
460
py
Python
colos/sfbx/__init__.py
asmodehn/colos
8894c3a758489b639638ba9aa9c83f7d621648eb
[ "MIT" ]
null
null
null
colos/sfbx/__init__.py
asmodehn/colos
8894c3a758489b639638ba9aa9c83f7d621648eb
[ "MIT" ]
4
2018-04-11T09:13:05.000Z
2018-04-11T09:28:18.000Z
colos/sfbx/__init__.py
asmodehn/colos
8894c3a758489b639638ba9aa9c83f7d621648eb
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # The aim of this package is to : # - guarantee protected code execution is safe and *will* happen (eventually) # - report usage via colosstat # - recover when code fails ( possibly recording previous state, for example ) # one possibility is to implement another levelof abstraction ( like a language - cstk aim ) # another is to just isolate portions of python code with postconditions to guarantee success...
41.818182
96
0.741304
65
460
5.246154
0.8
0.035191
0
0
0
0
0
0
0
0
0
0.002646
0.178261
460
10
97
46
0.899471
0.96087
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
9d71192a0442b7eef7acad0763b92e91ecac841f
965
py
Python
plugins/help.py
A0vanc01/Frisky
d4d7f9892858b5412755c9dee594e5b60b6d2b94
[ "MIT" ]
5
2020-01-22T18:16:59.000Z
2021-06-14T13:23:57.000Z
plugins/help.py
A0vanc01/Frisky
d4d7f9892858b5412755c9dee594e5b60b6d2b94
[ "MIT" ]
104
2020-02-12T00:36:14.000Z
2022-02-10T08:18:28.000Z
plugins/help.py
A0vanc01/Frisky
d4d7f9892858b5412755c9dee594e5b60b6d2b94
[ "MIT" ]
4
2020-01-30T15:44:04.000Z
2020-08-27T19:22:57.000Z
from frisky.events import MessageEvent from frisky.plugin import FriskyPlugin, PluginRepositoryMixin from frisky.responses import FriskyResponse class HelpPlugin(FriskyPlugin, PluginRepositoryMixin): commands = ['help'] def command_help(self, message: MessageEvent) -> FriskyResponse: if len(message.args) == 1: plugin_name = message.args[0] if plugin_name == 'help': return 'Usage: `?help` or `?help <plugin_name>`' plugin = self.get_plugin_by_name(plugin_name) if plugin is None: return f'No such plugin: `{plugin_name}`, try `?help` to list installed plugins' if (help_text := plugin.help_text()) is None: return f'Plugin `{plugin_name}` does not provide help text.' return help_text plugins = self.get_plugin_names() joined_string = ', '.join(plugins) return f'Available plugins: {joined_string}'
40.208333
96
0.643523
111
965
5.441441
0.432432
0.099338
0.043046
0.043046
0
0
0
0
0
0
0
0.002801
0.260104
965
23
97
41.956522
0.843137
0
0
0
0
0
0.210363
0
0
0
0
0
0
1
0.052632
false
0
0.157895
0
0.578947
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d712c380762c48dece9d6503dff8952414ca037
1,663
py
Python
cadnano/tests/testgroup.py
mctrinh/cadnano2.5
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
[ "BSD-3-Clause" ]
69
2015-01-13T02:54:40.000Z
2022-03-27T14:25:51.000Z
cadnano/tests/testgroup.py
mctrinh/cadnano2.5
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
[ "BSD-3-Clause" ]
127
2015-01-01T06:26:34.000Z
2022-03-02T12:48:05.000Z
cadnano/tests/testgroup.py
mctrinh/cadnano2.5
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
[ "BSD-3-Clause" ]
48
2015-01-22T19:57:49.000Z
2022-03-27T14:27:53.000Z
# -*- coding: utf-8 -*- from PyQt5.QtWidgets import QGraphicsItem, QGraphicsRectItem, QGraphicsItemGroup from PyQt5.QtCore import pyqtSlot class MyItemGroup(QGraphicsItemGroup): Type = QGraphicsItem.UserType + 3 def __init__(self, parent=None): super(MyItemGroup, self).__init__(parent) def __repr__(self): return str(type(self).__name__) class MyRectItemNOIC(QGraphicsRectItem): Type = QGraphicsItem.UserType + 2 def __init__(self, parent=None): super(MyRectItemNOIC, self).__init__(parent) def __repr__(self): return str(type(self).__name__) # end class class MyRectItem(QGraphicsRectItem): Type = QGraphicsItem.UserType + 1 def __init__(self, parent=None): super(MyRectItem, self).__init__(parent) # def __repr__(self): # return str(type(self).__name__) def itemChange(self, change, value): assert isinstance(self, MyRectItem) # print("\nChange %s\n" % self, change, value) return super(MyRectItem, self).itemChange(change, value) # end def def testItemChangeRegression(): """Make sure PyQt5 handles QGraphicsItem.itemChange correctly as there was a regression in PyQt5 v 5.6 that was fixed in v 5.7 """ a = MyRectItemNOIC() b = MyRectItem(a) item_group = MyItemGroup() assert b.parentItem() is a assert a.childItems()[0] is b item_group.addToGroup(b) assert item_group.childItems()[0] is b assert b.parentItem() is item_group e = MyRectItem() c = MyRectItemNOIC(e) assert c.parentItem() is e item_group.addToGroup(c) assert c.parentItem() is item_group # end def
26.822581
80
0.683103
201
1,663
5.383085
0.338308
0.049908
0.069316
0.047135
0.19963
0.19963
0.127542
0.127542
0.127542
0.127542
0
0.010687
0.212267
1,663
61
81
27.262295
0.815267
0.164161
0
0.2
0
0
0
0
0
0
0
0
0.2
1
0.2
false
0
0.057143
0.057143
0.514286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
9d71751143901cbe72d8513a42c3b74da3d29bf0
998
py
Python
composer/models/ssd/ssd_hparams.py
anisehsani/composer
42599682d50409b4a4eb7c91fad85d67418cee13
[ "Apache-2.0" ]
null
null
null
composer/models/ssd/ssd_hparams.py
anisehsani/composer
42599682d50409b4a4eb7c91fad85d67418cee13
[ "Apache-2.0" ]
null
null
null
composer/models/ssd/ssd_hparams.py
anisehsani/composer
42599682d50409b4a4eb7c91fad85d67418cee13
[ "Apache-2.0" ]
null
null
null
# Copyright 2022 MosaicML. All Rights Reserved. from dataclasses import dataclass import yahp as hp from composer.models.model_hparams import ModelHparams @dataclass class SSDHparams(ModelHparams): input_size: int = hp.optional( doc="input size", default=300, ) num_classes: int = hp.optional( doc="num_classes", default=80, ) overlap_threshold: float = hp.optional( doc="threshold", default=0.5, ) nms_max_detections: int = hp.optional( doc="nms max dets", default=200, ) data: str = hp.optional( doc="data", default="/localdisk/coco", ) def initialize_object(self): from composer.models.ssd.ssd import SSD return SSD( input_size=self.input_size, overlap_threshold=self.overlap_threshold, nms_max_detections=self.nms_max_detections, num_classes=self.num_classes, data=self.data, )
22.681818
55
0.617234
114
998
5.245614
0.438596
0.083612
0.108696
0.080268
0
0
0
0
0
0
0
0.019774
0.290581
998
43
56
23.209302
0.824859
0.04509
0
0
0
0
0.064143
0
0
0
0
0
0
1
0.029412
false
0
0.117647
0
0.352941
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d73808fab2e4c633d3b7d43187bc4821f1bfb77
1,303
py
Python
src/lib/base_dataset.py
CvHadesSun/Camera-Calibration
5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85
[ "MIT" ]
null
null
null
src/lib/base_dataset.py
CvHadesSun/Camera-Calibration
5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85
[ "MIT" ]
null
null
null
src/lib/base_dataset.py
CvHadesSun/Camera-Calibration
5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85
[ "MIT" ]
null
null
null
from os.path import join from utils import getFileList class ImageFolder: def __init__(self, path, sub=None, annot='annot') -> None: self.root = path self.image = 'images' self.annot = annot self.image_root = join(path, self.image) self.annot_root = join(path, self.annot) self.annot_root_tmp = join(path, self.annot + '_tmp') if sub is None: self.imgnames = getFileList(self.image_root, ext='.jpg') self.annnames = getFileList(self.annot_root, ext='.json') else: self.imgnames = getFileList(join(self.image_root, sub), ext='.jpg') self.annnames = getFileList(join(self.annot_root, sub), ext='.json') self.imgnames = [join(sub, name) for name in self.imgnames] self.annnames = [join(sub, name) for name in self.annnames] self.isTmp = True assert len(self.imgnames) == len(self.annnames) def __getitem__(self, index): imgname = join(self.image_root, self.imgnames[index]) if self.isTmp: annname = join(self.annot_root_tmp, self.annnames[index]) else: annname = join(self.annot_root, self.annnames[index]) return imgname, annname def __len__(self): return len(self.imgnames)
40.71875
80
0.61934
166
1,303
4.710843
0.23494
0.103581
0.099744
0.065217
0.196931
0.061381
0.061381
0
0
0
0
0
0.261704
1,303
32
81
40.71875
0.81289
0
0
0.068966
0
0
0.025307
0
0
0
0
0
0.034483
1
0.103448
false
0
0.068966
0.034483
0.275862
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d73d6f049758b5497d67b41cd027577eaf0250d
1,704
py
Python
main.py
sunkr1995/genetic-drawing
6e5cc755a55c1994770c3f18fb14f1cc651bb700
[ "MIT" ]
null
null
null
main.py
sunkr1995/genetic-drawing
6e5cc755a55c1994770c3f18fb14f1cc651bb700
[ "MIT" ]
null
null
null
main.py
sunkr1995/genetic-drawing
6e5cc755a55c1994770c3f18fb14f1cc651bb700
[ "MIT" ]
null
null
null
''' Author: your name Date: 2021-06-18 10:13:00 LastEditTime: 2021-07-08 14:13:07 LastEditors: Please set LastEditors Description: In User Settings Edit FilePath: /genetic-drawing/main.py ''' import cv2 import os import time from IPython.display import clear_output from genetic_drawing import * gen = GeneticDrawing('03.jpg', seed=time.time()) out = gen.generate(400, 50) brushesRange = np.array([[0.1, 0.3], [0.3, 0.7]]) for i in range(len(gen.imgBuffer)): cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i]) try: for i in range(5): brushesRange_tmp = brushesRange/(2**(i+1)) gen.brushesRange = brushesRange_tmp.tolist() maskname = "masks-03/mask-{}.jpg".format(i) gen.sampling_mask = cv2.cvtColor(cv2.imread(maskname), cv2.COLOR_BGR2GRAY) #keep drawing on top of our previous result out = gen.generate(100, 30) for i in range(len(gen.imgBuffer)): cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i]) except: if not os.path.exists('out'): os.mkdir("out") for i in range(len(gen.imgBuffer)): cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i]) #brushesRange_tmp = brushesRange/100 #gen.brushesRange = brushesRange_tmp.tolist() ##gen.brushesRange = [[0.005, 0.015],[0.015, 0.035]] #gen.sampling_mask = cv2.cvtColor(cv2.imread("masks/mask-end.jpg"), cv2.COLOR_BGR2GRAY) # ##keep drawing on top of our previous result #out = gen.generate(50, 30) #save all the images from the image buffer if not os.path.exists('out'): os.mkdir("out") for i in range(len(gen.imgBuffer)): cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i])
34.08
87
0.669601
271
1,704
4.173432
0.372694
0.084881
0.026525
0.04863
0.541114
0.477454
0.477454
0.415561
0.415561
0.415561
0
0.067975
0.162559
1,704
50
88
34.08
0.724597
0.32277
0
0.428571
0
0
0.082674
0
0
0
0
0
0
1
0
false
0
0.178571
0
0.178571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d740fa3ec721433e495424e2743d9af67d910eb
10,991
py
Python
flair/models/sandbox/simple_sequence_tagger_model.py
bratao/flair
67b53cc2a615a2e2a4e552d6f787c2efa708a939
[ "MIT" ]
null
null
null
flair/models/sandbox/simple_sequence_tagger_model.py
bratao/flair
67b53cc2a615a2e2a4e552d6f787c2efa708a939
[ "MIT" ]
null
null
null
flair/models/sandbox/simple_sequence_tagger_model.py
bratao/flair
67b53cc2a615a2e2a4e552d6f787c2efa708a939
[ "MIT" ]
null
null
null
import logging from typing import List, Union, Optional import torch import torch.nn import torch.nn.functional as F from tqdm import tqdm import flair.nn from flair.data import Dictionary, Sentence, Label from flair.datasets import SentenceDataset, DataLoader from flair.embeddings import TokenEmbeddings from flair.training_utils import store_embeddings log = logging.getLogger("flair") class SimpleSequenceTagger(flair.nn.Classifier): """ This class is a simple version of the SequenceTagger class. The purpose of this class is to demonstrate the basic hierarchy of a sequence tagger (this could be helpful for new developers). It only uses the given embeddings and maps them with a linear layer to the tag_dictionary dimension. Thus, this class misses following functionalities from the SequenceTagger: - CRF, - RNN, - Reprojection. As a result, only poor results can be expected. """ def __init__( self, embeddings: TokenEmbeddings, tag_dictionary: Dictionary, tag_type: str, ): """ Initializes a SimpleSequenceTagger :param embeddings: word embeddings used in tagger :param tag_dictionary: dictionary of tags you want to predict :param tag_type: string identifier for tag type :param beta: Parameter for F-beta score for evaluation and training annealing """ super(SimpleSequenceTagger, self).__init__() # embeddings self.embeddings = embeddings # dictionaries self.tag_dictionary: Dictionary = tag_dictionary self.tag_type: str = tag_type self.tagset_size: int = len(tag_dictionary) # linear layer self.linear = torch.nn.Linear(self.embeddings.embedding_length, len(tag_dictionary)) # all parameters will be pushed internally to the specified device self.to(flair.device) def forward_loss( self, data_points: Union[List[Sentence], Sentence], sort=True ) -> torch.tensor: features = self.forward(data_points) return self._calculate_loss(features, data_points) def _get_state_dict(self): model_state = { "state_dict": self.state_dict(), "embeddings": self.embeddings, "tag_dictionary": 
self.tag_dictionary, "tag_type": self.tag_type, } return model_state @staticmethod def _init_model_with_state_dict(state): model = SimpleSequenceTagger( embeddings=state["embeddings"], tag_dictionary=state["tag_dictionary"], tag_type=state["tag_type"], ) model.load_state_dict(state["state_dict"]) return model def predict( self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32, all_tag_prob: bool = False, verbose: bool = False, label_name: Optional[str] = None, return_loss=False, embedding_storage_mode="none", ): """ Predict sequence tags for Named Entity Recognition task :param sentences: a Sentence or a List of Sentence :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory, up to a point when it has no more effect. :param all_tag_prob: True to compute the score for each tag on each token, otherwise only the score of the best tag is returned :param verbose: set to True to display a progress bar :param return_loss: set to True to return loss :param label_name: set this to change the name of the label type that is predicted :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. 
""" if label_name is None: label_name = self.tag_type with torch.no_grad(): if not sentences: return sentences if isinstance(sentences, Sentence): sentences = [sentences] # reverse sort all sequences by their length rev_order_len_index = sorted( range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True ) reordered_sentences: List[Union[Sentence, str]] = [ sentences[index] for index in rev_order_len_index ] dataloader = DataLoader( dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size ) # progress bar for verbosity if verbose: dataloader = tqdm(dataloader) overall_loss = 0 batch_no = 0 for batch in dataloader: batch_no += 1 if verbose: dataloader.set_description(f"Inferencing on batch {batch_no}") batch = self._filter_empty_sentences(batch) # stop if all sentences are empty if not batch: continue feature = self.forward(batch) if return_loss: overall_loss += self._calculate_loss(feature, batch) tags, all_tags = self._obtain_labels( feature=feature, batch_sentences=batch, get_all_tags=all_tag_prob, ) for (sentence, sent_tags) in zip(batch, tags): for (token, tag) in zip(sentence.tokens, sent_tags): token.add_tag_label(label_name, tag) # all_tags will be empty if all_tag_prob is set to False, so the for loop will be avoided for (sentence, sent_all_tags) in zip(batch, all_tags): for (token, token_all_tags) in zip(sentence.tokens, sent_all_tags): token.add_tags_proba_dist(label_name, token_all_tags) # clearing token embeddings to save memory store_embeddings(batch, storage_mode=embedding_storage_mode) if return_loss: return overall_loss / batch_no def forward(self, sentences: List[Sentence]): self.embeddings.embed(sentences) names = self.embeddings.get_names() lengths: List[int] = [len(sentence.tokens) for sentence in sentences] longest_token_sequence_in_batch: int = max(lengths) pre_allocated_zero_tensor = torch.zeros( self.embeddings.embedding_length * longest_token_sequence_in_batch, dtype=torch.float, device=flair.device, ) all_embs = 
list() for sentence in sentences: all_embs += [ emb for token in sentence for emb in token.get_each_embedding(names) ] nb_padding_tokens = longest_token_sequence_in_batch - len(sentence) if nb_padding_tokens > 0: t = pre_allocated_zero_tensor[ : self.embeddings.embedding_length * nb_padding_tokens ] all_embs.append(t) sentence_tensor = torch.cat(all_embs).view( [ len(sentences), longest_token_sequence_in_batch, self.embeddings.embedding_length, ] ) features = self.linear(sentence_tensor) return features def _calculate_loss( self, features: torch.tensor, sentences: List[Sentence] ) -> float: lengths: List[int] = [len(sentence.tokens) for sentence in sentences] tag_list: List = [] for s_id, sentence in enumerate(sentences): # get the tags in this sentence tag_idx: List[int] = [ self.tag_dictionary.get_idx_for_item(token.get_tag(self.tag_type).value) for token in sentence ] # add tags as tensor tag = torch.tensor(tag_idx, device=flair.device) tag_list.append(tag) score = 0 for sentence_feats, sentence_tags, sentence_length in zip( features, tag_list, lengths ): sentence_feats = sentence_feats[:sentence_length] score += torch.nn.functional.cross_entropy( sentence_feats, sentence_tags ) score /= len(features) return score def _obtain_labels( self, feature: torch.Tensor, batch_sentences: List[Sentence], get_all_tags: bool, ) -> (List[List[Label]], List[List[List[Label]]]): """ Returns a tuple of two lists: - The first list corresponds to the most likely `Label` per token in each sentence. - The second list contains a probability distribution over all `Labels` for each token in a sentence for all sentences. 
""" lengths: List[int] = [len(sentence.tokens) for sentence in batch_sentences] tags = [] all_tags = [] feature = feature.cpu() for index, length in enumerate(lengths): feature[index, length:] = 0 softmax_batch = F.softmax(feature, dim=2).cpu() scores_batch, prediction_batch = torch.max(softmax_batch, dim=2) feature = zip(softmax_batch, scores_batch, prediction_batch) for feats, length in zip(feature, lengths): softmax, score, prediction = feats confidences = score[:length].tolist() tag_seq = prediction[:length].tolist() scores = softmax[:length].tolist() tags.append( [ Label(self.tag_dictionary.get_item_for_index(tag), conf) for conf, tag in zip(confidences, tag_seq) ] ) if get_all_tags: all_tags.append( [ [ Label( self.tag_dictionary.get_item_for_index(score_id), score ) for score_id, score in enumerate(score_dist) ] for score_dist in scores ] ) return tags, all_tags @staticmethod def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]: filtered_sentences = [sentence for sentence in sentences if sentence.tokens] if len(sentences) != len(filtered_sentences): log.warning( f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens." ) return filtered_sentences @property def label_type(self): return self.tag_type
35.569579
111
0.592849
1,243
10,991
5.053097
0.225261
0.028976
0.013533
0.018468
0.074988
0.048878
0.038847
0.038847
0.038847
0.031842
0
0.001373
0.337185
10,991
309
112
35.569579
0.86081
0.19516
0
0.068627
0
0
0.023448
0.0028
0
0
0
0
0
1
0.04902
false
0
0.053922
0.004902
0.156863
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d7508b796c963b53ae0eb9f9680e4518db45e86
1,708
py
Python
exercise/xiaohuar/spider-xiaohuar.com.py
PorYoung/bigData-camp-8d
8fa31b48065da27fd1c4f8432232342cede6f56c
[ "MIT" ]
1
2019-12-27T06:34:06.000Z
2019-12-27T06:34:06.000Z
exercise/xiaohuar/spider-xiaohuar.com.py
PorYoung/bigData-camp-8d
8fa31b48065da27fd1c4f8432232342cede6f56c
[ "MIT" ]
1
2021-12-14T20:40:06.000Z
2021-12-14T20:40:06.000Z
exercise/xiaohuar/spider-xiaohuar.com.py
PorYoung/bigData-camp-8d
8fa31b48065da27fd1c4f8432232342cede6f56c
[ "MIT" ]
null
null
null
import requests from bs4 import BeautifulSoup def spider_xiaohuar_content(url, headers): response = requests.get(url=url, headers=headers) print(response.status_code) if response.status_code == 200: response.encoding = 'utf-8' html = response.content # 参数:网页内容,解析器 soup = BeautifulSoup(html, 'html5lib') div_list = soup.find_all('div', attrs={'class': 'all_lanmu'}) text = '' file = open('爬虫校花.md', 'w', encoding='utf-8') for div in div_list: title_div = div.find('div', attrs={'class': 'title1000'}) title = title_div.find('a').string text += '<style>img[src*="headimg-style"]{width:100px;height:100px}</style>\n\n## 标题:'+title+'\n\n' ul = div.find('ul') li_list = ul.find_all('li') for li in li_list: img_src = li.find('img').attrs['lazysrc'] a_href = li.find('a').attrs['href'] img_title = li.find('span').string school = li.find('b', attrs={'class': 'b1'}).string fav = li.find('b', attrs={'class': 'b2'}).string if url not in img_src: img_src = url+img_src text += '> ' + img_title+'\n\n' text += '!['+img_title+']('+img_src+'#headimg-style)'+'\n\n' text += '- 学校:'+school+'\n\n' text += '- 点赞人数:'+fav+'\n\n' file.write(text) file.close url = 'http://xiaohuar.com/' headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'} spider_xiaohuar_content(url, headers)
38.818182
136
0.538056
223
1,708
4.013453
0.41704
0.040223
0.020112
0.053631
0.107263
0
0
0
0
0
0
0.039409
0.286885
1,708
44
137
38.818182
0.695402
0.00644
0
0
0
0.055556
0.220519
0.042453
0.027778
0
0
0
0
1
0.027778
false
0
0.055556
0
0.083333
0.027778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d75c627939ebcaa3bf24644789f819936e04c59
749
py
Python
v1.1/auc_csv_merge.py
lz-pku-1997/so-many-tricks-for-Image-classification
3df7a0672f88219f893b0fa23c31ae6b30d01264
[ "MIT" ]
2
2020-04-21T06:06:28.000Z
2020-12-27T12:35:57.000Z
v1.1/auc_csv_merge.py
lz-pku-1997/so-many-tricks-for-Image-classification
3df7a0672f88219f893b0fa23c31ae6b30d01264
[ "MIT" ]
null
null
null
v1.1/auc_csv_merge.py
lz-pku-1997/so-many-tricks-for-Image-classification
3df7a0672f88219f893b0fa23c31ae6b30d01264
[ "MIT" ]
null
null
null
# Read every csv in the working directory directly; check the printed list to
# make sure the right files were picked up.
#
# Rank-averaging ensemble: each submission's class probabilities are replaced
# by their normalized rank within that file, then the per-file rank matrices
# are averaged into a single submission.
import glob

import numpy as np
import pandas as pd

io = glob.glob(r"*.csv")
len_io = len(io)
print('总共输入表的数量为:', len_io)
prob_list = []
for i in range(len_io):
    sub_1 = pd.read_csv(io[i])
    denominator = len(sub_1)
    for my_classes in ['healthy', 'multiple_diseases', 'rust', 'scab']:
        sub_label_1 = sub_1.loc[:, my_classes].values
        sort_1 = np.argsort(sub_label_1)
        # BUG FIX: the original inner loop reused ``i`` and shadowed the outer
        # file index — harmless today only because ``for`` re-binds ``i`` each
        # outer iteration, but fragile and confusing. Use a distinct name.
        for rank, temp_sort in enumerate(sort_1):
            # Row with the k-th smallest probability gets rank k / n_rows.
            sub_label_1[temp_sort] = rank / denominator
        sub_1.loc[:, my_classes] = sub_label_1
    prob_list.append(sub_1.loc[:, 'healthy':].values)
# Average the rank matrices of all input files; reuse the last frame's
# id column(s) as the template for the merged submission.
sub_1.loc[:, 'healthy':] = np.mean(prob_list, axis=0)
sub_1.to_csv('out/submission.csv', index=False)
print(sub_1.head())
31.208333
69
0.663551
124
749
3.75
0.403226
0.068817
0.077419
0.03871
0.068817
0
0
0
0
0
0
0.02455
0.184246
749
24
70
31.208333
0.736498
0.038718
0
0
0
0
0.113343
0
0
0
0
0
0
1
0
false
0
0.15
0
0.15
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d76b727796967801234a59f7efe009b01c9e636
10,468
py
Python
masakari-7.0.0/masakari/objects/base.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
null
null
null
masakari-7.0.0/masakari/objects/base.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
5
2019-08-14T06:46:03.000Z
2021-12-13T20:01:25.000Z
masakari-7.0.0/masakari/objects/base.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
2
2020-03-15T01:24:15.000Z
2020-07-22T20:34:26.000Z
# Copyright 2016 NTT Data.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Masakari common internal object model"""

import datetime

from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import fields as obj_fields

from masakari import objects


def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    return '_obj_' + name


class MasakariObjectRegistry(ovoo_base.VersionedObjectRegistry):
    # Notification/payload classes collected via register_notification();
    # materialized later by register_notification_objects().
    notification_classes = []

    def registration_hook(self, cls, index):
        # NOTE(Dinesh_Bhor): This is called when an object is registered,
        # and is responsible for maintaining masakari.objects.$OBJECT
        # as the highest-versioned implementation of a given object.
        version = versionutils.convert_version_to_tuple(cls.VERSION)
        if not hasattr(objects, cls.obj_name()):
            setattr(objects, cls.obj_name(), cls)
        else:
            cur_version = versionutils.convert_version_to_tuple(
                getattr(objects, cls.obj_name()).VERSION)
            if version >= cur_version:
                setattr(objects, cls.obj_name(), cls)

    @classmethod
    def register_notification(cls, notification_cls):
        """Register a class as notification.

        Use only to register concrete notification or payload classes,
        do not register base classes intended for inheritance only.
        """
        # register_if(False) records the class without making it remotable.
        cls.register_if(False)(notification_cls)
        cls.notification_classes.append(notification_cls)
        return notification_cls

    @classmethod
    def register_notification_objects(cls):
        """Register previously decorated notification as normal ovos.

        This is not intended for production use but only for testing and
        document generation purposes.
        """
        for notification_cls in cls.notification_classes:
            cls.register(notification_cls)


# Re-exported for convenience so consumers import them from this module.
remotable_classmethod = ovoo_base.remotable_classmethod
remotable = ovoo_base.remotable


class MasakariObject(ovoo_base.VersionedObject):
    """Base class and object factory.

    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """

    OBJ_SERIAL_NAMESPACE = 'masakari_object'
    OBJ_PROJECT_NAMESPACE = 'masakari'

    def masakari_obj_get_changes(self):
        """Returns a dict of changed fields with tz unaware datetimes.

        Any timezone aware datetime field will be converted to UTC timezone
        and returned as timezone unaware datetime.

        This will allow us to pass these fields directly to a db update
        method as they can't have timezone information.
        """
        # Get dirtied/changed fields
        changes = self.obj_get_changes()

        # Look for datetime objects that contain timezone information
        for k, v in changes.items():
            if isinstance(v, datetime.datetime) and v.tzinfo:
                # Remove timezone information and adjust the time according to
                # the timezone information's offset.
                changes[k] = v.replace(tzinfo=None) - v.utcoffset()

        # Return modified dict
        return changes

    def obj_reset_changes(self, fields=None, recursive=False):
        """Reset the list of fields that have been changed.

        .. note::

          - This is NOT "revert to previous values"
          - Specifying fields on recursive resets will only be honored at the
            top level. Everything below the top will reset all.

        :param fields: List of fields to reset, or "all" if None.
        :param recursive: Call obj_reset_changes(recursive=True) on
                          any sub-objects within the list of fields
                          being reset.
        """
        if recursive:
            for field in self.obj_get_changes():
                # Ignore fields not in requested set (if applicable)
                if fields and field not in fields:
                    continue
                # Skip any fields that are unset
                if not self.obj_attr_is_set(field):
                    continue
                value = getattr(self, field)
                # Don't reset nulled fields
                if value is None:
                    continue
                # Reset straight Object and ListOfObjects fields
                if isinstance(self.fields[field], obj_fields.ObjectField):
                    value.obj_reset_changes(recursive=True)
                elif isinstance(self.fields[field],
                                obj_fields.ListOfObjectsField):
                    for thing in value:
                        thing.obj_reset_changes(recursive=True)

        if fields:
            self._changed_fields -= set(fields)
        else:
            self._changed_fields.clear()


class MasakariObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
    def __iter__(self):
        # Yield only fields that are set or declared as extra fields, so the
        # dict view never touches lazily-loadable unset attributes.
        for name in self.obj_fields:
            if (self.obj_attr_is_set(name) or
                    name in self.obj_extra_fields):
                yield name

    def keys(self):
        return list(self)


class MasakariTimestampObject(object):
    """Mixin class for db backed objects with timestamp fields.

    Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
    these fields and the corresponding objects will benefit from this mixin.
    """

    fields = {
        'created_at': obj_fields.DateTimeField(nullable=True),
        'updated_at': obj_fields.DateTimeField(nullable=True),
    }


class MasakariPersistentObject(object):
    """Mixin class for Persistent objects.

    This adds the fields that we use in common for most persistent objects.
    """

    fields = {
        'created_at': obj_fields.DateTimeField(nullable=True),
        'updated_at': obj_fields.DateTimeField(nullable=True),
        'deleted_at': obj_fields.DateTimeField(nullable=True),
        'deleted': obj_fields.BooleanField(default=False),
    }


class ObjectListBase(ovoo_base.ObjectListBase):
    @classmethod
    def _obj_primitive_key(cls, field):
        # Namespaced key under which list fields are serialized.
        return 'masakari_object.%s' % field

    @classmethod
    def _obj_primitive_field(cls, primitive, field,
                             default=obj_fields.UnspecifiedDefault):
        # Look up a namespaced field; raise KeyError unless a default
        # was explicitly supplied by the caller.
        key = cls._obj_primitive_key(field)
        if default == obj_fields.UnspecifiedDefault:
            return primitive[key]
        else:
            return primitive.get(key, default)


class MasakariObjectSerializer(ovoo_base.VersionedObjectSerializer):
    """A Masakari Object Serializer.

    This implements the Oslo Serializer interface and provides the
    ability to serialize and deserialize MasakariObject entities. Any service
    that needs to accept or return MasakariObjects as arguments or result
    values should pass this to its RPCClient and RPCServer objects.
    """

    OBJ_BASE_CLASS = MasakariObject

    def __init__(self):
        super(MasakariObjectSerializer, self).__init__()


def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
    """Construct an object list from a list of primitives.

    This calls item_cls._from_db_object() on each item of db_list, and
    adds the resulting object to list_obj.

    :param:context: Request context
    :param:list_obj: An ObjectListBase object
    :param:item_cls: The MasakariObject class of the objects within the list
    :param:db_list: The list of primitives to convert to objects
    :param:extra_args: Extra arguments to pass to _from_db_object()
    :returns: list_obj
    """
    list_obj.objects = []
    for db_item in db_list:
        item = item_cls._from_db_object(context, item_cls(), db_item,
                                        **extra_args)
        list_obj.objects.append(item)
    list_obj._context = context
    list_obj.obj_reset_changes()
    return list_obj


def obj_to_primitive(obj):
    """Recursively turn an object into a python primitive.

    A MasakariObject becomes a dict, and anything that implements
    ObjectListBase becomes a list.
    """
    if isinstance(obj, ObjectListBase):
        return [obj_to_primitive(x) for x in obj]
    elif isinstance(obj, MasakariObject):
        result = {}
        for key in obj.obj_fields:
            if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
                result[key] = obj_to_primitive(getattr(obj, key))
        return result
    else:
        return obj


def obj_equal_prims(obj_1, obj_2, ignore=None):
    """Compare two primitives for equivalence ignoring some keys.

    This operation tests the primitives of two objects for equivalence.
    Object primitives may contain a list identifying fields that have been
    changed - this is ignored in the comparison. The ignore parameter lists
    any other keys to be ignored.

    :param:obj1: The first object in the comparison
    :param:obj2: The second object in the comparison
    :param:ignore: A list of fields to ignore
    :returns: True if the primitives are equal ignoring changes
              and specified fields, otherwise False.
    """

    def _strip(prim, keys):
        # Recursively pop the ignored keys out of nested dicts/lists in place.
        if isinstance(prim, dict):
            for k in keys:
                prim.pop(k, None)
            for v in prim.values():
                _strip(v, keys)
        if isinstance(prim, list):
            for v in prim:
                _strip(v, keys)
        return prim

    if ignore is not None:
        keys = ['masakari_object.changes'] + ignore
    else:
        keys = ['masakari_object.changes']
    prim_1 = _strip(obj_1.obj_to_primitive(), keys)
    prim_2 = _strip(obj_2.obj_to_primitive(), keys)
    return prim_1 == prim_2
35.364865
79
0.664215
1,296
10,468
5.220679
0.26929
0.017292
0.011085
0.017736
0.11277
0.06444
0.034585
0.027195
0.027195
0.027195
0
0.002355
0.26987
10,468
295
80
35.484746
0.882899
0.416221
0
0.166667
0
0
0.026246
0.008103
0
0
0
0
0
1
0.113636
false
0
0.037879
0.015152
0.348485
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d7a01fbe97c35ca79d4cd01911da8cd9570eceb
53
py
Python
malaya/text/bahasa/news.py
ebiggerr/malaya
be757c793895522f80b929fe82353d90762f7fff
[ "MIT" ]
88
2021-01-06T10:01:31.000Z
2022-03-30T17:34:09.000Z
malaya/text/bahasa/news.py
zulkiflizaki/malaya
2358081bfa43aad57d9415a99f64c68f615d0cc4
[ "MIT" ]
43
2021-01-14T02:44:41.000Z
2022-03-31T19:47:42.000Z
malaya/text/bahasa/news.py
zulkiflizaki/malaya
2358081bfa43aad57d9415a99f64c68f615d0cc4
[ "MIT" ]
38
2021-01-06T07:15:03.000Z
2022-03-19T05:07:50.000Z
# Malay news boilerplate phrases ("click to read", "click for information").
# Presumably used to detect/strip call-to-action noise from scraped news text
# — confirm against the callers of this module.
news = ['klik untuk membaca', 'klik untuk maklumat']
26.5
52
0.698113
7
53
5.285714
0.714286
0.486486
0
0
0
0
0
0
0
0
0
0
0.150943
53
1
53
53
0.822222
0
0
0
0
0
0.698113
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
9d7a0f0018ec32fb50d147552cd1d3e28431140d
306
py
Python
sonosscripts/modules.py
RobinDeBaets/SonosScripts
e3a4f27259d9881ebdc3176069e7fe428f88c244
[ "WTFPL" ]
null
null
null
sonosscripts/modules.py
RobinDeBaets/SonosScripts
e3a4f27259d9881ebdc3176069e7fe428f88c244
[ "WTFPL" ]
1
2019-11-21T20:22:01.000Z
2019-11-21T20:22:01.000Z
sonosscripts/modules.py
RobinDeBaets/SonosScripts
e3a4f27259d9881ebdc3176069e7fe428f88c244
[ "WTFPL" ]
1
2020-08-01T18:02:21.000Z
2020-08-01T18:02:21.000Z
# Map command names to their handler functions so callers can dispatch an
# action by its string key.
# NOTE(review): importing ``next`` (and ``stop``/``previous``) shadows the
# ``next`` builtin in this module; harmless here but worth renaming upstream.
from sonosscripts import stop, play_pause, previous, next, change_bass, change_volume, mute_volume

modules = {
    "stop": stop,
    "play_pause": play_pause,
    "previous": previous,
    "next": next,
    "change_bass": change_bass,
    "change_volume": change_volume,
    "mute_volume": mute_volume
}
23.538462
98
0.69281
37
306
5.405405
0.351351
0.135
0.24
0.2
0
0
0
0
0
0
0
0
0.189542
306
12
99
25.5
0.806452
0
0
0
0
0
0.2
0
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
9d7ad5477f4bf8f12192323e1ee2103954aa57db
3,925
py
Python
twitter_bot/MyBot.py
diem-ai/datascience-projects
deef93217bd3b0cfc2ca7802933142d1dad7fcba
[ "MIT" ]
null
null
null
twitter_bot/MyBot.py
diem-ai/datascience-projects
deef93217bd3b0cfc2ca7802933142d1dad7fcba
[ "MIT" ]
null
null
null
twitter_bot/MyBot.py
diem-ai/datascience-projects
deef93217bd3b0cfc2ca7802933142d1dad7fcba
[ "MIT" ]
null
null
null
""" Class SaleBot It is initialised by nlp model (bag-of-word, tf-idf, word2vec) It returns response with a question as the input """ from gensim.corpora import Dictionary #from gensim.models import FastText from gensim.models import Word2Vec , WordEmbeddingSimilarityIndex from gensim.similarities import SoftCosineSimilarity, SparseTermSimilarityMatrix from gensim.models import TfidfModel from multiprocessing import cpu_count from nlp_helper import preprocessing class AskeBayBot: """ - Using tf-idf and word2vec to build vector matrix from the corpus - Using soft-cosine similarity to calculate the similarity between query and matrix """ """ References: - https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/soft_cosine_tutorial.ipynb """ def __init__(self, questions, responses, model_type="word2vec"): self.questions = questions self.responses = responses self.model_type = model_type self.docsim_index = [] self.dictionary = [] self.tfidf = [] self.compute_sim_matrix() def compute_sim_matrix(self): ''' if(self.model_type.lower() == "fasttext"): model = FastText(self.questions) else: model = Word2Vec(self.questions) ''' self.dictionary = Dictionary(self.questions) self.tfidf = TfidfModel(dictionary = self.dictionary) word2vec_model = Word2Vec(self.questions , workers=cpu_count() , min_count=5 , size=300 , seed=12345) sim_index = WordEmbeddingSimilarityIndex(word2vec_model.wv) sim_matrix = SparseTermSimilarityMatrix(sim_index , self.dictionary , self.tfidf , nonzero_limit=100) bow_corpus = [self.dictionary.doc2bow(document) for document in self.questions] tfidf_corpus = [self.tfidf[bow] for bow in bow_corpus] self.docsim_index = SoftCosineSimilarity(tfidf_corpus, sim_matrix, num_best=10) def get_similarities(self, question): ''' @return indices of anwsers whose questions are similar to the input question ''' vectorizer = self.dictionary.doc2bow(preprocessing(question)) tfidf_vectorizer = self.tfidf[vectorizer] similarities = 
self.docsim_index[tfidf_vectorizer] return similarities def get_response(self, question): similarities = self.get_similarities(question) return self.get_sim(similarities, 1) def get_all_responses(self, question): similarities = self.get_similarities(question) return self.get_sim(similarities, 10) def get_sim(self, similarities, n_top=1): """ @return a tuple of similar question and best response in similarity matrix """ sim_questions = [] sim_responses = [] sim_scores = [] if (len(similarities) > 0): for (idx, score) in similarities: if (idx < len(self.responses)): sim_questions.append(self.questions[idx]) sim_responses.append(self.responses[idx]) sim_scores.append(score) # return self.questions[idx], self.responses[idx], score else: return "Just a moment, someone will contact you" if (n_top == 1): return sim_questions[0], sim_responses[0], sim_scores[0] else: return sim_questions, sim_responses, sim_scores if __name__ == "__main__": print("I'm a bot")
37.380952
105
0.592866
399
3,925
5.666667
0.323308
0.051747
0.02123
0.029191
0.125608
0.10084
0.10084
0.069881
0.069881
0.069881
0
0.012495
0.327134
3,925
105
106
37.380952
0.84362
0.163822
0
0.066667
0
0
0.02191
0
0
0
0
0
0
1
0.1
false
0
0.1
0
0.316667
0.016667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d7c94008fdd0c290d0ad7ba8082f2beff2eb070
2,452
py
Python
Tensorflow_2X_PythonFiles/demo123_convolution_visualization.py
mahnooranjum/Tensorflow_DeepLearning
65ab178d4c17efad01de827062d5c85bdfb9b1ca
[ "MIT" ]
null
null
null
Tensorflow_2X_PythonFiles/demo123_convolution_visualization.py
mahnooranjum/Tensorflow_DeepLearning
65ab178d4c17efad01de827062d5c85bdfb9b1ca
[ "MIT" ]
null
null
null
Tensorflow_2X_PythonFiles/demo123_convolution_visualization.py
mahnooranjum/Tensorflow_DeepLearning
65ab178d4c17efad01de827062d5c85bdfb9b1ca
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""Demo123_Convolution_Visualization.ipynb

# **Spit some [tensor] flow**

We need to learn the intricacies of tensorflow to master deep learning

`Let's get this over with`
"""

# Visualize the learned filters and feature maps of a single Conv2D layer
# trained on one downloaded image.  NOTE: this is a Colab/Jupyter notebook
# export — the ``!`` shell magics below are NOT valid plain Python.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)

"""## Reference MachineLearningMastery.com"""

from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from glob import glob
import sys, os
import cv2

# IPython shell magics: fetch the sample image and list the working dir.
!wget https://www.theluxecafe.com/wp-content/uploads/2014/07/ferrari-spider-indian-theluxecafe.jpg
!ls

# Load and convert BGR (OpenCV default) to RGB for display.
X = cv2.imread('ferrari-spider-indian-theluxecafe.jpg')
X = cv2.cvtColor(X, cv2.COLOR_BGR2RGB)
plt.imshow(X)
print(X.shape)
IMAGE_SIZE = X.shape
# Add a batch axis: single-image "dataset".
X = np.expand_dims(X, axis=0)
print(X.shape)
# Dummy label — the model is only trained so the conv filters get weights.
y = np.ndarray([1])
print(y.shape)

# Tiny model: one 8-filter conv layer feeding a sigmoid classifier.
i_layer = Input(shape = IMAGE_SIZE)
h_layer = Conv2D(8, (3,3), strides = 1, activation='relu', padding='same')(i_layer)
h_layer = Flatten()(h_layer)
o_layer = Dense(1, activation='sigmoid')(h_layer)
model = Model(i_layer, o_layer)
model.summary()
model.compile(
  optimizer='adam',
  loss='binary_crossentropy',
  metrics=['accuracy'])
report = model.fit(X, y, epochs = 10)

model.layers
conv_layer = model.layers[1]
print(conv_layer)
filters, biases = conv_layer.get_weights()
print(conv_layer.name, filters.shape)
# Min-max normalize filter weights to [0, 1] so they render as images.
f_min, f_max = filters.min(), filters.max()
filters = (filters - f_min) / (f_max - f_min)

# Plot each filter's 3 input channels in an n_filters x 3 grid.
plt.figure(figsize=(20,10))
n_filters, idx = 8, 1
for i in range(n_filters):
    # get filter
    f = filters[:, :, :, i]
    for j in range(3):
        ax = plt.subplot(n_filters, 3, idx)
        ax.set_xticks([])
        ax.set_yticks([])
        plt.imshow(f[:, :, j], cmap='gray')
        idx += 1
plt.show()

# Truncate the model at the conv layer to expose its feature maps.
model_visual = Model(inputs=model.inputs, outputs=conv_layer.output)
model_visual.summary()
maps = model_visual(X)
print(maps.shape)

# Show the first 8 feature maps in a 4x4 grid (only half the cells used).
plt.figure(figsize=(20,10))
square = 4
idx = 1
for _ in range(square):
    for _ in range(square):
        if (idx > square * 2):
            break
        # specify subplot and turn of axis
        ax = plt.subplot(square, square, idx)
        ax.set_xticks([])
        ax.set_yticks([])
        plt.imshow(maps[0, :, :, idx-1], cmap='gray')
        idx += 1
plt.show()

# One full-size plot per feature map.
maps.shape[3]
for i in range(maps.shape[3]):
    ax = plt.subplot()
    plt.imshow(maps[0, :, :, i], cmap='gray')
    ax.set_xticks([])
    ax.set_yticks([])
    plt.show()
21.137931
98
0.69168
386
2,452
4.277202
0.393782
0.018171
0.034525
0.023622
0.143549
0.079346
0.05633
0.041187
0.041187
0
0
0.023546
0.151305
2,452
115
99
21.321739
0.769822
0.026509
0
0.226667
0
0
0.043961
0.017122
0
0
0
0
0
0
null
null
0
0.133333
null
null
0.093333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9d8165f8ce202fddd44b2d3bc70e29ad7d9245a2
1,482
py
Python
hail_scripts/v01/convert_tsv_to_vds.py
NLSVTN/hail-elasticsearch-pipelines
8b895a2e46a33d347dd2a1024101a6d515027a03
[ "MIT" ]
null
null
null
hail_scripts/v01/convert_tsv_to_vds.py
NLSVTN/hail-elasticsearch-pipelines
8b895a2e46a33d347dd2a1024101a6d515027a03
[ "MIT" ]
null
null
null
hail_scripts/v01/convert_tsv_to_vds.py
NLSVTN/hail-elasticsearch-pipelines
8b895a2e46a33d347dd2a1024101a6d515027a03
[ "MIT" ]
null
null
null
# Convert one or more tsv tables into hail .vds datasets.
import argparse as ap
import time
from pprint import pprint

import hail
from hail_scripts.v01.utils.vds_utils import write_vds

p = ap.ArgumentParser(description="Convert a tsv table to a .vds")
p.add_argument("-c", "--chrom-column", required=True)
p.add_argument("-p", "--pos-column", required=True)
p.add_argument("-r", "--ref-column", required=True)
p.add_argument("-a", "--alt-column", required=True)
# BUG FIX: these three options were read below (args.no_header, args.delimiter,
# args.missing_value) but never declared, which crashed with AttributeError.
# Defaults match hail 0.1's import_table defaults for a tsv.
p.add_argument("--no-header", action="store_true",
               help="Treat the input as having no header row")
p.add_argument("--delimiter", default="\t",
               help="Field delimiter (default: tab)")
p.add_argument("--missing-value", default="NA",
               help="Token representing missing values (default: NA)")
p.add_argument("table_path", nargs="+")
args = p.parse_args()

# BUG FIX: was args.vcf_path, which doesn't exist on this parser.
print(", ".join(args.table_path))

hc = hail.HailContext(log="./hail_{}.log".format(time.strftime("%y%m%d_%H%M%S")))

for table_path in args.table_path:
    print("\n")
    print("==> import_table: %s" % table_path)
    # Output path: input path with any .tsv/.gz/.bgz suffixes stripped.
    output_path = table_path.replace(".tsv", "").replace(".gz", "").replace(".bgz", "") + ".vds"
    print("==> output: %s" % output_path)

    kt = hc.import_table(table_path, impute=True, no_header=args.no_header,
                         delimiter=args.delimiter, missing=args.missing_value,
                         min_partitions=1000)
    #kt = kt.drop(columns_to_drop)
    #kt = kt.rename(rename_columns)

    # Drop rows where ref == alt (not real variants).
    kt = kt.filter("%(ref_column)s == %(alt_column)s" % args.__dict__, keep=False)
    # Build the Variant key from the user-named columns, then drop them.
    kt = kt.annotate("variant=Variant(%(chrom_column)s, %(pos_column)s, %(ref_column)s, %(alt_column)s)" % args.__dict__)
    kt = kt.key_by('variant')
    kt = kt.drop([args.chrom_column, args.pos_column, args.ref_column, args.alt_column])

    vds = hail.VariantDataset.from_table(kt)
    pprint(vds.variant_schema)
    write_vds(vds, output_path)
36.146341
150
0.690958
227
1,482
4.281938
0.348018
0.055556
0.061728
0.078189
0.18107
0.18107
0.057613
0.057613
0
0
0
0.004615
0.122807
1,482
40
151
37.05
0.743077
0.039811
0
0
0
0.037037
0.209008
0.023223
0
0
0
0
0
1
0
false
0
0.259259
0
0.259259
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d81808e7a83247fd981f349fc73abe0b9de1e1e
4,649
py
Python
scripts/Old/fixSequenceIDs.py
paepcke/json_to_relation
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
[ "BSD-3-Clause" ]
4
2015-10-10T19:09:49.000Z
2021-09-02T00:58:06.000Z
scripts/Old/fixSequenceIDs.py
paepcke/json_to_relation
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
[ "BSD-3-Clause" ]
null
null
null
scripts/Old/fixSequenceIDs.py
paepcke/json_to_relation
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
[ "BSD-3-Clause" ]
8
2015-05-16T14:33:33.000Z
2019-10-24T08:56:25.000Z
#!/usr/bin/env python # Copyright (c) 2014, Stanford University # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''
Created on Dec 22, 2013

@author: paepcke

Generate a MySQL bulk-UPDATE script that fills in human-readable
resource_display_name values for EdxTrackEvent rows, by mapping the
OpenEdX hash found in each sequence ID through the modulestore.

NOTE(review): this is Python 2 code — it relies on ``basestring`` and
``unicode``, which do not exist on Python 3.
'''
import os
import re
import sys

from edxTrackLogJSONParser import EdXTrackLogJSONParser
from modulestoreImporter import ModulestoreImporter
from unidecode import unidecode

# Extract the _id (first quoted field) and sequence ID (second quoted field)
# from one csv line of the form "id","seqID",...
idExtractPat = re.compile(r'^"([^"]*)')
seqIDExtractPat = re.compile(r'","([^"]*)')
# Hash -> display-name lookup, cached modulestore export.
hashLookup = ModulestoreImporter(os.path.join(os.path.dirname(__file__),'data/modulestore_latest.json'), useCache=True)


def makeInsertSafe(unsafeStr):
    '''
    Makes the given string safe for use as a value in a MySQL INSERT
    statement. Looks for embedded CR or LFs, and turns them into semicolons.
    Escapes commas and single quotes. Backslash is replaced by double
    backslash. This is needed for unicode, like \0245 (invented example)

    @param unsafeStr: string that possibly contains unsafe chars
    @type unsafeStr: String
    @return: same string, with unsafe chars properly replaced or escaped
    @rtype: String
    '''
    #return unsafeStr.replace("'", "\\'").replace('\n', "; ").replace('\r', "; ").replace(',', "\\,").replace('\\', '\\\\')
    if unsafeStr is None or not isinstance(unsafeStr, basestring) or len(unsafeStr) == 0:
        return ''
    # Check for chars > 128 (illegal for standard ASCII):
    for oneChar in unsafeStr:
        if ord(oneChar) > 128:
            # unidecode() replaces unicode with approximations.
            # I tried all sorts of escapes, and nothing worked
            # for all cases, except this:
            unsafeStr = unidecode(unicode(unsafeStr))
            break
    return unsafeStr.replace('\n', "; ").replace('\r', "; ").replace('\\', '').replace("'", r"\'")


def fixSequencIDs():
    # Stream the csv of (_id, sequenceID) pairs and emit one big
    # INSERT ... ON DUPLICATE KEY UPDATE statement into the .sql file.
    counter = 0
    with open('/home/paepcke/tmp/sequenceIDs.sql','w') as outfd:
        outfd.write("USE Edx;\nINSERT INTO EdxTrackEvent(_id,resource_display_name)\n")
        with open('/home/paepcke/tmp/sequenceIDs.csv','r') as fd:
            for idSeqID in fd:
                sqlid = idExtractPat.search(idSeqID).group(1)
                seqID = seqIDExtractPat.search(idSeqID).group(1)
                resourceNameMatch = EdXTrackLogJSONParser.findHashPattern.search(seqID)
                if resourceNameMatch is not None:
                    resourceName = makeInsertSafe(hashLookup.getDisplayName(resourceNameMatch.group(1)))
                    # First tuple has no leading comma; subsequent ones do.
                    if counter == 0:
                        outfd.write('("%s","%s")' % (sqlid,resourceName))
                    else:
                        outfd.write(',\n("%s","%s")' % (sqlid,resourceName))
                else:
                    continue
                counter += 1
                #if counter > 10:
                #    break
        outfd.write("\nON DUPLICATE KEY UPDATE resource_display_name = VALUES(resource_display_name);\n")
    print("Created %d corrections." % counter)


if __name__ == '__main__':
    fixSequencIDs()
    # Example of the generated statement:
    #INSERT INTO EdxTrackEvent (_id,long_answer) VALUES ('fbcefe06_fb7c_48aa_a12e_d85e6988dbda','first answer'),('bbd3ddf3_8ed0_4eee_8ff7_f5791b9e4a7e','second answer') ON DUPLICATE KEY UPDATE long_answer=VALUES(long_answer);
54.05814
757
0.687245
568
4,649
5.568662
0.489437
0.012646
0.018021
0.014543
0.112551
0.082833
0.042997
0.042997
0.042997
0.042997
0
0.016218
0.217466
4,649
85
758
54.694118
0.853216
0.544203
0
0.05
0
0
0.161527
0.092511
0
0
0
0
0
1
0.05
false
0
0.175
0
0.275
0.025
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d818b86a7daa5558c49d73a26208235e0d52b89
8,433
py
Python
tests/test_logger_device.py
ska-telescope/lmc-base-classes
e3ac46a731aca4d49d53747b4352ec4be089ff5d
[ "BSD-3-Clause" ]
3
2019-04-18T20:46:02.000Z
2019-07-30T17:47:40.000Z
tests/test_logger_device.py
ska-telescope/lmc-base-classes
e3ac46a731aca4d49d53747b4352ec4be089ff5d
[ "BSD-3-Clause" ]
26
2018-10-30T07:50:50.000Z
2020-07-13T12:50:36.000Z
tests/test_logger_device.py
ska-telescope/lmc-base-classes
e3ac46a731aca4d49d53747b4352ec4be089ff5d
[ "BSD-3-Clause" ]
4
2019-01-16T07:47:59.000Z
2021-06-01T11:17:32.000Z
#########################################################################################
# -*- coding: utf-8 -*-
#
# This file is part of the SKALogger project
#
#
#
#########################################################################################
"""Contain the tests for the SKALogger."""
# NOTE(review): this file is POGO-generated — the PROTECTED REGION markers
# delimit the only hand-editable sections; do not restructure outside them.
import re

import pytest
from tango import DevState
from tango.test_context import MultiDeviceTestContext

from ska_tango_base.base import ReferenceBaseComponentManager
from ska_tango_base.logger_device import SKALogger
from ska_tango_base.subarray import SKASubarray
import tango

# PROTECTED REGION ID(SKALogger.test_additional_imports) ENABLED START #
from ska_tango_base.control_model import (
    AdminMode,
    ControlMode,
    HealthState,
    LoggingLevel,
    SimulationMode,
    TestMode,
)

# PROTECTED REGION END #    //  SKALogger.test_additional_imports


# PROTECTED REGION ID(SKALogger.test_SKALogger_decorators) ENABLED START #
@pytest.mark.usefixtures("tango_context", "initialize_device")
# PROTECTED REGION END #    //  SKALogger.test_SKALogger_decorators
class TestSKALogger(object):
    """
    Test class for tests of the SKALogger device class.
    """

    @pytest.fixture(scope="class")
    def device_test_config(self, device_properties):
        """
        Fixture that specifies the device to be tested, along with its
        properties and memorized attributes.
        """
        return {
            "device": SKALogger,
            "component_manager_patch": lambda self: ReferenceBaseComponentManager(
                self.op_state_model, logger=self.logger
            ),
            "properties": device_properties,
            "memorized": {"adminMode": str(AdminMode.ONLINE.value)},
        }

    @pytest.mark.skip("Not implemented")
    def test_properties(self, tango_context):
        # test the properties
        # PROTECTED REGION ID(SKALogger.test_properties) ENABLED START #
        # PROTECTED REGION END #    //  SKALogger.test_properties
        pass

    # PROTECTED REGION ID(SKALogger.test_State_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_State_decorators
    def test_State(self, tango_context):
        """Test for State"""
        # PROTECTED REGION ID(SKALogger.test_State) ENABLED START #
        assert tango_context.device.State() == DevState.OFF
        # PROTECTED REGION END #    //  SKALogger.test_State

    # PROTECTED REGION ID(SKALogger.test_Status_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_Status_decorators
    def test_Status(self, tango_context):
        """Test for Status"""
        # PROTECTED REGION ID(SKALogger.test_Status) ENABLED START #
        assert tango_context.device.Status() == "The device is in OFF state."
        # PROTECTED REGION END #    //  SKALogger.test_Status

    # PROTECTED REGION ID(SKALogger.test_GetVersionInfo_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_GetVersionInfo_decorators
    def test_GetVersionInfo(self, tango_context):
        """Test for GetVersionInfo"""
        # PROTECTED REGION ID(SKALogger.test_GetVersionInfo) ENABLED START #
        versionPattern = re.compile(
            f"{tango_context.device.info().dev_class}, ska_tango_base, [0-9]+.[0-9]+.[0-9]+, "
            "A set of generic base devices for SKA Telescope."
        )
        versionInfo = tango_context.device.GetVersionInfo()
        assert (re.match(versionPattern, versionInfo[0])) is not None
        # PROTECTED REGION END #    //  SKALogger.test_GetVersionInfo

    # PROTECTED REGION ID(SKALogger.test_buildState_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_buildState_decorators
    def test_buildState(self, tango_context):
        """Test for buildState"""
        # PROTECTED REGION ID(SKALogger.test_buildState) ENABLED START #
        buildPattern = re.compile(
            r"ska_tango_base, [0-9]+.[0-9]+.[0-9]+, "
            r"A set of generic base devices for SKA Telescope"
        )
        assert (re.match(buildPattern, tango_context.device.buildState)) is not None
        # PROTECTED REGION END #    //  SKALogger.test_buildState

    # PROTECTED REGION ID(SKALogger.test_versionId_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_versionId_decorators
    def test_versionId(self, tango_context):
        """Test for versionId"""
        # PROTECTED REGION ID(SKALogger.test_versionId) ENABLED START #
        versionIdPattern = re.compile(r"[0-9]+.[0-9]+.[0-9]+")
        assert (re.match(versionIdPattern, tango_context.device.versionId)) is not None
        # PROTECTED REGION END #    //  SKALogger.test_versionId

    # PROTECTED REGION ID(SKALogger.test_loggingLevel_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_loggingLevel_decorators
    def test_loggingLevel(self, tango_context):
        """Test for loggingLevel"""
        # PROTECTED REGION ID(SKALogger.test_loggingLevel) ENABLED START #
        assert tango_context.device.loggingLevel == LoggingLevel.INFO
        # PROTECTED REGION END #    //  SKALogger.test_loggingLevel

    # PROTECTED REGION ID(SKALogger.test_healthState_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_healthState_decorators
    def test_healthState(self, tango_context):
        """Test for healthState"""
        # PROTECTED REGION ID(SKALogger.test_healthState) ENABLED START #
        assert tango_context.device.healthState == HealthState.OK
        # PROTECTED REGION END #    //  SKALogger.test_healthState

    # PROTECTED REGION ID(SKALogger.test_adminMode_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_adminMode_decorators
    def test_adminMode(self, tango_context):
        """Test for adminMode"""
        # PROTECTED REGION ID(SKALogger.test_adminMode) ENABLED START #
        assert tango_context.device.adminMode == AdminMode.ONLINE
        # PROTECTED REGION END #    //  SKALogger.test_adminMode

    # PROTECTED REGION ID(SKALogger.test_controlMode_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_controlMode_decorators
    def test_controlMode(self, tango_context):
        """Test for controlMode"""
        # PROTECTED REGION ID(SKALogger.test_controlMode) ENABLED START #
        assert tango_context.device.controlMode == ControlMode.REMOTE
        # PROTECTED REGION END #    //  SKALogger.test_controlMode

    # PROTECTED REGION ID(SKALogger.test_simulationMode_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_simulationMode_decorators
    def test_simulationMode(self, tango_context):
        """Test for simulationMode"""
        # PROTECTED REGION ID(SKALogger.test_simulationMode) ENABLED START #
        assert tango_context.device.simulationMode == SimulationMode.FALSE
        # PROTECTED REGION END #    //  SKALogger.test_simulationMode

    # PROTECTED REGION ID(SKALogger.test_testMode_decorators) ENABLED START #
    # PROTECTED REGION END #    //  SKALogger.test_testMode_decorators
    def test_testMode(self, tango_context):
        """Test for testMode"""
        # PROTECTED REGION ID(SKALogger.test_testMode) ENABLED START #
        assert tango_context.device.testMode == TestMode.NONE
        # PROTECTED REGION END #    //  SKALogger.test_testMode


@pytest.mark.forked
def test_SetLoggingLevel():
    """Test for SetLoggingLevel"""
    # Spin up a logger device plus a subarray target in one test context and
    # verify the logger can push a logging level onto the target device.
    logging_level = int(tango.LogLevel.LOG_ERROR)
    logging_target = "logger/target/1"
    logger_device = "logger/device/1"
    devices_info = (
        {"class": SKALogger, "devices": [{"name": logger_device}]},
        {"class": SKASubarray, "devices": [{"name": logging_target}]},
    )

    with MultiDeviceTestContext(devices_info, process=False) as multi_context:
        dev_proxy = multi_context.get_device(logging_target)
        dev_proxy.Init()
        # Start from a different level so the change below is observable.
        dev_proxy.loggingLevel = int(tango.LogLevel.LOG_FATAL)
        assert dev_proxy.loggingLevel != logging_level

        # SetLoggingLevel takes parallel lists: [levels], [target FQDNs].
        levels = []
        levels.append(logging_level)
        targets = []
        targets.append(multi_context.get_device_access(logging_target))
        device_details = []
        device_details.append(levels)
        device_details.append(targets)

        multi_context.get_device(logger_device).SetLoggingLevel(device_details)
        assert dev_proxy.loggingLevel == logging_level
44.856383
94
0.681727
899
8,433
6.204672
0.150167
0.134457
0.076192
0.116529
0.552886
0.48028
0.154177
0.146468
0.020437
0
0
0.003295
0.208348
8,433
187
95
45.096257
0.832235
0.440768
0
0
0
0.011628
0.099074
0.024306
0
0
0
0
0.151163
1
0.162791
false
0.011628
0.104651
0
0.290698
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d83b4f58893d59845ef72aeb0870f92b39fa121
2,053
py
Python
baseline/find_pairs.py
parallelcrawl/DataCollection
4308473e6b53779159a15c1416bff3f2291dd1f2
[ "Apache-2.0" ]
8
2018-02-08T16:03:00.000Z
2022-01-19T11:41:38.000Z
baseline/find_pairs.py
christianbuck/CorpusMining
f9248c3528a415a1e5af2c5a54a60c16cd79ff1d
[ "Apache-2.0" ]
3
2017-08-08T10:53:29.000Z
2017-08-08T10:58:51.000Z
baseline/find_pairs.py
parallelcrawl/DataCollection
4308473e6b53779159a15c1416bff3f2291dd1f2
[ "Apache-2.0" ]
4
2018-06-09T21:53:09.000Z
2022-01-19T11:41:48.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Filter groups of tab-separated URL/language records, emitting candidate pairs.

Reads lines of the form

    stripped_url<TAB>original_url<TAB>stripped_language<TAB>detected_language

from stdin, groups consecutive lines sharing the same stripped URL (the
first field), and writes a whole group to stdout when it contains at least
two distinct original URLs, two distinct stripped languages, two distinct
detected languages, and some stripped language appears among the group's
detected languages.

NOTE(review): the decode()/encode() round-trips assume Python 2 byte
strings on stdin/stdout; under Python 3 ``sys.stdin`` yields ``str`` and
``.decode()`` would fail -- confirm the intended interpreter before
porting further.
"""

import sys


def process_buffer(buffer):
    """Examine one group of lines sharing a stripped URL; echo it if it looks like a translation pair.

    ``buffer`` is a list of raw (byte) lines. Groups with fewer than two
    lines are ignored. Malformed groups (any line without exactly four
    tab-separated fields) are echoed to stderr and dropped.
    """
    if not buffer or len(buffer) < 2:
        return
    buffer = [line.decode('utf-8', 'ignore') for line in buffer]
    split_buffer = [line.strip().lower().split("\t") for line in buffer]
    # Every line must have exactly 4 tab-separated fields.
    if list(set(map(len, split_buffer))) != [4]:
        for line in buffer:
            sys.stderr.write(line.encode('utf-8'))
        return
    original_urls = []
    stripped_languages = []
    detected_languages = []
    for (stripped_url, original_url,
         stripped_language, detected_language) in split_buffer:
        original_urls.append(original_url)
        stripped_languages.append(stripped_language)
        detected_languages.append(detected_language)
    # A usable pair needs at least two distinct values in each column.
    if len(set(original_urls)) < 2:
        # not enough urls
        return
    if len(set(stripped_languages)) < 2:
        # not enough stripped languages
        return
    if len(set(detected_languages)) < 2:
        # not enough detected languages
        return
    # Accept the group as soon as any URL-derived language matches any
    # detected language ("chineset" is normalised to "chinese" first).
    for language in stripped_languages:
        for detected_language in detected_languages:
            if language in detected_language.replace("chineset", "chinese") \
                    .split('/'):
                for line in buffer:
                    sys.stdout.write(line.encode("utf-8"))
                return


if __name__ == "__main__":
    # Stream stdin, batching consecutive lines that share the first field.
    buffer = []
    buffer_url = None
    for line in sys.stdin:
        url = line.split("\t", 1)[0]
        if url != buffer_url:
            process_buffer(buffer)
            buffer = [line]
            buffer_url = url
        else:
            buffer.append(line)
    process_buffer(buffer)
31.106061
77
0.580614
229
2,053
5.021834
0.266376
0.103478
0.03913
0.052174
0.205217
0.097391
0
0
0
0
0
0.008523
0.314174
2,053
65
78
31.584615
0.808239
0.147589
0
0.2
0
0
0.028129
0
0
0
0
0
0
1
0.02
false
0
0.08
0
0.22
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d84b7b6381a6f3c016023bcfd74caa6a922fa9b
625
py
Python
tests/test_jupyter_integration.py
boeddeker/graphviz
acf79bca4518781cad02c102e89ec4e9ce757088
[ "MIT" ]
null
null
null
tests/test_jupyter_integration.py
boeddeker/graphviz
acf79bca4518781cad02c102e89ec4e9ce757088
[ "MIT" ]
null
null
null
tests/test_jupyter_integration.py
boeddeker/graphviz
acf79bca4518781cad02c102e89ec4e9ce757088
[ "MIT" ]
null
null
null
"""Tests for the graphviz.jupyter_integration format/MIME-type helpers."""

import pytest

from graphviz import jupyter_integration


def test_get_jupyter_format_mimetype_invalid_raises_unknown():
    """An unrecognised format name raises ValueError mentioning 'unknown'."""
    with pytest.raises(ValueError, match=r'unknown'):
        jupyter_integration.get_jupyter_format_mimetype('Brian!')


def test_get_jupyter_mimetype_format_normalizes():
    """Round-tripping 'jpg' through the mimetype lookup normalises it to 'jpeg'."""
    assert jupyter_integration.get_jupyter_mimetype_format(
        jupyter_integration.get_jupyter_format_mimetype('jpg')) == 'jpeg'


def test_get_jupyter_mimetype_format_raises_unsupported():
    """A MIME type outside the supported set raises ValueError mentioning 'unsupported'."""
    with pytest.raises(ValueError, match='unsupported'):
        jupyter_integration.get_jupyter_mimetype_format('A boy called Brian!')
32.894737
78
0.808
76
625
6.184211
0.355263
0.148936
0.178723
0.238298
0.621277
0.489362
0
0
0
0
0
0
0.1136
625
18
79
34.722222
0.848375
0
0
0
0
0
0.08
0
0
0
0
0
0.090909
1
0.272727
true
0
0.181818
0
0.454545
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
3
9d872c11430e2faa3e970e4a406f2f735e7a91bc
122
py
Python
gaussianmean.py
rjw57/fear-python-example
b95440fff6471d2555dce63ed8b26a0a7c8d2ed1
[ "MIT" ]
1
2016-06-27T08:28:23.000Z
2016-06-27T08:28:23.000Z
gaussianmean.py
rjw57/fear-python-example
b95440fff6471d2555dce63ed8b26a0a7c8d2ed1
[ "MIT" ]
null
null
null
gaussianmean.py
rjw57/fear-python-example
b95440fff6471d2555dce63ed8b26a0a7c8d2ed1
[ "MIT" ]
null
null
null
import numpy as np


def main(n=100):
    """Print and return the sample mean of ``n`` standard-normal draws.

    Args:
        n: number of samples to draw (default 100, preserving the
           previously hard-coded value).

    Returns:
        The sample mean (a NumPy floating scalar). Returning it makes the
        function usable programmatically; callers that ignored the old
        ``None`` return are unaffected.
    """
    s = np.mean(np.random.randn(n))
    print(s)
    return s


if __name__ == '__main__':
    main()
13.555556
37
0.598361
19
122
3.421053
0.736842
0
0
0
0
0
0
0
0
0
0
0.032258
0.237705
122
8
38
15.25
0.666667
0
0
0
0
0
0.065574
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.333333
0.166667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
9d874b69262d199893f7832d8c3dfc78745d2cab
544
py
Python
sarsa.py
lukaspestalozzi/URLNN-Project2
425d3a14f063d91ae4b6183aa866fa074dc1d791
[ "MIT" ]
null
null
null
sarsa.py
lukaspestalozzi/URLNN-Project2
425d3a14f063d91ae4b6183aa866fa074dc1d791
[ "MIT" ]
null
null
null
sarsa.py
lukaspestalozzi/URLNN-Project2
425d3a14f063d91ae4b6183aa866fa074dc1d791
[ "MIT" ]
null
null
null
import mountaincar as mc
import numpy as np
from collections import namedtuple
from collections import defaultdict
import matplotlib.pylab as plb
import matplotlib.pyplot as plt
from time import time

# Immutable (position, velocity) snapshot of the mountain car.
State = namedtuple('State', ['x', 'v'])


class SarsaMountainCar(object):
    """SARSA(lambda) learner skeleton for the mountain-car task.

    Currently only stores the learning hyper-parameters; visualisation is
    a stub to be implemented later.
    """

    def __init__(self, learning_rate=0.1, reward_factor=0.95, eligibility_decay=0.7):
        """Record the hyper-parameters on the instance.

        Args:
            learning_rate: step size used for value updates.
            reward_factor: discount applied to future reward.
            eligibility_decay: decay rate (lambda) of eligibility traces.
        """
        self.eligibility_decay = eligibility_decay
        self.reward_factor = reward_factor
        self.learning_rate = learning_rate

    def _vizualize(self):
        # Placeholder: no visualisation implemented yet.
        pass
24.727273
85
0.740809
72
544
5.402778
0.513889
0.092545
0.107969
0
0
0
0
0
0
0
0
0.015837
0.1875
544
21
86
25.904762
0.864253
0
0
0
0
0
0.012891
0
0
0
0
0
0
1
0.133333
false
0.066667
0.466667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
9d87c99f7edc4a51975ce4aad83b2a68eca0165b
4,931
py
Python
utils.py
nea23/greek_alphabets_tf-idf
94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9
[ "MIT" ]
null
null
null
utils.py
nea23/greek_alphabets_tf-idf
94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9
[ "MIT" ]
null
null
null
utils.py
nea23/greek_alphabets_tf-idf
94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9
[ "MIT" ]
null
null
null
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

"""
The following functions are used to create an annotated heatmap and they were copied from:
https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html#using-the-helper-function-code-style
"""


def heatmap(data, row_labels, col_labels, ax=None, **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.

    Parameters
    ----------
    data
        A 2D numpy array of shape (N, M).
    row_labels
        A list or array of length N with the labels for the rows.
    col_labels
        A list or array of length M with the labels for the columns.
    ax
        A `matplotlib.axes.Axes` instance to which the heatmap is plotted.
        If not provided, use current axes or create a new one.  Optional.
    **kwargs
        All other arguments are forwarded to `imshow`.

    Returns
    -------
    The `AxesImage` created by `imshow`.
    """
    if not ax:
        ax = plt.gca()

    # Plot the heatmap
    im = ax.imshow(data, **kwargs)

    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)

    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")

    # Turn spines off and create white grid.
    # ax.spines[:].set_visible(False)
    # Minor ticks sit at cell boundaries (offsets of .5) so the white grid
    # drawn below separates the cells.
    ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)

    return im


def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=("black", "white"),
                     threshold=None, **textkw):
    """
    A function to annotate a heatmap.

    Parameters
    ----------
    im
        The AxesImage to be labeled.
    data
        Data used to annotate.  If None, the image's data is used.  Optional.
    valfmt
        The format of the annotations inside the heatmap.  This should either
        use the string format method, e.g. "$ {x:.2f}", or be a
        `matplotlib.ticker.Formatter`.  Optional.
    textcolors
        A pair of colors.  The first is used for values below a threshold,
        the second for those above.  Optional.
    threshold
        Value in data units according to which the colors from textcolors are
        applied.  If None (the default) uses the middle of the colormap as
        separation.  Optional.
    **kwargs
        All other arguments are forwarded to each call to `text` used to create
        the text labels.

    Returns
    -------
    The list of `matplotlib.text.Text` annotations created.
    """

    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()

    # Normalize the threshold to the images color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max())/2.

    # Set default alignment to center, but allow it to be
    # overwritten by textkw.
    kw = dict(horizontalalignment="center",
              verticalalignment="center")
    kw.update(textkw)

    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)

    # Loop over the data and create a `Text` for each "pixel".
    # Change the text's color depending on the data.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Pick black/white depending on whether the normalised value
            # exceeds the threshold.
            kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
            # Formatter call signature is (value, position); position unused.
            text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)

    return texts


"""
The following functions are used to get the top pairs from a correlation matrix and they were copied from:
https://stackoverflow.com/a/41453817
"""


def get_redundant_pairs(df):
    '''Get diagonal and lower triangular pairs of correlation matrix'''
    pairs_to_drop = set()
    cols = df.columns
    for i in range(0, df.shape[1]):
        for j in range(0, i+1):
            pairs_to_drop.add((cols[i], cols[j]))
    return pairs_to_drop


def get_top_abs_correlations(df, min_val=0.6):
    """Return column pairs of ``df`` whose absolute correlation is >= ``min_val``.

    The result is a one-column DataFrame ('Score'), indexed by column
    pairs, sorted by descending absolute correlation, with the diagonal
    and lower-triangle duplicates removed.
    """
    au_corr = df.corr().abs().unstack()
    labels_to_drop = get_redundant_pairs(df)
    au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
    au_corr_df = pd.DataFrame(au_corr, columns=['Score'])
    return au_corr_df.where(au_corr_df['Score'] >= min_val, np.nan).dropna()
34.725352
131
0.651592
713
4,931
4.429173
0.352034
0.0133
0.015199
0.021533
0.136795
0.117163
0.081697
0.065231
0
0
0
0.008313
0.243764
4,931
142
132
34.725352
0.838563
0.404583
0
0
0
0
0.026282
0
0
0
0
0
0
1
0.071429
false
0
0.071429
0
0.214286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d87fe4b4c7aa76322c36b84c9220f5fee728c3d
6,675
py
Python
built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
12
2020-12-13T08:34:24.000Z
2022-03-20T15:17:17.000Z
built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
3
2021-03-31T20:15:40.000Z
2022-02-09T23:50:46.000Z
built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
2
2021-07-10T12:40:46.000Z
2021-12-17T07:55:15.000Z
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""auto generate rank table and export envs"""
import sys
import subprocess
import os
import socket
import json
from argparse import ArgumentParser, REMAINDER


def parse_args():
    """Parse the launcher's command line.

    Returns the parsed Namespace; ``training_script_args`` collects every
    remaining token (argparse REMAINDER) so they can be forwarded verbatim
    to the training script.
    """
    parser = ArgumentParser(description="mindspore distributed training launch "
                                        "helper utilty that will spawn up "
                                        "multiple distributed processes")
    parser.add_argument("--nproc_per_node", type=int, default=1,
                        help="The number of processes to launch on each node, "
                             "for D training, this is recommended to be set "
                             "to the number of D in your system so that "
                             "each process can be bound to a single D.")
    parser.add_argument("--visible_devices", type=str, default="0,1,2,3,4,5,6,7",
                        help="will use the visible devices sequentially")
    parser.add_argument("--env_sh", type=str, default="", help="env for 1p")
    parser.add_argument("--server_id", type=str, default="", help="server ip")
    # positional
    parser.add_argument("training_script", type=str,
                        help="The full path to the single D training "
                             "program/script to be launched in parallel, "
                             "followed by all the arguments for the "
                             "training script")
    # device mode
    parser.add_argument("--device", type=str, default="A+K")
    # task_set, to impove cpu utilization for multi-npu(e.g., 8P) training
    # NOTE(review): argparse's type=bool means any non-empty string
    # (including "False") parses as True; only `--task_set` omitted or
    # `--task_set ""` yields False. Kept as-is to preserve the CLI contract.
    parser.add_argument("--task_set", type=bool, default=False)
    parser.add_argument("--task_set_core", type=int, default=24)
    # ranktable file
    parser.add_argument("--table_fn", type=str, default="",
                        help="The ranktable file path, if not set, "
                             "we will auto-generate a ranktable for user")
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    """Generate an HCCN rank table and spawn one training process per device.

    Reads device IPs from /etc/hccn.conf, builds (or reuses) a rank-table
    JSON file, then launches ``--nproc_per_node`` background shell commands
    that export the rank environment and run the training script.
    """
    args = parse_args()
    print('args:{}'.format(args))
    visible_devices = args.visible_devices.split(',')
    assert len(visible_devices) >= args.nproc_per_node
    print('visible_devices:{}'.format(visible_devices))
    if args.server_id == '':
        print('pleaser input server ip!!!')
        exit(0)
    print('server_id:{}'.format(args.server_id))

    # Map device id -> device ip from the 'address_<id>=<ip>' lines.
    # Fixed: use a context manager so the config file handle is closed.
    with open('/etc/hccn.conf', 'r') as hccn_fp:
        hccn_configs = hccn_fp.readlines()
    device_ips = {}
    for hccn_item in hccn_configs:
        hccn_item = hccn_item.strip()
        if hccn_item.startswith('address_'):
            device_id, device_ip = hccn_item.split('=')
            device_id = device_id.split('_')[1]
            device_ips[device_id] = device_ip
            print('device_id:{}, device_ip:{}'.format(device_id, device_ip))

    # Assemble the rank table structure expected by HCCL.
    hccn_table = {}
    if args.device == 'A+K':
        hccn_table['board_id'] = '0x002f'
    else:
        hccn_table['board_id'] = '0x0000'
    hccn_table['chip_info'] = '910'
    hccn_table['deploy_mode'] = 'lab'
    hccn_table['group_count'] = '1'
    hccn_table['group_list'] = []
    instance_list = []
    usable_dev = ''
    for instance_id in range(args.nproc_per_node):
        instance = {}
        instance['devices'] = []
        device_id = visible_devices[instance_id]
        device_ip = device_ips[device_id]
        usable_dev += str(device_id)
        instance['devices'].append({
            'device_id': device_id,
            'device_ip': device_ip,
        })
        instance['rank_id'] = str(instance_id)
        instance['server_id'] = args.server_id
        instance_list.append(instance)
    hccn_table['group_list'].append({
        'device_num': str(args.nproc_per_node),
        'server_num': '1',
        'group_name': '',
        'instance_count': str(args.nproc_per_node),
        'instance_list': instance_list,
    })
    hccn_table['para_plane_nic_location'] = 'device'
    hccn_table['para_plane_nic_name'] = []
    for instance_id in range(args.nproc_per_node):
        eth_id = visible_devices[instance_id]
        hccn_table['para_plane_nic_name'].append('eth{}'.format(eth_id))
    hccn_table['para_plane_nic_num'] = str(args.nproc_per_node)
    hccn_table['status'] = 'completed'

    # Fixed: the original compared with `is ""` (identity, not equality),
    # which only worked by CPython string-interning accident.
    if args.table_fn == "":
        table_fn = os.path.join(os.getcwd(),
                                'rank_table_{}p_{}_{}.json'.format(args.nproc_per_node,
                                                                   usable_dev,
                                                                   args.server_id))
        with open(table_fn, 'w') as table_fp:
            json.dump(hccn_table, table_fp, indent=4)
    else:
        table_fn = args.table_fn

    # world size in terms of number of processes
    dist_group_size = args.nproc_per_node
    for rank in range(0, args.nproc_per_node):
        rank_id = rank
        device_id = visible_devices[rank]
        device_root_fn = os.path.join(os.getcwd(), 'device{}'.format(device_id))  #format(rank_id))
        rank_process = ''
        if args.nproc_per_node > 1:
            rank_process += 'export RANK_TABLE_FILE={} && '.format(table_fn)
        # SECURITY NOTE: the command below is interpolated into a shell
        # string and run via os.system; arguments are trusted launcher
        # input here, but do not feed untrusted values into them.
        if args.task_set:
            left = int(device_id) * args.task_set_core
            right = left + args.task_set_core - 1
            rank_process += 'export RANK_SIZE={} && source {} && export RANK_ID={} && export DEVICE_ID={} && rm -rf {} && mkdir {} && cd {} && taskset -c {}-{} python {} '.format(args.nproc_per_node, args.env_sh, rank_id, device_id, device_root_fn, device_root_fn, device_root_fn, left, right, args.training_script)
        else:
            rank_process += 'export RANK_SIZE={} && source {} && export RANK_ID={} && export DEVICE_ID={} && rm -rf {} && mkdir {} && cd {} && python {} '.format(args.nproc_per_node, args.env_sh, rank_id, device_id, device_root_fn, device_root_fn, device_root_fn, args.training_script)
        rank_process += ' '.join(args.training_script_args) + ' >log{}.log 2>&1 &'.format(rank_id)
        os.system(rank_process)


if __name__ == "__main__":
    main()
43.914474
315
0.61588
867
6,675
4.491349
0.275663
0.03698
0.040062
0.049307
0.21623
0.138932
0.0981
0.0981
0.0981
0.07961
0
0.008585
0.249588
6,675
151
316
44.205298
0.768816
0.131386
0
0.06087
0
0.017391
0.254505
0.008316
0
0
0.002079
0
0.008696
1
0.017391
false
0
0.052174
0
0.078261
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d88690768c73f37df5f9308e7658f80de5bdba2
1,475
py
Python
orange3/Orange/widgets/credentials.py
rgschmitz1/BioDepot-workflow-builder
f74d904eeaf91ec52ec9b703d9fb38e9064e5a66
[ "MIT" ]
54
2017-01-08T17:21:49.000Z
2021-11-02T08:46:07.000Z
orange3/Orange/widgets/credentials.py
Synthia-3/BioDepot-workflow-builder
4ee93abe2d79465755e82a145af3b6a6e1e79fd4
[ "MIT" ]
22
2017-03-28T06:03:14.000Z
2021-07-28T05:43:55.000Z
orange3/Orange/widgets/credentials.py
Synthia-3/BioDepot-workflow-builder
4ee93abe2d79465755e82a145af3b6a6e1e79fd4
[ "MIT" ]
21
2017-01-26T21:12:09.000Z
2022-01-31T21:34:59.000Z
import logging

import keyring

# Template for the keyring service name; the widget name is interpolated in.
SERVICE_NAME = "Orange3 - {}"

log = logging.getLogger(__name__)


class CredentialManager:
    """
    Class for storage of passwords in the system keyring service.

    All attributes of this class are safely stored.

    Args:
        service_name (str): service name used for storing in keyring.

    Examples:
        >>> cm = CredentialManager('Widget Name')
        >>> cm.some_secret = 'api-key-1234'
        >>> cm.some_secret
        'api-key-1234'
        >>> del cm.some_secret
        >>> cm.some_secret
    """

    def __init__(self, service_name):
        # Write through __dict__ directly: a plain attribute assignment
        # would be intercepted by __setattr__ below and the service name
        # itself would end up stored in the keyring.
        self.__dict__["__service_name"] = SERVICE_NAME.format(service_name)

    @property
    def service_name(self):
        """The fully formatted keyring service name."""
        return self.__dict__["__service_name"]

    def __setattr__(self, key, value):
        """Store ``value`` as secret ``key`` in the keyring; failures are logged, not raised."""
        try:
            keyring.set_password(self.service_name, key, value)
        except Exception:
            # Deliberate best-effort: keyring backends vary per platform.
            log.exception("Failed to set secret '%s' of '%r'.", key, self.service_name)

    def __getattr__(self, item):
        """Return secret ``item`` from the keyring, or None when lookup fails (logged)."""
        try:
            return keyring.get_password(self.service_name, item)
        except Exception:
            log.exception("Failed to get secret '%s' of '%r'.", item, self.service_name)

    def __delattr__(self, item):
        """Delete secret ``item`` from the keyring; failures are logged, not raised."""
        try:
            keyring.delete_password(self.service_name, item)
        except Exception:
            log.exception(
                "Failed to delete secret '%s' of '%r'.", item, self.service_name
            )
27.830189
88
0.614237
174
1,475
4.902299
0.33908
0.193435
0.123095
0.080891
0.305979
0.305979
0.213365
0.213365
0.145369
0.145369
0
0.008443
0.277288
1,475
52
89
28.365385
0.791745
0.251525
0
0.222222
0
0
0.138359
0
0
0
0
0
0
1
0.185185
false
0.111111
0.074074
0.037037
0.37037
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
9d886ff7c8fb1d674ed9db521c7c448a657e5fe1
3,799
py
Python
Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py
sn0b4ll/Incident-Playbook
cf519f58fcd4255674662b3620ea97c1091c1efb
[ "MIT" ]
1
2021-07-24T17:22:50.000Z
2021-07-24T17:22:50.000Z
Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py
sn0b4ll/Incident-Playbook
cf519f58fcd4255674662b3620ea97c1091c1efb
[ "MIT" ]
2
2022-02-28T03:40:31.000Z
2022-02-28T03:40:52.000Z
Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py
sn0b4ll/Incident-Playbook
cf519f58fcd4255674662b3620ea97c1091c1efb
[ "MIT" ]
2
2022-02-25T08:34:51.000Z
2022-03-16T17:29:44.000Z
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for the responder.actions.models.Action model.
"""

# standard library
try:
    from unittest.mock import Mock, patch
except ImportError:
    from mock import Mock, patch

# third party
from django.test import TestCase

# local
import platforms.jira.handlers as jira_module
from responder.actions.models import Action
from tests.fixture_manager import get_fixtures


class ActionsBaseTestCase(TestCase):
    """
    Base class for testing Actions.
    """
    # Django fixture files providing the Action (pk=1) loaded in setUp.
    fixtures = get_fixtures(['actions', 'dispatches'])

    def setUp(self):
        self.action = Action.objects.get(pk=1)


class ActionTestCase(ActionsBaseTestCase):
    """
    Tests the Action class.
    """

    def test_str(self):
        """
        Tests the string representation of an Action.
        """
        self.assertEqual(str(self.action), 'Jira IssueAPI')

    def test_get_module(self):
        """
        Tests the _get_module method for getting the module for an
        Action's Destination.
        """
        self.assertEqual(self.action._get_module(), jira_module)

    def test_create_request_handler(self):
        """
        Tests the create_request_handler method for getting a request
        handler for an Action.
        """
        mock_user = Mock()
        mock_handler = Mock()
        # Patch the handler class so no real JIRA client is constructed.
        with patch('platforms.jira.handlers.IssueAPI',
                   return_value=mock_handler) as mock_api:
            kwargs = {
                'user': mock_user,
            }
            result = self.action.create_request_handler(**kwargs)
            mock_api.assert_called_once_with(endpoint=self.action, user=mock_user)
            self.assertEqual(result, mock_handler)

    def test_save_w_no_descr(self):
        """
        Test the save method of an Action when the Action has no
        description.
        """
        self.assertEqual(self.action.description, None)
        # save() is expected to backfill the description from the endpoint.
        self.action.save()
        self.assertEqual(self.action.description, 'Jira IssueAPI')

    def test_save_w_descr(self):
        """
        Test the save method of an Action when the Action has a
        description.
        """
        self.action.description = 'Create a JIRA Issue'
        self.action.save()
        self.assertEqual(self.action.description, 'Create a JIRA Issue')

    def test_get_dispatch(self):
        """
        Test the get_dispatch method of an Action.
        """
        mock_alert = Mock()
        mock_user = Mock()
        mock_record = Mock()
        mock_handler = Mock()
        mock_handler.run = Mock(return_value=mock_record)
        mock_handler.record = mock_record
        with patch('platforms.jira.handlers.IssueAPI',
                   return_value=mock_handler) as mock_api:
            kwargs = {
                'alert': mock_alert,
                'user': mock_user,
            }
            result = self.action.get_dispatch(**kwargs)
            mock_api.assert_called_once_with(endpoint=self.action, user=mock_user)
            # The dispatch runs the handler on the alert and returns its record.
            mock_handler.run.assert_called_once_with(mock_alert)
            self.assertEqual(result, mock_record)
30.886179
72
0.630692
461
3,799
5.052061
0.321041
0.055818
0.032632
0.042937
0.303564
0.276943
0.228854
0.206097
0.16316
0.16316
0
0.004044
0.284022
3,799
122
73
31.139344
0.852206
0.300869
0
0.327273
0
0
0.065047
0.026348
0
0
0
0
0.181818
1
0.127273
false
0
0.127273
0
0.309091
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d8881a2641e3115485a61059c62987f2d27bf5d
4,805
py
Python
predictions/lambda/handler.py
aaronshim/alexa-github-today
4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c
[ "MIT" ]
null
null
null
predictions/lambda/handler.py
aaronshim/alexa-github-today
4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c
[ "MIT" ]
null
null
null
predictions/lambda/handler.py
aaronshim/alexa-github-today
4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c
[ "MIT" ]
null
null
null
import json
import requests
from collections import defaultdict
from fuzzywuzzy import process
from random import sample

# Constants

"""
Constants for default responses that do not need any further computation.
"""
DEFAULT_STOP_RESPONSE = 'All right. See you next time!'
DEFAULT_ERROR_MESSAGE = "I'm sorry. I don't know how to do that yet."
DEFAULT_HELP_MESSAGE = "Try asking me about prediction markets. Ask me to look up midterm elections."
PREDEFINED_RESPONSES = {
    'AMAZON.FallbackIntent': "I couldn't understand what you were asking. Why don't you ask me about elections?",
    'AMAZON.CancelIntent': DEFAULT_STOP_RESPONSE,
    'AMAZON.HelpIntent': DEFAULT_HELP_MESSAGE,
    'AMAZON.StopIntent': DEFAULT_STOP_RESPONSE,
    'AMAZON.NavigateHomeIntent': DEFAULT_STOP_RESPONSE,
}

"""
To be considered as a match, any other title would have to be within this
percentage of the score of the best match.
"""
PERCENTAGE_THRESHOLD = 0.1


# API Helpers

def get_all_markets():
    """
    Query the PredictIt API to get all available markets in a dictionary
    that maps from the name of the market to its ID.
    """
    all_markets = requests.request(
        'GET', 'https://www.predictit.org/api/marketdata/all/')
    all_markets = json.loads(all_markets.content)
    return dict((market['name'], market['id'])
                for market in all_markets['markets'])


def get_market(id):
    """
    Query the PredictIt API to get the details of a particular market
    given the market's ID.
    """
    market = requests.request(
        'GET', "https://www.predictit.org/api/marketdata/markets/%d" % id)
    return json.loads(market.content)


# "UI" Helpers

def market_message(market):
    """
    Given the response from `get_market`, generates a message that conveys
    the relevant information of the particular market.
    """
    if len(market['contracts']) > 1:
        return "%s is too complicated." % market['name']
    # NOTE(review): %d truncates the float percentage toward zero —
    # presumably intended rounding-down for speech; confirm.
    return "%s is trading at %d percent." % \
        (market['name'], market['contracts'][0]['lastTradePrice'] * 100)


def response_from_message(message):
    """
    Helper to wrap a message string into the minimum acceptable Alexa
    response JSON.
    """
    return {
        'version': '1.0',
        'response': {
            'outputSpeech': {
                'type': 'PlainText',
                'text': message,
            }
        }
    }


def can_fulfill(intent):
    """
    Build the CanFulfillIntentRequest response: YES when the intent is a
    'Query' with a filled 'Market' slot, otherwise NO.
    """
    if intent['name'] == 'Query' and intent['slots'] and \
            intent['slots']['Market'] and intent['slots']['Market']['value']:
        return {
            'version': '1.0',
            'response': {
                'canFulfillIntent': {
                    'canFulfill': 'YES',
                    'slots': {
                        'Market': {
                            'canUnderstand': 'YES',
                            'canFulfill': 'YES'
                        },
                    }
                }
            }
        }
    return {
        'version': '1.0',
        'response': {
            'canFulfillIntent': {
                'canFulfill': 'NO',
            }
        }
    }


# Main function

def main(event, context):
    """
    Entry point for the Alexa action.
    """
    request_type = event['request']['type']
    # NOTE(review): an unrecognised non-IntentRequest type falls through to
    # the intent lookup below and would raise KeyError — confirm intended.
    if request_type != 'IntentRequest':
        if request_type == 'LaunchRequest':
            return response_from_message(DEFAULT_HELP_MESSAGE)
        elif request_type == 'CanFulfillIntentRequest':
            return can_fulfill(event['request']['intent'])
        elif request_type == 'SessionEndedRequest':
            return

    intent = event['request']['intent']
    intent_type = intent['name']

    # Get the canned responses out of the way before we do any heavy lifting
    # with external API calls.
    if intent_type in PREDEFINED_RESPONSES:
        return response_from_message(PREDEFINED_RESPONSES[intent_type])

    # Sanity check.
    if intent_type != 'Query' or 'Market' not in intent['slots']:
        return response_from_message(DEFAULT_ERROR_MESSAGE)

    keyword = intent['slots']['Market']['value']
    markets = get_all_markets()

    # Only take the ones that are within percentage threshold of the first
    # result. Bucket them by score.
    likely_markets = process.extract(keyword, markets.keys(), limit=100)
    (_, best_score) = likely_markets[0]
    result_markets = defaultdict(list)  # Multimap score -> id's
    for (name, score) in likely_markets:
        if best_score - score <= PERCENTAGE_THRESHOLD * best_score:
            result_markets[score].append(markets[name])

    # List of market JSON response's.
    # One market is sampled at random per score bucket before fetching details.
    result_markets = [get_market(id) for id in sum(
        [sample(ids, 1) for (_, ids) in result_markets.items()], [])]

    return response_from_message(' '.join(market_message(market)
                                          for market in result_markets))
33.838028
125
0.624766
565
4,805
5.189381
0.346903
0.010232
0.032401
0.034106
0.114939
0.085266
0.068213
0.034789
0.034789
0
0
0.005078
0.262227
4,805
141
126
34.078014
0.822003
0.157544
0
0.120879
0
0
0.238488
0.018366
0
0
0
0
0
1
0.065934
false
0
0.054945
0
0.263736
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d88973447a6fc9a97038839f4db33428c51196b
12,649
py
Python
Train.py
prattcmp/speakerembedding
5ed051261e69aaf7a1306c390b36cedb8da3f095
[ "MIT" ]
null
null
null
Train.py
prattcmp/speakerembedding
5ed051261e69aaf7a1306c390b36cedb8da3f095
[ "MIT" ]
null
null
null
Train.py
prattcmp/speakerembedding
5ed051261e69aaf7a1306c390b36cedb8da3f095
[ "MIT" ]
null
null
null
import torch import numpy as np import logging, yaml, os, sys, argparse, time from tqdm import tqdm from collections import defaultdict from Logger import Logger import matplotlib matplotlib.use('agg') matplotlib.rcParams['agg.path.chunksize'] = 10000 import matplotlib.pyplot as plt from scipy.io import wavfile from random import sample from sklearn.manifold import TSNE from Modules import GE2E, GE2E_Loss from Datasets import Dataset, Collater, Inference_Collater from Noam_Scheduler import Modified_Noam_Scheduler from Radam import RAdam from Arg_Parser import Recursive_Parse hp = Recursive_Parse(yaml.load( open('Hyper_Parameters.yaml', encoding='utf-8'), Loader=yaml.Loader )) if not hp.Device is None: os.environ['CUDA_VISIBLE_DEVICES']= str(hp.Device) if not torch.cuda.is_available(): device = torch.device('cpu') else: device = torch.device('cuda:0') torch.backends.cudnn.benchmark = True torch.cuda.set_device(0) logging.basicConfig( level=logging.INFO, stream=sys.stdout, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" ) if hp.Use_Mixed_Precision: try: from apex import amp except: logging.warn('There is no apex modules in the environment. 
Mixed precision does not work.') hp.Use_Mixed_Precision = False class Trainer: def __init__(self, steps= 0): self.steps = steps self.epochs = 0 self.Datset_Generate() self.Model_Generate() self.scalar_Dict = { 'Train': defaultdict(float), 'Evaluation': defaultdict(float), } self.writer_Dict = { 'Train': Logger(os.path.join(hp.Log_Path, 'Train')), 'Evaluation': Logger(os.path.join(hp.Log_Path, 'Evaluation')), } self.Load_Checkpoint() def Datset_Generate(self): train_Dataset = Dataset( pattern_path= hp.Train.Train_Pattern.Path, metadata_file= hp.Train.Train_Pattern.Metadata_File, pattern_per_speaker= hp.Train.Batch.Train.Pattern_per_Speaker, use_cache= hp.Train.Use_Pattern_Cache ) dev_Dataset = Dataset( pattern_path= hp.Train.Eval_Pattern.Path, metadata_file= hp.Train.Eval_Pattern.Metadata_File, pattern_per_speaker= hp.Train.Batch.Eval.Pattern_per_Speaker, use_cache= hp.Train.Use_Pattern_Cache ) inference_Dataset = Dataset( pattern_path= hp.Train.Eval_Pattern.Path, metadata_file= hp.Train.Eval_Pattern.Metadata_File, pattern_per_speaker= hp.Train.Batch.Eval.Pattern_per_Speaker, num_speakers= 50, #Maximum number by tensorboard. 
use_cache= hp.Train.Use_Pattern_Cache ) logging.info('The number of train speakers = {}.'.format(len(train_Dataset))) logging.info('The number of development speakers = {}.'.format(len(dev_Dataset))) collater = Collater( min_frame_length= hp.Train.Frame_Length.Min, max_frame_length= hp.Train.Frame_Length.Max ) inference_Collater = Inference_Collater( samples= hp.Train.Inference.Samples, frame_length= hp.Train.Inference.Frame_Length, overlap_length= hp.Train.Inference.Overlap_Length ) self.dataLoader_Dict = {} self.dataLoader_Dict['Train'] = torch.utils.data.DataLoader( dataset= train_Dataset, shuffle= True, collate_fn= collater, batch_size= hp.Train.Batch.Train.Speaker, num_workers= hp.Train.Num_Workers, pin_memory= True ) self.dataLoader_Dict['Dev'] = torch.utils.data.DataLoader( dataset= dev_Dataset, shuffle= True, collate_fn= collater, batch_size= hp.Train.Batch.Eval.Speaker, num_workers= hp.Train.Num_Workers, pin_memory= True ) self.dataLoader_Dict['Inference'] = torch.utils.data.DataLoader( dataset= inference_Dataset, shuffle= True, collate_fn= inference_Collater, batch_size= hp.Train.Batch.Eval.Speaker, num_workers= hp.Train.Num_Workers, pin_memory= True ) def Model_Generate(self): self.model = GE2E( mel_dims= hp.Sound.Mel_Dim, lstm_size= hp.GE2E.LSTM.Sizes, lstm_stacks= hp.GE2E.LSTM.Stacks, embedding_size= hp.GE2E.Embedding_Size, ).to(device) self.criterion = GE2E_Loss().to(device) self.optimizer = RAdam( params= self.model.parameters(), lr= hp.Train.Learning_Rate.Initial, betas= (hp.Train.ADAM.Beta1, hp.Train.ADAM.Beta2), eps= hp.Train.ADAM.Epsilon, weight_decay= hp.Train.Weight_Decay ) self.scheduler = Modified_Noam_Scheduler( optimizer= self.optimizer, base= hp.Train.Learning_Rate.Base, ) if hp.Use_Mixed_Precision: self.model, self.optimizer = amp.initialize( models= self.model, optimizers=self.optimizer ) logging.info(self.model) def Train_Step(self, mels): loss_Dict = {} mels = mels.to(device, non_blocking=True) embeddings = self.model(mels) 
loss_Dict['Embedding'] = self.criterion(embeddings, hp.Train.Batch.Train.Pattern_per_Speaker) self.optimizer.zero_grad() if hp.Use_Mixed_Precision: with amp.scale_loss(loss_Dict['Embedding'], self.optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_( parameters= amp.master_params(self.optimizer), max_norm= hp.Train.Gradient_Norm ) else: loss_Dict['Embedding'].backward() torch.nn.utils.clip_grad_norm_( parameters= self.model.parameters(), max_norm= hp.Train.Gradient_Norm ) self.optimizer.step() self.scheduler.step() self.steps += 1 self.tqdm.update(1) for tag, loss in loss_Dict.items(): self.scalar_Dict['Train']['Loss/{}'.format(tag)] += loss_Dict['Embedding'] def Train_Epoch(self): for mels in self.dataLoader_Dict['Train']: self.Train_Step(mels) if self.steps % hp.Train.Checkpoint_Save_Interval == 0: self.Save_Checkpoint() if self.steps % hp.Train.Logging_Interval == 0: self.scalar_Dict['Train'] = { tag: loss / hp.Train.Logging_Interval for tag, loss in self.scalar_Dict['Train'].items() } self.scalar_Dict['Train']['Learning_Rate'] = self.scheduler.get_last_lr() self.writer_Dict['Train'].add_scalar_dict(self.scalar_Dict['Train'], self.steps) self.scalar_Dict['Train'] = defaultdict(float) if self.steps % hp.Train.Evaluation_Interval == 0: self.Evaluation_Epoch() if self.steps % hp.Train.Inference_Interval == 0: self.Inference_Epoch() if self.steps >= hp.Train.Max_Step: return self.epochs += 1 @torch.no_grad() def Evaluation_Step(self, mels): loss_Dict = {} mels = mels.to(device, non_blocking=True) embeddings = self.model(mels) loss_Dict['Embedding'] = self.criterion(embeddings, hp.Train.Batch.Eval.Pattern_per_Speaker) for tag, loss in loss_Dict.items(): self.scalar_Dict['Evaluation']['Loss/{}'.format(tag)] += loss def Evaluation_Epoch(self): logging.info('(Steps: {}) Start evaluation.'.format(self.steps)) self.model.eval() for step, mels in tqdm(enumerate(self.dataLoader_Dict['Dev'], 1), desc='[Evaluation]'): self.Evaluation_Step(mels) 
self.scalar_Dict['Evaluation'] = { tag: loss / step for tag, loss in self.scalar_Dict['Evaluation'].items() } self.writer_Dict['Evaluation'].add_scalar_dict(self.scalar_Dict['Evaluation'], self.steps) self.writer_Dict['Evaluation'].add_histogram_model(self.model, self.steps, delete_keywords=['layer_Dict', 'layer']) self.scalar_Dict['Evaluation'] = defaultdict(float) self.model.train() @torch.no_grad() def Inference_Step(self, mels): return self.model( mels= mels.to(device, non_blocking=True), samples= hp.Train.Inference.Samples ) def Inference_Epoch(self): logging.info('(Steps: {}) Start inference.'.format(self.steps)) self.model.eval() embeddings, speakers = zip(*[ (self.Inference_Step(mels), speakers) for mels, speakers in tqdm(self.dataLoader_Dict['Inference'], desc='[Inference]') ]) embeddings = torch.cat(embeddings, dim= 0).cpu().numpy() speakers = [speaker for speaker_List in speakers for speaker in speaker_List] self.writer_Dict['Evaluation'].add_embedding( embeddings, metadata= speakers, global_step= self.steps, tag= 'Embeddings' ) self.model.train() def Load_Checkpoint(self): if self.steps == 0: paths = [ os.path.join(root, file).replace('\\', '/') for root, _, files in os.walk(hp.Checkpoint_Path) for file in files if os.path.splitext(file)[1] == '.pt' ] if len(paths) > 0: path = max(paths, key = os.path.getctime) else: return # Initial training else: path = os.path.join(path, 'S_{}.pt'.format(self.steps).replace('\\', '/')) state_Dict = torch.load(os.path.join(path), map_location= 'cpu') self.model.load_state_dict(state_Dict['Model']) self.optimizer.load_state_dict(state_Dict['Optimizer']) self.scheduler.load_state_dict(state_Dict['Scheduler']) self.steps = state_Dict['Steps'] self.epochs = state_Dict['Epochs'] if hp.Use_Mixed_Precision: if not 'AMP' in state_Dict.keys(): logging.warn('No AMP state dict is in the checkpoint. 
Model regards this checkpoint is trained without mixed precision.') else: amp.load_state_dict(state_Dict['AMP']) logging.info('Checkpoint loaded at {} steps.'.format(self.steps)) def Save_Checkpoint(self): os.makedirs(hp.Checkpoint_Path, exist_ok= True) state_Dict = { 'Model': self.model.state_dict(), 'Optimizer': self.optimizer.state_dict(), 'Scheduler': self.scheduler.state_dict(), 'Steps': self.steps, 'Epochs': self.epochs, } if hp.Use_Mixed_Precision: state_Dict['AMP'] = amp.state_dict() torch.save( state_Dict, os.path.join(hp.Checkpoint_Path, 'S_{}.pt'.format(self.steps).replace('\\', '/')) ) logging.info('Checkpoint saved at {} steps.'.format(self.steps)) def Train(self): hp_Path = os.path.join(hp.Checkpoint_Path, 'Hyper_Parameters.yaml').replace('\\', '/') if not os.path.exists(hp_Path): from shutil import copyfile os.makedirs(hp.Checkpoint_Path, exist_ok= True) copyfile('Hyper_Parameters.yaml', hp_Path) if self.steps == 0: self.Evaluation_Epoch() if hp.Train.Initial_Inference: self.Inference_Epoch() self.tqdm = tqdm( initial= self.steps, total= hp.Train.Max_Step, desc='[Training]' ) while self.steps < hp.Train.Max_Step: try: self.Train_Epoch() except KeyboardInterrupt: self.Save_Checkpoint() exit(1) self.tqdm.close() logging.info('Finished training.') if __name__ == '__main__': argParser = argparse.ArgumentParser() argParser.add_argument('-s', '--steps', default= 0, type= int) args = argParser.parse_args() new_Trainer = Trainer(steps= args.steps) new_Trainer.Train()
35.233983
137
0.591035
1,446
12,649
4.981328
0.185339
0.041788
0.023324
0.018465
0.374705
0.295155
0.206303
0.168957
0.147022
0.140358
0
0.004048
0.296861
12,649
359
138
35.233983
0.805824
0.003716
0
0.216949
0
0.00339
0.080787
0.006825
0
0
0
0
0
1
0.040678
false
0
0.061017
0.00339
0.115254
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d8b9ee2c96a9f3f72e8c6e40b49a6ccfdc17590
2,247
py
Python
steamstore/client.py
saucesteals/steam.py
b1017f85f23c0eccafc6f35814d2e57cb4aa23e7
[ "MIT" ]
null
null
null
steamstore/client.py
saucesteals/steam.py
b1017f85f23c0eccafc6f35814d2e57cb4aa23e7
[ "MIT" ]
null
null
null
steamstore/client.py
saucesteals/steam.py
b1017f85f23c0eccafc6f35814d2e57cb4aa23e7
[ "MIT" ]
1
2021-04-11T00:38:19.000Z
2021-04-11T00:38:19.000Z
import logging import asyncio import aiohttp from .defaults import * from .app import App from .featured import FeaturedList log = logging.getLogger(__name__) class Client: """Main class for the Steam API""" def __init__(self, *, loop=None, **opts): self.loop = asyncio.get_event_loop() if loop is None else loop self.ready = False self.http = None def __cleanup(self): loop = self.loop if self.http: asyncio.ensure_future(self.http.close(), loop=loop) def stop(self): self.__cleanup() def __build_api(self, path:str, qs:dict=None, **args): url = base_api + path if qs: url += "?" + '&'.join([name + "=" + str(value) for name, value in qs.items()]) return url async def __get_json(self, req:aiohttp.client_reqrep.ClientResponse, *args, **kwargs) -> dict: json_resp = None try: json_resp = await req.json() except: print("Error") return # TODO: Handle this return json_resp async def start(self, *args, **kwargs): self.http = await aiohttp.ClientSession().__aenter__() self.ready = True def run(self, *args, **kwargs): loop = self.loop async def runner(): try: await self.start(*args, **kwargs) finally: pass # TODO: Handle this asyncio.ensure_future(runner(), loop=loop) try: loop.run_forever() except KeyboardInterrupt: self.__cleanup() async def get_app_from_id(self, appid:int, currency_code:str="us", language_code:str="en"): req = await self.http.get(self.__build_api("appdetails", {"appids":appid, "cc":currency_code, "l":language_code})) json_resp = await self.__get_json(req) for item in json_resp: data = json_resp[item] return App(data=data["data"]) if data["success"] else None # TODO: Handle This async def get_featured(self, *args, **kwargs): req = await self.http.get(self.__build_api("featured")) json_resp = await self.__get_json(req) return FeaturedList(data=json_resp)
26.127907
122
0.587895
278
2,247
4.539568
0.327338
0.050713
0.026149
0.025357
0.091918
0.091918
0.091918
0.049128
0
0
0
0
0.294615
2,247
85
123
26.435294
0.796215
0.036938
0
0.160714
0
0
0.023191
0
0.017857
0
0
0.011765
0
1
0.089286
false
0.017857
0.107143
0
0.285714
0.017857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
9d8c97671a23367d026ea52b147ffe064cc2939a
881
py
Python
ga/gen_graph.py
k4t0mono/exercicios-ia
06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c
[ "BSD-3-Clause" ]
1
2018-09-23T15:38:04.000Z
2018-09-23T15:38:04.000Z
ga/gen_graph.py
k4t0mono/exercicios-ia
06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c
[ "BSD-3-Clause" ]
null
null
null
ga/gen_graph.py
k4t0mono/exercicios-ia
06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c
[ "BSD-3-Clause" ]
null
null
null
import sys import numpy as np import matplotlib.pyplot as plt f = open(sys.argv[1], 'r') lines = f.readlines() f.close() pop_size = int(lines.pop(0)) pops = [] for l in lines: if l[0] == '[': pops.append(l.strip()) for j in range(len(pops)): p = [] for n in pops[j][1:-1].split(','): p.append(int(n)) d = {} for i in range(-16, 16): d[i] = 0 for i in p: d[i] += 1 x = [] y = [] for k in d: x.append(k) y.append(d[k]) axes = plt.gca() axes.set_xlim([-17, 16]) axes.set_ylim([0, pop_size+1]) # plt.scatter(x, y, s=5, c=[(0,0,0)], alpha=0.5) plt.bar(x, y, 1, color='blue') plt.title('Population {:03d}'.format(j)) plt.xlabel('x') plt.ylabel('qnt') name = 'pop{:03d}.png'.format(j) plt.savefig(name) print('saving {}'.format(name)) plt.clf()
17.979592
52
0.506243
151
881
2.927152
0.437086
0.013575
0.027149
0
0
0
0
0
0
0
0
0.044094
0.279228
881
48
53
18.354167
0.651969
0.052213
0
0
0
0
0.060024
0
0
0
0
0
0
1
0
false
0
0.083333
0
0.083333
0.027778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d8f0a7d44e8c877c0f58c7e9fe5bd054fd5c40a
7,486
py
Python
src/analyses/analyses.py
zahariaa/disentangled-dynamics
2dbdf9884f6f90ff67073f571191227e7abce81d
[ "MIT" ]
null
null
null
src/analyses/analyses.py
zahariaa/disentangled-dynamics
2dbdf9884f6f90ff67073f571191227e7abce81d
[ "MIT" ]
null
null
null
src/analyses/analyses.py
zahariaa/disentangled-dynamics
2dbdf9884f6f90ff67073f571191227e7abce81d
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ analyses for bVAE entanglement, etc """ import torch import sys sys.path.append("..") # Adds higher directory to python modules path. import matplotlib.pyplot as plt import numpy as np from data.dspritesb import dSpriteBackgroundDataset from torchvision import transforms ds = dSpriteBackgroundDataset(transform=transforms.Resize((32,32)),shapetype = 'circle') # Build sweeps through model ... def sweepCircleLatents(model,latents=np.linspace(0,1,16),def_latents=None): """sweepCircleLatents(model,latents,def_latents): generates input images that sweep through each latent variable, and evaluates them on given model model = loaded model, e.g., vae = staticVAE32(n_latent = 4) latents = latents to sweep through. defaults to np.linspace(0,1,16) def_latents = 'default latents': defines the non-swept latents. defaults to [0.5,0.5,0.5,0.5] if None ---e.g.,--- yhat, x = sweepCircleLatents(vae) """ # Initialization nsweep = len(latents) if type(model).__name__ == 'encoderBVAE_like': n_latent = model.fc.out_features encoder = model else: n_latent = model.n_latent encoder = model.encode if def_latents is None: def_latents = 0.5*np.ones(n_latent) # Generate stimulus sweeps x = torch.zeros((n_latent,nsweep,1,32,32)) for i in np.arange(0,nsweep): x[0,i,:,:,:] = ds.arbitraryCircle(latents[i],def_latents[1],def_latents[2],def_latents[3]) x[1,i,:,:,:] = ds.arbitraryCircle(def_latents[0],latents[i],def_latents[2],def_latents[3]) x[2,i,:,:,:] = ds.arbitraryCircle(def_latents[0],def_latents[1],latents[i],def_latents[3]) x[3,i,:,:,:] = ds.arbitraryCircle(def_latents[0],def_latents[1],def_latents[2],latents[i]) # ... 
and evaulate them all at once yhat = encoder(x) if not (type(model).__name__ == 'encoderBVAE_like' or type(model).__name__ == 'dynamicAE32'): yhat = yhat[0] return yhat,x # Plot sweeps through model def plotCircleSweep(x=None,nimgs=5): """plotCircleSweep(yhat,x): plots a subset of stimuli, generated from sweepCircleLatents() ---e.g.,--- yhat, x = sweepCircleLatents(vae) plotCircleSweep(x) alternatively, plotCircleSweep(sweepCircleLatents(vae)) """ # Initialization if x is None and type(nimgs) is tuple: x = yhat[1] # Start a-plottin' fig, ax = plt.subplots(nimgs,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k') for latentdim in range(4): cnt = -1 for img in np.linspace(0,15,nimgs).astype(int): cnt+=1 plt.sca(ax[cnt,latentdim]) plt.set_cmap('gray') ax[cnt,latentdim].imshow( x[latentdim*16+img,:,:,:].squeeze(), vmin=0, vmax=1) plt.axis('off') return fig, ax def plotLatentsSweep(yhat,nmodels=1): """plotLatentsSweep(yhat): plots model latents and a subset of the corresponding stimuli, generated from sweepCircleLatents() ---e.g.,--- yhat, x = sweepCircleLatents(vae) plotCircleSweep(yhat,x) alternatively, plotLatentsSweep(sweepCircleLatents(vae)) """ # Initialization if type(yhat) is tuple: yhat = yhat[0] # Start a-plottin' fig, ax = plt.subplots(nmodels,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k', sharey='row',sharex='col') for latentdim in range(4): if nmodels > 1: for imodel in range(nmodels): plt.sca(ax[imodel,latentdim]) plt.plot(yhat[imodel][latentdim*16+np.arange(0,16),:].detach().numpy()) # ax[imodel,latentdim].set_aspect(1./ax[imodel,latentdim].get_data_ratio()) ax[imodel,latentdim].spines['top'].set_visible(False) ax[imodel,latentdim].spines['right'].set_visible(False) if latentdim>0: ax[imodel,latentdim].spines['left'].set_visible(False) # ax[imodel,latentdim].set_yticklabels([]) ax[imodel,latentdim].tick_params(axis='y', length=0) # if imodel<nmodels-1 or latentdim>0: ax[imodel,latentdim].spines['bottom'].set_visible(False) 
ax[imodel,latentdim].set_xticklabels([]) ax[imodel,latentdim].tick_params(axis='x', length=0) else: imodel=0 plt.sca(ax[latentdim]) plt.plot(yhat[latentdim*16+np.arange(0,16),:].detach().numpy()) ax[latentdim].set_aspect(1./ax[latentdim].get_data_ratio()) ax[latentdim].spines['top'].set_visible(False) ax[latentdim].spines['right'].set_visible(False) if latentdim>0: ax[latentdim].spines['left'].set_visible(False) ax[latentdim].tick_params(axis='y', length=0) # if imodel<nmodels-1 or latentdim>0: ax[latentdim].spines['bottom'].set_visible(False) ax[latentdim].set_xticklabels([]) ax[latentdim].tick_params(axis='x', length=0) return fig, ax def colorAxisNormalize(colorbar): """colorAxisNormalize(colorbar): normalizes a color axis so it is centered on zero. useful for diverging colormaps (e.g., cmap='bwr': blue=negative, red=positive, white=0) input is already initialized colorbar object from a plot ---e.g.,--- corr_vae = np.corrcoef(yhat_vae.detach().numpy().T) plt.set_cmap('bwr') plt.imshow(corr_vae) cb = plt.colorbar() colorAxisNormalize(cb) ---or--- colorAxisNormalize(plt.colorbar()) """ cm = np.max(np.abs(colorbar.get_clim())) colorbar.set_clim(-cm,cm) def showReconstructionsAndErrors(model): """showReconstructionsAndErrors(model): generates random inputs, runs them through a specified model to generate their reconstructions. 
plots the inputs, reconstructions, and their difference ---e.g.--- from staticvae.models import staticVAE32 vae = staticVAE32(n_latent = 4) vae.eval() checkpoint = torch.load('../staticvae/trained/staticvae32_dsprites_circle_last_500K',map_location='cpu') vae.load_state_dict(checkpoint['model_states']['net']) showReconstructionsAndErrors(model) """ fig=plt.figure(figsize=(18, 16), dpi= 80, facecolor='w', edgecolor='k') cnt = 0 for ii in range(12): x,label = ds[np.random.randint(1000)] x = x[np.newaxis, :, :] mu,logvar = model.encode(x.float()) recon = model.decode(mu).detach() diff = x - recon cnt += 1 ax = plt.subplot(6,6,cnt) plt.set_cmap('gray') ax.imshow(x.squeeze(), vmin=0, vmax=1) plt.title('true') plt.axis('off') cnt += 1 ax = plt.subplot(6,6,cnt) ax.imshow(recon.squeeze(), vmin=0, vmax=1) plt.title('recon') plt.axis('off') cnt += 1 ax = plt.subplot(6,6,cnt) plt.set_cmap('bwr') img = ax.imshow(diff.numpy().squeeze()) colorAxisNormalize(fig.colorbar(img)) plt.title('diff') plt.axis('off')
36.339806
119
0.593909
925
7,486
4.723243
0.271351
0.038911
0.042802
0.023346
0.378119
0.318379
0.280385
0.161822
0.161822
0.127489
0
0.025496
0.266497
7,486
205
120
36.517073
0.770169
0.320064
0
0.221154
0
0
0.029848
0
0
0
0
0
0
1
0.048077
false
0
0.057692
0
0.134615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d9030a3ab27bda98f5076efe7e1d4f4d61c1b31
2,684
py
Python
Chapter_BestPractices/Centering_Scaling.py
ML-PSE/Machine_Learning_for_PSE
b53578d7cc0e0eca4907527b188a60de06d6710e
[ "Apache-2.0" ]
2
2022-02-20T18:57:46.000Z
2022-03-03T07:07:12.000Z
Chapter_BestPractices/Centering_Scaling.py
ML-PSE/Machine_Learning_for_PSE
b53578d7cc0e0eca4907527b188a60de06d6710e
[ "Apache-2.0" ]
null
null
null
Chapter_BestPractices/Centering_Scaling.py
ML-PSE/Machine_Learning_for_PSE
b53578d7cc0e0eca4907527b188a60de06d6710e
[ "Apache-2.0" ]
null
null
null
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ## Centering & Scaling ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #%% Standard scaling import numpy as np from sklearn.preprocessing import StandardScaler X = np.array([[ 1000, 0.01, 300], [ 1200, 0.06, 350], [ 1500, 0.1, 320]]) scaler = StandardScaler().fit(X) # computes mean & std column-wise X_scaled = scaler.transform(X) # transform using computed mean and std # check mean = 0 and variance = 1 for every variable/column after scaling print(X_scaled.mean(axis=0)) # return 1D array of size(3,1) print(X_scaled.std(axis=0)) # return 1D array of size(3,1) # access mean and variance via object properties print(scaler.mean_) # return 1D array of size(3,1) print(scaler.var_) # return 1D array of size(3,1) #%% Normalization from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() # create object X_scaled = scaler.fit_transform(X) # fit & transform # check min = 0 and max = 1 for every variable/column after scaling print(X_scaled.min(axis=0)) print(X_scaled.max(axis=0)) # access min and max via object properties print(scaler.data_min_) print(scaler.data_max_) ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ## Robust Centering & Scaling ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #%% Generate oulier-infested data X = np.random.normal(40, 1, (1500,1)) X[200:300] = X[200:300] +8; X[1000:1150] = X[1000:1150] + 8 # plot import matplotlib.pyplot as plt plt.plot(X, '.-') plt.xlabel('sample #'), plt.ylabel('variable measurement') plt.title('Raw measurements') #%% Transform via standard scaling scaler = StandardScaler().fit(X) X_scaled = scaler.transform(X) # mean and std print('Estimated mean = ', scaler.mean_[0]) print('Estimated standard deviation = ', np.sqrt(scaler.var_[0])) # plot plt.figure() plt.plot(X_scaled, '.-') plt.xlabel('sample #'), plt.ylabel('scaled variable 
measurement') plt.xlim((0,1500)) plt.title('Standard scaling') #%% Transform via robust MAD scaling # compute median and MAD from scipy import stats median = np.median(X) MAD = stats.median_absolute_deviation(X) # scale X_scaled = (X - median)/MAD[0] # median and MAD print('Estimated robust location = ', median) print('Estimated robust spread = ', MAD) # plot plt.figure() plt.plot(X_scaled, '.-') plt.xlabel('sample #'), plt.ylabel('scaled variable measurement') plt.xlim((0,1500)) plt.title('Robust MAD scaling')
31.209302
80
0.592399
340
2,684
4.614706
0.276471
0.044614
0.030593
0.038241
0.331421
0.248566
0.248566
0.235182
0.215424
0.182282
0
0.044984
0.171759
2,684
85
81
31.576471
0.660819
0.389344
0
0.272727
0
0
0.168754
0
0
0
0
0
0
1
0
false
0
0.113636
0
0.113636
0.272727
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d9115d7ba282f909762763e4412827f039f107a
943
py
Python
pbtaskrunner/models.py
arxcruz/pbtaskrunner
26aff681593aae0d72520509fd1fbecbc3c8a9a6
[ "Apache-2.0" ]
null
null
null
pbtaskrunner/models.py
arxcruz/pbtaskrunner
26aff681593aae0d72520509fd1fbecbc3c8a9a6
[ "Apache-2.0" ]
null
null
null
pbtaskrunner/models.py
arxcruz/pbtaskrunner
26aff681593aae0d72520509fd1fbecbc3c8a9a6
[ "Apache-2.0" ]
null
null
null
from pbtaskrunner import db from pbtaskrunner import app from datetime import datetime def date_time_now(): return datetime.now class TestTask(db.Model): """Database representation of a Task test""" __tablename__ = 'test_task' request_id = db.Column(db.Integer, primary_key=True) requester = db.Column('requester', db.String(30)) created = db.Column(db.DateTime, default=date_time_now()) test_environment = db.Column('test_environment', db.Integer) template = db.Column('template', db.String(256)) status = db.Column('status', db.String(15)) output = db.Column('output', db.Text) task_id = db.Column('task_id', db.String(40)) class TestEnvironment(db.Model): """Database representation of a test environment""" __tablename__ = 'test_environment' id = db.Column(db.Integer, primary_key=True) env_number = db.Column(db.Integer) in_use = db.Column(db.Boolean, default=False)
31.433333
64
0.710498
129
943
5.015504
0.372093
0.136012
0.07728
0.078825
0.200927
0.200927
0.102009
0.102009
0
0
0
0.011378
0.161188
943
29
65
32.517241
0.806574
0.089077
0
0
0
0
0.090802
0
0
0
0
0
0
1
0.05
false
0
0.15
0.05
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
9d91be2759fba448a3db8257c92c32db569fc6fc
2,244
py
Python
web/addons/mass_mailing/models/mass_mailing_report.py
diogocs1/comps
63df07f6cf21c41e4527c06e2d0499f23f4322e7
[ "Apache-2.0" ]
1
2019-12-29T11:53:56.000Z
2019-12-29T11:53:56.000Z
odoo/addons/mass_mailing/models/mass_mailing_report.py
tuanquanghpvn/odoo8-tutorial
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
[ "MIT" ]
null
null
null
odoo/addons/mass_mailing/models/mass_mailing_report.py
tuanquanghpvn/odoo8-tutorial
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
[ "MIT" ]
3
2020-10-08T14:42:10.000Z
2022-01-28T14:12:29.000Z
# -*- coding: utf-8 -*- from openerp.osv import fields, osv from openerp import tools class MassMailingReport(osv.Model): _name = 'mail.statistics.report' _auto = False _description = 'Mass Mailing Statistics' _columns = { 'scheduled_date': fields.datetime('Scheduled Date', readonly=True), 'name': fields.char('Mass Mail', readonly=True), 'campaign': fields.char('Mass Mail Campaign', readonly=True), 'sent': fields.integer('Sent', readonly=True), 'delivered': fields.integer('Delivered', readonly=True), 'opened': fields.integer('Opened', readonly=True), 'bounced': fields.integer('Bounced', readonly=True), 'replied': fields.integer('Replied', readonly=True), 'state': fields.selection( [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')], string='Status', readonly=True, ), 'email_from': fields.char('From', readonly=True), } def init(self, cr): """Mass Mail Statistical Report: based on mail.mail.statistics that models the various statistics collected for each mailing, and mail.mass_mailing model that models the various mailing performed. """ tools.drop_view_if_exists(cr, 'mail_statistics_report') cr.execute(""" CREATE OR REPLACE VIEW mail_statistics_report AS ( SELECT min(ms.id) as id, ms.scheduled as scheduled_date, mm.name as name, mc.name as campaign, count(ms.bounced) as bounced, count(ms.sent) as sent, (count(ms.sent) - count(ms.bounced)) as delivered, count(ms.opened) as opened, count(ms.replied) as replied, mm.state, mm.email_from FROM mail_mail_statistics as ms left join mail_mass_mailing as mm ON (ms.mass_mailing_id=mm.id) left join mail_mass_mailing_campaign as mc ON (ms.mass_mailing_campaign_id=mc.id) GROUP BY ms.scheduled, mm.name, mc.name, mm.state, mm.email_from )""")
42.339623
101
0.572638
254
2,244
4.944882
0.314961
0.095541
0.047771
0.028662
0.065287
0
0
0
0
0
0
0.000649
0.31328
2,244
52
102
43.153846
0.814406
0.096702
0
0
0
0.023256
0.590025
0.076309
0
0
0
0
0
1
0.023256
false
0
0.046512
0
0.186047
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d92cc65827cd5fd979d0843a2269e9633857396
97
py
Python
main.py
chengxianga2008/abn_amro
66172747328b33a591ea4e4fcbb902cb823b91e0
[ "BSD-2-Clause" ]
null
null
null
main.py
chengxianga2008/abn_amro
66172747328b33a591ea4e4fcbb902cb823b91e0
[ "BSD-2-Clause" ]
null
null
null
main.py
chengxianga2008/abn_amro
66172747328b33a591ea4e4fcbb902cb823b91e0
[ "BSD-2-Clause" ]
null
null
null
import app if __name__ == "__main__": app.daily_summary("data/Input.txt", "data/Output.csv")
24.25
58
0.701031
14
97
4.214286
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.123711
97
4
58
24.25
0.694118
0
0
0
0
0
0.377551
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
9d934505c9a5de277afc3e1a3c4cc83a509daf62
2,750
py
Python
modules/springerlink.py
Christoph-D/paperget
9887936039ecc9fafe4dcce7988e75e964a05bcd
[ "MIT" ]
3
2016-06-17T15:52:02.000Z
2017-12-21T02:44:49.000Z
modules/springerlink.py
Christoph-D/paperget
9887936039ecc9fafe4dcce7988e75e964a05bcd
[ "MIT" ]
null
null
null
modules/springerlink.py
Christoph-D/paperget
9887936039ecc9fafe4dcce7988e75e964a05bcd
[ "MIT" ]
1
2021-02-16T21:10:33.000Z
2021-02-16T21:10:33.000Z
"""Screen-scraping downloader module for springerlink.com (Python 2 only:
uses urllib.FancyURLopener / urlopen / urlretrieve)."""
import urllib, re


class FakeUseragentURLopener(urllib.FancyURLopener):
    # Masquerade as a desktop Firefox; the site presumably blocks the
    # default Python user agent — TODO confirm.
    version = "Mozilla/5.0 (Ubuntu; X11; Linux i686; rv:9.0.1) Gecko/20100101 Firefox/9.0.1"


# Install the fake-UA opener globally for urllib.urlopen/urlretrieve.
urllib._urlopener = FakeUseragentURLopener()

# Patterns scraped out of the article HTML: the PDF link, and the two hidden
# ASP.NET form fields needed to post the citation-export form.
download_pdf_regex = re.compile('.*<li class="pdf"><a class="sprite pdf-resource-sprite" href="([^"]*)" title="Download PDF.*')
viewstate_regex = re.compile('.*<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="([^"]*)" />.*')
eventvalidation_regex = re.compile('.*<input type="hidden" name="__EVENTVALIDATION" id="__EVENTVALIDATION" value="([^"]*)" />.*')


def download_pdf(url, filename):
    """Fetch the article page at *url*, find its PDF link, and save the PDF
    to *filename*.  Returns False when no PDF link is found."""
    page = urllib.urlopen(url).read()
    result = download_pdf_regex.search(page)
    if result is None:
        return False
    fulltext_url = "http://www.springerlink.com" + result.group(1)
    return urllib.urlretrieve(fulltext_url, filename) is not None


def download_bib(url, filename):
    """Post the ASP.NET export-citation form for *url* and save the BibTeX
    response to *filename*.  Returns False when the hidden form fields
    cannot be scraped."""
    url += 'export-citation/'
    form = urllib.urlopen(url).read()
    viewstate = viewstate_regex.search(form)
    eventvalidation = eventvalidation_regex.search(form)
    if viewstate is None or eventvalidation is None:
        return False
    viewstate = viewstate.group(1)
    eventvalidation = eventvalidation.group(1)
    # Reproduce the full form, including the empty search fields the page
    # submits, selecting citation-only export in BibTeX format.
    data = urllib.urlencode([
        ('__VIEWSTATE', viewstate),
        ('ctl00$ctl14$cultureList', 'en-us'),
        ('ctl00$ctl14$SearchControl$BasicSearchForTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicAuthorOrEditorTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicPublicationTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicVolumeTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicIssueTextBox', ''),
        ('ctl00$ctl14$SearchControl$BasicPageTextBox', ''),
        ('ctl00$ContentPrimary$ctl00$ctl00$Export', 'CitationOnlyRadioButton'),
        ('ctl00$ContentPrimary$ctl00$ctl00$CitationManagerDropDownList', 'BibTex'),
        ('ctl00$ContentPrimary$ctl00$ctl00$ExportCitationButton', 'Export+Citation'),
        ('__EVENTVALIDATION', eventvalidation)])
    return urllib.urlretrieve(url, filename, data=data) is not None


def download_pdf_chapter(url, filename):
    """Download a link.springer.com chapter PDF by rewriting /chapter/ to
    /content/pdf/ and appending '.pdf'."""
    return urllib.urlretrieve(url.replace('/chapter/', '/content/pdf/', 1) + '.pdf', filename) is not None


import base

# Register this module's handlers with the dispatcher for both the legacy
# springerlink.com content URLs and link.springer.com chapter URLs.
base.register_module('http://www\.springerlink\.com/content/.*',
                     {'name': 'springerlink',
                      'download_pdf': download_pdf,
                      'download_bib': download_bib,
                      })
base.register_module('http://link\.springer\.com/chapter/.*',
                     {'name': 'springerlink_chapter',
                      'download_pdf': download_pdf_chapter,
                      })
49.107143
129
0.651273
272
2,750
6.448529
0.338235
0.056442
0.078677
0.049601
0.060433
0.037628
0.037628
0
0
0
0
0.0322
0.198182
2,750
55
130
50
0.763265
0
0
0.081633
0
0.040816
0.396
0.189455
0
0
0
0
0
1
0.061224
false
0
0.040816
0.020408
0.244898
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d956d3bf237c9754179486589b614a0b07bc05b
1,533
py
Python
app/__init__.py
alexander-emelyanov/microblog
f549768b410f1ce70fbfcbcdf89fb945793168e2
[ "MIT" ]
null
null
null
app/__init__.py
alexander-emelyanov/microblog
f549768b410f1ce70fbfcbcdf89fb945793168e2
[ "MIT" ]
null
null
null
app/__init__.py
alexander-emelyanov/microblog
f549768b410f1ce70fbfcbcdf89fb945793168e2
[ "MIT" ]
null
null
null
"""Application package bootstrap: creates the Flask app, database, login
manager, OpenID helper, and (outside debug mode) the log handlers.

Statement order matters here: ``models`` and ``views`` are imported only
after the objects they depend on (``db``, ``lm``, ``oid``) exist.
"""
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from config import basedir, ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD, MAIL_SECURE

app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)

# Imported after `db` is created — models presumably use it; confirm in app/models.
from app import models

lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'  # endpoint users are redirected to when not logged in
# OpenID association files are stored under <basedir>/tmp.
oid = OpenID(app, os.path.join(basedir, 'tmp'))


@lm.user_loader
def load_user(id):
    # Flask-Login callback: map a stored (string) user id back to a User row.
    return models.User.query.get(int(id))


from app import views

# Error handling
if not app.debug:
    import logging
    from logging.handlers import SMTPHandler, RotatingFileHandler
    # SMTP based handler configuration: email ERROR-level records to ADMINS.
    credentials = None
    secure = None
    if MAIL_USERNAME or MAIL_PASSWORD:
        credentials = (MAIL_USERNAME, MAIL_PASSWORD)
        # NOTE(review): secure is only set when credentials exist — assumed
        # intentional (TLS needs authentication here); confirm against config.
        if MAIL_SECURE:
            secure = MAIL_SECURE
    mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT), MAIL_USERNAME, ADMINS, 'Microblog failure', credentials, secure)
    mail_handler.setLevel(logging.ERROR)
    # File based handler: rotate at 1 MiB, keep 10 backups.
    file_handler = RotatingFileHandler('tmp/microblog.log', 'a', 1 * 1024 * 1024, 10)
    file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    file_handler.setLevel(logging.INFO)
    # Set handlers
    app.logger.setLevel(logging.INFO)
    app.logger.addHandler(mail_handler)
    app.logger.addHandler(file_handler)
    app.logger.info('Microblog startup')
26.894737
121
0.739726
205
1,533
5.385366
0.390244
0.032609
0.032609
0.032609
0.054348
0.054348
0
0
0
0
0
0.008534
0.159165
1,533
57
122
26.894737
0.847944
0.051533
0
0
0
0.027778
0.091724
0.016552
0
0
0
0
0
1
0.027778
false
0.083333
0.277778
0.027778
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
9d99ee239305997e26415c20f473a94ad6005845
330
py
Python
PersonalWebApp/Blog/migrations/0002_remove_post_wallpaper_representation.py
CiganOliviu/personal_website
abedf67efc2e7e212c32815f645d3b3709f9f177
[ "MIT" ]
1
2021-04-02T16:45:56.000Z
2021-04-02T16:45:56.000Z
PersonalWebApp/Blog/migrations/0002_remove_post_wallpaper_representation.py
CiganOliviu/personal_website
abedf67efc2e7e212c32815f645d3b3709f9f177
[ "MIT" ]
null
null
null
PersonalWebApp/Blog/migrations/0002_remove_post_wallpaper_representation.py
CiganOliviu/personal_website
abedf67efc2e7e212c32815f645d3b3709f9f177
[ "MIT" ]
null
null
null
# Generated by Django 3.0.8 on 2020-09-03 17:04 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('Blog', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='post', name='wallpaper_representation', ), ]
18.333333
47
0.593939
34
330
5.676471
0.852941
0
0
0
0
0
0
0
0
0
0
0.081545
0.293939
330
17
48
19.411765
0.746781
0.136364
0
0
1
0
0.155477
0.084806
0
0
0
0
0
1
0
false
0
0.090909
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
9d9caa03a4ae2fbdbadf5bfc3fd2600ade753a1b
3,460
py
Python
modules/colors.py
trybefore/discordbot
1ffce8149cde586e8c5883e8200b02937c5a15f6
[ "MIT" ]
3
2020-09-15T23:19:18.000Z
2021-02-17T10:24:54.000Z
modules/colors.py
trybefore/discordbot
1ffce8149cde586e8c5883e8200b02937c5a15f6
[ "MIT" ]
3
2021-06-22T10:57:14.000Z
2021-06-22T10:57:15.000Z
modules/colors.py
trybefore/discordbot
1ffce8149cde586e8c5883e8200b02937c5a15f6
[ "MIT" ]
2
2020-05-03T20:54:57.000Z
2020-09-12T18:49:13.000Z
from threading import Lock

import discord
from discord.ext import commands
from loguru import logger

from local_types import Snowflake
from modules import is_bot_admin


class Colors(commands.Cog):
    """Cog letting guild members pick a name color.

    Any guild role named ``color- <name>`` (lowercased) is treated as a
    color role; ``!color <name>`` swaps the member's current color role
    for the requested one, ``!color list`` lists the available names.
    """

    bot: discord.ext.commands.Bot
    # Class-level cache: guild id -> {color name: role Snowflake}.
    # NOTE(review): these are class attributes shared by all instances,
    # as in the original — presumably only one Colors cog is ever loaded.
    colorRoles = {}
    mutex = Lock()

    def __init__(self, bot):
        self.bot = bot
        self.reload()

    def reload(self):
        """Rebuild the color-role cache from every guild's role list.

        Synchronous; guarded by ``mutex`` so it cannot interleave with
        the ``color`` command's read of the cache.
        """
        self.mutex.acquire()
        # try/finally added so an unexpected error cannot leave the mutex
        # held forever (the original would deadlock every later command).
        try:
            for g in self.bot.guilds:
                try:
                    self.colorRoles[g.id].clear()
                except Exception:
                    pass  # Ignore error (first run: no cache entry for this guild yet)
                d = {}
                for r in g.roles:
                    if r.name.lower().startswith("color- "):
                        color_name = r.name.lower().split("color- ")[1]
                        d[color_name] = Snowflake(r.id)
                # logger.debug(f"color roles: {d}")
                self.colorRoles[g.id] = d
        finally:
            self.mutex.release()

    @commands.command(name='reload_colors', hidden=True)
    @commands.check_any(is_bot_admin(), commands.has_permissions(manage_roles=True), commands.is_owner())
    @commands.max_concurrency(1, wait=True)
    @commands.guild_only()
    async def reload_colors(self, ctx):
        # BUG FIX: reload() is a plain synchronous method returning None;
        # the original "await self.reload()" raised TypeError
        # ("object NoneType can't be used in 'await' expression").
        self.reload()

    async def print_colors(self, ctx: discord.ext.commands.Context):
        """Send the comma-separated list of color names for ctx's guild."""
        g: discord.Guild = ctx.guild
        d: dict = self.colorRoles[g.id]
        roles = []
        for r in d.keys():
            roles.append(r)
        await ctx.send(f"```{', '.join(roles)}```")

    # do not use outside of color command function
    async def remove_roles(self, ctx: discord.ext.commands.Context):
        """Strip every cached color role the invoking member currently has."""
        g: discord.Guild = ctx.guild
        member: discord.member.Member = g.get_member(ctx.author.id)
        d: dict = self.colorRoles[g.id]
        to_remove = []
        for r in d.values():
            for mr in member.roles:
                if r.id == mr.id:
                    to_remove.append(r)
        await member.remove_roles(*to_remove, reason="Color Command", atomic=True)

    @commands.command(name='color', help="Choose your name color")
    @commands.cooldown(type=commands.BucketType.user, rate=1, per=3)
    @commands.guild_only()
    async def color(self, ctx: discord.ext.commands.Context, color: str):
        """Handle ``!color <name>`` / ``!color list``."""
        # NOTE(review): a threading.Lock is held across awaits here, which
        # blocks the event loop thread only briefly because Lock.acquire is
        # uncontended in a single-threaded bot — kept as in the original.
        self.mutex.acquire()
        # try/finally added: the original never released the mutex if any
        # Discord API call raised, deadlocking all later color commands.
        try:
            g: discord.Guild = ctx.guild
            member: discord.member.Member = g.get_member(ctx.author.id)
            color = color.lower()
            if color == "list":
                await self.print_colors(ctx)
            else:
                d: dict = self.colorRoles[g.id]
                if d is None:
                    await ctx.send(f"{ctx.author.mention} could not find any color roles in this server!")
                else:
                    try:
                        r = d[color]
                        await self.remove_roles(ctx)
                        await member.add_roles(r)
                        await ctx.send(f"{ctx.author.mention} successfully changed your color to {color}")
                    except KeyError:
                        await ctx.send(
                            f"{ctx.author.mention} could not find any such color!\n ```{self.bot.command_prefix}{ctx.command.name} list``` to view available colors")
        finally:
            self.mutex.release()

    @color.error
    async def color_error(self, ctx, error):
        """Error hook for the color command: report cooldowns to the user, log the rest."""
        if isinstance(error, discord.ext.commands.errors.CommandOnCooldown):
            await ctx.send(f"{ctx.author.mention} {error}")
        else:
            logger.error(f"color error: {error}")
31.454545
161
0.57948
437
3,460
4.512586
0.276888
0.030426
0.045639
0.043103
0.267748
0.234787
0.185091
0.15568
0.15568
0.15568
0
0.001656
0.302023
3,460
109
162
31.743119
0.814907
0.026301
0
0.2375
0
0.0125
0.120654
0.01367
0
0
0
0
0
1
0.025
false
0.0125
0.075
0
0.15
0.025
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d9d2695df7ed5d007311b6af26fc83339dd2f8b
526
py
Python
src/test/python/loader_native.py
dlech/xlang
ace2c924cc1fbecd05804866e183124cbb73bd48
[ "MIT" ]
null
null
null
src/test/python/loader_native.py
dlech/xlang
ace2c924cc1fbecd05804866e183124cbb73bd48
[ "MIT" ]
null
null
null
src/test/python/loader_native.py
dlech/xlang
ace2c924cc1fbecd05804866e183124cbb73bd48
[ "MIT" ]
1
2022-01-23T06:01:40.000Z
2022-01-23T06:01:40.000Z
import sys

# Make the generated projection modules and the pywinrt package importable
# (paths are relative to the test's working directory).
sys.path.append("./generated")
sys.path.append("../../package/pywinrt/projection/pywinrt")

import _winrt

# Initialize the apartment before any WinRT use; MTA is presumably the
# multithreaded apartment constant — confirm against the _winrt extension.
_winrt.init_apartment(_winrt.MTA)


def import_ns(ns):
    """Load WinRT namespace *ns* as a module from the _winrt extension binary.

    The namespace module is named ``_winrt_<ns>`` with dots replaced by
    underscores, and is loaded from the same extension file as ``_winrt``
    itself via an ExtensionFileLoader.
    """
    import importlib.machinery
    import importlib.util
    module_name = "_winrt_" + ns.replace('.', '_')
    loader = importlib.machinery.ExtensionFileLoader(module_name, _winrt.__file__)
    spec = importlib.util.spec_from_loader(module_name, loader)
    module = importlib.util.module_from_spec(spec)
    loader.exec_module(module)
    return module
30.941176
82
0.747148
65
526
5.723077
0.415385
0.104839
0.069892
0
0
0
0
0
0
0
0
0
0.127376
526
16
83
32.875
0.810458
0
0
0
1
0
0.114068
0.076046
0
0
0
0
0
1
0.071429
false
0
0.571429
0
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
9d9de2c097d8a8da90ec0340d6b529e57bfc179c
2,247
py
Python
src/main/scripts/evalDelly.py
cwhelan/cloudbreak
bcff41d5309cfffb1faffc1d46e3f85007f84981
[ "MIT" ]
4
2015-02-10T07:10:28.000Z
2016-09-18T19:29:53.000Z
src/main/scripts/evalDelly.py
cwhelan/cloudbreak
bcff41d5309cfffb1faffc1d46e3f85007f84981
[ "MIT" ]
null
null
null
src/main/scripts/evalDelly.py
cwhelan/cloudbreak
bcff41d5309cfffb1faffc1d46e3f85007f84981
[ "MIT" ]
null
null
null
#!/usr/bin/env python import sys import subprocess import evalBedFile # Delly file format (when only del summaries in file - cat *.del.txt | grep Deletion) # The summary line contains the chromosome, the estimated start and end of the structural variant, # the size of the variant, the number of supporting pairs, the average mapping quality and a unique structural variant id. # 2 3666033 3666250 217 2 1.5 >Deletion_JCVICHR2SIM_00000053< delly_filename = sys.argv[1] truth_filename = sys.argv[2] score_values = [] print_hits = False print_bed = False if len(sys.argv) == 5 and sys.argv[3] == "--printHits": threshold = float(sys.argv[4]) score_values.append(threshold) print_hits = True elif len(sys.argv) == 5 and sys.argv[3] == "--printBed": threshold = float(sys.argv[4]) score_values.append(threshold) print_bed = True else: delly_file = open(delly_filename, "r") for line in delly_file: if line.startswith("#"): continue fields = line.split("\t") # use num pairs as score for now score = float(fields[4]) score_values.append(score) delly_file.close() unique_score_values = list(set(score_values)) unique_score_values.sort() if not print_hits and not print_bed: print "\t".join(["Thresh", "Calls", "TP", "WrongType", "Short", "TPR"]) for v in unique_score_values: calls_gte_threshold = [] delly_file = open(delly_filename, "r") non_del_calls = 0 for line in delly_file: if line.startswith("#"): continue fields = line.split("\t") if float(fields[4]) >= v: chrom = fields[0] ostart = fields[1] oend = fields[2] bed_line = "\t".join([chrom, ostart, oend]) #print bed_line.strip() calls_gte_threshold.append(bed_line) if print_bed: print "\n".join(calls_gte_threshold) continue (qualified_calls, matches, short_calls) = evalBedFile.eval_bed_deletions(truth_filename, calls_gte_threshold, print_hits) tpr = float(matches) / (qualified_calls) if not print_hits: print "\t".join(map(str, [v, qualified_calls, matches, non_del_calls, short_calls, tpr]))
31.208333
125
0.652425
310
2,247
4.554839
0.354839
0.03966
0.048159
0.038244
0.226629
0.226629
0.188385
0.188385
0.157224
0.157224
0
0.025567
0.23409
2,247
71
126
31.647887
0.794887
0.202492
0
0.3
0
0
0.037535
0
0
0
0
0
0
0
null
null
0
0.06
null
null
0.26
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9d9e064b6bf0f12b09cc360b5115a0ae4d5fbeff
1,645
py
Python
examples/basic_dsp_example.py
Camotubi/basic_dsp
38a380439cc8936c64febbc12227df78d95fce7f
[ "Apache-2.0", "MIT" ]
40
2015-11-23T02:23:35.000Z
2022-03-18T11:19:11.000Z
examples/basic_dsp_example.py
Camotubi/basic_dsp
38a380439cc8936c64febbc12227df78d95fce7f
[ "Apache-2.0", "MIT" ]
47
2015-11-23T01:58:38.000Z
2021-01-11T07:53:37.000Z
examples/basic_dsp_example.py
Camotubi/basic_dsp
38a380439cc8936c64febbc12227df78d95fce7f
[ "Apache-2.0", "MIT" ]
9
2018-05-19T07:25:26.000Z
2022-01-09T20:51:40.000Z
import ctypes import struct import time # # A small example how to use basic_dsp in a different language. # class VecResult(ctypes.Structure): _fields_ = [("resultCode", ctypes.c_int), ("result", ctypes.c_void_p)] lib = ctypes.WinDLL('basic_dsp.dll') new64Proto = ctypes.WINFUNCTYPE ( ctypes.c_void_p, # Return type. ctypes.c_int, ctypes.c_int, ctypes.c_double, ctypes.c_ulong, ctypes.c_double) new64 = new64Proto (("new64", lib)) getValue64Proto = ctypes.WINFUNCTYPE ( ctypes.c_double, # Return type. ctypes.c_void_p, ctypes.c_ulong) getValue64 = getValue64Proto (("get_value64", lib)) offset64Proto = ctypes.WINFUNCTYPE ( VecResult, # Return type. ctypes.c_void_p, ctypes.c_double) offset64 = offset64Proto (("real_offset64", lib)) vec = new64( ctypes.c_int(0), ctypes.c_int(0), ctypes.c_double(0.0), ctypes.c_ulong(100000), ctypes.c_double(1.0)) val = getValue64(vec, ctypes.c_ulong(0)) print('At the start: vec[0] = {}'.format(val)) start = time.clock() iterations = 100000 toNs = 1e9 / iterations increment = 5.0 for x in range(0, iterations): vecRes = offset64(vec, ctypes.c_double(increment)) vec = vecRes.result end = time.clock() print('{} ns per iteration, each iteration has {} samples'.format((end - start) * toNs, iterations)) print('Result code: {} (0 means no error)'.format(vecRes.resultCode)) vecRes = offset64(vec, ctypes.c_double(5.0)) vec = vecRes.result val = getValue64(vec, ctypes.c_ulong(0)) print('After {} iterations of increment by {}: vec[0] = {}'.format(iterations + 1, increment, val))
26.967213
100
0.677204
227
1,645
4.76652
0.334802
0.142329
0.096118
0.044362
0.22366
0.198706
0.116451
0.116451
0
0
0
0.048363
0.182979
1,645
60
101
27.416667
0.756696
0.06079
0
0.319149
0
0
0.141835
0
0
0
0
0
0
1
0
false
0
0.06383
0
0.106383
0.085106
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9da16db4956d4af0439ae0a5ca6c02568b1d609f
53,171
py
Python
src/pytris.py
CSID-DGU/2019-2-OSSPC-MDJ-1
2987e11b65bc9e31a30cadd39eea4214e2261998
[ "MIT" ]
1
2019-09-24T04:55:29.000Z
2019-09-24T04:55:29.000Z
src/pytris.py
CSID-DGU/2019-2-OSSPC-MDJ-1
2987e11b65bc9e31a30cadd39eea4214e2261998
[ "MIT" ]
null
null
null
src/pytris.py
CSID-DGU/2019-2-OSSPC-MDJ-1
2987e11b65bc9e31a30cadd39eea4214e2261998
[ "MIT" ]
7
2019-09-24T05:14:24.000Z
2019-12-10T04:15:28.000Z
#!/usr/bin/env python # coding: utf-8 import pygame import operator from mino import * from random import * from pygame.locals import * from ui import * from screeninfo import get_monitors from pygame.surface import Surface import sys from function import * #화면크기 조정 screen_width = 0 screen_height = 0 for m in get_monitors(): screen_width = int(m.width*0.7) screen_height = int(m.height*0.7) # Define block_size = 25 width = 10 # Board width height = 20 # Board height framerate = 30 # Bigger -> Slower framerate_n = 30 pygame.init() size = [screen_width, screen_height] clock = pygame.time.Clock() screen = pygame.display.set_mode(size) pygame.time.set_timer(pygame.USEREVENT, framerate * 10) pygame.time.set_timer(pygame.USEREVENT, framerate_n * 10) pygame.display.set_caption("ACOTRIS™") background_file = '../assets/images/backgroundimage.png' # draw single board def draw_single_board(next, hold, score, level, goal, matrix): screen.fill(ui_variables.black) background_image_alpha(screen, background_file,screen_width, screen_height) # Draw next mino grid_n = tetrimino.mino_map[next - 1][0] for i in range(4): for j in range(4): dx = screen_width*0.692 + block_size * j dy = screen_height*0.22 + block_size * i if grid_n[i][j] != 0: pygame.draw.rect( screen, ui_variables.t_color[grid_n[i][j]], Rect(dx, dy, block_size*0.9, block_size*0.9) ) # Draw hold mino grid_h = tetrimino.mino_map[hold - 1][0] if hold_mino != -1: for i in range(4): for j in range(4): dx = screen_width*0.252 + block_size * j dy = screen_height*0.22 + block_size * i if grid_h[i][j] != 0: pygame.draw.rect( screen, ui_variables.t_color[grid_h[i][j]], Rect(dx, dy, block_size*0.9, block_size*0.9) ) # Set max score if score > 999999: score = 999999 # Draw texts text_hold = ui_variables.DG_v_small.render("HOLD", 1, ui_variables.white) text_next = ui_variables.DG_v_small.render("NEXT", 1, ui_variables.white) text_score = ui_variables.DG_v_small.render("SCORE", 1, ui_variables.white) score_value = 
ui_variables.DG_v_small.render(str(score), 1, ui_variables.white) text_level = ui_variables.DG_v_small.render("LEVEL", 1, ui_variables.white) level_value = ui_variables.DG_v_small.render(str(level), 1, ui_variables.white) text_goal = ui_variables.DG_v_small.render("GOAL", 1, ui_variables.white) goal_value = ui_variables.DG_v_small.render(str(goal), 1, ui_variables.white) aco = ui_variables.DG_v_small.render("ACO level", 1, ui_variables.white) screen.blit(text_hold, (screen_width*0.25, screen_height*0.15)) screen.blit(text_level, (screen_width*0.25, screen_height*0.35)) screen.blit(level_value, (screen_width*0.25, screen_height*0.4)) screen.blit(text_goal, (screen_width*0.25, screen_height*0.65)) screen.blit(goal_value, (screen_width*0.25, screen_height*0.7)) screen.blit(text_next, (screen_width*0.69, screen_height*0.15)) screen.blit(aco, (screen_width*0.69, screen_height*0.35)) screen.blit(text_score, (screen_width*0.69, screen_height*0.65)) screen.blit(score_value, (screen_width*0.69, screen_height*0.7))\ # 플레이 화면에 아코 사진 aco_level(level, int(screen_width*0.68), int(screen_height*0.41)) # Draw board for x in range(width): for y in range(height): dx = screen_width*0.4 + block_size * x dy = screen_height*0.1 + block_size * y draw_block(screen,dx, dy, ui_variables.t_color[matrix[x][y + 1]], block_size) def draw_multi_board_1(next, hold_n, score, level, goal, matrix_n): # Draw next mino_player1 grid_n = tetrimino.mino_map[next - 1][0] for x in range(4): for y in range(4): dx = screen_width*0.39 + block_size * 0.72 * y dy = screen_height*0.23 + block_size * 0.72 * x if grid_n[x][y] != 0: pygame.draw.rect( screen, ui_variables.t_color[grid_n[x][y]], Rect(dx, dy, block_size * 0.7, block_size * 0.7) ) # Draw hold mino_player1 grid_h = tetrimino.mino_map[hold_n - 1][0] if hold_mino_n != -1: for x in range(4): for y in range(4): dx = screen_width*0.095 + block_size * 0.72 * y dy = screen_height*0.23 + block_size * 0.72 * x if grid_h[x][y] != 0: pygame.draw.rect( screen, 
ui_variables.t_color[grid_h[x][y]], Rect(dx, dy, block_size * 0.7, block_size * 0.7) ) # Set max score if score > 999999: score = 999999 # Draw texts text_hold = ui_variables.DG_v_small.render("HOLD", 1, ui_variables.white) text_next = ui_variables.DG_v_small.render("NEXT", 1, ui_variables.white) text_score = ui_variables.DG_v_small.render("SCORE", 1, ui_variables.white) score_value = ui_variables.DG_v_small.render(str(score), 1, ui_variables.white) text_level = ui_variables.DG_v_small.render("LEVEL", 1, ui_variables.white) level_value = ui_variables.DG_v_small.render(str(level), 1, ui_variables.white) text_goal = ui_variables.DG_v_small.render("GOAL", 1, ui_variables.white) goal_value = ui_variables.DG_v_small.render(str(goal), 1, ui_variables.white) # Place texts for player1 screen.blit(text_hold, (screen_width*0.091, screen_height*0.15)) screen.blit(text_level, (screen_width*0.083, screen_height*0.43)) screen.blit(level_value, (screen_width*0.11, screen_height*0.48)) screen.blit(text_goal, (screen_width*0.092, screen_height*0.7)) screen.blit(goal_value, (screen_width*0.115, screen_height*0.75)) screen.blit(text_next, (screen_width*0.389, screen_height*0.15)) screen.blit(text_score, (screen_width*0.388, screen_height*0.7)) screen.blit(score_value, (screen_width*0.393, screen_height*0.75)) #aco_level(screen_width*0.38, screen_height*0.48) aco = ui_variables.DG_v_small.render("ACO level", 1, ui_variables.white) screen.blit(aco, (screen_width*0.39, screen_height*0.43)) aco_level(level1, screen_width*0.38, screen_height*0.48) # Draw board - player1 for x in range(width): for y in range(height): dx = screen_width*0.15 + block_size * x dy = screen_height*0.1 + block_size * y draw_block(screen, dx, dy, ui_variables.t_color[matrix_n[x][y + 1]], block_size) # Draw multi board def draw_multi_board_2(next, hold, score, level, goal, matrix): # Draw next mino_player grid_m = tetrimino.mino_map[next - 1][0] # Draw next mino_player2 for x in range(4): for y in range(4): dx = 
screen_width*0.84 + block_size * 0.72 * y dy = screen_height*0.23 + block_size * 0.72 * x if grid_m[x][y] != 0: pygame.draw.rect( screen, ui_variables.t_color[grid_m[x][y]], Rect(dx,dy, block_size*0.7, block_size*0.7) ) # Draw hold mino_player1 grid_i = tetrimino.mino_map[hold - 1][0] # Draw hold mino_player2 if hold_mino != -1: for x in range(4): for y in range(4): dx = screen_width*0.55 + block_size * 0.72 * y dy = screen_height*0.23 + block_size * 0.72 * x if grid_i[x][y] != 0: pygame.draw.rect( screen, ui_variables.t_color[grid_i[x][y]], Rect(dx, dy, block_size * 0.7, block_size * 0.7) ) # Set max score if score > 999999: score = 999999 # Draw texts text_hold = ui_variables.DG_v_small.render("HOLD", 1, ui_variables.white) text_next = ui_variables.DG_v_small.render("NEXT", 1, ui_variables.white) text_score = ui_variables.DG_v_small.render("SCORE", 1, ui_variables.white) score_value = ui_variables.DG_v_small.render(str(score), 1, ui_variables.white) text_level = ui_variables.DG_v_small.render("LEVEL", 1, ui_variables.white) level_value = ui_variables.DG_v_small.render(str(level), 1, ui_variables.white) text_goal = ui_variables.DG_v_small.render("GOAL", 1, ui_variables.white) goal_value = ui_variables.DG_v_small.render(str(goal), 1, ui_variables.white) # Place texts for player2 screen.blit(text_hold, (screen_width*0.546, screen_height*0.15)) screen.blit(text_level, (screen_width*0.54, screen_height*0.43)) screen.blit(level_value, (screen_width*0.546, screen_height*0.48)) screen.blit(text_goal, (screen_width*0.54, screen_height*0.7)) screen.blit(goal_value, (screen_width*0.562, screen_height*0.75)) screen.blit(text_next, (screen_width*0.84, screen_height*0.15)) screen.blit(text_score, (screen_width*0.845, screen_height*0.7)) screen.blit(score_value, (screen_width*0.85, screen_height*0.75)) #aco_level(screen_width*0.84, screen_height*0.48) aco = ui_variables.DG_v_small.render("ACO level", 1, ui_variables.white) screen.blit(aco, (screen_width*0.845, 
screen_height*0.43)) aco_level(level, screen_width*0.84, screen_height*0.48) # Draw board - player2 for i in range(width): for j in range(height): di = screen_width*0.6 + block_size * i dj = screen_height*0.1 + block_size * j draw_block(screen, di, dj, ui_variables.t_color[matrix[i][j + 1]], block_size) #background image def background_image(filename, width, height, blit_pos): background = pygame.image.load(filename) picture = pygame.transform.scale(background,(width, height)) screen.blit(picture,(0,blit_pos)) def aco_level(level, x, y): # 플레이 화면에 아코 사진 if type == 1: screen.blit(rect_aco1, (x, y)) if level >=5 and level <=9: screen.blit(rect_aco2, (x, y)) elif level >= 10: screen.blit(rect_aco3, (x, y)) elif type == 2: screen.blit(rect_aco2, (x, y)) if level >= 10: screen.blit(rect_aco3, (x, y)) elif type == 3: screen.blit(rect_aco3, (x, y)) # insert image x,y 이미지 위치, r이미지 가로 길이, c이미지 세로 길이 def insert_image(image, x, y, r, c): photo = pygame.transform.scale(image, (r, c)) screen.blit(photo, (x, y)) # image image_aco1 = pygame.image.load('../assets/images/aco1.png') image_aco2 = pygame.image.load('../assets/images/aco2.png') image_aco3 = pygame.image.load('../assets/images/aco3.png') image_manual = pygame.image.load('../assets/images/manual.png') image_winner = pygame.image.load('../assets/images/winner1.png') image_trophy = pygame.image.load('../assets/images/trophy.png') rect_aco1b = pygame.image.load('../assets/images/aco1.png').convert() rect_aco2b = pygame.image.load('../assets/images/aco2.png').convert() rect_aco3b = pygame.image.load('../assets/images/aco3.png').convert() rect_aco1 = pygame.transform.scale(rect_aco1b, (int(screen_width*0.12), int(screen_height*0.13))) rect_aco2 = pygame.transform.scale(rect_aco2b, (int(screen_width*0.13), int(screen_height*0.16))) rect_aco3 = pygame.transform.scale(rect_aco3b, (int(screen_width*0.14), int(screen_height*0.18))) # Initial values blink = False start_single = False # sinlge mode start_multi = False # multi mode 
pause = False done = False game_over = False multi_over = False show_score = False show_manual = False screen_Start = True game_mode = False score = 0 score_n = 0 level = 1 level_n = 1 goal = 1 goal_n = 1 bottom_count = 0 bottom_count_n = 0 hard_drop = False hard_drop_n = False player = 0 dx, dy = 3, 0 # Minos location status dp, dq = 3, 0 rotation = 0 # Minos rotation status rotation_n = 0 mino = randint(1, 7) # Current mino mino_n = randint(1,7) next_mino = randint(1, 7) # Next mino next_mino_n = randint(1,7) hold = False # Hold status hold_n=False hold_mino = -1 # Holded mino hold_mino_n = -1 name_location = 0 name = [65, 65, 65] #모드 별 아코 사진 넣을려고 만듦 type = 0 level1 = 0 level2 = 0 with open('leaderboard.txt') as f: lines = f.readlines() lines = [line.rstrip('\n') for line in open('leaderboard.txt')] leaders = {} for i in lines: leaders[i.split(' ')[0]] = int(i.split(' ')[1]) leaders = sorted(leaders.items(), key=operator.itemgetter(1), reverse=True) matrix= [[0 for y in range(height + 1)] for x in range(width)] # Board matrix matrix_n = [[0 for k in range(height + 1)] for p in range(width)] ########################################################### # Loop Start ########################################################### while not done: # Pause screen if pause: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == USEREVENT: pygame.time.set_timer(pygame.USEREVENT, 300) if start_single == True: draw_single_board(next_mino, hold_mino, score, level, goal, matrix) elif start_multi == True: draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) #pause시 화면 불투명하게 pause_surface = screen.convert_alpha() pause_surface.fill((0, 0, 0, 0)) pygame.draw.rect(pause_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)]) screen.blit(pause_surface, (0, 0)) pause_text = ui_variables.DG_70.render("PAUSED", 1, ui_variables.white) 
pause_start = ui_variables.DG_small.render("Press esc to continue", 1, ui_variables.white) screen.blit(pause_text, (screen_width*0.415, screen_height*0.35)) if blink: screen.blit(pause_start, (screen_width*0.36, screen_height*0.6)) blink = False else: blink = True pygame.display.update() elif event.type == KEYDOWN: erase_mino(dx, dy, mino, rotation, matrix) erase_mino(dp, dq, mino_n, rotation_n, matrix_n) if event.key == K_ESCAPE: pause = False pygame.time.set_timer(pygame.USEREVENT, 1) elif event.key == K_q: done = True # Game screen # Start_single screen elif start_single: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == USEREVENT: # Set speed if not game_over: keys_pressed = pygame.key.get_pressed() if keys_pressed[K_DOWN]: pygame.time.set_timer(pygame.USEREVENT, framerate * 1) else: pygame.time.set_timer(pygame.USEREVENT, framerate * 10) # Draw a mino draw_mino(dx, dy, mino, rotation, matrix) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) # Erase a mino if not game_over: erase_mino(dx, dy, mino, rotation, matrix) # Move mino down if not is_bottom(dx, dy, mino, rotation, matrix): dy += 1 # Create new mino else: if hard_drop or bottom_count == 6: hard_drop = False bottom_count = 0 score += 10 * level draw_mino(dx, dy, mino, rotation, matrix) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) if is_stackable(next_mino, matrix): mino = next_mino next_mino = randint(1, 7) dx, dy = 3, 0 rotation = 0 hold = False else: start_single = False game_over = True single = True pygame.time.set_timer(pygame.USEREVENT, 1) else: bottom_count += 1 # Erase line erase_count = 0 for j in range(21): is_full = True for i in range(10): if matrix[i][j] == 0: is_full = False if is_full: erase_count += 1 k = j while k > 0: for i in range(10): matrix[i][k] = matrix[i][k - 1] k -= 1 if erase_count == 1: score += 50 * level elif erase_count == 2: score += 150 * level elif erase_count == 3: score += 350 * level elif 
erase_count == 4: score += 1000 * level # Increase level goal -= erase_count if goal < 1 and level < 15: level += 1 goal += level * 5 framerate = int(framerate * 0.8) elif event.type == KEYDOWN: erase_mino(dx, dy, mino, rotation, matrix) if event.key == K_ESCAPE: pause = True #Q누르면 창 나가짐 elif event.key == K_q: done = True # Hard drop elif event.key == K_SPACE: while not is_bottom(dx, dy, mino, rotation, matrix): dy += 1 hard_drop = True pygame.time.set_timer(pygame.USEREVENT, 1) draw_mino(dx, dy, mino, rotation, matrix) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) # Hold elif event.key == K_LSHIFT: if hold == False: if hold_mino == -1: hold_mino = mino mino = next_mino next_mino = randint(1, 7) else: hold_mino, mino = mino, hold_mino dx, dy = 3, 0 rotation = 0 hold = True draw_mino(dx, dy, mino, rotation, matrix) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) # Turn right elif event.key == K_UP: if is_turnable_r(dx, dy, mino, rotation, matrix): rotation += 1 # Kick elif is_turnable_r(dx, dy - 1, mino, rotation, matrix): dy -= 1 rotation += 1 elif is_turnable_r(dx + 1, dy, mino, rotation, matrix): dx += 1 rotation += 1 elif is_turnable_r(dx - 1, dy, mino, rotation, matrix): dx -= 1 rotation += 1 elif is_turnable_r(dx, dy - 2, mino, rotation, matrix): dy -= 2 rotation += 1 elif is_turnable_r(dx + 2, dy, mino, rotation, matrix): dx += 2 rotation += 1 elif is_turnable_r(dx - 2, dy, mino, rotation, matrix): dx -= 2 rotation += 1 if rotation == 4: rotation = 0 draw_mino(dx, dy, mino, rotation, matrix) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) # Turn left elif event.key == K_z or event.key == K_LCTRL: if is_turnable_l(dx, dy, mino, rotation, matrix): rotation -= 1 # Kick elif is_turnable_l(dx, dy - 1, mino, rotation, matrix): dy -= 1 rotation -= 1 elif is_turnable_l(dx + 1, dy, mino, rotation, matrix): dx += 1 rotation -= 1 elif is_turnable_l(dx - 1, dy, mino, rotation, matrix): dx -= 1 rotation -= 1 
elif is_turnable_l(dx, dy - 2, mino, rotation, matrix): dy -= 2 rotation += 1 elif is_turnable_l(dx + 2, dy, mino, rotation, matrix): dx += 2 rotation += 1 elif is_turnable_l(dx - 2, dy, mino, rotation, matrix): dx -= 2 if rotation == -1: rotation = 3 draw_mino(dx, dy, mino, rotation, matrix) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) # Move left elif event.key == K_LEFT: if not is_leftedge(dx, dy, mino, rotation, matrix): dx -= 1 draw_mino(dx, dy, mino, rotation, matrix) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) # Move right elif event.key == K_RIGHT: if not is_rightedge(dx, dy, mino, rotation, matrix): dx += 1 draw_mino(dx, dy, mino, rotation, matrix) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) pygame.display.update() # Start_multi screen elif start_multi: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == USEREVENT: screen.fill(ui_variables.black) background_image_alpha(screen, background_file, screen_width, screen_height) if not multi_over: keys_pressed = pygame.key.get_pressed() if keys_pressed[K_DOWN]: pygame.time.set_timer(pygame.USEREVENT, framerate*1) else: pygame.time.set_timer(pygame.USEREVENT, framerate*10) draw_mino(dx, dy, mino, rotation, matrix) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) # Erase a mino if not multi_over: erase_mino(dx, dy, mino, rotation, matrix) # Move mino down if not is_bottom(dx, dy, mino, rotation, matrix): dy += 1 # Create new mino else: if hard_drop or bottom_count == 6: hard_drop = False bottom_count = 0 score += 10 * level draw_mino(dx, dy, mino, rotation, matrix) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) if is_stackable(next_mino, matrix): mino = next_mino next_mino = randint(1, 7) dx, dy = 3, 0 rotation = 0 hold = False else: start_multi = False multi_over = True player = 1 single = False pygame.time.set_timer(pygame.USEREVENT, 1) else: bottom_count += 1 # Erase 
line erase_count = 0 for j in range(21): is_full = True for i in range(10): if matrix[i][j] == 0: is_full = False if is_full: erase_count += 1 k = j while k > 0: for i in range(10): matrix[i][k] = matrix[i][k - 1] k -= 1 if erase_count == 1: score += 50 * level elif erase_count == 2: score += 150 * level elif erase_count == 3: score += 350 * level elif erase_count == 4: score += 1000 * level # Increase level goal -= erase_count if goal < 1 and level < 15: level += 1 goal += level * 5 framerate = int(framerate * 0.8) level_2 = level draw_mino(dp, dq, mino_n, rotation_n ,matrix_n) draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) if not multi_over: erase_mino(dp, dq, mino_n, rotation_n, matrix_n) # Move mino down if not is_bottom(dp, dq, mino_n, rotation_n, matrix_n): dq += 1 else: if hard_drop_n or bottom_count_n == 6: hard_drop_n = False bottom_count_n = 0 score_n+=10*level_n draw_mino(dp, dq, mino_n, rotation_n, matrix_n) draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) if is_stackable(next_mino_n, matrix_n): mino_n = next_mino_n next_mino_n = randint(1,7) dp, dq = 3, 0 rotation_n = 0 hold_n = False else: start_multi = False multi_over= True player = 2 single = False pygame.time.set_timer(pygame.USEREVENT, 1) else: bottom_count_n += 1 erase_count_n = 0 for j in range(21): is_full_n = True for i in range(10): if matrix_n[i][j] == 0: is_full_n = False if is_full_n: erase_count_n += 1 k = j while k > 0: for i in range(10): matrix_n[i][k] = matrix_n[i][k-1] k -= 1 if erase_count_n == 1: score_n += 50 * level_n elif erase_count_n == 2: score_n += 150 * level_n elif erase_count_n == 3: score_n += 350 * level_n elif erase_count_n == 4: score_n += 1000 * level_n # Increase level goal_n -= erase_count_n if goal_n < 1 and level_n < 15: level_n += 1 goal_n += level_n * 5 framerate_n = int(framerate_n * 0.8) level1 = level_n elif event.type == KEYDOWN: erase_mino(dx, dy, mino, rotation, matrix) erase_mino(dp, dq, 
mino_n, rotation_n, matrix_n) if event.key == K_ESCAPE: pause = True #Q누르면 창 나가짐 elif event.key == K_q: done = True # Hard drop elif event.key == K_SPACE: while not is_bottom(dx, dy, mino, rotation, matrix): dy += 1 hard_drop = True pygame.time.set_timer(pygame.USEREVENT, framerate) draw_mino(dx, dy, mino, rotation, matrix) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) elif event.key == K_LCTRL: while not is_bottom(dp, dq, mino_n, rotation_n, matrix_n): dq += 1 hard_drop_n = True pygame.time.set_timer(pygame.USEREVENT, framerate_n) draw_mino(dp, dq, mino_n, rotation_n, matrix_n) draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) # Hold elif event.key == K_RSHIFT: if hold == False: if hold_mino == -1: hold_mino = mino mino = next_mino next_mino = randint(1, 7) else: hold_mino, mino = mino, hold_mino dx, dy = 3, 0 rotation = 0 hold = True draw_mino(dx, dy, mino, rotation, matrix) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) elif event.key == K_LSHIFT: if hold_n == False: if hold_mino_n == -1: hold_mino_n = mino_n mino_n = next_mino_n next_mino_n = randint(1,7) else: hold_mino_n, mino_n = mino_n, hold_mino_n dp, dq = 3, 0 rotation_n = 0 hold_n = True draw_mino(dp, dq, mino_n, rotation_n, matrix_n) draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) # Turn right elif event.key == K_UP : if is_turnable_r(dx, dy, mino, rotation, matrix): rotation += 1 # Kick elif is_turnable_r(dx, dy - 1, mino, rotation, matrix): dy -= 1 rotation += 1 elif is_turnable_r(dx + 1, dy, mino, rotation, matrix): dx += 1 rotation += 1 elif is_turnable_r(dx - 1, dy, mino, rotation, matrix): dx -= 1 rotation += 1 elif is_turnable_r(dx, dy - 2, mino, rotation, matrix): dy -= 2 rotation += 1 elif is_turnable_r(dx + 2, dy, mino, rotation, matrix): dx += 2 rotation += 1 elif is_turnable_r(dx - 2, dy, mino, rotation, matrix): dx -= 2 rotation += 1 if rotation == 4: rotation = 0 draw_mino(dx, dy, 
mino, rotation, matrix) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) elif event.key == K_w: if is_turnable_r(dp, dq, mino_n, rotation_n, matrix_n): rotation_n += 1 # Kick elif is_turnable_r(dp, dq - 1, mino_n, rotation_n, matrix_n): dq -= 1 rotation_n += 1 elif is_turnable_r(dp + 1, dq, mino_n,rotation_n, matrix_n): dp += 1 rotation_n += 1 elif is_turnable_r(dp - 1, dq, mino_n, rotation_n, matrix_n): dp -= 1 rotation_n += 1 elif is_turnable_r(dp, dq - 2, mino_n, rotation_n, matrix_n): dq -= 2 rotation_n+= 1 elif is_turnable_r(dp + 2, dq, mino_n,rotation_n, matrix_n): dp += 2 rotation_n+= 1 elif is_turnable_r(dp - 2, dq, mino_n, rotation_n, matrix_n): dp -= 2 rotation_n += 1 if rotation_n == 4: rotation_n = 0 draw_mino(dp, dq, mino_n, rotation_n, matrix_n) draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) # Move left elif event.key == K_LEFT: if not is_leftedge(dx, dy, mino, rotation, matrix): dx -= 1 draw_mino(dx, dy, mino, rotation, matrix) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) elif event.key == K_a: if not is_leftedge(dp, dq, mino_n, rotation_n, matrix_n): dp -= 1 draw_mino(dp, dq, mino_n, rotation_n, matrix_n) draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) # Move right elif event.key == K_RIGHT: if not is_rightedge(dx, dy, mino, rotation, matrix): dx += 1 draw_mino(dx, dy, mino, rotation, matrix) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) elif event.key == K_d: if not is_rightedge(dp, dq, mino_n, rotation_n, matrix_n): dp += 1 draw_mino(dp, dq, mino_n, rotation_n, matrix_n) draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) pygame.display.update() # Game over screen elif game_over: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == USEREVENT: pygame.time.set_timer(pygame.USEREVENT, 300) over_text_1 = ui_variables.DG_70.render("GAME OVER", 1, 
ui_variables.white) over_start = ui_variables.DG_v_small.render("Press return to continue", 1, ui_variables.white) draw_single_board(next_mino, hold_mino, score, level, goal, matrix) #pause시 화면 불투명하게 over_surface = screen.convert_alpha() over_surface.fill((0, 0, 0, 0)) pygame.draw.rect(over_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)]) screen.blit(over_surface, (0, 0)) name_1 = ui_variables.DGM40.render(chr(name[0]), 1, ui_variables.white) name_2 = ui_variables.DGM40.render(chr(name[1]), 1, ui_variables.white) name_3 = ui_variables.DGM40.render(chr(name[2]), 1, ui_variables.white) underbar_1 = ui_variables.DGM40.render("_", 1, ui_variables.white) underbar_2 = ui_variables.DGM40.render("_", 1, ui_variables.white) underbar_3 = ui_variables.DGM40.render("_", 1, ui_variables.white) screen.blit(over_text_1, (int(screen_width*0.37), int(screen_height*0.2))) screen.blit(name_1, (int(screen_width*0.4), int(screen_height*0.5))) screen.blit(name_2, (int(screen_width*0.5), int(screen_height*0.5))) screen.blit(name_3, (int(screen_width*0.6), int(screen_height*0.5))) if blink: screen.blit(over_start, (int(screen_width*0.38), int(screen_height*0.7))) blink = False else: if name_location == 0: screen.blit(underbar_1, (int(screen_width*0.4), int(screen_height*0.52))) elif name_location == 1: screen.blit(underbar_2, (int(screen_width*0.5), int(screen_height*0.52))) elif name_location == 2: screen.blit(underbar_3, (int(screen_width*0.6), int(screen_height*0.52))) blink = True pygame.display.update() elif event.type == KEYDOWN: if event.key == K_RETURN: outfile = open('leaderboard.txt','a') outfile.write(chr(name[0]) + chr(name[1]) + chr(name[2]) + ' ' + str(score) + '\n') outfile.close() pygame.time.set_timer(pygame.USEREVENT, 1) sys.exit() game_over = False hold = False dx, dy = 3, 0 dp, dq = 3, 0 rotation = 0 rotation_n =0 mino = randint(1, 7) mino_n = randint(1,7) next_mino = randint(1, 7) next_mino_n = randint(1,7) hold_mino = -1 hold_mino_n = -1 
framerate = 30 framerate_n = 30 score = 0 score_n = 0 level = 1 level_n = 1 goal = level * 5 goal_n = level_n*5 bottom_count = 0 bottom_count_n = 0 hard_drop = False hard_drop_n = False if event.key == K_RIGHT: if name_location != 2: name_location += 1 else: name_location = 0 pygame.time.set_timer(pygame.USEREVENT, 1) elif event.key == K_LEFT: if name_location != 0: name_location -= 1 else: name_location = 2 pygame.time.set_timer(pygame.USEREVENT, 1) elif event.key == K_UP: if name[name_location] != 90: name[name_location] += 1 else: name[name_location] = 65 pygame.time.set_timer(pygame.USEREVENT, 1) elif event.key == K_DOWN: if name[name_location] != 65: name[name_location] -= 1 else: name[name_location] = 90 pygame.time.set_timer(pygame.USEREVENT, 1) elif event.key == K_q: done = True elif multi_over: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == USEREVENT: pygame.time.set_timer(pygame.USEREVENT, 300) title = "ACOTRIS" winner_text = "{}P win".format(player) title_text_1 = ui_variables.DG_big.render(title, 1, ui_variables.white) over_text_1 = ui_variables.DG_70.render(winner_text, 1, ui_variables.white) draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n) draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix) #pause시 화면 불투명하게 over_surface = screen.convert_alpha() over_surface.fill((0, 0, 0, 0)) pygame.draw.rect(over_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)]) screen.blit(over_surface, (0, 0)) screen.blit(title_text_1,(int(screen_width*0.35), int(screen_height*0.1))) screen.blit(over_text_1, (int(screen_width*0.39), int(screen_height*0.75))) insert_image(image_winner, screen_width*0.25, screen_height*0.12, int(screen_width*0.55), int(screen_height*0.65)) insert_image(image_trophy, screen_width*0.21, screen_height*0.13, int(screen_width*0.1), int(screen_height*0.18)) insert_image(image_trophy, screen_width*0.7, screen_height*0.13, 
int(screen_width*0.1), int(screen_height*0.18)) pygame.display.update() if event.type == KEYDOWN: if event.key == K_q: done = True elif event.key == K_RETURN: done = True elif game_mode: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == KEYDOWN: keys = pygame.key.get_pressed() # Q누르면 창 나가짐 if event.key == K_q: done = True elif keys[pygame.K_s] and keys[pygame.K_e]: start_single = True level = 1 goal = level * 5 type = 1 elif keys[pygame.K_s] and keys[pygame.K_r]: level = 5 start_single = True goal = level * 5 type = 2 elif keys[pygame.K_s] and keys[pygame.K_t]: level = 10 start_single = True goal = level * 5 type = 3 elif keys[pygame.K_m] and keys[pygame.K_e]: level = 1 goal = level * 5 level_n = 1 goal_n = level_n*5 start_multi= True type = 1 elif keys[pygame.K_m] and keys[pygame.K_r]: level = 5 goal = level * 5 level_n = 5 goal_n = level_n*5 start_multi = True type = 2 elif keys[pygame.K_m] and keys[pygame.K_t]: level = 10 start_multi = True goal = level * 5 level_n = 10 goal_n = level_n*5 type = 3 elif event.type == USEREVENT: pygame.time.set_timer(pygame.USEREVENT, 300) screen.fill(ui_variables.black) background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2)) game_mode_title = ui_variables.DG_small.render("게임옵션설정(두개의 키를 동시에 눌러주세요!)", 1, ui_variables.white) game_mode_choice = ui_variables.DG_v_small.render("게임모드설정", 1, ui_variables.white) game_mode_speed = ui_variables.DG_v_small.render("게임속도설정", 1, ui_variables.white) game_mode_single = ui_variables.DG_v_small.render("● Single 모드 (S키)", 1, ui_variables.white) game_mode_single_des = ui_variables.DG_v_small.render("혼자서 재미있게 하기!!", 1, ui_variables.white) game_mode_multi = ui_variables.DG_v_small.render("● Multi 모드 (M키)", 1, ui_variables.white) game_mode_multi_des = ui_variables.DG_v_small.render("둘이서 재미있게 하기!!", 1, ui_variables.white) game_speed_easy = ui_variables.DG_v_small.render("● 아코 모드(E키)", 1, ui_variables.white) game_speed_normal = 
ui_variables.DG_v_small.render("● 엉아코 모드(R키)", 1, ui_variables.white) game_speed_hard = ui_variables.DG_v_small.render("● 졸업코 모드(T키)", 1, ui_variables.white) game_speed_easy_des = ui_variables.DG_v_small.render("EASY 모드!", 1, ui_variables.white) game_speed_normal_des = ui_variables.DG_v_small.render("NORMAL 모드!!", 1, ui_variables.white) game_speed_hard_des = ui_variables.DG_v_small.render("HARD 모드!!!", 1, ui_variables.white) pygame.draw.line(screen, ui_variables.white, [0, int(screen_height*0.055)], [screen_width,int(screen_height*0.055)],2) screen.blit(game_mode_title, (int(screen_width*0.1)+int(int(screen_width*0.3)*0.4), int(screen_height*0.065))) pygame.draw.line(screen, ui_variables.white, [0, int(screen_height*0.125)], [screen_width,int(screen_height*0.125)],2) pygame.draw.rect(screen, ui_variables.white, [int(screen_width*0.175), int(screen_height*0.2), int(screen_width*0.2), int(screen_height*0.075)], 2) pygame.draw.rect(screen, ui_variables.white, [int(screen_width*0.625), int(screen_height*0.2), int(screen_width*0.2), int(screen_height*0.075)], 2) screen.blit(game_mode_choice, (int(screen_width*0.198), int(screen_height*0.215))) screen.blit(game_mode_speed, (int(screen_width*0.655), int(screen_height*0.215))) screen.blit(game_mode_single, (int(screen_width*0.15), int(screen_height*0.35))) screen.blit(game_mode_multi, (int(screen_width*0.15), int(screen_height*0.55))) screen.blit(game_mode_single_des, (int(screen_width*0.179), int(screen_height*0.4))) screen.blit(game_mode_multi_des, (int(screen_width*0.179), int(screen_height*0.6))) screen.blit(game_speed_easy, (int(screen_width*0.6), int(screen_height*0.3))) screen.blit(game_speed_normal, (int(screen_width*0.6), int(screen_height*0.45))) screen.blit(game_speed_hard, (int(screen_width*0.6), int(screen_height*0.6))) screen.blit(game_speed_easy_des, (int(screen_width*0.65), int(screen_height*0.35))) screen.blit(game_speed_normal_des, (int(screen_width*0.65), int(screen_height*0.5))) 
screen.blit(game_speed_hard_des, (int(screen_width*0.65), int(screen_height*0.65))) insert_image(image_aco1, int(screen_width*0.79), int(screen_height*0.295), int(screen_width*0.1), int(screen_height*0.1)) insert_image(image_aco2, int(screen_width*0.8), int(screen_height*0.445), int(screen_width*0.1), int(screen_height*0.1)) insert_image(image_aco3, int(screen_width*0.8), int(screen_height*0.595), int(screen_width*0.1), int(screen_height*0.1)) pygame.display.update() # Manual screen elif show_manual: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == KEYDOWN: if event.key == K_SPACE: game_mode = True elif event.key == K_q: done = True elif event.type == USEREVENT: pygame.time.set_timer(pygame.USEREVENT, 300) screen.fill(ui_variables.black) background_image('../assets/images/manual.png', screen_width, screen_height, 0) show_score_manual = ui_variables.DG_small.render("Manual", 1, ui_variables.white) show_desc1_manual = ui_variables.DGM23.render("Pytris는 테트리스 게임으로 총 7가지 모양의 블록이 위에서 아래로", 1, ui_variables.white) show_desc2_manual = ui_variables.DGM23.render("떨어질 때 블록을 회전, 이동, 낙하 시켜 빈 곳으로 블록을 끼워 넣어", 1, ui_variables.white) show_desc3_manual = ui_variables.DGM23.render("한 라인을 채우면 라인이 제거되면서 점수를 얻는 방식입니다.", 1, ui_variables.white) pygame.draw.line(screen, ui_variables.white, [0, int(screen_height*0.055)], [screen_width,int(screen_height*0.055)],2) screen.blit(show_score_manual, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.5), int(screen_height*0.06))) screen.blit(show_desc1_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.15))) screen.blit(show_desc2_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.2))) screen.blit(show_desc3_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.25))) pygame.draw.line(screen, ui_variables.white, [0, int(screen_height*0.125)], [screen_width,int(screen_height*0.125)],2) title_start = 
ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white) screen.blit(title_start, (screen_width*0.37, screen_height*0.75)) pygame.display.update() # Show score elif show_score: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == KEYDOWN: # Q누르면 창 나가짐 if event.key == K_q: done = True #space누르면 매뉴얼 창으로 elif event.key == K_SPACE: show_manual = True elif event.type == USEREVENT: pygame.time.set_timer(pygame.USEREVENT, 300) screen.fill(ui_variables.black) background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2)) show_score_list = list() i = 0 try: while i<10: j=0 temp = ui_variables.DG_small.render('%2d' % ((i+1))+'등 '+'{:>6s}'.format(leaders[i][j]) + ' ' + '{:<8s}'.format(str(leaders[i][j+1])), 1, ui_variables.white) show_score_list.append(temp) i+=1 except: show_manual = True show_name_y = int(screen_height*0.17) prop = (show_name_y*0.3) for element in show_score_list: screen.blit(element, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.25), show_name_y)) show_name_y += prop show_button_right = ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white) show_score_title = ui_variables.DG_small.render("Ranking", 1, ui_variables.white) pygame.draw.line(screen, ui_variables.white, [0, int(screen_height*0.055)], [screen_width,int(screen_height*0.055)],2) screen.blit(show_score_title, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.5), int(screen_height*0.065))) pygame.draw.line(screen, ui_variables.white, [0, int(screen_height*0.125)], [screen_width,int(screen_height*0.125)],2) screen.blit(show_button_right, (int(screen_width*0.33)+int(int(screen_width*0.33)*0.2), show_name_y+prop)) pygame.display.flip() # Start screen else: for event in pygame.event.get(): if event.type == QUIT: done = True elif event.type == KEYDOWN: if event.key == K_SPACE: show_score=True #Q 누르면 창 나가짐 elif event.key == K_q: done = True screen.fill(ui_variables.white) 
background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2)) insert_image(image_aco1, screen_width*0.52, screen_height*0.29, 150, 130) insert_image(image_aco2, screen_width*0.65, screen_height*0.22, 180, 180) insert_image(image_aco3, screen_width*0.8, screen_height*0.18, 210, 210) title = ui_variables.DG_big.render("ACOTRIS", 1, ui_variables.black) title_uni = ui_variables.DG_small.render("in DGU", 1, ui_variables.black) title_start = ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white) title_info = ui_variables.DGM13.render("Copyright (c) 2017 Jason Kim All Rights Reserved.", 1, ui_variables.white) if blink: screen.blit(title_start, (91, 195)) blink = False else: blink = True screen.blit(title, (screen_width*0.028, screen_height*0.3)) screen.blit(title_uni, (screen_width*0.37, screen_height*0.3)) screen.blit(title_start, (screen_width*0.37, screen_height*0.55)) screen.blit(title_info, (screen_width*0.35, screen_height*0.93)) if not show_score: pygame.display.update() clock.tick(3) pygame.quit()
41.313908
179
0.514134
6,714
53,171
3.841823
0.063896
0.065674
0.056951
0.040862
0.802512
0.753702
0.714856
0.650229
0.61964
0.577188
0
0.045269
0.383875
53,171
1,286
180
41.346034
0.74188
0.025992
0
0.627789
0
0
0.018535
0.005719
0
0
0
0
0
1
0.006085
false
0
0.010142
0
0.016227
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
9da1a92cdcf88a9e292d7bdc3fb0eeb027139777
2,305
py
Python
chemex/experiments/cpmg/fast/liouvillian.py
marcuscangussu/chemex_bouvignies
ce9ec20a42604eb5995abb0f8a84094b29747651
[ "BSD-3-Clause" ]
null
null
null
chemex/experiments/cpmg/fast/liouvillian.py
marcuscangussu/chemex_bouvignies
ce9ec20a42604eb5995abb0f8a84094b29747651
[ "BSD-3-Clause" ]
null
null
null
chemex/experiments/cpmg/fast/liouvillian.py
marcuscangussu/chemex_bouvignies
ce9ec20a42604eb5995abb0f8a84094b29747651
[ "BSD-3-Clause" ]
null
null
null
""" Created on Sep 1, 2011 @author: guillaume """ from scipy import zeros from chemex.bases.two_states.fast import R_IXY, DR_IXY, DW, KAB, KBA def compute_liouvillians(pb=0.0, kex=0.0, dw=0.0, r_ixy=5.0, dr_ixy=0.0): """ Compute the exchange matrix (Liouvillian) The function assumes a 2-site (A <-> B) exchanging system. The matrix is written in 6x6 cartesian basis, that is {Nx, Ny, Nz}{a,b}. Here the thermal equilibrium is assumed to be 0. This is justified because of the +/- phase cycling of the first 90 degree pulse at the beginning of the cpmg block. Parameters ---------- pb : float Fractional population of state B. 0.0 for 0%, 1.0 for 100%. kex : float Exchange rate between state A and B in /s. dw : float Chemical shift difference between states A and B in rad/s. r_nz : float Longitudinal relaxation rate of state {a,b} in /s. r_nxy : float Transverse relaxation rate of state a in /s. dr_nxy : float Transverse relaxation rate difference between states a and b in /s. cs_offset : float Offset from the carrier in rad/s. Returns ------- out: numpy.matrix Liouvillian describing free precession of one isolated spin in presence of two-site exchange. """ kab = kex * pb kba = kex - kab l_free = R_IXY * r_ixy l_free += DR_IXY * dr_ixy l_free += DW * dw l_free += KAB * kab l_free += KBA * kba return l_free def compute_iy_eq(pb): """ Returns the equilibrium magnetization vector. Parameters ---------- pb : float Fractional population of state B. 0.0 for 0%, 1.0 for 100%. Returns ------- out: numpy.matrix Magnetization vector at equilibrium. """ mag_eq = zeros((4, 1)) mag_eq[1, 0] += (1.0 - pb) mag_eq[3, 0] += pb return mag_eq def get_iy(mag): """ Returns the amount of magnetization along z. Parameters ---------- mag : ndarray Magnetization vector. Returns ------- magy_a, magy_b : float Amount of magnetization in state a and b along z. """ magy_a = mag[1, 0] magy_b = mag[3, 0] return magy_a, magy_b
21.745283
81
0.59436
344
2,305
3.886628
0.343023
0.008975
0.014959
0.015707
0.210172
0.133134
0.133134
0.088257
0.088257
0.088257
0
0.030644
0.306291
2,305
105
82
21.952381
0.805503
0.630803
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0.095238
0
0.380952
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9da1d621b03730a6eb8d7bba6dfd398419916f66
7,261
py
Python
test/nba/test_fzrs.py
jgershen/sportsball
8aa2a599091fb14d1897f2e4b77384e9ee6b0eed
[ "MIT" ]
21
2016-03-12T00:59:04.000Z
2022-03-01T21:32:51.000Z
test/nba/test_fzrs.py
jgershen/sportsball
8aa2a599091fb14d1897f2e4b77384e9ee6b0eed
[ "MIT" ]
1
2017-04-17T04:39:46.000Z
2017-04-17T04:39:46.000Z
test/nba/test_fzrs.py
jgershen/sportsball
8aa2a599091fb14d1897f2e4b77384e9ee6b0eed
[ "MIT" ]
4
2016-07-25T11:55:52.000Z
2019-06-19T20:55:53.000Z
import tempfile import shutil import os import pandas import numpy as np import datetime import pkg_resources from unittest import TestCase from dfs.nba.featurizers import feature_generators from dfs.nba.featurizers import fantasy_points_fzr, last5games_fzr, nf_stats_fzr, vegas_fzr, \ opp_ffpg_fzr, salary_fzr class FeaturizersTest(TestCase): def setUp(self): # A little test data from the past few years, useful for testing BREF data testfn = pkg_resources.resource_filename(__name__, 'test.pickle') self.data = pandas.read_pickle(testfn) # More recent test data -- necessary for testing external data recentfn = pkg_resources.resource_filename(__name__, 'recent.pickle') self.recentdata = pandas.read_pickle(recentfn) def testDataIntegrity(self): assert len(self.data) == 10 assert self.data.iloc[0]['bref_id'] == 'gallola01' assert self.data.iloc[9]['bref_id'] == 'dunlemi02' assert len(self.recentdata) == 10 assert self.recentdata.iloc[0]['bref_id'] == 'barnema02' assert self.recentdata.iloc[9]['bref_id'] == 'lawsoty01' def testDecorator(self): # Make sure the decorator is properly wrapping functions and turning their list outputs into pandas.Series for func_name in feature_generators: assert isinstance(func_name, basestring) wrapper, columns, live = feature_generators[func_name] output = wrapper(self.data.iloc[0]) self.assertTrue(isinstance(output, pandas.Series)) self.assertItemsEqual(columns, output.index) def applyFeaturizer(self, fzr_function, expected_output, use_recent=False): data = self.recentdata if use_recent else self.data for integer_index, (_, row) in enumerate(data.iterrows()): actual_output = fzr_function(row) for i in range(len(expected_output[integer_index])): # First check if they're both NaN if np.isnan(expected_output[integer_index][i]) and np.isnan(actual_output.iloc[i]): continue self.assertAlmostEqual(expected_output[integer_index][i], actual_output.iloc[i], places=3, msg="Error in row %d item %d of %s. Reference %s, actual output %s." 
% ( integer_index, i, 'recentdata' if use_recent else 'data', expected_output[integer_index][i], actual_output.iloc[i] )) def test_fantasy_points_fzr(self): self.applyFeaturizer(fantasy_points_fzr, [[20.1], [4.0], [17.3], [4.2], [22.5], [36.3], [27.9], [31.3], [17.8], [11.7]]) def test_last5games_fzr(self): self.applyFeaturizer(last5games_fzr, [[25.1], [6.78], [18.78], [6.26], [19.24], [29.56], [30.74], [31.36], [13.94], [23.72]]) def test_nf_stats_fzr(self): self.applyFeaturizer(nf_stats_fzr, [[23.76,6.0,2.7,1.4,0.6,0.2,0.8,1.9,12.14], [35.97,19.0,6.1,4.0,1.1,0.2,2.1,2.9,32.82], [23.58,12.9,2.7,1.7,0.7,0.2,1.2,2.4,19.29], [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan], [27.23,10.4,4.4,2.9,0.6,0.3,1.8,2.3,20.03], [23.39,7.10,3.0,1.0,0.5,0.3,0.6,2.1,13.2], [24.62,8.1,4.2,1.6,0.6,0.2,1.4,2.4,15.74], [18.26,9.2,3.0,1.1,0.5,0.4,0.7,1.4,15.55], [23.38,8.1,3.5,0.9,0.6,0.2,0.8,1.7,14.45], [35.62,18.8,4,7.5,1.5,0.1,2.8,2.4,35.25]], use_recent=True) def test_vegas_fzr(self): self.applyFeaturizer(vegas_fzr, [[10.5, 189.5], [6.5, 199.5], [9.5, 194.5], [4.5, 194.0], [8.5, 195.5], [-1, 190.5], [-5, 198], [2.5, 196.5], [-19, 200.0], [-9, 181.0] ]) self.applyFeaturizer(vegas_fzr, [[9.0, 204.5], [-6.0, 200.5], [4.5, 217.5], [-5.5, 202.5], [-5.5, 202.5], [2.0, 195], [13.0, 195], [-4.0, 203.5], [-6.0, 200.5], [4.5, 217.5]], use_recent=True) def test_opp_ffpg_fzr(self): self.applyFeaturizer(opp_ffpg_fzr, [[18.389285714285712, 48.0, 0.85816666666666663, 1.1187666671058538, 20.0], [17.040909090909093, 67.2, 0.76771331058020498, 0.76122548332443785, 2.0055710306406684], [20.261666666666667, 42.4, 0.85140009104385328, 0.80628334990429773, 1.5840597758405979], [15.684848484848485, 35.3, 0.71887224832758501, 0.67037347774416234, 1.3499043977055449], [20.426530612244896, 52.4, 0.83409491798497215, 0.81556700238463165, 1.9865319865319866], [17.885365853658534, 51.8, 0.7638541666666665, 0.69248549436529994, 1.3061224489795917], [18.26969696969697, 66.2, 0.83735141954375503, 
0.89284459636178026, 10.105263157894738], [19.694339622641515, 54.6, 0.86982125248260445, 0.80132994567677285, 1.7091633466135459], [17.863636363636363, 46.4, 0.81874052383653018, 0.80001770931620431, 1.5218658892128281], [16.608974358974361, 56.2, 0.77021403091557705, 0.7193626173392953, 1.3805774278215222]], use_recent=False) def test_salary_fzr(self): self.applyFeaturizer(salary_fzr, [[3500], [8200], [3700], [np.nan], [4100], [3500], [3500], [4000], [3700], [7100]], use_recent=True)
49.060811
115
0.450764
792
7,261
4.026515
0.301768
0.015679
0.01756
0.025086
0.131703
0.054249
0.054249
0.049859
0.049859
0.014111
0
0.27964
0.433136
7,261
148
116
49.060811
0.495141
0.037185
0
0.097744
0
0.007519
0.023476
0
0
0
0
0
0.075188
1
0.075188
false
0
0.075188
0
0.157895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
9da1ed6becdb22c4f8292e530b55e6268710e72f
1,346
py
Python
tests/test_status.py
ehdgua01/blocksync
da0198dde87d284ea3c9472c10f51028e05014a0
[ "MIT" ]
5
2020-06-03T09:30:15.000Z
2021-12-14T23:48:47.000Z
tests/test_status.py
ehdgua01/blocksync
da0198dde87d284ea3c9472c10f51028e05014a0
[ "MIT" ]
2
2021-03-19T07:37:57.000Z
2021-06-18T11:54:46.000Z
tests/test_status.py
ehdgua01/blocksync
da0198dde87d284ea3c9472c10f51028e05014a0
[ "MIT" ]
null
null
null
from blocksync._consts import ByteSizes from blocksync._status import Blocks def test_initialize_status(fake_status): # Expect: Set chunk size assert fake_status.chunk_size == fake_status.src_size // fake_status.workers def test_add_block(fake_status): # Expect: Add each blocks and calculate done block fake_status.add_block("same") fake_status.add_block("same") fake_status.add_block("diff") assert fake_status.blocks == Blocks(same=2, diff=1, done=3) def test_get_rate(fake_status): # Expect: Return 0.00 when nothing done assert fake_status.rate == 0.00 fake_status.block_size = ByteSizes.MiB fake_status.src_size = fake_status.dest_size = ByteSizes.MiB * 10 # Expect: Return 50.00 when half done fake_status.add_block("same") fake_status.add_block("same") fake_status.add_block("same") fake_status.add_block("diff") fake_status.add_block("diff") assert fake_status.rate == 50.00 # Expect: Return 100.00 when all done fake_status.add_block("same") fake_status.add_block("same") fake_status.add_block("same") fake_status.add_block("diff") fake_status.add_block("diff") assert fake_status.rate == 100.00 # Expect: Return 100.00 when exceeding the total size fake_status.add_block("diff") assert fake_status.rate == 100.00
30.590909
80
0.724368
204
1,346
4.509804
0.22549
0.304348
0.197826
0.273913
0.543478
0.543478
0.436957
0.436957
0.419565
0.419565
0
0.035009
0.172363
1,346
43
81
31.302326
0.790844
0.173106
0
0.592593
0
0
0.050633
0
0
0
0
0
0.222222
1
0.111111
false
0
0.074074
0
0.185185
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
9da20747a22e24702a7eb51c79e588aff84309dd
275
py
Python
tests/helpers.py
hawkfish/sudoku
eaae1aa3080032266db0fcfc8a6520a9cb5690fe
[ "MIT" ]
null
null
null
tests/helpers.py
hawkfish/sudoku
eaae1aa3080032266db0fcfc8a6520a9cb5690fe
[ "MIT" ]
null
null
null
tests/helpers.py
hawkfish/sudoku
eaae1aa3080032266db0fcfc8a6520a9cb5690fe
[ "MIT" ]
null
null
null
#!/usr/bin/python3 import os def readFixture(sdk): f = open(os.path.join('tests', 'fixtures', sdk), 'r') lines = f.readlines() f.close() return [ line.strip('\n') for line in lines] def valuesFromDisplay(display): return [value-1 for value in display]
21.153846
57
0.643636
40
275
4.425
0.7
0
0
0
0
0
0
0
0
0
0
0.009009
0.192727
275
12
58
22.916667
0.788288
0.061818
0
0
0
0
0.062257
0
0
0
0
0
0
1
0.25
false
0
0.125
0.125
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
9da26db5109dcd203a39bfcab1fbaa5c755f0368
33,787
py
Python
Software/python/config_dialog.py
edavalosanaya/SKORE
72e742611ba96b0df542781ded0685f525bea82b
[ "MIT" ]
1
2020-09-20T19:00:17.000Z
2020-09-20T19:00:17.000Z
Software/python/config_dialog.py
MrCodingRobot/SKORE
72e742611ba96b0df542781ded0685f525bea82b
[ "MIT" ]
null
null
null
Software/python/config_dialog.py
MrCodingRobot/SKORE
72e742611ba96b0df542781ded0685f525bea82b
[ "MIT" ]
null
null
null
# General Utility Libraries import sys import os import warnings # PyQt5, GUI Library from PyQt5 import QtCore, QtGui, QtWidgets # Serial and Midi Port Library import rtmidi import serial import serial.tools.list_ports # SKORE Library from lib_skore import read_config, update_config import globals #------------------------------------------------------------------------------- # Classes class ArduinoComboBox(QtWidgets.QComboBox): """ This class allows the combobox to recognize arduinos connected as soon as the user clicks the combobox. """ def avaliable_arduino_com(self): """ This fuction returns all the available COM ports in a list of strings. """ ports = serial.tools.list_ports.comports(include_links=False) results = [] for port in ports: results.append(str(port.device)) return results def showPopup(self): """ This function appends to the original showPopup function from the QComboBox by adding the avaliable arduino com ports. """ avaliable_arduino_ports = self.avaliable_arduino_com() self.clear() for avaliable_port in avaliable_arduino_ports: self.addItem(avaliable_port) super(ArduinoComboBox, self).showPopup() return None class PianoComboBox(QtWidgets.QComboBox): """ This class allows the combobox to recognize piano connected as soon as the user clicks the combobox. """ def avaliable_piano_port(self): """ This function returns all the available MIDI ports in a list of string. """ temp_midi_in = [] temp_midi_in = rtmidi.MidiIn() avaliable_ports = temp_midi_in.get_ports() results = [] for port_name in avaliable_ports: results.append(str(port_name)) return results def showPopup(self): """ This function appends to the showPopup function of the QComboBox by adding the avaliable MIDI ports to the listed items in the QComboBox. 
""" avaliable_piano_ports = self.avaliable_piano_port() self.clear() for avaliable_piano_port_connected in avaliable_piano_ports: self.addItem(avaliable_piano_port_connected) super(PianoComboBox, self).showPopup() return None class ConfigDialog(QtWidgets.QDialog): """ This class is the settings dialog that provides the user the capability of changing the settings of the SKORE application. """ finish_apply_signal = QtCore.pyqtSignal() def __init__(self): """ This function sets the settings dialog by changing the title, size, icon, and placing the widgets. """ super(QtWidgets.QDialog, self).__init__() self.setObjectName("Dialog") self.resize(530 * globals.S_W_R, 679 * globals.S_H_R) self.setWindowTitle("SKORE - General Configuration") self.setWindowIcon(QtGui.QIcon('.\images\skore_icon.png')) self.setup_ui() self.setup_func() self.read_all_settings() self.update_settings() return None def setup_ui(self): """ This function places all the widgets in the settings dialog. """ self.apply_close_buttonBox = QtWidgets.QDialogButtonBox(self) self.apply_close_buttonBox.setGeometry(QtCore.QRect(310 * globals.S_W_R, 640 * globals.S_H_R, 201 * globals.S_W_R, 32 * globals.S_H_R)) self.apply_close_buttonBox.setLayoutDirection(QtCore.Qt.RightToLeft) self.apply_close_buttonBox.setOrientation(QtCore.Qt.Horizontal) self.apply_close_buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Apply|QtWidgets.QDialogButtonBox.Close) self.apply_close_buttonBox.setObjectName("apply_cancel_buttonBox") #----------------------------------------------------------------------- # Tab Widget self.tabWidget = QtWidgets.QTabWidget(self) self.tabWidget.setGeometry(QtCore.QRect(10 * globals.S_W_R, 10 * globals.S_H_R, 511 * globals.S_W_R, 621 * globals.S_H_R)) self.tabWidget.setLayoutDirection(QtCore.Qt.LeftToRight) self.tabWidget.setObjectName("tabWidget") #-----------------------------------------------------------------------# # Tab Widget -> path_and_comm_tab self.path_and_comm_tab = 
QtWidgets.QWidget() self.path_and_comm_tab.setObjectName("path_and_comm_tab") #----------------------------------------------------------------------- # Tab Widget -> path_and_comm_tab -> path section self.configure_path_label = QtWidgets.QLabel(self.path_and_comm_tab) self.configure_path_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 5 * globals.S_H_R, 231 * globals.S_W_R, 16 * globals.S_H_R)) self.configure_path_label.setObjectName("configure_path_label") self.path_line = QtWidgets.QFrame(self.path_and_comm_tab) self.path_line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 20 * globals.S_H_R, 481 * globals.S_W_R, 20 * globals.S_H_R)) self.path_line.setFrameShape(QtWidgets.QFrame.HLine) self.path_line.setFrameShadow(QtWidgets.QFrame.Sunken) self.path_line.setObjectName("path_line") self.audiveris_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab) self.audiveris_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 60 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R)) self.audiveris_pushButton.setObjectName("audiveris_pushButton") self.audiveris_label = QtWidgets.QLabel(self.path_and_comm_tab) self.audiveris_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 40 * globals.S_H_R, 101 * globals.S_W_R, 16 * globals.S_H_R)) self.audiveris_label.setObjectName("audiveris_label") self.audiveris_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab) self.audiveris_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 60 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R)) self.audiveris_lineEdit.setObjectName("audiveris_lineEdit") self.amazingmidi_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab) self.amazingmidi_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 120 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R)) self.amazingmidi_lineEdit.setObjectName("amazingmidi_lineEdit") self.amazingmidi_label = QtWidgets.QLabel(self.path_and_comm_tab) self.amazingmidi_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 100 * globals.S_H_R, 
121 * globals.S_W_R, 16 * globals.S_H_R)) self.amazingmidi_label.setObjectName("amazingmidi_label") self.amazingmidi_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab) self.amazingmidi_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 120 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R)) self.amazingmidi_pushButton.setObjectName("amazingmidi_pushButton") self.anthemscore_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab) self.anthemscore_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 180 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R)) self.anthemscore_pushButton.setObjectName("anthemscore_pushButton") self.anthemscore_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab) self.anthemscore_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 180 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R)) self.anthemscore_lineEdit.setObjectName("anthemscore_lineEdit") self.anthemscore_label = QtWidgets.QLabel(self.path_and_comm_tab) self.anthemscore_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 160 * globals.S_H_R, 191 * globals.S_W_R, 16 * globals.S_H_R)) self.anthemscore_label.setObjectName("anthemscore_label") self.muse_score_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab) self.muse_score_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 240 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R)) self.muse_score_pushButton.setObjectName("muse_score_pushButton") self.muse_score_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab) self.muse_score_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 240 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R)) self.muse_score_lineEdit.setObjectName("muse_score_linedEdit") self.muse_score_label = QtWidgets.QLabel(self.path_and_comm_tab) self.muse_score_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 220 * globals.S_H_R, 191 * globals.S_W_R, 16 * globals.S_H_R)) self.muse_score_label.setObjectName("muse_score_label") 
self.mp3_to_midi_converter_label = QtWidgets.QLabel(self.path_and_comm_tab) self.mp3_to_midi_converter_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 280 * globals.S_H_R, 141 * globals.S_W_R, 16 * globals.S_H_R)) self.mp3_to_midi_converter_label.setObjectName("mp3_to_midi_converter_label") self.open_source_radioButton = QtWidgets.QRadioButton(self.path_and_comm_tab) self.open_source_radioButton.setGeometry(QtCore.QRect(240 * globals.S_W_R, 280 * globals.S_H_R, 111 * globals.S_W_R, 20 * globals.S_H_R)) self.open_source_radioButton.setObjectName("open_source_radioButton") self.close_source_radioButton = QtWidgets.QRadioButton(self.path_and_comm_tab) self.close_source_radioButton.setGeometry(QtCore.QRect(380 * globals.S_W_R, 280 * globals.S_H_R, 111 * globals.S_W_R, 20 * globals.S_H_R)) self.close_source_radioButton.setObjectName("close_source_radioButton") #----------------------------------------------------------------------- # Tab Widget -> path_and_comm_tab -> comm section self.comm_line = QtWidgets.QFrame(self.path_and_comm_tab) self.comm_line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 300 * globals.S_H_R, 481 * globals.S_W_R, 20 * globals.S_H_R)) self.comm_line.setFrameShape(QtWidgets.QFrame.HLine) self.comm_line.setFrameShadow(QtWidgets.QFrame.Sunken) self.comm_line.setObjectName("comm_line") self.portsettings_label = QtWidgets.QLabel(self.path_and_comm_tab) self.portsettings_label.setGeometry(QtCore.QRect(210 * globals.S_W_R, 320 * globals.S_H_R, 81* globals.S_W_R, 20 * globals.S_H_R)) self.portsettings_label.setObjectName("portsettings_label") self.piano_port_label = QtWidgets.QLabel(self.path_and_comm_tab) self.piano_port_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 340 * globals.S_H_R, 71 * globals.S_W_R, 16 * globals.S_H_R)) self.piano_port_label.setObjectName("pianoport_label") self.piano_port_comboBox = PianoComboBox(self.path_and_comm_tab) self.piano_port_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 360 * globals.S_H_R, 481 * 
globals.S_W_R, 31 * globals.S_H_R)) self.piano_port_comboBox.setObjectName("pianoport_comboBox") self.piano_size_label = QtWidgets.QLabel(self.path_and_comm_tab) self.piano_size_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 400 * globals.S_H_R, 71* globals.S_W_R, 16* globals.S_H_R)) self.piano_size_label.setObjectName("pianosize_label") self.piano_size_comboBox = QtWidgets.QComboBox(self.path_and_comm_tab) self.piano_size_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 420 * globals.S_H_R, 481 * globals.S_W_R, 31 * globals.S_H_R)) self.piano_size_comboBox.setObjectName("pianosize_comboBox") self.arduinoport_label = QtWidgets.QLabel(self.path_and_comm_tab) self.arduinoport_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 460 * globals.S_H_R, 81 * globals.S_W_R, 16* globals.S_H_R)) self.arduinoport_label.setObjectName("arduinoport_label") self.arduino_port_comboBox = ArduinoComboBox(self.path_and_comm_tab) self.arduino_port_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 480 * globals.S_H_R, 481 * globals.S_W_R, 31 * globals.S_H_R)) self.arduino_port_comboBox.setObjectName("arduinoport_comboBox") self.arduino_baud_rate_label = QtWidgets.QLabel(self.path_and_comm_tab) self.arduino_baud_rate_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 520 * globals.S_H_R, 200 * globals.S_W_R, 20* globals.S_H_R)) self.arduino_baud_rate_label.setText("Arduino Baud Rate") self.arduino_baud_rate_comboBox = QtWidgets.QComboBox(self.path_and_comm_tab) self.arduino_baud_rate_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 540 * globals.S_H_R, 481* globals.S_W_R, 31 * globals.S_H_R)) self.tabWidget.addTab(self.path_and_comm_tab, "") #----------------------------------------------------------------------- # Tab Widget -> Lighting and Color Tab self.color_tab = QtWidgets.QWidget() self.color_tab.setObjectName("color_tab") #----------------------------------------------------------------------- # Tab Widget -> Tutoring Tab -> Timing Section 
self.timingsettings_label = QtWidgets.QLabel(self.color_tab) self.timingsettings_label.setGeometry(QtCore.QRect(200 * globals.S_W_R, 10 * globals.S_H_R, 151 * globals.S_W_R, 20 * globals.S_H_R)) self.timingsettings_label.setObjectName("timingsettings_label") self.chord_tick_tolerance_label = QtWidgets.QLabel(self.color_tab) self.chord_tick_tolerance_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 40* globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R)) self.chord_tick_tolerance_label.setText("Chord Tick Tolerance:") self.chord_tick_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab) self.chord_tick_tolerance_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 40 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R)) self.chord_sum_tolerance_label = QtWidgets.QLabel(self.color_tab) self.chord_sum_tolerance_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 80 * globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R)) self.chord_sum_tolerance_label.setText("Chord Sum Tolerance:") self.chord_sum_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab) self.chord_sum_tolerance_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 80 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R)) self.record_chord_tolerance_label = QtWidgets.QLabel(self.color_tab) self.record_chord_tolerance_label.setGeometry(QtCore.QRect(20* globals.S_W_R, 120 * globals.S_H_R, 200* globals.S_W_R, 20 * globals.S_H_R)) self.record_chord_tolerance_label.setText("Record Chord Tolerance:") self.record_chord_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab) self.record_chord_tolerance_lineEdit.setGeometry(QtCore.QRect(200* globals.S_W_R, 120 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R)) self.arduino_handshake_timeout_label = QtWidgets.QLabel(self.color_tab) self.arduino_handshake_timeout_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 160* globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R)) self.arduino_handshake_timeout_label.setText("Arduino 
Handshake Timeout:") self.arduino_handshake_timeout_lineEdit = QtWidgets.QLineEdit(self.color_tab) self.arduino_handshake_timeout_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 160 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R)) self.line = QtWidgets.QFrame(self.color_tab) self.line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 230 * globals.S_H_R, 481 * globals.S_W_R, 16 * globals.S_H_R)) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") #----------------------------------------------------------------------- # Tab Widget -> Tutoring Tab -> Color Section self.colorsettings_label = QtWidgets.QLabel(self.color_tab) self.colorsettings_label.setGeometry(QtCore.QRect(210 * globals.S_W_R, 250 * globals.S_H_R, 81 * globals.S_W_R, 20 * globals.S_H_R)) self.colorsettings_label.setObjectName("colorsettings_label_2") bw_y = ( 250 + 40 ) * globals.S_H_R space = 20 * globals.S_H_R self.black_key_label = QtWidgets.QLabel(self.color_tab) self.black_key_label.setGeometry(QtCore.QRect(80 * globals.S_W_R, bw_y, 61 * globals.S_W_R, 16 * globals.S_H_R)) self.black_key_label.setObjectName("black_key_label") self.black_key_pushButton = QtWidgets.QPushButton(self.color_tab) self.black_key_pushButton.setGeometry(QtCore.QRect(40 * globals.S_W_R, bw_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R)) self.black_key_pushButton.setText("") self.black_key_pushButton.setObjectName("black_key_pushButton") self.white_key_label = QtWidgets.QLabel(self.color_tab) self.white_key_label.setGeometry(QtCore.QRect(360 * globals.S_W_R, bw_y, 71 * globals.S_W_R, 16 * globals.S_H_R)) self.white_key_label.setObjectName("white_key_label") self.white_key_pushButton = QtWidgets.QPushButton(self.color_tab) self.white_key_pushButton.setGeometry(QtCore.QRect(320 * globals.S_W_R, bw_y + space, 141 * globals.S_W_R, 61 * globals.S_W_R)) self.white_key_pushButton.setText("") 
self.white_key_pushButton.setObjectName("white_key_pushButton") wu_y = ( 390 + 40 ) * globals.S_H_R self.wrong_label = QtWidgets.QLabel(self.color_tab) self.wrong_label.setGeometry(QtCore.QRect(75 * globals.S_W_R, wu_y, 71 * globals.S_W_R, 16 * globals.S_H_R)) self.wrong_label.setObjectName("wrong_label") self.wrong_pushButton = QtWidgets.QPushButton(self.color_tab) self.wrong_pushButton.setGeometry(QtCore.QRect(40 * globals.S_W_R, wu_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R)) self.wrong_pushButton.setText("") self.wrong_pushButton.setObjectName("wrong_pushButton") self.upcoming_label = QtWidgets.QLabel(self.color_tab) self.upcoming_label.setGeometry(QtCore.QRect(350 * globals.S_W_R, wu_y, 91 * globals.S_W_R, 16 * globals.S_H_R)) self.upcoming_label.setObjectName("upcoming_label") self.upcoming_pushButton = QtWidgets.QPushButton(self.color_tab) self.upcoming_pushButton.setGeometry(QtCore.QRect(320 * globals.S_W_R, wu_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R)) self.upcoming_pushButton.setText("") self.upcoming_pushButton.setObjectName("upcoming_pushButton") self.tabWidget.addTab(self.color_tab, "") self.retranslate_ui() self.tabWidget.setCurrentIndex(0) self.apply_close_buttonBox.accepted.connect(self.accept) self.apply_close_buttonBox.rejected.connect(self.close) QtCore.QMetaObject.connectSlotsByName(self) def setup_func(self): """ This function places all the slot and signals for the widgets of the settings dialog. 
""" self.browse_button_group = QtWidgets.QButtonGroup() self.browse_button_group.addButton(self.audiveris_pushButton) self.browse_button_group.addButton(self.amazingmidi_pushButton) self.browse_button_group.addButton(self.anthemscore_pushButton) self.browse_button_group.addButton(self.muse_score_pushButton) self.browse_button_group.buttonClicked.connect(self.upload_exe_file) self.browse_button_dict = {self.audiveris_pushButton: ['', self.audiveris_lineEdit, 'audiveris'], self.amazingmidi_pushButton: ['',self.amazingmidi_lineEdit, 'amazing_midi'], self.anthemscore_pushButton: ['', self.anthemscore_lineEdit,'anthemscore'], self.muse_score_pushButton: ['', self.muse_score_lineEdit, 'muse_score']} self.port_dict = {self.piano_port_comboBox: ['','piano'], self.piano_size_comboBox: ['','piano_size'], self.arduino_port_comboBox: ['','arduino'], self.arduino_baud_rate_comboBox: ['', 'arduino baud rate']} self.piano_size_comboBox.addItem('76 Key Piano') self.piano_size_comboBox.addItem('88 Key Piano') self.arduino_baud_rate_comboBox.addItem('300') self.arduino_baud_rate_comboBox.addItem('600') self.arduino_baud_rate_comboBox.addItem('1200') self.arduino_baud_rate_comboBox.addItem('4800') self.arduino_baud_rate_comboBox.addItem('9600') self.arduino_baud_rate_comboBox.addItem('14400') self.arduino_baud_rate_comboBox.addItem('19200') self.arduino_baud_rate_comboBox.addItem('28800') self.arduino_baud_rate_comboBox.addItem('38400') self.arduino_baud_rate_comboBox.addItem('57600') self.arduino_baud_rate_comboBox.addItem('115200') self.arduino_baud_rate_comboBox.addItem('230400') self.timing_button_dict = {self.chord_tick_tolerance_lineEdit: ['', 'chord tick tolerance'], self.chord_sum_tolerance_lineEdit: ['','chord sum tolerance'], self.record_chord_tolerance_lineEdit: ['', 'record chord tolerance'], self.arduino_handshake_timeout_lineEdit: ['', 'count timeout'] } self.color_button_group = QtWidgets.QButtonGroup() self.color_button_group.addButton(self.black_key_pushButton) 
self.color_button_group.addButton(self.white_key_pushButton) self.color_button_group.addButton(self.wrong_pushButton) self.color_button_group.addButton(self.upcoming_pushButton) self.color_button_group.buttonClicked.connect(self.color_picker) self.color_button_dict = {self.black_key_pushButton: ['','black'], self.white_key_pushButton: ['','white'], self.wrong_pushButton: ['','wrong'], self.upcoming_pushButton: ['','upcoming'] } self.apply_close_buttonBox.button(QtWidgets.QDialogButtonBox.Apply).clicked.connect(self.apply_changes) return None #--------------------------------------------------------------------------- # Path Section Functions def open_file_name_dialog_exe_file(self): """ This file dialog is used to obtain the file location of the .exe file. """ options = QtWidgets.QFileDialog.Options() options |= QtWidgets.QFileDialog.DontUseNativeDialog fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Select .exe/.bat File", "", "Executiable Files (*.exe);; Batch Files (*.bat)", options=options) if fileName: file_dialog_output = str(fileName) else: return "" file_dialog_output = file_dialog_output.replace('/' , '\\' ) return file_dialog_output def open_directory_name_dialog_exe_path(self): """ This file dialog is used to obtain the folder directory of the desired exe folder location. """ options = QtWidgets.QFileDialog.Options() options |= QtWidgets.QFileDialog.ShowDirsOnly options |= QtWidgets.QFileDialog.DontUseNativeDialog directory = QtWidgets.QFileDialog.getExistingDirectory(self, caption = 'Select a folder', options = options) if directory: file_dialog_output = str(directory) else: return "" file_dialog_output = file_dialog_output.replace('/' , '\\' ) return file_dialog_output def upload_exe_file(self, button): """ This function decides wether to use the exe file or exe path function. If the pushButton is for audiveris, utlize the exe path. Else, use the standard exe file function. 
""" upload_exe_path = self.open_file_name_dialog_exe_file() if upload_exe_path != '': self.browse_button_dict[button][0] = upload_exe_path self.update_settings() return None #--------------------------------------------------------------------------- # Color def color_picker(self, button): """ This function creates a QColorDialog when the user clicks the color wheel color. Once the user selects a color, it will display the RGB colors in the lineedits. """ color = QtWidgets.QColorDialog.getColor() if color.isValid(): # Converting Hexadecimal to RGB values value = color.name() value = value.lstrip('#') rgb = tuple(int(value[i:i+2], 16) for i in (0, 2, 4)) rgb = str(rgb)[1:-1].replace(" ","") self.color_button_dict[button][0] = rgb button.setStyleSheet('background-color:rgb({})'.format(rgb)) return None #--------------------------------------------------------------------------- # Reading Settings def read_all_settings(self): """ This function reads all the settings in the config.yml and stores them in dictionaries that correlate the settings to the widgets. """ cfg = read_config() # Path Settings for key in self.browse_button_dict.keys(): self.browse_button_dict[key][0] = cfg['app_path'][self.browse_button_dict[key][2]] # Mp3 to midi Settings self.mp3_to_midi_setting = cfg['app_path']['open_close_source'] # Port Settings for key in self.port_dict.keys(): self.port_dict[key][0] = cfg['port'][self.port_dict[key][1]] # Timing Settings for key in self.timing_button_dict.keys(): self.timing_button_dict[key][0] = cfg['timing'][self.timing_button_dict[key][1]] # Color Settings for key in self.color_button_dict.keys(): self.color_button_dict[key][0] = cfg['color'][self.color_button_dict[key][1]] return None def update_settings(self): """ This function places the information of the settings into the widgets, such as placing the value or color to the widget. 
""" # Path Settings for button in self.browse_button_dict: self.browse_button_dict[button][1].setText(self.browse_button_dict[button][0]) # Mp3 to midi Settings if self.mp3_to_midi_setting == 'open_source': self.open_source_radioButton.setChecked(True) self.close_source_radioButton.setChecked(False) elif self.mp3_to_midi_setting == 'close_source': self.close_source_radioButton.setChecked(True) self.open_source_radioButton.setChecked(False) # Port Settings for key in self.port_dict.keys(): if self.port_dict[key][1] == 'piano_size': key.setCurrentText(str(self.port_dict[key][0]) + ' Key Piano') elif key == self.arduino_baud_rate_comboBox: key.setCurrentText(str(self.port_dict[key][0])) else: key.addItem(str(self.port_dict[key][0])) key.setCurrentText(str(self.port_dict[key][0])) # Timing Settings for key in self.timing_button_dict.keys(): key.setText(str(self.timing_button_dict[key][0])) # Color Settings for key in self.color_button_dict.keys(): rgb = self.color_button_dict[key][0] key.setStyleSheet('background-color:rgb({})'.format(rgb)) return None def apply_changes(self): """ This fuction applies any of the changes done by the user to the settings. This changes are recorded in the config.yml file. 
""" cfg = read_config() # Apply Path for button in self.browse_button_dict: text = self.browse_button_dict[button][1].text() cfg['app_path'][self.browse_button_dict[button][2]] = text # Mp3 to midi Settings if self.open_source_radioButton.isChecked(): cfg['app_path']['open_close_source'] = 'open_source' elif self.close_source_radioButton.isChecked(): cfg['app_path']['open_close_source'] = 'close_source' # Color Settings for key in self.color_button_dict.keys(): rgb = self.color_button_dict[key][0] cfg['color'][self.color_button_dict[key][1]] = rgb for key in self.timing_button_dict.keys(): cfg['timing'][self.timing_button_dict[key][1]] = int(key.text()) # Port Settings for key in self.port_dict.keys(): index = key.currentIndex() if index == -1: continue if key == self.piano_port_comboBox or key == self.arduino_port_comboBox: cfg['port'][self.port_dict[key][1]] = key.currentText() elif key == self.piano_size_comboBox: cfg['port'][self.port_dict[key][1]] = key.currentText()[:2] elif key == self.arduino_baud_rate_comboBox: cfg['port'][self.port_dict[key][1]] = int(key.currentText()) update_config(cfg) print("Applied Changes") self.finish_apply_signal.emit() return None #--------------------------------------------------------------------------- # Misc Functions def retranslate_ui(self): """ This function places all the text content in the configuration dialog widgets. 
""" _translate = QtCore.QCoreApplication.translate self.anthemscore_pushButton.setText(_translate("Dialog", "Browse")) self.anthemscore_label.setText(_translate("Dialog", "AnthemScore [.exe] (Optional)")) self.audiveris_pushButton.setText(_translate("Dialog", "Browse")) self.audiveris_label.setText(_translate("Dialog", "Audiveris [folder]")) self.amazingmidi_pushButton.setText(_translate("Dialog", "Browse")) self.amazingmidi_label.setText(_translate("Dialog", "AmazingMIDI [.exe]")) self.muse_score_label.setText(_translate("Dialog", "MuseScore [.exe]")) self.muse_score_pushButton.setText(_translate("Dialog", "Browse")) self.configure_path_label.setText(_translate("Dialog", "Configure the path for each program.")) self.mp3_to_midi_converter_label.setText(_translate("Dialog", "MP3 to MIDI Converter:")) self.open_source_radioButton.setText(_translate("Dialog", "Open-Source")) self.close_source_radioButton.setText(_translate("Dialog", "Close-Source")) self.piano_port_label.setText(_translate("Dialog", "Piano Port")) self.piano_size_label.setText(_translate("Dialog", "Piano Size")) self.portsettings_label.setText(_translate("Dialog", "Port Settings")) self.arduinoport_label.setText(_translate("Dialog", "Arduino Port")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.path_and_comm_tab), _translate("Dialog", "Path and Communication Settings")) self.timingsettings_label.setText(_translate("Dialog", "Timing Settings")) self.colorsettings_label.setText(_translate("Dialog", "Color Settings")) self.black_key_label.setText(_translate("Dialog", "Black Keys")) self.white_key_label.setText(_translate("Dialog", "White Keys")) self.wrong_label.setText(_translate("Dialog", "Wrong Note")) self.upcoming_label.setText(_translate("Dialog", "Upcoming Note")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.color_tab), _translate("Dialog", "Tutoring Settings")) #----------------------------------------------------------------------- # Text Scaling font = 
self.anthemscore_label.font() font.setPixelSize(13) print("Prescaling Font Pixel Size: ", font.pixelSize()) font.setPixelSize(font.pixelSize() * globals.S_W_R) print("Postscaling Font Pixel Size: ", font.pixelSize()) text_group = [self.anthemscore_pushButton, self.anthemscore_label, self.anthemscore_lineEdit, self.audiveris_pushButton, self.audiveris_label, self.audiveris_lineEdit, self.amazingmidi_pushButton, self.amazingmidi_label, self.amazingmidi_lineEdit, self.muse_score_pushButton, self.muse_score_label, self.muse_score_lineEdit, self.configure_path_label, self. mp3_to_midi_converter_label, self.piano_port_label, self.piano_size_label, self.piano_size_comboBox, self.portsettings_label, self.arduinoport_label, self.piano_port_comboBox, self.arduino_port_comboBox, self.timingsettings_label, self.colorsettings_label, self.black_key_label, self.white_key_label, self.wrong_label, self.upcoming_label, self.arduino_baud_rate_comboBox, self.open_source_radioButton, self.close_source_radioButton, self.chord_tick_tolerance_label, self.chord_tick_tolerance_lineEdit, self.chord_sum_tolerance_label, self.chord_sum_tolerance_lineEdit, self.record_chord_tolerance_label, self.record_chord_tolerance_lineEdit, self.arduino_handshake_timeout_label, self.arduino_handshake_timeout_lineEdit, self.apply_close_buttonBox, self.tabWidget] for element in text_group: element.setFont(font) #------------------------------------------------------------------------------- # Main Code if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) config_dialog = ConfigDialog() config_dialog.show() sys.exit(app.exec_())
48.336195
184
0.671797
4,302
33,787
4.96676
0.095769
0.071138
0.0417
0.046333
0.600178
0.489774
0.405813
0.335564
0.296298
0.158188
0
0.021526
0.19839
33,787
698
185
48.405444
0.76739
0.108089
0
0.123487
0
0
0.068422
0.008553
0
0
0
0
0
1
0.03632
false
0
0.021792
0
0.104116
0.007264
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9da270a879210ead826c86bdc8c185c7e2c0effa
1,814
py
Python
valorant/caller.py
frissyn/valorant.py
49abceab5cc1f3af016ce0b1d253d10089aeb0b4
[ "MIT" ]
56
2021-01-22T01:48:23.000Z
2022-03-31T20:44:23.000Z
valorant/caller.py
Tominous/valorant.py
b462441ab4ab403123ad245cab30f3abbd891a66
[ "MIT" ]
20
2021-02-03T10:40:37.000Z
2022-03-24T11:23:57.000Z
valorant/caller.py
Tominous/valorant.py
b462441ab4ab403123ad245cab30f3abbd891a66
[ "MIT" ]
15
2021-03-24T01:17:58.000Z
2022-02-01T02:10:27.000Z
import requests

from .values import ROUTES
from .values import LOCALES
from .values import REGIONS
from .values import ENDPOINTS


def value_check(*args):
    """Return True if every argument is a known route, locale, or region.

    Bug fix: the previous revision returned True from inside the loop
    after inspecting only the FIRST argument, so any invalid value after
    the first one was silently accepted.  All arguments are validated now.

    Raises:
        ValueError: for the first argument not present in the known keys.
    """
    KEYS = ROUTES + LOCALES + REGIONS
    for arg in args:
        if arg not in KEYS:
            raise ValueError(f"invalid locale/region/route value: {arg!r}")
    return True


class WebCaller(object):
    """Thin wrapper around the Riot web API using a shared requests.Session."""

    def __init__(self, token: str, locale: str, region: str, route: str):
        self.base = "https://{root}.api.riotgames.com/"
        self.eps = ENDPOINTS["web"]
        self.sess = requests.Session()
        self.sess.params.update({"locale": locale})
        self.sess.headers.update(
            {
                "Accept-Charset": "application/x-www-form-urlencoded; charset=UTF-8",
                "User-Agent": "Mozilla/5.0",
                "X-Riot-Token": token,
            }
        )

        # value_check raises on invalid input, so these assignments only
        # execute for validated values.
        if value_check(locale, region, route):
            self.locale = locale
            self.region = region
            self.route = route

    def call(self, m: str, ep: str, params=None, route=False, **kw):
        """Perform an HTTP request against a named endpoint.

        Args:
            m: HTTP method name (e.g. "GET").
            ep: key into the endpoint table (``self.eps``).
            params: optional query parameters for the request.
            route: when True, format the URL with the routing value
                instead of the region.
            **kw: substitutions for the endpoint URL template.

        Returns:
            The decoded JSON response body.

        Raises:
            ValueError: if ``ep`` is not a known endpoint.
            requests.HTTPError: on a non-2xx response status.
        """
        if ep not in self.eps:
            raise ValueError(f"unknown endpoint: {ep!r}")

        prefix = self.base.format(root=self.route if route else self.region)
        url = prefix + self.eps[ep].format(**kw)
        r = self.sess.request(m, url, params=params)
        r.raise_for_status()

        return r.json()


class ClientCaller(object):
    """Session holder for the player-data (pvp.net) client API."""

    def __init__(self, token: str):
        self.base = "https://pd.{code}.a.pvp.net/"
        self.token = token
        self.sess = requests.Session()
        self.sess.headers.update(
            {
                "Authorization": f"Bearer {token}",
                "Content-Type": "application/json",
                "X-Riot-Entitlements-JWT": "riot_entitlement",
            }
        )
27.484848
85
0.555678
212
1,814
4.693396
0.424528
0.048241
0.064322
0.046231
0.112563
0.112563
0
0
0
0
0
0.002435
0.320838
1,814
65
86
27.907692
0.805195
0
0
0.156863
0
0
0.142778
0.031422
0
0
0
0
0
1
0.078431
false
0.019608
0.098039
0
0.254902
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9da470ea36af0b767f746d020e41a7f0c5dba94a
153
py
Python
python/niveau1/2-Repetitions/6.py
ThomasProg/France-IOI
03ea502e03f686d74ecf31a17273aded7b8e8a1f
[ "MIT" ]
2
2022-02-13T13:35:13.000Z
2022-03-31T21:02:11.000Z
python/niveau1/2-Repetitions/6.py
ThomasProg/France-IOI
03ea502e03f686d74ecf31a17273aded7b8e8a1f
[ "MIT" ]
null
null
null
python/niveau1/2-Repetitions/6.py
ThomasProg/France-IOI
03ea502e03f686d74ecf31a17273aded7b8e8a1f
[ "MIT" ]
1
2020-11-15T15:21:24.000Z
2020-11-15T15:21:24.000Z
# Print three rows of 30 repeated "<letter>_" tokens.  The first two rows
# end with a newline; the final row deliberately does not.
for letter in "ab":
    print((letter + "_") * 30)
print("c_" * 30, end="")
15.3
23
0.51634
26
153
2.923077
0.384615
0.157895
0.236842
0.434211
0.921053
0.921053
0.684211
0.684211
0
0
0
0.051724
0.24183
153
10
24
15.3
0.603448
0
0
0.625
0
0
0.038961
0
0
0
0
0
0
1
0
false
0
0
0
0
0.625
1
0
0
null
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
9da846794dabe811239a290251111e03ccfb593a
1,256
py
Python
test_LearnSubtitles.py
heitor31415/LearnSubtitles
153178ea11d700a49a1f3692de39e8fc81e3cc4e
[ "MIT" ]
8
2020-02-13T03:08:25.000Z
2021-01-11T20:28:39.000Z
test_LearnSubtitles.py
heitor31415/LearnSubtitles
153178ea11d700a49a1f3692de39e8fc81e3cc4e
[ "MIT" ]
1
2020-04-28T19:48:16.000Z
2020-04-29T12:28:15.000Z
test_LearnSubtitles.py
heitor31415/LearnSubtitles
153178ea11d700a49a1f3692de39e8fc81e3cc4e
[ "MIT" ]
1
2020-03-14T00:46:36.000Z
2020-03-14T00:46:36.000Z
import os

import pytest
from typing import Any, Callable, Dict, List

import LearnSubtitles as ls


def prepare(language: str) -> List:
    """
    Create LearnSubtitles objects for every subtitle in folder 'language'
    """
    test_dir = "testfiles/" + language
    return [
        ls.LearnSubtitles(os.path.abspath(os.path.join(test_dir, entry)), language)
        for entry in os.listdir(test_dir)
    ]


languages = ["de", "en", "pt"]  # supported languages


def test_LearnSubtitles_parsing():
    # Every parsed subtitle in every supported language must yield text.
    for language in languages:
        for sub in prepare(language):
            assert len(sub.text) != 0


def test_LearnSubtitles_bad_file():
    # A missing file and a malformed file raise distinct errors.
    with pytest.raises(FileNotFoundError):
        ls.LearnSubtitles(os.path.abspath("testfiles/fail/fail.srt"), "en")
    with pytest.raises(ls.LearnSubtitlesError):
        ls.LearnSubtitles(os.path.abspath("testfiles/fail/bad_file.srt"), "en")


def test_LearnSubtitles_level():
    # Difficulty scores must decrease monotonically from A1 to B1.
    levels = ["A1", "A2", "B1"]
    subs = []
    for level in levels:
        subs.append(
            ls.LearnSubtitles(
                "testfiles/de/Nicos Weg – " + level + " – Ganzer Film - German.srt", "de"
            )
        )
    assert subs[0].film_level > subs[1].film_level
    assert subs[1].film_level > subs[2].film_level
26.723404
85
0.648089
161
1,256
4.975155
0.403727
0.0799
0.067416
0.082397
0.141074
0.104869
0.104869
0
0
0
0
0.008239
0.226911
1,256
46
86
27.304348
0.814624
0.072452
0
0.0625
0
0
0.112263
0.043178
0
0
0
0
0.09375
1
0.125
false
0
0.125
0
0.28125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9daad46c18973b22ab6ea33d444cd0187d68fcac
2,455
py
Python
programs/graduation-project/featureselection.py
Dilmuratjan/MyProject
26f4ee708eb4a7ceef780842ad737fef64a39d7e
[ "WTFPL" ]
2
2017-02-19T15:11:06.000Z
2017-02-22T18:34:10.000Z
programs/graduation-project/featureselection.py
Dilmuratjan/MyProject
26f4ee708eb4a7ceef780842ad737fef64a39d7e
[ "WTFPL" ]
null
null
null
programs/graduation-project/featureselection.py
Dilmuratjan/MyProject
26f4ee708eb4a7ceef780842ad737fef64a39d7e
[ "WTFPL" ]
4
2017-02-26T08:10:30.000Z
2017-05-02T10:02:03.000Z
import pandas as pd
import numpy as np
from time import time
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier

# Load the train/test sheets from the stats workbook.
train = pd.read_excel('stats.xls', sheet_name='train')
test = pd.read_excel('stats.xls', sheet_name='test')

array_train = train.values
array_test = test.values

# Columns 1..10 carry the ten feature columns; column 0 is assumed to be
# an identifier — TODO confirm against the workbook layout.
X = array_train[0:, 1:11]
y = np.asarray(train['状态'], dtype="|S6")
X_test = array_test[0:, 1:11]

# Build a forest and compute the feature importances.
# Fixes vs. the previous revision, all of which crashed at runtime:
#  * the status message referenced an undefined name `n_jobs` (NameError),
#  * max_features=128 exceeded the 10 available features (ValueError in fit),
#  * the importances were reshaped against an undefined `data.images`
#    (NameError) — the reshape only made sense for an image dataset.
print("Fitting ExtraTreesClassifier on the training data...")
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000, random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))

importances = forest.feature_importances_

# Plot the per-feature importances as a bar chart (one bar per column).
plt.bar(np.arange(importances.shape[0]), importances)
plt.title("Feature importances with forests of trees")
plt.show()
27.277778
81
0.638697
324
2,455
4.716049
0.441358
0.058901
0.04712
0.019634
0.127618
0.066754
0.066754
0
0
0
0
0.020221
0.153971
2,455
90
82
27.277778
0.715455
0.526273
0
0
0
0
0.15
0
0
0
0
0
0
1
0
false
0
0.375
0
0.375
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
9dabb9b1903ea38cf40c186f6bcbd195fb25dff0
618
py
Python
logpot/admin/file.py
moremorefor/logpot
26a48766dc764f93aa29f6d949af8a05de5d9152
[ "MIT" ]
4
2016-08-31T08:03:09.000Z
2019-03-15T07:11:49.000Z
logpot/admin/file.py
moremorefor/logpot
26a48766dc764f93aa29f6d949af8a05de5d9152
[ "MIT" ]
4
2021-05-10T00:34:14.000Z
2022-03-11T23:22:06.000Z
logpot/admin/file.py
moremorefor/logpot
26a48766dc764f93aa29f6d949af8a05de5d9152
[ "MIT" ]
1
2017-08-08T22:51:13.000Z
2017-08-08T22:51:13.000Z
#-*- coding: utf-8 -*-
from logpot.admin.base import AuthenticateView
from logpot.utils import ImageUtil

from flask import flash, redirect
from flask_admin import expose
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin.babel import gettext

import os
import os.path as op
from operator import itemgetter
from datetime import datetime


class EntryFileView(AuthenticateView, FileAdmin):
    """Read-only file browser for entry images.

    NOTE(review): Flask-Admin's FileAdmin reads these configuration flags
    as *class* attributes.  The previous layout assigned them as locals
    inside __init__, where they had no effect; they are declared at class
    level here — confirm this matches the intended admin behavior.
    """

    # Browsing only: no destructive or upload operations are exposed.
    can_delete = False
    can_upload = False
    can_mkdir = False
    # Restrict visible files to the known image extensions.
    allowed_extensions = ImageUtil.ALLOWED_EXTENSIONS

    def __init__(self, dirpath, **kwargs):
        super().__init__(dirpath, **kwargs)
24.72
53
0.775081
79
618
5.860759
0.518987
0.077754
0.090713
0
0
0
0
0
0
0
0
0.001923
0.158576
618
24
54
25.75
0.888462
0.033981
0
0
0
0
0
0
0
0
0
0
0
1
0.058824
false
0
0.588235
0
0.941176
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
9dabcfa6524e1e4a0e2b51dbe24a327024815ea3
24
py
Python
emailutil/__init__.py
cityofaustin/atd-utils-email
bcf2c55fe770745a2ed6da22e44971ef6ceaae37
[ "CC0-1.0" ]
null
null
null
emailutil/__init__.py
cityofaustin/atd-utils-email
bcf2c55fe770745a2ed6da22e44971ef6ceaae37
[ "CC0-1.0" ]
null
null
null
emailutil/__init__.py
cityofaustin/atd-utils-email
bcf2c55fe770745a2ed6da22e44971ef6ceaae37
[ "CC0-1.0" ]
null
null
null
from .emailutil import *
24
24
0.791667
3
24
6.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.125
24
1
24
24
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9dacec32c244293fcf0c09720725cd6c562e10da
4,888
py
Python
fast_downloader_mt/main.py
Kirozen/fast-downloader
febdcc8b6a6ad3b8d263a8923b8f24e8402df618
[ "MIT" ]
null
null
null
fast_downloader_mt/main.py
Kirozen/fast-downloader
febdcc8b6a6ad3b8d263a8923b8f24e8402df618
[ "MIT" ]
null
null
null
fast_downloader_mt/main.py
Kirozen/fast-downloader
febdcc8b6a6ad3b8d263a8923b8f24e8402df618
[ "MIT" ]
null
null
null
from __future__ import annotations

import multiprocessing
import os
import re
import sys
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from urllib.parse import urlparse

import click
import requests
from requests.models import HTTPError
from rich.progress import (
    BarColumn,
    DownloadColumn,
    Progress,
    TextColumn,
    TimeRemainingColumn,
    TransferSpeedColumn,
)


@dataclass
class DownloadFile:
    """One download target: a list of mirror URLs plus a destination dir."""

    urls: list[str]
    dest: Path = Path.cwd()
    filename: str = field(init=False)

    def __post_init__(self):
        # Default the local filename to the last path component of the
        # first mirror URL; parse_aria2 may override it via "out=".
        self.filename = Path(self.urls[0]).name

    @property
    def filepath(self):
        return self.dest / self.filename


BUFFER_SIZE = 32768

progress = Progress(
    TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
    BarColumn(bar_width=None),
    "[progress.percentage]{task.percentage:>3.1f}%",
    "•",
    DownloadColumn(),
    "•",
    TransferSpeedColumn(),
    "•",
    TimeRemainingColumn(),
)


def parse_aria2(data: list[str], destination: Path):
    """Parse an aria2-style input file into DownloadFile entries.

    Each non-comment "http..." line starts a new entry (tab-separated
    mirror URLs); an indented "out=NAME" line renames the previous entry.
    """
    files = []
    out_re = re.compile(r"^\s+out=(?P<out>.*)$")
    for line in data:
        if line.startswith("#") or not line:
            continue
        if line.startswith("http"):
            files.append(DownloadFile(line.split("\t"), destination))
        else:
            match_out = out_re.match(line)
            if match_out:
                files[-1].filename = match_out.groupdict()["out"]
    return files


def get_inputs(inputs: list[str], destination: Path, aria2_compatibility: bool):
    """Read every input file and return the DownloadFile entries it lists."""
    paths = []
    # Renamed loop variable: the previous `input` shadowed the builtin.
    for input_file in inputs:
        lines = Path(input_file).read_text().splitlines(keepends=False)
        if aria2_compatibility:
            paths.extend(parse_aria2(lines, destination))
        else:
            paths.extend(
                DownloadFile([url], destination)
                for url in lines
                if url.startswith("http")
            )
    return paths


def downloader(downloadfile: DownloadFile, buffer_size: int, quiet: bool):
    """Download one file, trying each mirror URL until one responds.

    Progress reporting is skipped entirely when `quiet` is set.
    """
    if not quiet:
        task_id = progress.add_task(
            "download",
            filename=downloadfile.filename,
        )
    iterator = iter(downloadfile.urls)
    response = None
    try:
        while not response:
            url = next(iterator)
            try:
                response = requests.get(url, allow_redirects=True, stream=True)
                response.raise_for_status()
            except HTTPError:
                response = None

        if not quiet:
            # Bug fix: servers may omit Content-Length, and int(None)
            # raised a TypeError here.  A None total renders as an
            # indeterminate progress bar instead.
            length = response.headers.get("content-length")
            progress.update(task_id, total=int(length) if length else None)
        with open(downloadfile.filepath, "wb") as handler:
            if not quiet:
                progress.start_task(task_id)
            for data in response.iter_content(chunk_size=buffer_size):
                handler.write(data)
                if not quiet:
                    progress.update(task_id, advance=len(data))
    except StopIteration:
        # Every mirror failed its HTTP status check.
        print("Urls are not available")


def executor(threads, downloadfiles, buffer_size, quiet):
    """Fan the downloads out over a thread pool, longest filenames first."""
    with ThreadPoolExecutor(max_workers=threads) as pool:
        for downloadfile in sorted(
            downloadfiles, key=lambda df: len(df.filename), reverse=True
        ):
            try:
                for url in downloadfile.urls:
                    urlparse(url)
            except ValueError:
                print(f"An url in {downloadfile.urls} is not valid!", file=sys.stderr)
                continue
            pool.submit(downloader, downloadfile, buffer_size, quiet)


@click.command()
@click.option(
    "-t",
    "--threads",
    default=lambda: multiprocessing.cpu_count(),
    type=click.IntRange(min=1, max=1000, clamp=True),
    help="thread number",
)
@click.option(
    "-i",
    "--input",
    "inputs",
    multiple=True,
    type=click.Path(exists=True, file_okay=True),
    help="input file",
)
@click.option("-q", "--quiet", is_flag=True)
@click.option(
    "-d",
    "--destination",
    type=click.Path(dir_okay=True, allow_dash=True),
    default=Path(os.getcwd()),
)
@click.option("--aria2-compatibility", is_flag=True)
@click.option(
    "--buffer-size", type=click.IntRange(min=1, clamp=True), default=BUFFER_SIZE
)
@click.argument("urls", nargs=-1, type=click.Path())
def fast_downloader(
    threads, inputs, quiet, destination, buffer_size, aria2_compatibility, urls
):
    """Collect targets from the command line and input files, then download."""
    download_urls = (DownloadFile([url], Path(destination)) for url in urls)
    download_files = list(
        chain(download_urls, get_inputs(inputs, Path(destination), aria2_compatibility))
    )
    if quiet:
        executor(threads, download_files, buffer_size, quiet)
    else:
        with progress:
            executor(threads, download_files, buffer_size, quiet)


if __name__ == "__main__":
    fast_downloader()
28.091954
88
0.625818
550
4,888
5.447273
0.341818
0.033378
0.013351
0.014686
0.056742
0.028705
0.028705
0
0
0
0
0.006354
0.259411
4,888
173
89
28.254335
0.820442
0
0
0.165563
0
0
0.067308
0.019231
0
0
0
0
0
1
0.046358
false
0
0.092715
0.006623
0.18543
0.013245
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dad12fdcaa78561145c587bd080d424b377a384
1,060
py
Python
backend/app/core/security.py
rufusnufus/BTSParking
3bb6e7fd20943f258e297428ab1624c4f2786444
[ "MIT" ]
2
2021-11-13T08:05:14.000Z
2021-12-02T11:36:11.000Z
backend/app/core/security.py
rufusnufus/BTSParking
3bb6e7fd20943f258e297428ab1624c4f2786444
[ "MIT" ]
44
2021-11-23T10:06:11.000Z
2021-12-18T07:23:22.000Z
backend/app/core/security.py
rufusnufus/BTSParking
3bb6e7fd20943f258e297428ab1624c4f2786444
[ "MIT" ]
null
null
null
import os import time from hashlib import sha256 import requests from dotenv import load_dotenv from fastapi.security import OAuth2PasswordBearer BASE_DIR = os.path.dirname(os.path.abspath(__file__)) load_dotenv(os.path.join(BASE_DIR, "../.env")) oauth2_scheme = OAuth2PasswordBearer(tokenUrl="api/v1/activate-login-code") def create_access_code(email: str, expires_in: int) -> tuple: link_create_time = time.time() link_expire_time = time.time() + expires_in user_data = f"{email}{link_create_time}" login_code = sha256(user_data.encode("utf-8")).hexdigest() return (login_code, link_expire_time) def verified_email(email: str) -> bool: response = requests.get( os.environ.get("REAL_EMAIL_API_LINK"), params={"email": email}, headers={"Authorization": "Bearer " + os.environ.get("REAL_EMAIL_API_KEY")}, ) response_status = response.json()["status"] return False if response_status == "invalid" else True def cookie_is_none(auth_token: str) -> bool: return False if auth_token else True
30.285714
84
0.724528
147
1,060
4.965986
0.47619
0.043836
0.038356
0.043836
0.065753
0.065753
0
0
0
0
0
0.012249
0.15283
1,060
34
85
31.176471
0.800668
0
0
0
0
0
0.130189
0.048113
0
0
0
0
0
1
0.12
false
0.08
0.24
0.04
0.48
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
9dad8057a50b53867020fcecaeb0676d2cfff102
4,362
py
Python
sitch/sitchlib/geo_correlator.py
codecuisine/sensor
06fb0908178af1ab673b95e7f435b873cc62e61b
[ "ECL-2.0", "Apache-2.0", "BSD-2-Clause" ]
68
2016-08-08T17:28:59.000Z
2021-11-26T09:31:52.000Z
sitch/sitchlib/geo_correlator.py
codecuisine/sensor
06fb0908178af1ab673b95e7f435b873cc62e61b
[ "ECL-2.0", "Apache-2.0", "BSD-2-Clause" ]
61
2016-08-20T21:01:01.000Z
2020-07-22T06:10:45.000Z
sitch/sitchlib/geo_correlator.py
codecuisine/sensor
06fb0908178af1ab673b95e7f435b873cc62e61b
[ "ECL-2.0", "Apache-2.0", "BSD-2-Clause" ]
40
2017-01-28T23:06:22.000Z
2021-08-13T15:09:43.000Z
"""Correlate based on geograpgic information.""" from alert_manager import AlertManager from utility import Utility class GeoCorrelator(object): """Geographic correlator.""" def __init__(self, device_id): """Initialize the Geographic Correlator.""" self.geo_anchor = {} self.threshold = 100 self.time_threshold = 10 self.device_id = device_id def correlate(self, scan_bolus): """Correlate one geo event. The first time we get a geo event, we set the state and print a message to stdout to that effect. Every subsequent message is compared against the geo_anchor. Once the anchor is set, it does not change for the life of the instance. Correlation of subsequent events causes the distance beween the anchor and current event to be determined and if the threshold of 10km is exceeded, an alert is returned. Args: scan_bolus (tuple): Two-item tuple. Position 0 contains the scan type, which is not checked. We should only ever have geo events coming through this method. Position 1 is expected to contain geo json. Returns: list: List of alerts. If no alerts are fired, the list returned is zero-length. """ scan_body = scan_bolus[1] if self.geo_anchor == {}: self.geo_anchor = scan_body print("GeoCorrelator: Setting anchor to %s" % str(scan_body)) alerts = [] else: alerts = GeoCorrelator.geo_drift_check(self.geo_anchor, scan_body, self.threshold, self.device_id) for alert in GeoCorrelator.time_drift_check(scan_body, self.time_threshold, self.device_id): alerts.append(alert) for alert in alerts: alert[1]["site_name"] = scan_body["site_name"] alert[1]["sensor_name"] = scan_body["sensor_name"] alert[1]["sensor_id"] = scan_body["sensor_id"] return alerts @classmethod def geo_drift_check(cls, geo_anchor, gps_scan, threshold, device_id): """Fire alarm if distance between points exceeds threshold. Args: geo_anchor (dict): Geographic anchor point, usually stored in an instance variable and passed in via the `correlate()` method. 
gps_scan (dict): Same format as geo_anchor, expects the same format as `geo_anchor`. threshold (int): Alerting threshold in km. Returns: list: list of alerts (usually just one) or an empty list of there are no alerts. """ lat_1 = geo_anchor["location"]["coordinates"][1] lon_1 = geo_anchor["location"]["coordinates"][0] lat_2 = gps_scan["location"]["coordinates"][1] lon_2 = gps_scan["location"]["coordinates"][0] current_distance = Utility.calculate_distance(lon_1, lat_1, lon_2, lat_2) if current_distance < threshold: return [] else: message = "Possible GPS spoofing attack! %d delta from anchor at %s / %s %s !" % (current_distance, gps_scan["site_name"], gps_scan["sensor_name"], Utility.create_gmaps_link(lat_1, lon_1)) # NOQA alert = AlertManager(device_id).build_alert(300, message, gps_scan["location"]) return[alert] @classmethod def time_drift_check(cls, gps_scan, threshold_mins, device_id): """Checks drift value, alarms if beyond threshold.""" current_delta = gps_scan["time_drift"] if current_delta < threshold_mins: return [] else: message = "Possible GPS time spoofing attack! %d delta from system at %s / %s" % (current_delta, gps_scan["site_name"], gps_scan["sensor_name"]) # NOQA alert = AlertManager(device_id).build_alert(310, message, gps_scan["location"]) return[alert]
44.969072
208
0.570381
505
4,362
4.744554
0.330693
0.035058
0.020033
0.01419
0.231219
0.086811
0.059265
0.026711
0
0
0
0.01162
0.348923
4,362
96
209
45.4375
0.832042
0.323017
0
0.211538
0
0
0.135374
0
0
0
0
0
0
1
0.076923
false
0
0.038462
0
0.192308
0.019231
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dadf1bb28dc34ec81f4c906780d3dcd3137e862
1,697
py
Python
grid_search_results_v1/get_vals_heatmap.py
malfarasplux/pnet2019
ae34d5c84fb4d3985634b237a14dfb69e98b8339
[ "BSD-3-Clause" ]
1
2020-11-29T12:42:30.000Z
2020-11-29T12:42:30.000Z
grid_search_results_v1/get_vals_heatmap.py
malfarasplux/pnet2019
ae34d5c84fb4d3985634b237a14dfb69e98b8339
[ "BSD-3-Clause" ]
null
null
null
grid_search_results_v1/get_vals_heatmap.py
malfarasplux/pnet2019
ae34d5c84fb4d3985634b237a14dfb69e98b8339
[ "BSD-3-Clause" ]
null
null
null
import numpy as np import matplotlib.pyplot as plt N =[20,40,50,75,100,150,200] scale = [0.0001, 0.001, 0.005, 0.01, 0.1, 1, 10] mem = [0.001, 0.01, 0.1, 0.13, 0.25, 0.5, 1] sigexp = [0.01, 0.1, 0.5, 1, 2, 5, 10] val_key = {} with open("./grid_search_results_v1/F1_report.txt") as f: for i, line in enumerate(f): lineval = line.split()[0] print ("line {0} = {1}".format(i, lineval)) val_key[lineval.split(".txt:")[0][7:]] = float(lineval.split(".txt:")[1]) F1_matrix = np.zeros((len(scale),len(mem)),dtype=np.float) N_i = str(200) sigexp_i = str(0.1) for i in range(len(scale)): scale_i = str(scale[i]) for j in range(len(mem)): mem_i = str(mem[j]) key_i = N_i + "_" + scale_i + "_" + mem_i + "_" + sigexp_i F1_matrix[i,j] = val_key[key_i] fig, ax = plt.subplots() im = ax.imshow(F1_matrix) ax.set_title("Grid search F1 opt") ax.set_xticks(np.arange(len(mem))) ax.set_yticks(np.arange(len(scale))) ax.set_xticklabels(mem) ax.set_yticklabels(scale) ax.set_xlabel('mem') ax.set_ylabel('scale') cbar = ax.figure.colorbar(im, ax=ax) # Loop over data dimensions and create text annotations. for i in range(len(scale)): for j in range(len(mem)): text = ax.text(j, i, F1_matrix[i, j], ha="center", va="center", color="w")
38.568182
160
0.476134
245
1,697
3.171429
0.367347
0.045045
0.05148
0.019305
0.136422
0.092664
0
0
0
0
0
0.080675
0.371833
1,697
43
161
39.465116
0.648218
0.031821
0
0.114286
0
0
0.063376
0.023157
0
0
0
0
0
1
0
false
0
0.057143
0
0.057143
0.028571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9daef14a7cdf5e935df51508fb1293fad577407c
72
py
Python
build/scripts-3.5/mooc_anon.py
acheamponge/mooc_anon
b06dec9c4c47011f69ff4f6e21a0f5862e2ffd5c
[ "MIT" ]
3
2019-07-08T01:16:57.000Z
2021-09-23T12:44:02.000Z
build/scripts-3.5/mooc_anon.py
acheamponge/mooc_anon
b06dec9c4c47011f69ff4f6e21a0f5862e2ffd5c
[ "MIT" ]
null
null
null
build/scripts-3.5/mooc_anon.py
acheamponge/mooc_anon
b06dec9c4c47011f69ff4f6e21a0f5862e2ffd5c
[ "MIT" ]
null
null
null
#!/usr/bin/env python print("hey there, this is my first pip package")
18
48
0.708333
13
72
3.923077
1
0
0
0
0
0
0
0
0
0
0
0
0.152778
72
3
49
24
0.836066
0.277778
0
0
0
0
0.764706
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
9daf2e07854e8ace58237146dcb7ca501dc5a1ae
111
py
Python
odata_query/django/__init__.py
itd-fsc/odata-query
7d5239b775633594ce52d4eda5754c2ad078eb75
[ "MIT" ]
26
2021-06-11T07:42:08.000Z
2022-02-16T04:42:45.000Z
odata_query/django/__init__.py
itd-fsc/odata-query
7d5239b775633594ce52d4eda5754c2ad078eb75
[ "MIT" ]
13
2021-08-07T21:38:22.000Z
2022-03-28T17:25:47.000Z
odata_query/django/__init__.py
itd-fsc/odata-query
7d5239b775633594ce52d4eda5754c2ad078eb75
[ "MIT" ]
6
2021-07-28T04:46:14.000Z
2022-03-15T08:22:19.000Z
from .django_q import AstToDjangoQVisitor from .django_q_ext import * from .shorthand import apply_odata_query
27.75
41
0.855856
16
111
5.625
0.625
0.222222
0.244444
0
0
0
0
0
0
0
0
0
0.108108
111
3
42
37
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
9dafa0a196d3c478e9ef8c55c4f9dd2dd56b60ad
1,457
py
Python
_snippets/scrape_RAND_pdfs.py
vashu1/data_snippets
b0ae5230d60c2054c7b9278093533b7f71f3758b
[ "MIT" ]
1
2021-02-10T20:33:43.000Z
2021-02-10T20:33:43.000Z
_snippets/scrape_RAND_pdfs.py
vashu1/data_snippets
b0ae5230d60c2054c7b9278093533b7f71f3758b
[ "MIT" ]
null
null
null
_snippets/scrape_RAND_pdfs.py
vashu1/data_snippets
b0ae5230d60c2054c7b9278093533b7f71f3758b
[ "MIT" ]
null
null
null
# scrape articles from RAND site, see https://vashu11.livejournal.com/20523.html import re import requests from bs4 import BeautifulSoup import os content = ['https://www.rand.org/pubs/papers.html'] + ['https://www.rand.org/pubs/papers.{}.html'.format(i) for i in range(2, 108)] def get_articles(page): page = requests.get(page) soup = BeautifulSoup(page.content, 'html.parser') return [('https://www.rand.org' + link.get('href')) for link in soup.findAll('a', attrs={'href': re.compile("/pubs/papers/.*")})] def get_pdfs(link): page = requests.get(link) soup = BeautifulSoup(page.content, 'html.parser') name = soup.findAll('h1', attrs={'id': 'RANDTitleHeadingId'})[0].text return set([(name, ('https://www.rand.org' if not 'http' in link.get('href') else '') + link.get('href')) for link in soup.findAll('a', attrs={'href': re.compile(".*\.pdf")})]) os.mkdir('pdfs') for page in content[11:]: print('PAGE', page) articles = get_articles(page) for article in articles: print('ARTICLE', article) c = 0 for d in get_pdfs(article): name, link = d if c > 0: name += '_{}'.format(c) print('NAME', name) r = requests.get(link) l = len(r.content) print('LEN', l) with open('./pdfs/' + re.sub('[^\w\-_\. ]', '_', name) + '.pdf', 'wb') as f: f.write(r.content) c += 1
38.342105
180
0.577213
201
1,457
4.149254
0.363184
0.038369
0.057554
0.071942
0.280576
0.280576
0.189448
0.119904
0.119904
0.119904
0
0.016949
0.230611
1,457
37
181
39.378378
0.727029
0.053535
0
0.0625
0
0
0.18809
0
0
0
0
0
0
1
0.0625
false
0
0.125
0
0.25
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9db042c12b1460a61eed0c0cb77f85501b0f72a1
215
py
Python
plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py
FHoffmannCode/dbnd
82beee1a8c752235bf21b4b0ceace5ab25410e52
[ "Apache-2.0" ]
null
null
null
plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py
FHoffmannCode/dbnd
82beee1a8c752235bf21b4b0ceace5ab25410e52
[ "Apache-2.0" ]
null
null
null
plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py
FHoffmannCode/dbnd
82beee1a8c752235bf21b4b0ceace5ab25410e52
[ "Apache-2.0" ]
null
null
null
from dbnd._core.commands.metrics import log_snowflake_table from dbnd_snowflake.snowflake_resources import log_snowflake_resource_usage __all__ = [ "log_snowflake_resource_usage", "log_snowflake_table", ]
23.888889
75
0.827907
27
215
5.962963
0.481481
0.298137
0.223602
0.310559
0
0
0
0
0
0
0
0
0.111628
215
8
76
26.875
0.842932
0
0
0
0
0
0.218605
0.130233
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
9db66809b3f7cfe04fff2e0d4fd9725d23130f54
2,422
py
Python
inputs/fino2_dats.py
a2edap/WE-Validate
6e4be8228c9b4f66fb1a056f7566030b79441f2e
[ "BSD-3-Clause" ]
1
2022-01-21T08:09:03.000Z
2022-01-21T08:09:03.000Z
inputs/fino2_dats.py
a2edap/WE-Validate
6e4be8228c9b4f66fb1a056f7566030b79441f2e
[ "BSD-3-Clause" ]
null
null
null
inputs/fino2_dats.py
a2edap/WE-Validate
6e4be8228c9b4f66fb1a056f7566030b79441f2e
[ "BSD-3-Clause" ]
1
2021-06-14T09:32:36.000Z
2021-06-14T09:32:36.000Z
# A parser for multiple FINO2 .dat files in a directory. import os import pathlib import pandas as pd import numpy as np import glob import sys class fino2_dats: """FINO2 data class """ def __init__(self, info, conf): self.path = os.path.join( (pathlib.Path(os.getcwd()).parent), str(info['path']) ) self.var = info['var'] # self.lev = conf['levels']['height_agl'] self.target_var = info['target_var'] def get_ts(self, lev): """The directory can contain multiple FINO2 files, and each file contains data at one height level. The function only read in one data file at one height level. """ file_list = glob.glob(os.path.join(self.path, '*.dat')) for file in file_list: if str(lev)+'m' in file: df_all = pd.read_csv(file) # Get variable name and column names var_name = df_all.iloc[0][0].split(': ', 1)[1] col_names = df_all.iloc[3][0].split('\t')[1:] df = pd.read_csv(file, skiprows=6, sep='\s+') # Turn column names into 1st row df = pd.DataFrame(np.vstack([df.columns, df])) # Combine 2 time columns, hard coded df['t'] = df[0].map(str)+' '+df[1] # Drop duplicating columns df.pop(0) df.pop(1) # Reassign column names for i in range(len(col_names)): df[col_names[i]] = df[i+2] df.pop(i+2) df = df.set_index('t').sort_index() df.index = pd.to_datetime(df.index) # FINO data are averages centered at each 10-minute period # Data between 10:30 and 10:40 are averaged and labelled as # 10:35 # Apply correction to label data at the end of each period # Hence data between 10:30 and 10:40 are averaged and labelled # as 10:40 df.index = df.index+pd.Timedelta('5minutes') # Extract only 1 column of data out_df = df.loc[:, [self.var]] out_df.rename( columns={self.var: self.target_var}, inplace=True ) out_df = out_df.astype(float) return out_df
31.051282
78
0.514038
322
2,422
3.776398
0.400621
0.020559
0.016447
0.026316
0.078947
0.078947
0.078947
0.078947
0.078947
0.078947
0
0.031936
0.379438
2,422
77
79
31.454545
0.777112
0.28943
0
0
0
0
0.024522
0
0
0
0
0
0
1
0.054054
false
0
0.162162
0
0.27027
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9db67e536e2a5337dee11670942d6aa03db5b908
2,481
py
Python
bin/ess/dependencies.py
clu3bot/cora
de4d1af983c135184ebaf557271fa14c7c0e1849
[ "MIT" ]
null
null
null
bin/ess/dependencies.py
clu3bot/cora
de4d1af983c135184ebaf557271fa14c7c0e1849
[ "MIT" ]
null
null
null
bin/ess/dependencies.py
clu3bot/cora
de4d1af983c135184ebaf557271fa14c7c0e1849
[ "MIT" ]
null
null
null
import subprocess as sp import os import time import platform from os.path import exists #colar vars class color: lightblue='\033[1;34m' #light blue lightred='\033[1;31m' #light red lightgreen='\033[1;32m' #lightgreen red='\033[0;31m' #red yellow='\033[1;33m' #yellow none='\033[0m' #no color purple='\033[1;35m' #purple cyan='\033[0;36m' #cyan green='\033[0;32m' #green def permissions(): #checks for root permissions if not os.environ.get("SUDO_UID") and os.geteuid() != 0: print(color.lightred + "You need to run this script with sudo or as root.") time.sleep(0.3) quit() permissions() def getos(): osys=platform.system() if osys != "Linux": print(color.lightred + "This program only runs on Linux operating systems.") time.sleep(2) quit() getos() def check_file(): file = exists("tmp/flag.txt") if file == 'True': os.system("rm -rf tmp/flag.txt") else: time.sleep(0.5) check_file() #dependencies class dependencies: dependencie1 = 'mdk3' dependencie2 = 'aircrack-ng' dependencie3 = 'xterm' dependencie4 = 'macchanger' def check_mdk3(): check_d1 = sp.getoutput("bash etc/dpkg-check/dpkg-check-mdk3.sh") if check_d1 == '0': mdk3 = 'null' else: mdk3 = 'inst' return mdk3 def check_aircrack(): check_d2 = sp.getoutput("bash etc/dpkg-check/dpkg-check-aircrack-ng.sh") if check_d2 == '0': aircrack = 'null' else: aircrack = 'inst' return aircrack def check_xterm(): check_d3 = sp.getoutput("bash etc/dpkg-check/dpkg-check-xterm.sh") if check_d3 == '0': xterm = 'null' else: xterm = 'inst' return xterm def check_macchanger(): check_d4 = sp.getoutput("bash etc/dpkg-check/dpkg-check-macchanger.sh") if check_d4 == '0': macchanger = 'null' else: macchanger = 'inst' return macchanger def export(): mdk3 = check_mdk3() aircrack = check_aircrack() xterm = check_xterm() macchanger = check_macchanger() if mdk3 == 'null': flag = "null" elif aircrack == 'null': flag = "null" elif xterm == 'null': flag = "null" elif macchanger == "null": flag = "null" else: time.sleep(1) if flag == 'null': os.system("echo 
"+flag+" > tmp/flag.txt") else: check_file()
20.675
84
0.584442
320
2,481
4.46875
0.334375
0.05035
0.041958
0.05035
0.100699
0.100699
0.100699
0.100699
0
0
0
0.046745
0.275695
2,481
119
85
20.84874
0.749026
0.044337
0
0.172414
0
0
0.221374
0.061917
0
0
0
0
0
1
0.091954
false
0
0.057471
0
0.367816
0.022989
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9db6de217e5adf7d8e64871e558fa7b849812773
3,880
py
Python
calculate_Total-Hetero.py
evodify/population-genetic-analyses
5295f9d68736ac02fc5f3ece43dadd5bf4e98e6f
[ "MIT" ]
3
2018-01-31T09:57:10.000Z
2021-02-03T18:34:01.000Z
calculate_Total-Hetero.py
evodify/population-genetic-analyses
5295f9d68736ac02fc5f3ece43dadd5bf4e98e6f
[ "MIT" ]
null
null
null
calculate_Total-Hetero.py
evodify/population-genetic-analyses
5295f9d68736ac02fc5f3ece43dadd5bf4e98e6f
[ "MIT" ]
1
2019-09-02T06:13:29.000Z
2019-09-02T06:13:29.000Z
#! /usr/bin/env python ''' This script calculates total heterozygosity. #Example input: CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8 chr_1 1 A W N N A N N N N chr_1 2 C Y Y N C C N C N chr_1 3 C N C N C C C C C chr_1 4 T T T N T T T T T chr_2 1 A A A N A A A A A chr_2 2 C C C N C C C C C chr_2 3 C N N N N N N N N chr_2 4 C C T C C C C C C chr_2 5 T T C T Y T Y T T chr_3 1 G G N N G N N N N chr_3 2 C S C N C C N C N chr_3 3 N N N N N N N N N chr_3 4 N T T N T T T T N chr_3 5 G - N N G G G C G #Example input2: CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8 chr_1 1 A/A A/T ./. ./. A/A ./. ./. ./. ./. chr_1 2 C/C T/C T/C ./. C/C C/C ./. C/C ./. chr_1 3 C/C ./. C/C ./. C/C C/C C/C C/C C/C chr_1 4 T/T T/T T/T ./. T/T T/T T/T T/T T/T chr_2 1 A/A A/A A/A ./. A/A A/A A/A A/A A/A chr_2 2 C/C C/C C/C ./. C/C C/C C/C C/C C/C chr_2 3 C/C ./. ./. ./. ./. ./. ./. ./. ./. chr_2 4 C/C C/C T/T C/C C/C C/C C/C C/C C/C chr_2 5 T/T T/T C/C T/T T/C T/T T/C T/T T/T chr_3 1 G/G G/G ./. ./. G/G ./. ./. ./. ./. chr_3 2 C/C G/C C/C ./. C/C C/C ./. C/C ./. chr_3 3 ./. ./. ./. ./. ./. ./. ./. ./. ./. chr_3 4 ./. T/T T/T ./. T/T T/T T/T T/T ./. chr_3 5 G/G -/- ./. ./. 
G/G G/G G/G C/C G/G #Example output: test.tab 0.1125 #command: $ python calculate_Total-Hetero.py -i input.tab -o output.tab -s "sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8" #contact: Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu ''' ############################# modules ############################# import calls # my custom module import numpy as np ############################# options ############################# parser = calls.CommandLineParser() parser.add_argument('-i', '--input', help = 'name of the input file', type=str, required=True) parser.add_argument('-o', '--output', help = 'name of the output file', type=str, required=True) parser.add_argument('-s', '--samples', help = 'column names of the samples to process (optional)', type=str, required=False) args = parser.parse_args() # check if samples names are given and if all sample names are present in a header sampleNames = calls.checkSampleNames(args.samples, args.input) ############################# functions ############################# ############################# program ############################# print('Opening the file...') counter = 0 with open(args.input) as datafile: header_line = datafile.readline() header_words = header_line.split() # index samples sampCol = calls.indexSamples(sampleNames, header_words) # count number of sample nSample = len(sampleNames) ############################## perform counting #################### print('Counting heterozygots ...') Hcount = [] for line in datafile: words = line.split() # select samples sample_charaters = calls.selectSamples(sampCol, words) # check if one- or two-character code if any(["/" in gt for gt in sample_charaters]): sample_charaters = calls.twoToOne(sample_charaters) # count hetero Nmising = calls.countPerPosition(sample_charaters, 'N') nHeter = calls.countHeteroPerPosition(sample_charaters) nTotal = float(nSample - Nmising) if nTotal != 0: Hcount.append(float(nHeter/nTotal)) # track progress counter += 1 if counter % 
1000000 == 0: print str(counter), "lines processed" # make output header outputFile = open(args.output, 'w') heteroT = round(np.mean(Hcount), 4) outputFile.write("%s\t%s\n" % (args.input, heteroT)) datafile.close() outputFile.close() print('Done!')
30.077519
130
0.549227
675
3,880
3.093333
0.225185
0.075671
0.087644
0.097701
0.292625
0.284483
0.277778
0.253352
0.178161
0.15613
0
0.033851
0.253866
3,880
128
131
30.3125
0.687392
0.079897
0
0
0
0
0.130406
0
0
0
0
0
0
0
null
null
0
0.055556
null
null
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9db72ff4ce32323ddaf8107b708ab0ac40987bfc
2,748
py
Python
src/bfh.py
Pella86/Snake4d
cdf3773b42efc888affa33dd22ebe56a48f6d979
[ "MIT" ]
79
2018-05-23T09:39:00.000Z
2021-11-29T02:26:07.000Z
src/bfh.py
Pella86/Snake4d
cdf3773b42efc888affa33dd22ebe56a48f6d979
[ "MIT" ]
1
2020-06-13T17:57:14.000Z
2020-06-16T15:53:40.000Z
src/bfh.py
Pella86/Snake4d
cdf3773b42efc888affa33dd22ebe56a48f6d979
[ "MIT" ]
6
2018-06-28T13:03:38.000Z
2021-03-06T14:24:32.000Z
# -*- coding: utf-8 -*- """ Created on Wed Jun 27 17:24:58 2018 @author: Mauro """ #============================================================================== # Imports #============================================================================== import struct #============================================================================== # Helpers #============================================================================== def as_bytes(dtype, data): return struct.pack(dtype, data) #============================================================================== # Constants #============================================================================== # little conversion table for the supported files type_to_size = {} type_to_size['I'] = 4 type_to_size['d'] = 8 type_to_size['c'] = 1 #============================================================================== # Binary file class #============================================================================== class BinaryFile: ''' reads the bytes from a file object with custom cumulative offset''' def __init__(self, fobj, co = 0): ''' self.file is a file object, self.co is the cumulative offset where to start the procedure ''' self.file = fobj self.co = co def write(self, dtype, data): ''' writes a data packet and moves the offset''' self.file.seek(self.co) b = as_bytes(dtype, data) self.file.write(b) self.co += len(b) def read(self, dtype): ''' reads a data packet and moves the offset, returns the data packet in the specified format ''' self.file.seek(self.co) size_read = type_to_size[dtype] b = self.file.read(size_read) self.co += size_read return struct.unpack(dtype, b)[0] def write_string(self, string): ''' Writess a string saving the length first and then the caracters encoded with UTF-8 ''' self.file.seek(self.co) strlen = len(string) #write str len self.write("I", strlen) fmt = 'c'*strlen data = [] for c in string: data.append(bytes(c, "utf-8")) b = struct.pack(fmt, *data) self.file.write(b) self.co 
+= len(b) def read_string(self): ''' readst the string from a binary file... in ascii? mmh... ''' self.file.seek(self.co) # read the length strlen = self.read("I") b = self.file.read(strlen) s = str(b, "ascii") self.co += strlen return s
26.941176
79
0.409025
279
2,748
3.953405
0.329749
0.072529
0.045331
0.058024
0.17951
0.114234
0.114234
0.063463
0.063463
0.063463
0
0.01003
0.274381
2,748
101
80
27.207921
0.543129
0.456696
0
0.2
0
0
0.011791
0
0
0
0
0
0
1
0.15
false
0
0.025
0.025
0.275
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9db736834f35ad283117ff978c76815cc0ba771c
8,726
py
Python
bin/read_analysis.py
louperelo/longmetarg
026b66c3621a4bcc71f5bc8a73955faf57978985
[ "MIT" ]
null
null
null
bin/read_analysis.py
louperelo/longmetarg
026b66c3621a4bcc71f5bc8a73955faf57978985
[ "MIT" ]
null
null
null
bin/read_analysis.py
louperelo/longmetarg
026b66c3621a4bcc71f5bc8a73955faf57978985
[ "MIT" ]
null
null
null
#!/usr/bin/env python import pandas as pd from scipy import stats import numpy as np #import seaborn as sns #import matplotlib.pyplot as plt import math from Bio import SeqIO import io import re import pysam from functools import reduce import argparse import os parser = argparse.ArgumentParser() parser.add_argument("--bam_file", metavar="<BAM>", dest="bam", help="enter the path to the alignment.bam file. By default 'aln_F4.bam' will be used", type=str, default="aln_F4.bam") parser.add_argument("--reads_fasta", metavar="<FASTA>", dest="fasta", help="enter the path to the original fasta file being analysed. By default 'reads.fasta' will be used", type=str, default="reads.fasta") parser.add_argument("--ident", metavar="<IDENT>", dest="ident", help="enter the int value for minimum identity. By default 80 will be used", type=int, default= 80) parser.add_argument("--cov_length", metavar="<COV>", dest="cov", help="enter the int value for minimum coverage length. By default 95 will be used", type=int, default= 95) parser.add_argument("--folder_out", metavar="<OUT>", dest="out", help="enter name for output files. By default 'arg_results' will be used", type=str, default="../out_dir/") parser.add_argument("--aro_idx", metavar="<IDX>", dest="idx", help="enter the path to the aro_index.csv file. 
By default 'aro_index.tsv' will be used", type=str, default="aro_index.tsv") # print help message for user parser.print_help() # get command line arguments args = parser.parse_args() # read files from path bam = args.bam fasta = args.fasta ident = args.ident covlen = args.cov folder = args.out idx = args.idx #read list of cigar tuples and get number of matches (0), insertions (1) or deletions (2) #auxiliary function in parse_bam() def read_cigar(lof_tup, idnum): x = 0 for t in lof_tup: if(t[0]==idnum): x += t[1] return x #Joins information from BAM file in pandas dataframe #query sequence: query_name, query_length #reference sequence: reference_name (gives one string, is split into ARO, ID, gene name and NCBI reference id), reference_start, reference_length #alignment: query_alignment_length, number of mismatches and gaps (tag 'NM) #calculates sequence identity % (identity(A,B)=100*(identical nucleotides / min(length(A),length(B)))), with identical nucleotides = query_alignment_length - NM #calculates cover length % (query_alignment_length*100 / reference_length) pd.options.mode.chained_assignment = None def parse_bam(bam_path): aln_file = pysam.AlignmentFile(bam_path, "rb") lst = [] # loop over alignments, get values per contig and store in list of lists (lst) for index, aln in enumerate(aln_file.fetch(until_eof = True)): #index = int(0 ... 
n), aln = all information on read substr = [aln.query_name, aln.query_length, aln.query_alignment_length, aln.get_tag('NM'), aln.reference_length, aln.reference_start, aln.cigartuples] #divide information in reference_name string = str(aln.reference_name) start=[] stop=[] for i, c in enumerate(string): if ((c==':')): start.append(i+1) elif (c=='|'): stop.append(i) else: continue stop.append(len(string)) for i in range(0, len(start)): #substr = [] substr.append(string[start[i]:stop[i]]) lst.append(substr) #print(lst[0:10]) df = pd.DataFrame(lst, columns=('contig_name', 'contig_length', 'aln_length', 'aln_nm', 'ref_length', 'ref_start', 'c_tuples', 'ref_ARO', 'ref_ID', 'ref_genename', 'ref_NCBI')) #get number of matches from cigar tuples df['matches'] = df['c_tuples'].apply(lambda x: read_cigar(x, 0)) df['insertions'] = df['c_tuples'].apply(lambda x: read_cigar(x, 1)) df['deletions'] = df['c_tuples'].apply(lambda x: read_cigar(x, 2)) #infer contig_length in repetitions of same contig_name (otherwise the value is 0) for i in range(1, df.shape[0]-1): if (df['contig_name'].iloc[i+1]==df['contig_name'].iloc[i]): df['contig_length'].iloc[i+1] = df['contig_length'].iloc[i] #calculate coverage length df['cov_length'] = df['aln_length']*100/df['ref_length'] #Sequence identity is the amount of characters which match exactly between two different sequences. 
#identity(A,B)=100% (num identical nucleotides / min(length(A),length(B))) df['cov_identity'] = 100*df['matches']/(df.loc[:,['aln_length','ref_length']].min(axis=1)) return df #Filter df for highest identity and coverlength rates def filter_best(df, ident, cov_l): return df[(df['cov_identity']>=ident) & (df['cov_length']>=cov_l)] #Filter assembly fasta for contigs of interest (data) and save to out_name.fasta #for taxonomic analysis def arg_contigs(data, fasta, out_name): #filter contigs with antibiotic resistance genes arg_contigs = data['contig_name'].drop_duplicates().to_list() # filter contig sequence information from original fasta file #filter fasta for contigs with antibiotic resistance genes (arg) for taxonomic analysis fasta_sequences = SeqIO.parse(open(fasta),'fasta') with open(out_name, 'w') as out_file: for fasta in fasta_sequences: #name, sequence = fasta.id, fasta.seq.tostring() #tostring() should be replaced by str(fasta.seq), but is not working on my computer name, sequence = fasta.id, str(fasta.seq) for c in arg_contigs: if (name==c): out_file.write('>'+ name + '\n' + sequence + '\n') #check for and eliminate less significant (lower cover identity) overlaps #generate list of index numbers of non-overlapping hits from df sorted by coverage identity (highest first) #in case of overlaps, keep the hit with the highest coverage identity def overlaps(df_in): df = df_in.reset_index() #list of contig_names reads = df['contig_name'].unique() #list of indices to keep keep = [] #check overlaps for one contig_name at a time for read in reads: #create dataframe for each contig_name, sorted by cov_identity, highest value first readdf = df[df['contig_name']==read].sort_values(by='cov_identity', ascending=False) #list of indices to keep for each read k=[] #iterate over each enty for one read for i in range(0, readdf.shape[0]-1): #append first entry of sorted readdf (highest cov_identity) to list of indices to keep for this contig_name 
k.append(readdf['index'].iloc[0]) #list for indices of contigs not overlapping with first entry lst=[] #compare first entry with all other entries for j in range (i+1, readdf.shape[0]): #get start s and end e position of two resistance gene hits s1, e1 = readdf['ref_start'].iloc[i], readdf['ref_start'].iloc[i] + readdf['ref_length'].iloc[i] s2, e2 = readdf['ref_start'].iloc[j], readdf['ref_start'].iloc[j] + readdf['ref_length'].iloc[j] #if there is no overlap, add the entry index to lst if (e1<s2 or e2<s1): lst.append(readdf['index'].iloc[j]) #update readdf, only keep entries with index in lst readdf = readdf[readdf['index'].isin(lst)] #if updated readdf only contains one entry, add index to k and pass on to next read if (readdf.shape[0]==1): k.append(readdf['index'].iloc[0]) break #if updated readdf is empty, pass on to next read if(readdf.shape[0]==0): break #append indices for each read to lst keep keep.append(k) #flatten list of lists (keep) keep = reduce(lambda x,y: x+y,keep) return(df[df['index'].isin(keep)]) if __name__ == "__main__": #extract data of interest from bam file, filter best hits and eliminate overlaps result_df = overlaps(filter_best(parse_bam(bam), ident, covlen)) #add corresponding drug class from CARD aro_index.tsv to result_df rgdrug_dict = pd.read_csv(idx, sep='\t').set_index('ARO Name').to_dict()['Drug Class'] result_df['drug_class'] = result_df['ref_genename'].map(rgdrug_dict) #save result_df as tsv result_df.to_csv("argHitsDf.tsv", sep='\t') #save reads/contigs of hits in result_df in 'result.fasta' for further analysis with PlasFlow or Blast/Diamond arg_contigs(result_df, fasta, "argHits.fasta")
47.68306
180
0.655168
1,280
8,726
4.352344
0.244531
0.01795
0.018309
0.015078
0.156704
0.135703
0.06839
0.027464
0.027464
0
0
0.009771
0.225877
8,726
182
181
47.945055
0.814952
0.358584
0
0.055046
0
0.009174
0.205235
0
0
0
0
0
0
1
0.045872
false
0
0.100917
0.009174
0.174312
0.009174
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9db737d0aa2bbc9904ff5f6209cdc235a2493a9c
6,315
py
Python
parkinglot/admin.py
YangWanjun/areaparking
b08bc9b8f8d5f602d823115263b9d040edb9f245
[ "Apache-2.0" ]
1
2018-08-02T04:00:44.000Z
2018-08-02T04:00:44.000Z
parkinglot/admin.py
YangWanjun/areaparking
b08bc9b8f8d5f602d823115263b9d040edb9f245
[ "Apache-2.0" ]
null
null
null
parkinglot/admin.py
YangWanjun/areaparking
b08bc9b8f8d5f602d823115263b9d040edb9f245
[ "Apache-2.0" ]
null
null
null
import datetime

from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Max

from . import models, forms
from address.biz import geocode
from utils import common
from utils.django_base import BaseAdmin

# Register your models here.


class ParkingPositionInline(admin.TabularInline):
    model = models.ParkingPosition
    extra = 0


class ParkingLotDocInline(admin.TabularInline):
    model = models.ParkingLotDoc
    form = forms.ParkingLotDocForm
    extra = 0


class ParkingLotImageInline(admin.TabularInline):
    model = models.ParkingLotImage
    extra = 0


class ParkingLotCommentInline(admin.TabularInline):
    model = models.ParkingLotComment
    extra = 0


class ParkingLotKeyInline(admin.TabularInline):
    model = models.ParkingLotKey
    extra = 0


class ParkingLotStaffHistoryInline(admin.TabularInline):
    model = models.ParkingLotStaffHistory
    extra = 0

    def has_add_permission(self, request):
        # History rows are written automatically by ParkingLotAdmin.save_model;
        # manual creation through the admin is disallowed.
        return False

    # def has_delete_permission(self, request, obj=None):
    #     return False


class ParkingPositionKeyInline(admin.TabularInline):
    model = models.ParkingPositionKey
    extra = 0


class ManagementCompanyStaffInline(admin.TabularInline):
    model = models.ManagementCompanyStaff
    extra = 0


@admin.register(models.ParkingLotType)
class ParkingLotTypeAdmin(BaseAdmin):
    list_display = ('code', 'name')
    list_display_links = ('code', 'name')


# @admin.register(models.LeaseManagementCompany)
# class LeaseManagementCompanyAdmin(BaseAdmin):
#     list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
#
#
# @admin.register(models.BuildingManagementCompany)
# class BuildingManagementCompanyAdmin(BaseAdmin):
#     list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')


@admin.register(models.ManagementCompany)
class ManagementCompanyAdmin(BaseAdmin):
    list_display = ('name', 'address', 'tel', 'email')
    inlines = (ManagementCompanyStaffInline,)


@admin.register(models.TryPuttingOperator)
class TryPuttingOperatorAdmin(BaseAdmin):
    pass


@admin.register(models.ParkingLot)
class ParkingLotAdmin(BaseAdmin):
    form = forms.ParkingLotForm
    icon = '<i class="material-icons">local_parking</i>'
    list_display = ('code', 'name', 'category', 'address', 'subscription_list_send_type')
    search_fields = ('code', 'name',)
    inlines = (ParkingLotCommentInline, ParkingLotStaffHistoryInline, ParkingLotDocInline,
               ParkingLotImageInline, ParkingLotKeyInline)

    def save_model(self, request, obj, form, change):
        """Save a parking lot.

        On creation, or when any address component changed, the coordinates
        and post code are re-resolved through geocoding.  When the staff
        assignment changed, a staff-history row is recorded for the outgoing
        staff member before delegating to the normal save.
        """
        if change is False or (
            'pref_name' in form.changed_data
            or 'city_name' in form.changed_data
            or 'town_name' in form.changed_data
            or 'aza_name' in form.changed_data
            or 'other_name' in form.changed_data
        ):
            # New record, or the address was edited: look the coordinates up again.
            coordinate = geocode(obj.address)
            if coordinate.get('lng', None):
                obj.lng = coordinate.get('lng', None)
            if coordinate.get('lat', None):
                obj.lat = coordinate.get('lat', None)
            if coordinate.get('post_code', None):
                obj.post_code = coordinate.get('post_code', None)
        # On staff change, append a parking-lot staff history entry.
        if change and 'staff' in form.changed_data:
            queryset = models.ParkingLotStaffHistory.objects.public_filter(parking_lot=obj)
            try:
                # Fetch the currently stored record once; the original issued
                # the same ParkingLot.objects.get(pk=obj.pk) query twice.
                stored = models.ParkingLot.objects.get(pk=obj.pk)
                last_staff = stored.staff
                last_start_date = stored.staff_start_date
                history_end_date = queryset.aggregate(Max('end_date')).get('end_date__max', None)
                if ((history_end_date is None or history_end_date < obj.staff_start_date)
                        and last_start_date != obj.staff_start_date):
                    models.ParkingLotStaffHistory.objects.create(
                        parking_lot=obj,
                        member=last_staff,
                        start_date=last_start_date,
                        end_date=(obj.staff_start_date + datetime.timedelta(days=-1)),
                    )
            except ObjectDoesNotExist:
                pass
        super(ParkingLotAdmin, self).save_model(request, obj, form, change)


@admin.register(models.ParkingPosition)
class ParkingPosition(BaseAdmin):
    form = forms.ParkingPositionForm
    list_display = ('parking_lot', 'name', 'length', 'width', 'height', 'weight')
    list_display_links = ('parking_lot', 'name',)
    search_fields = ('parking_lot__code', 'parking_lot__name')
    fieldsets = (
        (None, {
            'fields': (
                'parking_lot',
                'name',
                'category',
                'cost',
            )
        }),
        ("賃料", {
            'classes': ('collapse',),
            'fields': (
                ('price_recruitment_no_tax', 'price_recruitment'),
                ('price_homepage_no_tax', 'price_homepage'),
                ('price_handbill_no_tax', 'price_handbill'),
            )
        }),
        ("サイズ", {
            'classes': ('collapse',),
            'fields': (
                ('length', 'width', 'height', 'weight'),
                ('tyre_width', 'tyre_width_ap', 'min_height', 'min_height_ap'),
                ('f_value', 'r_value',),
            )
        }),
        ('備考', {
            'fields': (
                'comment',
            )
        }),
    )
    inlines = (ParkingPositionKeyInline,)
    save_as = True

    def save_model(self, request, obj, form, change):
        """Save a parking position.

        On creation, a name that describes several positions (a range
        recognised by ``common.get_continued_positions`` — presumably a
        run like "A1-A5"; TODO confirm — or a comma-separated list) is
        expanded into one ParkingPosition record per name.
        """
        continued_positions = common.get_continued_positions(obj.name)
        if continued_positions:
            # A recognised range takes precedence over comma splitting.
            split_positions = []
        else:
            split_positions = [s for s in obj.name.split(',') if s]
        continued_positions.extend(split_positions)
        if not change and continued_positions:
            # Creating multiple positions at once; skip names that already exist.
            for name in continued_positions:
                if models.ParkingPosition.objects.public_filter(
                        parking_lot=obj.parking_lot, name=name).count() == 0:
                    obj.pk = None
                    obj.name = name
                    obj.save()
        else:
            super(ParkingPosition, self).save_model(request, obj, form, change)
33.951613
133
0.62977
625
6,315
6.1824
0.28
0.023292
0.047619
0.060041
0.173913
0.150104
0.097308
0.080228
0.041925
0.041925
0
0.002142
0.260808
6,315
185
134
34.135135
0.825621
0.08361
0
0.175182
0
0
0.106376
0.023042
0
0
0
0
0
1
0.021898
false
0.014599
0.058394
0.007299
0.423358
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9db76eb5840b9b7ac5d4ffae358c55f69c7c5da4
965
py
Python
graficas.py
dianuchitop/el26
e84bb35ca9d6a603d515a624a85dae27cd4d10f2
[ "MIT" ]
null
null
null
graficas.py
dianuchitop/el26
e84bb35ca9d6a603d515a624a85dae27cd4d10f2
[ "MIT" ]
null
null
null
graficas.py
dianuchitop/el26
e84bb35ca9d6a603d515a624a85dae27cd4d10f2
[ "MIT" ]
null
null
null
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

# One column of subplots per integrator; rows are: position vs time,
# velocity vs time, and the phase portrait (position vs velocity).
filenames = ["euler.dat", "rk4.dat", "leapfrog.dat"]

fig, axs = plt.subplots(nrows=3, ncols=3)
axs[0][0].set_title('Euler')
axs[0][1].set_title('RK4')
axs[0][2].set_title('Leap_frog')

for i in range(3):
    # Each .dat file holds three whitespace-separated rows which, judging by
    # the axis labels below, are position, velocity and time — TODO confirm.
    # Use a context manager so the handle is closed (the original leaked it).
    with open(filenames[i], "r") as f:
        s = list(map(float, f.readline().split()))
        s1 = list(map(float, f.readline().split()))
        time = list(map(float, f.readline().split()))

    ax = axs[0][i]
    ax.set_xlabel("time")
    ax.set_ylabel("position")  # typo fix: was "posistion"
    ax.plot(time, s)
    ax.set_ylim(-1.5, 1.5)
    ax.set_xlim(0, 15)

    ax = axs[1][i]
    ax.plot(time, s1)
    ax.set_ylim(-1.5, 1.5)
    ax.set_xlim(0, 15)
    ax.set_xlabel("time")
    ax.set_ylabel("velocity")

    ax = axs[2][i]
    ax.plot(s, s1)
    ax.set_ylim(-2.0, 2.0)
    ax.set_xlim(-2.0, 2.0)
    ax.set_xlabel("position")
    ax.set_ylabel("velocity")

fig.subplots_adjust(hspace=1, wspace=1)
plt.savefig('graficas.png')
plt.show()
24.74359
48
0.635233
181
965
3.292818
0.320442
0.125839
0.040268
0.065436
0.33557
0.33557
0.174497
0.090604
0.090604
0.090604
0
0.049939
0.149223
965
38
49
25.394737
0.676005
0
0
0.216216
0
0
0.102697
0
0
0
0
0
0
1
0
false
0
0.081081
0
0.081081
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9db821a6f16092b02b4cd4951deab910f4dfd292
565
py
Python
__scraping__/zipnet.in - requests/main.py
whitmans-max/python-examples
881a8f23f0eebc76816a0078e19951893f0daaaa
[ "MIT" ]
140
2017-02-21T22:49:04.000Z
2022-03-22T17:51:58.000Z
__scraping__/zipnet.in - requests/main.py
whitmans-max/python-examples
881a8f23f0eebc76816a0078e19951893f0daaaa
[ "MIT" ]
5
2017-12-02T19:55:00.000Z
2021-09-22T23:18:39.000Z
__scraping__/zipnet.in - requests/main.py
whitmans-max/python-examples
881a8f23f0eebc76816a0078e19951893f0daaaa
[ "MIT" ]
79
2017-01-25T10:53:33.000Z
2022-03-11T16:13:57.000Z
import requests
from bs4 import BeautifulSoup
from time import sleep

# Scrape the "browse all missing persons" listing and print each table's
# field/detail column pairs.
url = "http://zipnet.in/index.php?page=missing_person_search&criteria=browse_all&Page_No=1"

r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')

all_tables = soup.findAll('table')
for table in all_tables:
    print('--- table ---')
    all_rows = table.findAll('tr')
    for row in all_rows:
        all_cols = row.findAll('td')
        # Only rows with at least two cells carry a field/detail pair.
        if len(all_cols) > 1:
            fields = all_cols[0].string
            details = all_cols[1].string
            # Bug fix: the original line ended in a stray "S"
            # (`print(fields, details)S`), which is a SyntaxError.
            print(fields, details)
26.904762
91
0.660177
82
565
4.402439
0.54878
0.077562
0.044321
0
0
0
0
0
0
0
0
0.011236
0.212389
565
20
92
28.25
0.8
0
0
0
0
0.0625
0.20531
0
0
0
0
0
0
0
null
null
0
0.1875
null
null
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9dbc6591cdea251b119f8bcead36767b18ac8b75
4,654
py
Python
mailpile/plugins/contacts.py
k0nsl/Mailpile
556f5f9040c4e01b005b4d633f3213668a474936
[ "Apache-2.0" ]
null
null
null
mailpile/plugins/contacts.py
k0nsl/Mailpile
556f5f9040c4e01b005b4d633f3213668a474936
[ "Apache-2.0" ]
null
null
null
mailpile/plugins/contacts.py
k0nsl/Mailpile
556f5f9040c4e01b005b4d633f3213668a474936
[ "Apache-2.0" ]
null
null
null
import mailpile.plugins
from mailpile.commands import Command
from mailpile.mailutils import Email, ExtractEmails
from mailpile.util import *


class VCard(Command):
    """Add/remove/list/edit vcards"""
    ORDER = ('Internals', 6)
    KIND = ''
    SYNOPSIS = '<nickname>'

    def command(self, save=True):
        """Look up and return the vcards named in self.args."""
        session, config = self.session, self.session.config
        vcards = []
        for email in self.args:
            vcard = config.get_vcard(email)
            if vcard:
                vcards.append(vcard)
            else:
                session.ui.warning('No such contact: %s' % email)
        return vcards

    def _fparse(self, fromdata):
        # Split a From: header into (email, display name); fall back to the
        # address itself when no display name is present.
        email = ExtractEmails(fromdata)[0]
        name = fromdata.replace(email, '').replace('<>', '').strip()
        return email, (name or email)

    def _prepare_new_vcard(self, vcard):
        # Hook for subclasses; the base class adds nothing to a new vcard.
        pass

    def _valid_vcard_handle(self, vc_handle):
        # A handle must be non-empty and contain '@' after the first character.
        return (vc_handle and '@' in vc_handle[1:])

    def _add_from_messages(self):
        # Derive (email, name) pairs from the From: headers of the selected
        # messages.
        pairs, idx = [], self._idx()
        for email in [Email(idx, i) for i in self._choose_messages(self.args)]:
            pairs.append(self._fparse(email.get_msg_info(idx.MSG_FROM)))
        return pairs

    def _pre_delete_vcard(self, vcard):
        # Hook for subclasses; the base class does nothing before deletion.
        pass

    def add_vcards(self):
        """Create vcards from args, posted form data, or selected messages."""
        session, config, idx = self.session, self.session.config, self._idx()
        if (len(self.args) > 2 and self.args[1] == '=' and
                self._valid_vcard_handle(self.args[0])):
            pairs = [(self.args[0], ' '.join(self.args[2:]))]
        elif self.data:
            # `key in dict` replaces the deprecated dict.has_key().
            if "@contactname" in self.data and "@contactemail" in self.data:
                pairs = [(self.data["@contactemail"], self.data["@contactname"])]
            elif "contactnames" in self.data and "contactemails" in self.data:
                pairs = zip(self.data["contactemails"], self.data["contactnames"])
            else:
                # Bug fix: the original left `pairs` unbound here, raising
                # NameError instead of reporting 'Nothing to do!'.
                pairs = []
        else:
            pairs = self._add_from_messages()
        if pairs:
            vcards = []
            for handle, name in pairs:
                if handle.lower() not in config.vcards:
                    vcard = config.add_vcard(handle, name, self.KIND)
                    self._prepare_new_vcard(vcard)
                    vcards.append(vcard)
                else:
                    session.ui.warning('Already exists: %s' % handle)
        else:
            return self._error('Nothing to do!')
        return {"contacts": [x.as_mpCard() for x in vcards]}

    def _format_values(self, key, vals):
        # MEMBER values are stored as mailto: URLs per the vCard convention
        # used elsewhere in this codebase.
        if key.upper() in ('MEMBER', ):
            return [['mailto:%s' % e, []] for e in vals]
        else:
            return [[e, []] for e in vals]

    def set_vcard(self):
        """Set (or, with an empty value, delete) one attribute of a vcard."""
        session, config = self.session, self.session.config
        handle, var = self.args[0], self.args[1]
        # Optional '=' separator between the attribute and its value.
        if self.args[2] == '=':
            val = ' '.join(self.args[3:])
        else:
            val = ' '.join(self.args[2:])
        try:
            vcard = config.get_vcard(handle)
            if not vcard:
                return self._error('Contact not found')
            config.deindex_vcard(vcard)
            if val:
                if ',' in val:
                    # Comma-separated input becomes a multi-valued attribute.
                    vcard[var] = self._format_values(var, val.split(','))
                else:
                    vcard[var] = val
            else:
                del vcard[var]
            vcard.save()
            config.index_vcard(vcard)
            session.ui.display_vcard(vcard, compact=False)
            return True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            self._ignore_exception()
            return self._error('Error setting %s = %s' % (var, val))

    def rm_vcards(self):
        """Delete every vcard named in self.args."""
        session, config = self.session, self.session.config
        for handle in self.args:
            vcard = config.get_vcard(handle)
            if vcard:
                self._pre_delete_vcard(vcard)
                config.del_vcard(handle)
            else:
                session.ui.error('No such contact: %s' % handle)
        return True

    def find_vcards(self):
        """Search vcards matching self.args; '--full' disables compact mode."""
        session, config = self.session, self.session.config
        if self.args and self.args[0] == '--full':
            self.args.pop(0)
            compact = False
        else:
            compact = True
        kinds = self.KIND and [self.KIND] or []
        vcards = config.find_vcards(self.args, kinds=kinds)
        #for vcard in vcards:
        #    session.ui.display_vcard(vcard, compact=compact)
        ctx = {}
        ctx["contacts"] = [x.as_mpCard() for x in vcards]
        ctx["query"] = " ".join(self.args)
        ctx["total"] = len(vcards)
        ctx["start"] = 1
        ctx["end"] = len(vcards)
        ctx["count"] = len(vcards)
        return ctx

    SUBCOMMANDS = {
        'add': (add_vcards, '<msgs>|<email> = <name>'),
        'set': (set_vcard, '<email> <attr> <value>'),
        'list': (find_vcards, '[--full] [<terms>]'),
        'delete': (rm_vcards, '<email>'),
    }


class Contact(VCard):
    """Add/remove/list/edit contacts"""
    KIND = 'individual'
    ORDER = ('Tagging', 3)
    SYNOPSIS = '<email>'
    TEMPLATE_IDS = ['contact']


mailpile.plugins.register_command('C:', 'contact=', Contact)
mailpile.plugins.register_command('_vcard', 'vcard=', VCard)
30.220779
84
0.613666
608
4,654
4.577303
0.225329
0.051743
0.054977
0.039526
0.247575
0.17571
0.139418
0.103845
0.036651
0
0
0.004757
0.232058
4,654
153
85
30.418301
0.773923
0.027503
0
0.20155
0
0
0.096831
0
0
0
0
0
0
1
0.085271
false
0.015504
0.031008
0.007752
0.294574
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dbe26545533c7c7d397d2847ba2a1eeca8ad8ef
1,663
py
Python
hw2/codes/plot.py
Trinkle23897/Artificial-Neural-Network-THU-2018
3326ed131298caaaf3fd0b6af80de37fd1ff9526
[ "MIT" ]
38
2019-01-23T07:14:19.000Z
2022-03-07T06:03:21.000Z
hw2/codes/plot.py
ywythu/Artificial-Neural-Network-THU-2018
3326ed131298caaaf3fd0b6af80de37fd1ff9526
[ "MIT" ]
null
null
null
hw2/codes/plot.py
ywythu/Artificial-Neural-Network-THU-2018
3326ed131298caaaf3fd0b6af80de37fd1ff9526
[ "MIT" ]
17
2019-03-30T06:33:06.000Z
2021-12-24T10:42:39.000Z
import numpy as np
from pylab import *

# Number of logged repetitions to average over.
D = 10


def _mean_curve(path):
    """Load a logged curve and average it over the D repetitions."""
    return np.load(path).reshape(D, -1).mean(axis=0)


acc1 = _mean_curve('res/small/acc.npy')
loss1 = _mean_curve('res/small/loss.npy')
acc2 = _mean_curve('res/large/acc.npy')
loss2 = _mean_curve('res/large/loss.npy')

# Only plot / report the first 40% of the run.
cut = int(acc1.shape[0] / 10 * 4)
print(' 1: %.2f %.6f' % (100 * acc1[:cut].max(), loss1[:cut].min()))
print(' 2: %.2f %.6f' % (100 * acc2[:cut].max(), loss2[:cut].min()))

iter_ = np.arange(acc1.shape[0]) * D
print(acc1.shape, iter_.shape[0])


def _compare_figure(small_curve, large_curve, ylim, ylabel, legend_loc, outfile):
    """Plot the two averaged curves against iteration count and save as PDF."""
    figure()
    axes_ = subplot(111)
    axes_.plot(iter_[:cut], small_curve[:cut], '-', label='Original CNN')
    axes_.plot(iter_[:cut], large_curve[:cut], '-', label='Designed CNN')
    axes_.set_ylim(ylim)
    axes_.set_xlabel(r'# of Iterations')
    axes_.set_ylabel(ylabel)
    axes_.legend(loc=legend_loc)
    tight_layout()
    savefig(outfile)


_compare_figure(loss1, loss2, (0, .4), r'Loss', 'upper right', "loss.pdf")
_compare_figure(acc1, acc2, (.9, 1), r'Accuracy', 'lower right', "acc.pdf")

# 1: 23:24:44.414 Testing, total mean loss 0.019417, total acc 0.863300 - 23:24:33.131
# 2s: 20:20:39.807 Testing, total mean loss 0.003224, total acc 0.967700 - 20:18:21.597
# 2r: 20:48:01.448 Testing, total mean loss 0.002306, total acc 0.981300 - 20:45:16.709
#-2r: 20:38:47.940 Testing, total mean loss 0.002271, total acc 0.981500 - 20:35:59.910
# 3s: 00:38:10.865 Testing, total mean loss 0.001759, total acc 0.980098 - 00:33:01.622
# 3r: 21:24:04.253 Testing, total mean loss 0.001675, total acc 0.980588 - 21:19:28.262
41.575
91
0.654841
313
1,663
3.434505
0.364217
0.022326
0.089302
0.111628
0.433488
0.316279
0.316279
0.266047
0.063256
0.063256
0
0.1718
0.121467
1,663
40
92
41.575
0.563997
0.323512
0
0.25
0
0
0.203041
0
0
0
0
0
0
1
0
false
0
0.0625
0
0.0625
0.09375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dbe2a0458905fed950a4384ff34ad0dc77f394d
696
py
Python
app/helpers/__init__.py
jaywonder20/Flask_Api_Starter
d3cf69f4742923737e826261f5e737f00d1c6270
[ "MIT" ]
1
2020-07-28T13:28:42.000Z
2020-07-28T13:28:42.000Z
app/helpers/__init__.py
jaywonder20/Flask_Api_Starter
d3cf69f4742923737e826261f5e737f00d1c6270
[ "MIT" ]
null
null
null
app/helpers/__init__.py
jaywonder20/Flask_Api_Starter
d3cf69f4742923737e826261f5e737f00d1c6270
[ "MIT" ]
null
null
null
from flask_restful import reqparse


def send_api_response(response_code, response_message, http_status, response_data=None):
    """Build the (body, status, headers) triple returned to flask-restful.

    Success statuses (200/201) additionally carry the payload under 'data';
    every response gets a permissive CORS header.

    Bug fix: `response_data` previously defaulted to a mutable `{}` shared
    across calls; it now defaults to None and a fresh dict is substituted
    per call.  The duplicated dict construction is also folded into one.
    """
    body = {
        'responseCode': response_code,
        'responseMessage': response_message,
    }
    if http_status in (200, 201):
        body['data'] = {} if response_data is None else response_data
    return body, int(http_status), {"Access-Control-Allow-Origin": "*"}


# Shared request parser for endpoints that expect an email address.
parser = reqparse.RequestParser()
parser.add_argument('email_address', help='field cannot be blank.')
33.142857
86
0.616379
69
696
5.971014
0.565217
0.097087
0.126214
0.145631
0.470874
0.470874
0.470874
0
0
0
0
0.011742
0.265805
696
20
87
34.8
0.794521
0
0
0.4
0
0
0.21408
0.077586
0
0
0
0
0
1
0.066667
false
0
0.066667
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dc09ed0aa1f145f5e2a90e86cf3072696bbd4e9
3,435
py
Python
tests/fakedb.py
justinfay/dbkit
2aef6376a60965d7820c91692046f4bcf7d43640
[ "MIT" ]
4
2016-02-08T05:43:39.000Z
2020-08-25T21:37:55.000Z
tests/fakedb.py
justinfay/dbkit
2aef6376a60965d7820c91692046f4bcf7d43640
[ "MIT" ]
8
2015-04-24T13:39:42.000Z
2016-04-07T01:58:53.000Z
tests/fakedb.py
justinfay/dbkit
2aef6376a60965d7820c91692046f4bcf7d43640
[ "MIT" ]
null
null
null
""" A fake DB-API 2 driver. """ # DB names used to trigger certain behaviours. INVALID_DB = 'invalid-db' INVALID_CURSOR = 'invalid-cursor' HAPPY_OUT = 'happy-out' apilevel = '2.0' threadsafety = 2 paramstyle = 'qmark' def connect(database): return Connection(database) class Connection(object): """ A fake connection. """ def __init__(self, database): super(Connection, self).__init__() self.database = database self.session = [] self.cursors = set() self.executed = 0 if database == INVALID_DB: self.valid = False raise OperationalError() self.valid = True def close(self): if not self.valid: raise ProgrammingError("Cannot close a closed connection.") self.valid = False for cursor in self.cursors: cursor.close() self.session.append('close') if self.database == INVALID_DB: raise OperationalError() def commit(self): self.session.append('commit') def rollback(self): self.session.append('rollback') def cursor(self): self.session.append('cursor') if not self.valid: raise InterfaceError() return Cursor(self) class Cursor(object): """ A fake cursor. """ def __init__(self, connection): self.connection = connection self.result = None if connection.database == INVALID_CURSOR: self.valid = False raise OperationalError("You've tripped INVALID_CURSOR!") connection.cursors.add(self) self.valid = True self.rowcount = -1 def close(self): self.connection.session.append('cursor-close') if not self.valid: raise InterfaceError("Cursor is closed") self.connection.cursors.remove(self) self.valid = False def execute(self, stmt, args=()): if not self.valid or not self.connection.valid: raise InterfaceError() stmt = stmt.lstrip().lower() # It's the ping! 
if stmt == 'select 1': return self stmt_type, = stmt.split(' ', 1) if stmt_type in ('select', 'update', 'insert', 'delete'): self.result = None if args is () else args self.connection.session.append(stmt_type) self.connection.executed += 1 else: self.result = None raise ProgrammingError() def callproc(self, procname, args=()): if not self.valid or not self.connection.valid: raise InterfaceError() self.result = None if len(args) == 0 else args self.connection.session.append('proc:' + procname) self.connection.executed += 1 def fetchone(self): if not self.valid: raise InterfaceError("Cursor is closed") result = self.result self.result = None return result def fetchall(self): return () class Warning(Exception): pass class Error(Exception): pass class InterfaceError(Error): pass class DatabaseError(Error): pass class DataError(DatabaseError): pass class OperationalError(DatabaseError): pass class IntegrityError(DatabaseError): pass class InternalError(DatabaseError): pass class ProgrammingError(DatabaseError): pass class NotSupportedError(DatabaseError): pass
22.598684
71
0.604076
373
3,435
5.504021
0.252011
0.052606
0.026303
0.040916
0.20263
0.168534
0.105212
0.105212
0.105212
0.059425
0
0.004534
0.293741
3,435
151
72
22.748344
0.841715
0.034352
0
0.356436
0
0
0.064369
0
0
0
0
0
0
1
0.118812
false
0.09901
0
0.019802
0.287129
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
9dc60e93e26c2a9f12204a366a70cced0bf9b339
4,081
py
Python
chapter_3_featurization/text_features.py
fancyerii/voicebook
def82da8577086d0361643a05fec2463006533a9
[ "Apache-2.0" ]
1
2020-03-05T01:19:17.000Z
2020-03-05T01:19:17.000Z
chapter_3_featurization/text_features.py
fancyerii/voicebook
def82da8577086d0361643a05fec2463006533a9
[ "Apache-2.0" ]
null
null
null
chapter_3_featurization/text_features.py
fancyerii/voicebook
def82da8577086d0361643a05fec2463006533a9
[ "Apache-2.0" ]
null
null
null
'''
================================================
##            VOICEBOOK REPOSITORY            ##
================================================

repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28

This code (voicebook) is hereby released under a Apache 2.0 license license.

For more information, check out the license terms below.

================================================
##               LICENSE TERMS                ##
================================================

Copyright 2018 NeuroLex Laboratories, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

================================================
##            SERVICE STATEMENT               ##
================================================

If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.

We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.

If you would like to work with us let us know @ js@neurolex.co.

================================================
##            TEXT_FEATURES.PY                ##
================================================

extract all text features:
    nltk_features()
    spacy_features()
    gensim_features()
'''
import transcribe as ts
import sounddevice as sd
import soundfile as sf
import nltk_features as nf
import spacy_features as spf
import gensim_features as gf
import numpy as np
import os, json


def sync_record(filename, duration, fs, channels):
    """Record `duration` seconds of audio at sample rate `fs` into `filename`."""
    print('recording')
    myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
    sd.wait()  # block until the recording finishes
    sf.write(filename, myrecording, fs)
    print('done recording')


def text_featurize(filename, jsondump):
    """Transcribe `filename` and extract nltk, spacy and gensim text features.

    Returns a dict with the transcript and the three feature vectors; when
    `jsondump` is True the dict is also written next to the audio file as
    <name>.json.
    """
    # transcribe with sphinx
    # Bug fix: the original always transcribed the hard-coded 'test.wav'
    # instead of the `filename` argument.
    transcript = ts.transcribe_sphinx(filename)
    # now put transcript through various feature engines
    nltk_featureset, nltk_labels = nf.nltk_featurize(transcript)
    spacy_featureset, spacy_labels = spf.spacy_featurize(transcript)
    # make gensim embedding on alice and wonderland text
    # (or any text corpus you'd like); train the model only if it is missing
    modelname = 'alice.pickle'
    if modelname not in os.listdir():
        with open('alice.txt') as corpus:  # fix: close the corpus file
            text = corpus.read()
        gf.w2v_train(text, 100, modelname)
    gensim_featureset = gf.sentence_embedding(transcript, 100, modelname)

    data = {
        'transcript': transcript,
        'transcript type': 'sphinx',
        'nltk': np.array(nltk_featureset).tolist(),
        'spacy': np.array(spacy_featureset).tolist(),
        'gensim': np.array(gensim_featureset).tolist(),
    }

    if jsondump == True:
        jsonfilename = filename[0:-4] + '.json'
        with open(jsonfilename, 'w') as jsonfile:  # fix: guaranteed close
            json.dump(data, jsonfile)

    return data


# # record and get transcript
# if 'test.wav' not in os.listdir():
#     sync_record('test.wav', 10, 44100, 2)
# # now extract all text features
# data=text_featurize('test.wav', True)
34.584746
121
0.639304
497
4,081
5.201207
0.480885
0.027079
0.009284
0.011605
0
0
0
0
0
0
0
0.013956
0.192355
4,081
117
122
34.880342
0.770328
0.669199
0
0
0
0
0.078137
0
0
0
0
0
0
1
0.055556
false
0
0.222222
0
0.305556
0.055556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dc760639ffd67ca1391d622bcca50ed7b1b5700
5,178
py
Python
neurotin/logs/scores.py
mscheltienne/neurotin-analysis
841b7d86c0c990169cceb02b40d9eb6bd0d07612
[ "MIT" ]
null
null
null
neurotin/logs/scores.py
mscheltienne/neurotin-analysis
841b7d86c0c990169cceb02b40d9eb6bd0d07612
[ "MIT" ]
null
null
null
neurotin/logs/scores.py
mscheltienne/neurotin-analysis
841b7d86c0c990169cceb02b40d9eb6bd0d07612
[ "MIT" ]
null
null
null
from typing import List, Tuple, Union

import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt

from ..utils._checks import (
    _check_participant,
    _check_participants,
    _check_type,
)
from ..utils._docs import fill_doc


@fill_doc
def boxplot_scores_evolution(
    csv,
    participant: Union[int, list, tuple],
    scores: int = 10,
    swarmplot: bool = False,
    figsize: Tuple[float, float] = (10.0, 5.0),
):
    """Plot the NFB scores as boxplots.

    X: Session
    Y: Score
    Hue: Score ID (0 to 10)

    The NFB scores displayed are logged in a .csv file with the syntax:
    [participant, session, model_idx, online_idx, transfer, scores [...]]

    The evolution of the NFB score during the 15 sessions is plotted for the
    given participant with boxplots. Scores from different part of the NFB
    runs can be displayed by providing the argument scores. By default, the
    last score corresponding to the total score obtained on a given run is
    used.

    Parameters
    ----------
    csv : path-like
        Path to the 'scores_logs.csv' file to read.
    %(participant)s
    scores : int | list of int
        ID of the non-regulation/regulation cycle score to include, or list
        of the IDs to include. Each cycle is displayed as a separate boxplot.
        Must be between 1 and 10 included.
    swarmplot : bool, optional
        If True, plots the datapoints on top of the boxes with a swarmplot.
    %(plt.figsize)s

    Returns
    -------
    f : Figure
    ax : Axes
    """
    # Validate inputs; _check_scores_idx normalizes `scores` to a list of int.
    _check_participant(participant)
    scores = _check_scores_idx(scores)
    _check_type(swarmplot, (bool,), item_name="swarmplot")
    _check_type(figsize, (tuple,), item_name="figsize")
    # Select data: keep only this participant, then reshape wide "Score k"
    # columns into long form (one row per session/score-ID/value).
    df = pd.read_csv(csv)
    df = df.loc[df["Participant"] == int(participant)]
    df = pd.melt(
        df,
        id_vars="Session",
        value_vars=[f"Score {k}" for k in scores],
        var_name="Score ID",
        value_name="Score",
    )
    # Plot
    f, ax = plt.subplots(1, 1, figsize=tuple(figsize))
    sns.boxplot(
        x="Session", y="Score", hue="Score ID", data=df, palette="muted", ax=ax
    )
    if swarmplot:
        sns.swarmplot(
            x="Session",
            y="Score",
            hue="Score ID",
            data=df,
            size=3,
            color="black",
            ax=ax,
        )
    # The swarmplot duplicates the hue legend entries; keep only the first
    # len(scores) handles so each Score ID appears once.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[: len(scores)], labels=labels[: len(scores)])
    return f, ax


@fill_doc
def boxplot_scores_between_participants(
    csv,
    participants: Union[int, list, tuple],
    scores: int = 10,
    swarmplot: bool = False,
    figsize: Tuple[float, float] = (10.0, 5.0),
):
    """Plot the NFB scores as boxplots.

    X: Participant
    Y: Score
    Hue: Score ID (0 to 10)

    The NFB scores displayed are logged in a .csv file with the syntax:
    [participant, session, model_idx, online_idx, transfer, scores [...]]

    The scores obtained during the 15 sessions are plotted in a single
    boxplot for each participant.

    Parameters
    ----------
    csv : path-like
        Path to the 'scores_logs.csv' file to read.
    %(participant)s
    scores : int | list of int
        ID of the non-regulation/regulation cycle score to include, or list
        of the IDs to include. Each cycle is displayed as a separate boxplot.
        Must be between 1 and 10 included.
    swarmplot : bool, optional
        If True, plots the datapoints on top of the boxes with a swarmplot.
    %(plt.figsize)s

    Returns
    -------
    f : Figure
    ax : Axes
    """
    participants = _check_participants(participants)
    scores = _check_scores_idx(scores)
    _check_type(swarmplot, (bool,), item_name="swarmplot")
    _check_type(figsize, (tuple,), item_name="figsize")
    # Select data. NOTE(review): unlike boxplot_scores_evolution, the melt
    # happens before the participant filter here — result is equivalent, but
    # the ordering differs between the two functions.
    df = pd.read_csv(csv)
    df = pd.melt(
        df,
        id_vars="Participant",
        value_vars=[f"Score {k}" for k in scores],
        var_name="Score ID",
        value_name="Score",
    )
    df = df[df["Participant"].isin(participants)]
    # Plot
    f, ax = plt.subplots(1, 1, figsize=tuple(figsize))
    sns.boxplot(
        x="Participant",
        y="Score",
        hue="Score ID",
        data=df,
        palette="muted",
        ax=ax,
    )
    if swarmplot:
        sns.swarmplot(
            x="Participant",
            y="Score",
            hue="Score ID",
            data=df,
            size=3,
            color="black",
            ax=ax,
        )
    # Trim duplicated hue legend entries added by the swarmplot overlay.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[: len(scores)], labels=labels[: len(scores)])
    return f, ax


def _check_scores_idx(scores: Union[int, list, tuple]) -> List[int]:
    """Check that the scores passed are valid.

    Normalizes a single int or a tuple into a list of ints and asserts every
    ID lies in [1, 10] (raises AssertionError otherwise).
    """
    _check_type(scores, ("int", list, tuple), item_name="scores")
    if isinstance(scores, int):
        scores = [scores]
    elif isinstance(scores, tuple):
        scores = list(scores)
    for score in scores:
        _check_type(score, ("int",), item_name="score")
    assert all(1 <= score <= 10 for score in scores)
    return scores
28.295082
79
0.609888
690
5,178
4.476812
0.214493
0.018129
0.017481
0.027193
0.679832
0.66494
0.655228
0.649401
0.649401
0.637747
0
0.010447
0.279065
5,178
182
80
28.450549
0.817037
0.356895
0
0.613861
0
0
0.077517
0
0
0
0
0
0.009901
1
0.029703
false
0
0.059406
0
0.118812
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
9dcad228e81ec6b0f9a3bb86c1710900d1f1972c
1,755
py
Python
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/24. Exam Preparation/22.08.2020/project/everland.py
kzborisov/SoftUni
ccb2b8850adc79bfb2652a45124c3ff11183412e
[ "MIT" ]
1
2021-02-07T07:51:12.000Z
2021-02-07T07:51:12.000Z
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/24. Exam Preparation/22.08.2020/project/everland.py
kzborisov/softuni
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
[ "MIT" ]
null
null
null
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/24. Exam Preparation/22.08.2020/project/everland.py
kzborisov/softuni
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
[ "MIT" ]
null
null
null
class Everland:
    """A venue that manages a collection of rooms, their budgets and costs.

    Rooms are duck-typed: any object exposing ``expenses``, ``room_cost``,
    ``budget`` and ``family_name`` (plus ``members_count``/``children`` and
    optionally ``appliances`` for :meth:`status`) works.
    """

    def __init__(self):
        # Rooms currently occupied; rooms that cannot pay are evicted in pay().
        self.rooms = []

    def add_room(self, room):
        """Register a room."""
        self.rooms.append(room)

    def get_monthly_consumptions(self):
        """Return the total monthly cost (expenses + room cost) over all rooms."""
        total_consumption = 0
        for room in self.rooms:
            total_consumption += room.expenses + room.room_cost
        return f"Monthly consumption: {total_consumption:.2f}$."

    def pay(self):
        """Charge every room its monthly cost and evict rooms that cannot pay.

        Returns
        -------
        str
            Newline-joined report, one line per room processed.
        """
        result = []
        # BUG FIX: iterate over a snapshot of self.rooms. The original code
        # removed evicted rooms from self.rooms while iterating it, which
        # silently skipped the room immediately following each eviction.
        for room in list(self.rooms):
            total_cost = room.expenses + room.room_cost
            if room.budget >= total_cost:
                room.budget -= total_cost
                result.append(f"{room.family_name} paid {total_cost:.2f}$ and"
                              f" have {room.budget:.2f}$ left.")
            else:
                self.rooms.remove(room)
                result.append(f"{room.family_name} does not have enough"
                              f" budget and must leave the hotel.")
        return "\n".join(result)

    def status(self):
        """Return a multi-line report: total population, per-room budget and
        expenses, per-child monthly cost, and appliance cost when present."""
        result = ""
        result += f"Total population: {sum([r.members_count for r in self.rooms])}\n"
        for r in self.rooms:
            result += f"{r.family_name} with {r.members_count} members. Budget: {r.budget:.2f}$, " \
                      f"Expenses: {r.expenses:.2f}$\n"
            if r.children:
                counter = 0
                for c in r.children:
                    counter += 1
                    # child cost is per-day; reported monthly (x30)
                    result += f"--- Child {counter} monthly cost: {c.cost * 30:.2f}$\n"
            # only some room types own appliances — duck-type via hasattr
            if hasattr(r, "appliances"):
                total_expenses = 0
                for a in r.appliances:
                    total_expenses += a.get_monthly_expense()
                result += f"--- Appliances monthly cost: {total_expenses:.2f}$\n"
        return result
39
100
0.520228
206
1,755
4.305825
0.291262
0.071026
0.049605
0.029312
0.200676
0.11274
0
0
0
0
0
0.011638
0.363533
1,755
44
101
39.886364
0.782453
0
0
0.05
0
0.025
0.271795
0.039316
0
0
0
0
0
1
0.125
false
0
0
0
0.225
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dcae389894300bd7f91c57ac11fc79ac0e2fd30
14,770
py
Python
backend/core/migrations/0001_initial.py
mashuq/academia
571b3db58de4a70210ebd9d92c0f152016aec861
[ "Unlicense" ]
null
null
null
backend/core/migrations/0001_initial.py
mashuq/academia
571b3db58de4a70210ebd9d92c0f152016aec861
[ "Unlicense" ]
null
null
null
backend/core/migrations/0001_initial.py
mashuq/academia
571b3db58de4a70210ebd9d92c0f152016aec861
[ "Unlicense" ]
null
null
null
# Generated by Django 3.1.6 on 2021-02-25 05:46
# NOTE(review): auto-generated initial migration for the `core` app —
# do not hand-edit; create a new migration for schema changes instead.

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        # --- base models ---
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mark', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Assessment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('contribution', models.FloatField()),
                ('start_date', models.DateTimeField()),
                ('end_date', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('code', models.CharField(max_length=10)),
                ('description', models.TextField()),
                ('curriculum', models.TextField(blank=True, null=True)),
                ('image', models.ImageField(upload_to='course_images/')),
                ('visible', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='CourseCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Lesson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
            ],
        ),
        # Question is a django-polymorphic base (hence the ctype FK)
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mark', models.FloatField()),
                ('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE,
                    related_name='polymorphic_core.question_set+', to='contenttypes.contenttype')),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('start_date', models.DateTimeField()),
                ('end_date', models.DateTimeField()),
                ('visible', models.BooleanField()),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.course')),
            ],
        ),
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
                ('serial', models.IntegerField()),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.course')),
            ],
        ),
        migrations.CreateModel(
            name='Testimonial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('testimonial', models.CharField(max_length=4000)),
                ('name', models.CharField(max_length=64)),
                ('identity', models.CharField(max_length=128)),
                ('serial', models.IntegerField()),
            ],
        ),
        # --- multi-table-inheritance children (lesson / answer / question) ---
        migrations.CreateModel(
            name='AudioLesson',
            fields=[
                ('lesson_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                    parent_link=True, primary_key=True, serialize=False, to='core.lesson')),
                ('embed', models.TextField()),
                ('audio_type', models.CharField(choices=[('SOUNDCLOUD', 'SoundCloud')], default='SOUNDCLOUD', max_length=32)),
            ],
            bases=('core.lesson',),
        ),
        migrations.CreateModel(
            name='BroadAnswer',
            fields=[
                ('answer_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                    parent_link=True, primary_key=True, serialize=False, to='core.answer')),
                ('answer', models.TextField()),
            ],
            bases=('core.answer',),
        ),
        migrations.CreateModel(
            name='BroadQuestion',
            fields=[
                ('question_ptr',
                    models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True, primary_key=True, serialize=False, to='core.question')),
                ('question', models.CharField(max_length=512)),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=('core.question',),
        ),
        migrations.CreateModel(
            name='MultipleChoiceAnswer',
            fields=[
                ('answer_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                    parent_link=True, primary_key=True, serialize=False, to='core.answer')),
                ('answer', models.CharField(max_length=128)),
            ],
            bases=('core.answer',),
        ),
        migrations.CreateModel(
            name='MultipleChoiceQuestion',
            fields=[
                ('question_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                    parent_link=True, primary_key=True, serialize=False, to='core.question')),
                ('question', models.CharField(max_length=512)),
                ('choice1', models.CharField(max_length=128)),
                ('choice2', models.CharField(max_length=128)),
                ('choice3', models.CharField(max_length=128)),
                ('choice4', models.CharField(max_length=128)),
                ('correct_choice', models.CharField(max_length=128)),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=('core.question',),
        ),
        migrations.CreateModel(
            name='NoteLesson',
            fields=[
                ('lesson_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                    parent_link=True, primary_key=True, serialize=False, to='core.lesson')),
                ('note', models.FileField(upload_to='course_files/')),
            ],
            bases=('core.lesson',),
        ),
        migrations.CreateModel(
            name='ShortAnswer',
            fields=[
                ('answer_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                    parent_link=True, primary_key=True, serialize=False, to='core.answer')),
                ('answer', models.CharField(max_length=2048)),
            ],
            bases=('core.answer',),
        ),
        migrations.CreateModel(
            name='ShortQuestion',
            fields=[
                ('question_ptr', models.OneToOneField(auto_created=True,
                    on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True,
                    serialize=False, to='core.question')),
                ('question', models.CharField(max_length=512)),
            ],
            options={
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=('core.question',),
        ),
        migrations.CreateModel(
            name='VideoLesson',
            fields=[
                ('lesson_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
                    parent_link=True, primary_key=True, serialize=False, to='core.lesson')),
                ('link', models.CharField(max_length=512)),
                ('video_type', models.CharField(choices=[('YOUTUBE', 'YouTube')], default='YOUTUBE', max_length=32)),
            ],
            bases=('core.lesson',),
        ),
        # --- user-profile models ---
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('gender', models.CharField(choices=[('MALE', 'Male'), ('FEMALE', 'Female')], max_length=6)),
                ('date_of_birth', models.DateTimeField(null=True)),
                ('biography', models.TextField(null=True)),
                ('profile_picture', models.ImageField(null=True, upload_to='')),
                ('teacher_type', models.CharField(choices=[('MAIN', 'Main'), ('ASSISTANT', 'Assistant')], max_length=32)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('gender', models.CharField(choices=[('MALE', 'Male'), ('FEMALE', 'Female')], max_length=6)),
                ('date_of_birth', models.DateTimeField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # --- deferred FKs (added after both ends of the relation exist) ---
        migrations.AddField(
            model_name='question',
            name='session',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.session'),
        ),
        migrations.AddField(
            model_name='lesson',
            name='session',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.session'),
        ),
        migrations.AddField(
            model_name='course',
            name='course_category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.coursecategory'),
        ),
        migrations.CreateModel(
            name='AssessmentResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_mark', models.FloatField()),
                ('assessment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.assessment')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.student')),
            ],
        ),
        migrations.AddField(
            model_name='assessment',
            name='section',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.section'),
        ),
        migrations.AddField(
            model_name='answer',
            name='assessment',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.assessment'),
        ),
        migrations.AddField(
            model_name='answer',
            name='student',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.student'),
        ),
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # --- join tables with uniqueness constraints ---
        migrations.CreateModel(
            name='SessionSection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('visible', models.BooleanField()),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.section')),
                ('session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.session')),
            ],
            options={
                'unique_together': {('section', 'session')},
            },
        ),
        migrations.CreateModel(
            name='SectionTeacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,
                    verbose_name='ID')),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.section')),
                ('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.teacher')),
            ],
            options={
                'unique_together': {('section', 'teacher')},
            },
        ),
        migrations.CreateModel(
            name='Enrolment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('final_grade', models.FloatField()),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.section')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.student')),
            ],
            options={
                'unique_together': {('section', 'student')},
            },
        ),
        migrations.CreateModel(
            name='AssessmentQuestion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('assessment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.assessment')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.question')),
            ],
            options={
                'unique_together': {('assessment', 'question')},
            },
        ),
    ]
47.491961
207
0.561882
1,369
14,770
5.913806
0.119065
0.032609
0.055336
0.086957
0.740983
0.713809
0.684906
0.679718
0.661314
0.642416
0
0.009835
0.290928
14,770
310
208
47.645161
0.763201
0.003047
0
0.693069
1
0
0.134619
0.007132
0
0
0
0
0
1
0
false
0
0.009901
0
0.023102
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
9dcd01c7a81f81cad912ec87f997c4e5ba58f9bb
2,448
py
Python
minifold/log.py
nokia/minifold
3687d32ab6119dc8293ae370c8c4ba9bbbb47deb
[ "BSD-3-Clause" ]
15
2018-09-03T09:40:59.000Z
2021-07-16T16:14:46.000Z
src/log.py
Infinite-Blue-1042/minifold
cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16
[ "BSD-3-Clause" ]
null
null
null
src/log.py
Infinite-Blue-1042/minifold
cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16
[ "BSD-3-Clause" ]
8
2019-01-25T07:18:59.000Z
2021-04-07T17:54:54.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold

__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"

import sys
from pprint import pformat

# Log levels (ordered; messages below Log.log_level are suppressed)
DEBUG = 0
INFO = 1
WARNING = 2
ERROR = 3

# Shell colors (ANSI SGR offsets: foreground is 3x, background is 4x)
DEFAULT = 0
RED = 1
GREEN = 2
YELLOW = 3
BLUE = 4
PINK = 5
CYAN = 6
GRAY = 7

# Shell style
# NOTE(review): DEFAULT is redefined here; both definitions are 0, so this is
# harmless, but the duplication is kept only for backward compatibility.
DEFAULT = 0
BOLD = 1
UNDERLINED = 4
BLINKING = 5
HIGHLIGHTED = 7


class Log:
    """Minimal leveled, ANSI-colored logger writing to stderr."""

    enable_print = False
    # TODO: The following static parameters should be loaded from ~/.minifoldrc
    # TODO: dark / light colors
    # NOTE(review): with_color is currently never read by print() — confirm
    # whether it should gate the coloring.
    with_color = True
    log_level = 0
    # Per-level header text and color used by print().
    message_header = {
        DEBUG : "DEBUG",
        INFO : "INFO",
        WARNING : "WARNING",
        ERROR : "ERROR",
    }
    message_color = {
        DEBUG : CYAN,
        INFO : GREEN,
        WARNING : YELLOW,
        ERROR : RED,
    }

    @staticmethod
    def start_style(
        fg_color :int = None,
        bg_color :int = None,
        styles :list = None
    ) -> str:
        """Build the ANSI escape sequence enabling the given style.

        Args:
            fg_color: foreground color offset (e.g. RED), or None.
            bg_color: background color offset, or None.
            styles: extra SGR codes as strings (e.g. ["1"] for bold).

        Returns:
            The escape sequence, or "" when no styling is requested.
        """
        # FIX: 'is not None' instead of '!= None'; 'styles' default changed
        # from the mutable 'list()' to None (same observable behavior).
        styling = list()
        if fg_color is not None:
            styling.append("3%d" % fg_color)
        if bg_color is not None:
            styling.append("4%d" % bg_color)
        if styles:
            styling += styles
        return ("\033[%sm" % ";".join(styling)) if styling else ""

    @staticmethod
    def default_style() -> str:
        """Return the ANSI sequence resetting all styling."""
        return "\033[0m"

    @classmethod
    def print(cls, message_type :int, message :str, file = sys.stderr):
        """Print `message` with its level header/color if logging is enabled
        and `message_type` passes the level threshold."""
        if cls.enable_print and message_type >= cls.log_level:
            color = cls.message_color[message_type]
            header = cls.message_header[message_type]
            print(
                "%(start_style)s%(message)s%(end_style)s" % {
                    "start_style" : cls.start_style(fg_color = color),
                    # non-str payloads are pretty-printed for readability
                    "message" : " ".join([header, message if isinstance(message, str) else pformat(message)]),
                    "end_style" : cls.default_style()
                },
                file = file
            )

    @classmethod
    def debug(cls, s):
        cls.print(DEBUG, s)

    @classmethod
    def info(cls, s):
        cls.print(INFO, s)

    @classmethod
    def warning(cls, s):
        cls.print(WARNING, s)

    @classmethod
    def error(cls, s):
        cls.print(ERROR, s)
24
114
0.562908
296
2,448
4.493243
0.381757
0.052632
0.021053
0.03609
0
0
0
0
0
0
0
0.02042
0.319853
2,448
101
115
24.237624
0.778378
0.099265
0
0.12
0
0
0.096128
0.034624
0
0
0
0.009901
0
1
0.093333
false
0
0.026667
0.013333
0.226667
0.12
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dcdcb702db69a33b8fb22a29cccef585723a801
4,515
py
Python
cardgame_channels_app/migrations/0001_initial.py
cyface/cardgame_channels
22f2bef190ee20999eae27e6aa9ce138a78ae47f
[ "MIT" ]
null
null
null
cardgame_channels_app/migrations/0001_initial.py
cyface/cardgame_channels
22f2bef190ee20999eae27e6aa9ce138a78ae47f
[ "MIT" ]
null
null
null
cardgame_channels_app/migrations/0001_initial.py
cyface/cardgame_channels
22f2bef190ee20999eae27e6aa9ce138a78ae47f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-18 11:31
# NOTE(review): auto-generated initial migration for cardgame_channels_app —
# do not hand-edit; create a new migration for schema changes instead.
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Card',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('type', models.CharField(blank=True, max_length=255, null=True)),
                ('text', models.TextField(blank=True, null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name_plural': 'cards',
                'ordering': ['name'],
            },
        ),
        # CardGamePlayer is the through-model linking Card, Game and Player
        migrations.CreateModel(
            name='CardGamePlayer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(db_index=True, default='hand', max_length=30)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
                ('card', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cardgame_channels_app.Card')),
            ],
            options={
                'verbose_name': 'Card Game Player',
                'verbose_name_plural': 'Card Game Players',
                'ordering': ['date_created'],
            },
        ),
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(db_index=True, max_length=255, unique=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
                ('cards', models.ManyToManyField(through='cardgame_channels_app.CardGamePlayer', to='cardgame_channels_app.Card')),
            ],
            options={
                'verbose_name_plural': 'games',
                'ordering': ['code'],
            },
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,
                    verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('status', models.CharField(default='waiting', max_length=20)),
                ('score', models.IntegerField(default=0)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
                ('cards', models.ManyToManyField(through='cardgame_channels_app.CardGamePlayer', to='cardgame_channels_app.Card')),
                ('game', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
                    related_name='players', to='cardgame_channels_app.Game')),
            ],
            options={
                'verbose_name_plural': 'players',
                'ordering': ['name'],
            },
        ),
        # FKs added after both endpoints of the relation exist
        migrations.AddField(
            model_name='cardgameplayer',
            name='game',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cardgame_channels_app.Game'),
        ),
        migrations.AddField(
            model_name='cardgameplayer',
            name='player',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cardgame_channels_app.Player'),
        ),
        migrations.AddField(
            model_name='card',
            name='games',
            field=models.ManyToManyField(through='cardgame_channels_app.CardGamePlayer', to='cardgame_channels_app.Game'),
        ),
        migrations.AddField(
            model_name='card',
            name='players',
            field=models.ManyToManyField(through='cardgame_channels_app.CardGamePlayer', to='cardgame_channels_app.Player'),
        ),
        migrations.AlterUniqueTogether(
            name='cardgameplayer',
            unique_together=set([('card', 'game')]),
        ),
    ]
43.834951
169
0.575858
431
4,515
5.825986
0.213457
0.076464
0.0908
0.082836
0.657905
0.62724
0.576264
0.561529
0.545599
0.520908
0
0.010255
0.287265
4,515
102
170
44.264706
0.770044
0.015061
0
0.521277
1
0
0.189694
0.080108
0
0
0
0
0
1
0
false
0
0.031915
0
0.074468
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
9dce2d32fa35d3b007796ab403b5019d5baeeffb
2,820
py
Python
data_collection/omscs_website/omscs_cleaner.py
yashchitalia/jack-holmes
1ce3c65c1477390fb15d99a14f608f62745548b1
[ "Apache-2.0" ]
1
2017-03-30T02:25:18.000Z
2017-03-30T02:25:18.000Z
data_collection/omscs_website/omscs_cleaner.py
yashchitalia/jack-holmes
1ce3c65c1477390fb15d99a14f608f62745548b1
[ "Apache-2.0" ]
null
null
null
data_collection/omscs_website/omscs_cleaner.py
yashchitalia/jack-holmes
1ce3c65c1477390fb15d99a14f608f62745548b1
[ "Apache-2.0" ]
null
null
null
from bs4 import BeautifulSoup
import re
import urllib
import pickle as pkl


def cleanhtml(raw_html):
    """Strip HTML tags and common unicode/escape artifacts from `raw_html`.

    Removes <...> tags, real U+00A0 / U+2019 characters, and the literal
    backslash-escape sequences (e.g. '\\u2014') left behind by
    `bytes.encode('ascii', errors='backslashreplace')`.
    """
    # drop anything that looks like an HTML tag
    cleantext = re.sub(re.compile('<.*?>'), '', raw_html)
    # drop real non-breaking spaces and possessive/contraction suffixes
    # (regex '\\xa0' / '\\u2019' escapes match the actual characters)
    cleantext = re.sub(re.compile('\\xa0'), '', cleantext)
    cleantext = re.sub(re.compile('\\u2019s'), '', cleantext)
    cleantext = re.sub(re.compile('\\u2019ll'), ' ', cleantext)
    # replace literal backslashreplace leftovers with spaces
    for escape in ('\\xa0', '\\u2014', '\\u201c', '\\u201d', '\\u2013'):
        cleantext = cleantext.replace(escape, ' ')
    return cleantext


def _clean_section(unclean_list):
    """Join a bs4 node list for a prose section and clean it to plain text."""
    final_string = ''
    unclean_list.pop(0)  # first node is the section heading; skip it
    for item in unclean_list:
        try:
            if str(type(item)) == "<class 'bs4.element.NavigableString'>":
                # BUG FIX: the original kept the *bytes* object here, so
                # str(item) produced "b'...'" — the '\n' skip never fired and
                # b''-wrapped reprs leaked into the output. Decode back to str.
                item = item.encode('ascii', errors='backslashreplace').decode('ascii')
                if str(item) == '\n':
                    continue
                final_string = final_string + ' ' + str(item)
            elif str(type(item)) == "<class 'bs4.element.Tag'>":
                if item.next == '\n':
                    continue
                final_string = final_string + ' ' + str(item.next)
        except UnicodeEncodeError:
            item = item.encode('ascii', errors='backslashreplace').decode('ascii')
            if str(item) == '\n':
                continue
            final_string = final_string + ' ' + str(item)
    return cleanhtml(final_string)


def _clean_course(curr_unclean_dat):
    """Clean one course's scraped attributes into a plain-string dict."""
    curr_clean_dat = {}
    for attribute in curr_unclean_dat.keys():
        if attribute == 'Instructor':
            # FIX: narrow the bare 'except:' — it also swallowed SystemExit /
            # KeyboardInterrupt.
            try:
                curr_clean_dat[attribute] = str(curr_unclean_dat[attribute][0])
            except Exception:
                continue
        elif attribute == 'Name':
            try:
                curr_clean_dat[attribute] = str(curr_unclean_dat[attribute])
            except Exception:
                continue
        elif attribute in ['Overview', 'Prerequisites', 'Grading', 'Technical', 'Reading']:
            curr_clean_dat[attribute] = _clean_section(curr_unclean_dat[attribute])
    return curr_clean_dat


def main():
    """Load the scraped pickle, clean every course, dump the cleaned dict."""
    with open('omscs_website_data.p', 'rb') as fh:
        unclean_dat = pkl.load(fh)
    clean_dat = {}
    for course_number in unclean_dat.keys():
        clean_dat[course_number] = _clean_course(unclean_dat[course_number])
    with open('omscs_cleaned_data.p', 'wb') as fh:
        pkl.dump(clean_dat, fh)


# FIX: guard the script body so importing this module has no side effects
# (the original ran the pickle load/dump at import time).
if __name__ == '__main__':
    main()
40.285714
91
0.575887
298
2,820
5.174497
0.278523
0.093385
0.103761
0.077821
0.411154
0.392996
0.239948
0.138781
0.114137
0.114137
0
0.014834
0.306738
2,820
69
92
40.869565
0.773913
0
0
0.274194
0
0
0.099681
0.010642
0
0
0
0
0
1
0.016129
false
0
0.064516
0
0.096774
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dce34cc1f5685467f230a6aaddab0a3ca10dd09
1,116
py
Python
testinfra/test_hypervisor-runc.py
devbox-tools/sfc
0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1
[ "Apache-2.0" ]
1
2019-02-26T13:25:17.000Z
2019-02-26T13:25:17.000Z
testinfra/test_hypervisor-runc.py
devbox-tools/sfc
0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1
[ "Apache-2.0" ]
null
null
null
testinfra/test_hypervisor-runc.py
devbox-tools/sfc
0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import utils
import yaml


class TestHypervisorRunC(utils.Base):
    """Testinfra checks for the runc-based hypervisor slaves."""

    def test_slaves_are_running(self, host):
        # `runc list -q` prints one container id per line; an empty output
        # (no running containers) makes the assertion fail.
        running_containers = host.check_output("runc list -q")
        assert running_containers

    def test_slaves_are_isolated(self, host):
        vars_path = "/var/lib/software-factory/ansible/group_vars/all.yaml"
        with open(vars_path) as stream:
            group_vars = yaml.safe_load(stream)
        if group_vars.get("enable_insecure_slaves") is True:
            # isolation is deliberately disabled on this deployment
            return
        # Make sure managesf internal url access fails
        url = group_vars["managesf_internal_url"]
        probe = host.run("curl --connect-timeout 3 %s" % url)
        # curl rc 7 = connection refused, rc 28 = timeout
        assert probe.rc in (7, 28)
39.857143
75
0.713262
164
1,116
4.756098
0.670732
0.076923
0.033333
0.041026
0
0
0
0
0
0
0
0.008989
0.202509
1,116
27
76
41.333333
0.867416
0.508065
0
0
0
0
0.251866
0.179104
0
0
0
0
0.181818
1
0.181818
false
0
0.181818
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dcee3a8fc687322519c4ee6dd19ea787ec8d273
280
py
Python
Frameworks/urls.py
MiniJez/TP_Django
e7540f3178d44efeab69a8c8bea14a70fdaa9b4e
[ "MIT" ]
null
null
null
Frameworks/urls.py
MiniJez/TP_Django
e7540f3178d44efeab69a8c8bea14a70fdaa9b4e
[ "MIT" ]
null
null
null
Frameworks/urls.py
MiniJez/TP_Django
e7540f3178d44efeab69a8c8bea14a70fdaa9b4e
[ "MIT" ]
null
null
null
from django.urls import path from .views import index, create, delete, update urlpatterns = [ path('', index, name='index'), path('create/', create, name='create'), path('delete/<int:pk>', delete, name='delete'), path('update/<int:pk>', update, name='update'), ]
28
51
0.639286
36
280
4.972222
0.388889
0.055866
0
0
0
0
0
0
0
0
0
0
0.157143
280
10
52
28
0.758475
0
0
0
0
0
0.213523
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dd02fb84f2d21edf2c3f482fb528f7ff864783d
1,831
py
Python
scrape.py
valvoda/holjplus
6a214911b477adf1253b43e46f7f5afc3076a86a
[ "MIT" ]
null
null
null
scrape.py
valvoda/holjplus
6a214911b477adf1253b43e46f7f5afc3076a86a
[ "MIT" ]
null
null
null
scrape.py
valvoda/holjplus
6a214911b477adf1253b43e46f7f5afc3076a86a
[ "MIT" ]
null
null
null
""" Adapted from https://realpython.com/python-web-scraping-practical-introduction/ for the purpose of scraping https://publications.parliament.uk/pa/ld/ldjudgmt.HTML to create an expanded HOLJ+ corpus """ import requests from requests import get from requests.exceptions import RequestException from contextlib import closing class Scrape: def simple_get(self, url): """ Attempts to get the content at `url` by making an HTTP GET request. If the content-type of response is some kind of HTML/XML, return the text content, otherwise return None """ try: with closing(get(url, stream=True)) as resp: if self.is_good_response(resp): return resp.content else: return None except RequestException as e: self.log_error('Error during requests to {0} : {1}'.format(url, str(e))) return None def is_good_response(self, resp): """ Returns true if the response seems to be HTML, false otherwise """ content_type = resp.headers['Content-Type'].lower() return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1) def log_error(self, e): """ It is always a good idea to log errors. This function just prints them, but you can make it do anything. """ print(e) if __name__ == "__main__": sc = Scrape() print("Testing the scaper:") raw_html = sc.simple_get('https://realpython.com/blog/') assert (len(raw_html) > 0), "Error, does not get" no_html = sc.simple_get("https://doesnotexist.com/thereshouldbenothing/") assert (no_html == None), "Error, does get" print("Working")
30.516667
84
0.616057
237
1,831
4.654008
0.49789
0.049864
0.032638
0.027199
0.036265
0
0
0
0
0
0
0.005344
0.284544
1,831
59
85
31.033898
0.836641
0.293829
0
0.066667
0
0
0.162299
0
0
0
0
0
0.066667
1
0.1
false
0
0.133333
0
0.4
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dd06c5c9ed12f49b25dc9756a8a419ae3530b18
1,881
py
Python
emotional_ai/model.py
fuluny/Emotional-AI
1372933ec410f72cd500513ea560f43167382e34
[ "MIT" ]
null
null
null
emotional_ai/model.py
fuluny/Emotional-AI
1372933ec410f72cd500513ea560f43167382e34
[ "MIT" ]
null
null
null
emotional_ai/model.py
fuluny/Emotional-AI
1372933ec410f72cd500513ea560f43167382e34
[ "MIT" ]
null
null
null
# #!/usr/bin/python import os import numpy as np import pandas as pd from keras.models import load_model from keras.models import Sequential from keras.utils import np_utils from keras.layers.core import Dense, Activation, Dropout from keras import optimizers from matplotlib import pyplot as plt print('Loading data...') data = pd.read_csv('fer2013.csv') #data = pd.read_csv('testdata.csv') im = data['pixels'] im_list = [] print('Pre-processing data...') for i in range(len(im)): im_list.append(list(map(int,im[i].split()))) X_train = np.asarray(im_list).astype('float32') y_train = np_utils.to_categorical(np.asarray(data['emotion'])) X_train *= 2.0/255 X_train -= 1 input_dim = X_train.shape[1] nb_classes = y_train.shape[1] # Parameters were chosen from most commonly used and sometimes at random # Further development of the model may be needed print('Making model') model = Sequential() # Dense define number of nodes model.add(Dense(1000, input_dim=input_dim)) # Activation defines the output model.add(Activation('relu')) # Dropout to avoid overfitting. model.add(Dropout(0.15)) model.add(Dense(500)) model.add(Activation('relu')) model.add(Dropout(0.15)) model.add(Dense(100)) model.add(Activation('relu')) model.add(Dropout(0.15)) model.add(Dense(50)) model.add(Activation('relu')) model.add(Dropout(0.15)) model.add(Dense(10)) model.add(Activation('relu')) model.add(Dropout(0.15)) model.add(Dense(nb_classes)) model.add(Activation('softmax')) print(model.summary()) sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy']) print("Training...") model.fit(X_train, y_train, epochs=100, validation_split=0.1, verbose=2) scores = model.evaluate(X_train, y_train, verbose=0) print(scores) # save model to HDF5 model.save('model.h5') print("Saved model to disk")
25.767123
81
0.747475
306
1,881
4.509804
0.418301
0.098551
0.056522
0.07971
0.176087
0.176087
0.176087
0.176087
0.153623
0.153623
0
0.034767
0.09782
1,881
72
82
26.125
0.778433
0.14673
0
0.2
0
0
0.111041
0.015056
0
0
0
0
0
1
0
false
0
0.18
0
0.18
0.14
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dd27bec72ba1ef4b5afcb916eaaa9109718bd5c
2,487
py
Python
detect_port_services.py
amir78729/penetration-test-project
c85376303ce0451e2e3a3150617484d5e6837168
[ "MIT" ]
1
2022-02-04T19:29:18.000Z
2022-02-04T19:29:18.000Z
detect_port_services.py
amir78729/penetration-test-project
c85376303ce0451e2e3a3150617484d5e6837168
[ "MIT" ]
null
null
null
detect_port_services.py
amir78729/penetration-test-project
c85376303ce0451e2e3a3150617484d5e6837168
[ "MIT" ]
null
null
null
from socket import socket, gaierror, getservbyport, AF_INET, SOCK_STREAM, setdefaulttimeout from tqdm import tqdm from datetime import datetime def detect_port_services(ip, range_start, range_end): port_services = {} port_detecting_progress = tqdm(range(range_start, range_end + 1)) try: for port in port_detecting_progress: port_detecting_progress.set_description('checking port {}'.upper().format(port)) setdefaulttimeout(2) s = socket(AF_INET, SOCK_STREAM) result = s.connect_ex((ip, port)) # trying to get more information about port service try: message = b'WhoAreYou' s.send(message) banner = s.recv(100) s.close() except IOError: banner = b'' if result == 0: service_name = getservbyport(port) port_services.update({port: (service_name, banner.replace(b'\r\n', b'').decode('utf-8'))}) s.close() log_port_services(ip, range_start, range_end, port_services) except KeyboardInterrupt: print("\ncanceled...".upper()) except gaierror: print("\nHostname Could Not Be Resolved".upper()) return port_services def log_port_services(ip, range_start, range_end, port_services): try: with open("results/result_port_services.txt", "a") as file: file.write('@ {}'.upper().format(datetime.now())) file.write('\nhost {} open ports\' services from {} to {}:'.upper().format(ip, range_start, range_end)) [file.write('\n {}:\t{} {}' .format(port, port_services[port][0].upper(), '' if not port_services[port][1] else '\n\t\t({})\n' .format(port_services[port][1])) ) for port in port_services.keys()] if not port_services.keys(): file.write('\n× no open ports was founded!'.upper()) file.write('\n----------------------------------------------------\n') except FileNotFoundError: print('PLEASE CREATE \"/results/result_detect_open_ports.txt\" AND TRY AGAIN.') if __name__ == '__main__': detect_port_services( ip=input('TARGET IP ADDRESS: '), range_start=int(input('START OF RANGE : ')), range_end=int(input('END OF RANGE : ')), )
38.859375
115
0.556494
281
2,487
4.725979
0.362989
0.135542
0.056476
0.067771
0.118976
0.103916
0.103916
0.103916
0.103916
0.070783
0
0.005744
0.29996
2,487
63
116
39.47619
0.756462
0.019702
0
0.098039
0
0
0.157225
0.052956
0
0
0
0
0
1
0.039216
false
0
0.058824
0
0.117647
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dd2a344fe4c04f0564d9da26c93b7f70200954e
14,829
py
Python
zvdata/apps/data_app.py
freedom6xiaobai/zvt
f4ba510a30f1014cc0e48b85370b0d3936bd851a
[ "MIT" ]
1
2019-10-28T08:03:26.000Z
2019-10-28T08:03:26.000Z
zvdata/apps/data_app.py
freedom6xiaobai/zvt
f4ba510a30f1014cc0e48b85370b0d3936bd851a
[ "MIT" ]
null
null
null
zvdata/apps/data_app.py
freedom6xiaobai/zvt
f4ba510a30f1014cc0e48b85370b0d3936bd851a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import json from collections import OrderedDict from typing import List import dash_core_components as dcc import dash_html_components as html import dash_table import pandas as pd from dash import dash from dash.dependencies import Input, Output, State from zvdata import IntervalLevel from zvdata.app import app from zvdata.chart import Drawer from zvdata.domain import global_providers, get_schemas, get_schema_by_name, get_schema_columns from zvdata.normal_data import NormalData, IntentType from zvdata.reader import DataReader from zvdata.utils.pd_utils import df_is_not_null from zvdata.utils.time_utils import now_pd_timestamp, TIME_FORMAT_DAY current_df = None layout = html.Div( [ html.Div( [ # provider selector dcc.Dropdown( id='provider-selector', placeholder='select provider', options=[{'label': provider, 'value': provider} for provider in global_providers]), # schema selector dcc.Dropdown(id='schema-selector', placeholder='select schema'), # level selector dcc.Dropdown(id='level-selector', placeholder='select level', options=[{'label': level.value, 'value': level.value} for level in IntervalLevel], value=IntervalLevel.LEVEL_1DAY.value), # column selector html.Div(id='schema-column-selector-container', children=None), dcc.Dropdown( id='properties-selector', options=[ {'label': 'undefined', 'value': 'undefined'} ], value='undefined', multi=True ), # codes filter dcc.Input(id='input-code-filter', type='text', placeholder='input codes', style={'width': '400px'}), # time range filter dcc.DatePickerRange( id='date-picker-range', start_date='2009-01-01', end_date=now_pd_timestamp(), display_format=TIME_FORMAT_DAY ), # load data for table html.Button('load data', id='btn-load-data', n_clicks_timestamp=0), # table container html.Div(id='data-table-container', children=None), # selected properties html.Label('setting y_axis and chart type for the columns:'), # col setting container html.Div(id='col-setting-container', 
children=dash_table.DataTable( id='col-setting-table', columns=[ {'id': 'property', 'name': 'property', 'editable': False}, {'id': 'y_axis', 'name': 'y_axis', 'presentation': 'dropdown'}, {'id': 'chart', 'name': 'chart', 'presentation': 'dropdown'} ], dropdown={ 'y_axis': { 'options': [ {'label': i, 'value': i} for i in ['y1', 'y2', 'y3', 'y4', 'y5'] ] }, 'chart': { 'options': [ {'label': chart_type.value, 'value': chart_type.value} for chart_type in NormalData.get_charts_by_intent(IntentType.compare_self) ] } }, editable=True ), ), html.Div(id='table-type-label', children=None), html.Div( [ html.Div([dcc.Dropdown(id='intent-selector')], style={'width': '50%', 'display': 'inline-block'}), html.Div([dcc.Dropdown(id='chart-selector')], style={'width': '50%', 'display': 'inline-block'}) ] ), html.Div(id='chart-container', children=None) ]) ] ) @app.callback( Output('schema-selector', 'options'), [Input('provider-selector', 'value')]) def update_schema_selector(provider): if provider: return [{'label': schema.__name__, 'value': schema.__name__} for schema in get_schemas(provider=provider)] raise dash.exceptions.PreventUpdate() @app.callback( Output('schema-column-selector-container', 'children'), [Input('schema-selector', 'value')], state=[State('provider-selector', 'value')]) def update_column_selector(schema_name, provider): if provider and schema_name: schema = get_schema_by_name(name=schema_name) cols = get_schema_columns(schema=schema) return dcc.Dropdown( id='schema-column-selector', options=[ {'label': col, 'value': col} for col in cols ], value=get_schema_by_name(name=schema_name).important_cols(), multi=True ) raise dash.exceptions.PreventUpdate() @app.callback( [Output('properties-selector', 'options'), Output('properties-selector', 'value')], [Input('schema-column-selector', 'value')], state=[State('provider-selector', 'value'), State('schema-selector', 'value'), State('properties-selector', 'options'), State('properties-selector', 'value')]) def 
update_selected_properties(selected_cols, provider, schema_name, options, value): if selected_cols and provider and schema_name: current_options = options current_value = value added_labels = [] added_values = [] for col in selected_cols: added_labels.append(col) added_values.append( json.dumps({ 'provider': provider, 'schema': schema_name, 'column': col })) added_options = [{'label': col, 'value': added_values[i]} for i, col in enumerate(added_labels)] if 'undefined' in value: current_options = [] current_value = [] current_options += added_options current_value += added_values return current_options, current_value raise dash.exceptions.PreventUpdate() def properties_to_readers(properties, level, codes, start_date, end_date) -> List[DataReader]: provider_schema_map_cols = {} for prop in properties: provider = prop['provider'] schema_name = prop['schema'] key = (provider, schema_name) if key not in provider_schema_map_cols: provider_schema_map_cols[key] = [] provider_schema_map_cols[key].append(prop['column']) readers = [] for item, columns in provider_schema_map_cols.items(): provider = item[0] schema_name = item[1] schema = get_schema_by_name(schema_name) readers.append(DataReader(data_schema=schema, provider=provider, codes=codes, level=level, columns=columns, start_timestamp=start_date, end_timestamp=end_date, time_field=schema.time_field())) return readers @app.callback( [Output('data-table-container', 'children'), Output('col-setting-table', 'data'), Output('table-type-label', 'children'), Output('intent-selector', 'options'), Output('intent-selector', 'value')], [Input('btn-load-data', 'n_clicks')], state=[State('properties-selector', 'value'), State('level-selector', 'value'), State('input-code-filter', 'value'), State('date-picker-range', 'start_date'), State('date-picker-range', 'end_date')]) def update_data_table(n_clicks, properties, level, codes: str, start_date, end_date): if n_clicks and properties: props = [] for prop in properties: 
props.append(json.loads(prop)) readers = properties_to_readers(properties=props, level=level, codes=codes, start_date=start_date, end_date=end_date) if readers: data_df = readers[0].data_df for reader in readers[1:]: if df_is_not_null(reader.data_df): data_df = data_df.join(reader.data_df, how='outer') global current_df current_df = data_df if not df_is_not_null(current_df): return 'no data,please reselect!', [], '', [ {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self' normal_data = NormalData(current_df) data_table = Drawer(data=normal_data).draw_data_table(id='data-table-content') # generate col setting table properties = normal_data.data_df.columns.to_list() df = pd.DataFrame(OrderedDict([ ('property', properties), ('y_axis', ['y1'] * len(properties)), ('chart', ['line'] * len(properties)) ])) # generate intents intents = normal_data.get_intents() intent_options = [ {'label': intent.value, 'value': intent.value} for intent in intents ] intent_value = intents[0].value return data_table, df.to_dict('records'), normal_data.get_table_type(), intent_options, intent_value else: return 'no data,please reselect!', [], '', [ {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self' raise dash.exceptions.PreventUpdate() @app.callback( [Output('chart-selector', 'options'), Output('chart-selector', 'value')], [Input('intent-selector', 'value')]) def update_chart_selector(intent): if intent: charts = NormalData.get_charts_by_intent(intent=intent) options = [ {'label': chart.value, 'value': chart.value} for chart in charts ] value = charts[0].value return options, value raise dash.exceptions.PreventUpdate() operators_df = [['ge ', '>='], ['le ', '<='], ['lt ', '<'], ['gt ', '>'], ['ne ', '!='], ['eq ', '='], ['contains '], ['datestartswith ']] operators_sql = [['>= ', '>='], ['<= ', '<='], ['< ', '<'], ['> ', '>'], ['!= ', '!='], ['== ', '='], ['contains '], ['datestartswith ']] def split_filter_part(filter_part, operators=operators_df): for 
operator_type in operators: for operator in operator_type: if operator in filter_part: name_part, value_part = filter_part.split(operator, 1) name = name_part[name_part.find('{') + 1: name_part.rfind('}')] value_part = value_part.strip() v0 = value_part[0] if (v0 == value_part[-1] and v0 in ("'", '"', '`')): value = value_part[1: -1].replace('\\' + v0, v0) else: try: value = float(value_part) except ValueError: value = value_part # word operators need spaces after them in the filter string, # but we don't want these later return name, operator_type[0].strip(), value return [None] * 3 @app.callback( [Output('data-table-content', "data"), Output('chart-container', "children")], [Input('data-table-content', "page_current"), Input('data-table-content', "page_size"), Input('data-table-content', "sort_by"), Input('data-table-content', "filter_query"), Input('intent-selector', "value"), Input('chart-selector', "value"), Input('col-setting-table', 'data'), Input('col-setting-table', 'columns')]) def update_table_and_graph(page_current, page_size, sort_by, filter, intent, chart, rows, columns): if chart: property_map = {} for row in rows: property_map[row['property']] = { 'y_axis': row['y_axis'], 'chart': row['chart'] } dff = current_df if filter: filtering_expressions = filter.split(' && ') for filter_part in filtering_expressions: col_name, operator, filter_value = split_filter_part(filter_part) if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): # these operators match pandas series operator method names dff = dff.loc[getattr(dff[col_name], operator)(filter_value)] elif operator == 'contains': dff = dff.loc[dff[col_name].str.contains(filter_value)] elif operator == 'datestartswith': # this is a simplification of the front-end filtering logic, # only works with complete fields in standard format dff = dff.loc[dff[col_name].str.startswith(filter_value)] # if sort_by: # dff = dff.sort_values( # [col['entity_id'] for col in sort_by], # ascending=[ # col['direction'] == 
'asc' # for col in sort_by # ], # inplace=False # ) if intent in (IntentType.compare_self.value, IntentType.compare_to_other.value): graph_data, graph_layout = Drawer(NormalData(dff)).draw_compare(chart=chart, property_map=property_map, render=None, keep_ui_state=False) else: graph_data, graph_layout = Drawer(NormalData(dff)).draw(chart=chart, property_map=property_map, render=None, keep_ui_state=False) table_data = dff.iloc[page_current * page_size: (page_current + 1) * page_size ].to_dict('records') return table_data, \ dcc.Graph( id='chart-content', figure={ 'data': graph_data, 'layout': graph_layout } ) raise dash.exceptions.PreventUpdate()
36.796526
120
0.52458
1,470
14,829
5.104762
0.167347
0.025986
0.012127
0.025586
0.196962
0.102212
0.102212
0.057303
0.045842
0.033849
0
0.004586
0.353024
14,829
402
121
36.88806
0.777569
0.047947
0
0.139535
0
0
0.144216
0.009155
0
0
0
0
0
1
0.026578
false
0
0.059801
0
0.122924
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dd308c092689ec19be480b950fd1043adb5873d
1,139
py
Python
api-gateway/fcgi/handwritten/python/fcgi_codec.py
intel/cloud-client-ai-service-framework
01676b08878f7a58201854aedb181134eafef7a2
[ "Apache-2.0" ]
3
2022-03-25T17:28:53.000Z
2022-03-29T03:30:25.000Z
api-gateway/fcgi/handwritten/python/fcgi_codec.py
intel/cloud-client-ai-service-framework
01676b08878f7a58201854aedb181134eafef7a2
[ "Apache-2.0" ]
null
null
null
api-gateway/fcgi/handwritten/python/fcgi_codec.py
intel/cloud-client-ai-service-framework
01676b08878f7a58201854aedb181134eafef7a2
[ "Apache-2.0" ]
1
2022-03-27T12:44:19.000Z
2022-03-27T12:44:19.000Z
import numpy as np class CTCCodec(object): """ Convert index to label """ def __init__(self, char_label, top_k): # char_label : all the characters. self.top_k = top_k self.index = {} list_character = list(char_label) for i, char in enumerate(list_character): # 0 is for 'blank' self.index[char] = i + 1 self.char_label = ['[blank]'] + list_character def decode(self, predicts): """ convert index to label. """ texts_label = [] text_list = [] # Select max probability index_predicts = np.argmax(predicts, 2) # WBD - > WB index_predicts = index_predicts.transpose(1, 0) # WB -> BW index_predicts_reshape = index_predicts.reshape(-1) # B*W for i in range(len(index_predicts_reshape)): if index_predicts_reshape[i] != 0 and (not (i > 0 and index_predicts_reshape[i] == index_predicts_reshape[i - 1])): text_list.append(self.char_label[index_predicts_reshape[i]]) text = ''.join(text_list) texts_label.append(text) return texts_label
30.783784
127
0.600527
147
1,139
4.401361
0.367347
0.200927
0.216383
0.12983
0
0
0
0
0
0
0
0.011097
0.287972
1,139
36
128
31.638889
0.786683
0.128183
0
0
0
0
0.007179
0
0
0
0
0
0
1
0.095238
false
0
0.047619
0
0.238095
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
9dd3506fa61a6efdbedcfd729d5128ff929686bf
4,333
py
Python
src/hmmmr/non_batched_functions.py
carojasq/HMMMR
f94846d8f02fe8993a0e5fb55e936dd1c1596187
[ "MIT" ]
null
null
null
src/hmmmr/non_batched_functions.py
carojasq/HMMMR
f94846d8f02fe8993a0e5fb55e936dd1c1596187
[ "MIT" ]
1
2019-11-01T08:32:04.000Z
2019-11-01T08:32:04.000Z
src/hmmmr/non_batched_functions.py
carojasq/HMMMR
f94846d8f02fe8993a0e5fb55e936dd1c1596187
[ "MIT" ]
1
2019-04-05T00:06:31.000Z
2019-04-05T00:06:31.000Z
from common_libs import * from cublas_functions import * linalg.init() def cublas_calculate_transpose_non_batched(h, a_gpu): cublas_transpose = get_single_transpose_function(a_gpu) m, k = a_gpu.shape at_gpu = gpuarray.empty((k, m), a_gpu.dtype) k, n = at_gpu.shape # Calculate transpose transa = transb = 't' cublas_transpose(h, transa, transb, m, k, 1.0, a_gpu.gpudata, k, 0.0, a_gpu.gpudata, k, at_gpu.gpudata, m) return at_gpu # Matrix product, there is a batch equivalent for this function too # Make sure it has 2 dimensions (use reshape in the case is 1d) def cublas_matrix_product_gemm_non_batched(handle, a_gpu, b_gpu): """ :param handle: :param a_gpu: Be carefull to pass X here :param b_gpu: Xt should be here :return: """ cublas_dot = get_single_dot_function(b_gpu) if len(a_gpu.shape)!=2 or len(a_gpu.shape)!=2: raise ValueError('Make sure the arrays are 2 dimensional') n, l = a_gpu.shape k, m = b_gpu.shape c_gpu = gpuarray.empty((n, m), b_gpu.dtype) lda = max(1, a_gpu.strides[0] // a_gpu.dtype.itemsize) ldb = max(1, b_gpu.strides[0] // b_gpu.dtype.itemsize) ldc = max(1, c_gpu.strides[0] // c_gpu.dtype.itemsize) alpha = np.float32(1.0) beta = np.float32(0.0) transa = transb = 'n' cublas_dot(handle, transb, transa, m, n, k, alpha, b_gpu.gpudata, ldb, a_gpu.gpudata, lda, beta, c_gpu.gpudata, ldc) return c_gpu def cublas_matrix_product_gemm_batched(handle, as_gpu, bs_gpu): cublas_dot = get_batched_dot_function(as_gpu) if len(a_gpu.shape) != 2 or len(a_gpu.shape) != 2: raise ValueError('Make sure the arrays are 2 dimensional') # n, z, l n, l = as_gpu.shape k, m = bs_gpu.shape c_gpu = gpuarray.empty((n, m), b_gpu.dtype) lda = max(1, a_gpu.strides[0] // a_gpu.dtype.itemsize) ldb = max(1, b_gpu.strides[0] // b_gpu.dtype.itemsize) ldc = max(1, c_gpu.strides[0] // c_gpu.dtype.itemsize) alpha = np.float32(1.0) beta = np.float32(0.0) transa = transb = 'n' cublas_dot(handle, transb, transa, m, n, k, alpha, b_gpu.gpudata, ldb, a_gpu.gpudata, lda, beta, c_gpu.gpudata, ldc) 
return c_gpu "TODO: Fix this function, like linalg.inv" def cublas_single_matrix_inversion_non_batched(h, a_gpu, overwrite=False, ipiv_gpu=None): (cublas_getrf, bufsize, cublas_getrs) = get_single_inverse_function(a_gpu) data_type = a_gpu.dtype n = a_gpu.shape[0] if ipiv_gpu is None: ipiv_gpu = gpuarray.empty((n, 1), np.int32) try: in_gpu = a_gpu if overwrite else a_gpu.copy() Lwork = bufsize(h, n, n, in_gpu.gpudata, n) Work = gpuarray.empty(Lwork, data_type) devInfo = gpuarray.empty(1, np.int32) cublas_getrf(h, n, n, in_gpu.gpudata, n, Work.gpudata, ipiv_gpu.gpudata, devInfo.gpudata) except cusolver.CUSOLVER_ERROR as e: raise ValueError("Error while generating inverse of the matrix") d = devInfo.get()[0] if d != 0: raise ValueError("Singular matrix or wrong params") try: b_gpu = linalg.eye(n, data_type) cublas_getrs(h, cublas._CUBLAS_OP['n'], n, n, in_gpu.gpudata, n, ipiv_gpu.gpudata, b_gpu.gpudata, n, devInfo.gpudata) # Since CUSOLVER's getrs functions save their output in b_gpu, we # need to copy it back to the input matrix if overwrite is requested: if overwrite: a_gpu.set(b_gpu) return a_gpu else: return b_gpu except cusolver.CUSOLVER_ERROR as e: raise "Error with cusolver {}".format(e.message) return h def calculate_regression_coeffs_non_batched(handle, x_gpu, y_gpu): xt_gpu = cublas_calculate_transpose_non_batched(handle, x_gpu) xtx_gpu = cublas_matrix_product_gemm_non_batched(handle, xt_gpu, x_gpu) xty_gpu = cublas_matrix_product_gemm_non_batched(handle, xt_gpu, y_gpu) # xtx_inv_gpu = cublas_single_matrix_inversion(handle, xtx_gpu) xtx_inv_gpu = linalg.inv(xtx_gpu, lib="cusolver") b_coefficients = cublas_matrix_product_gemm_non_batched(handle, xtx_inv_gpu, xty_gpu) return b_coefficients def calculate_predictions_from_model_non_batched(handle, x_gpu, b_coefficients_gpu): return cublas_matrix_product_gemm_non_batched(handle, x_gpu, b_coefficients_gpu)
41.663462
120
0.686591
710
4,333
3.930986
0.209859
0.038696
0.045862
0.049445
0.460408
0.398424
0.393049
0.331781
0.291652
0.291652
0
0.014252
0.206554
4,333
104
121
41.663462
0.797557
0.103162
0
0.320988
0
0
0.058396
0
0
0
0
0
0
1
0.074074
false
0
0.024691
0.012346
0.197531
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dd7404e8264756d1a9d92df88241f2bdb03e559
793
py
Python
tools/run/mrcnnalt.py
MartinPlantinga/TomatoNet
52f3f993665865d1e74b24c43bf4a722c470eac1
[ "BSD-2-Clause" ]
1
2022-03-13T23:52:22.000Z
2022-03-13T23:52:22.000Z
tools/run/mrcnnalt.py
MartinPlantinga/TomatoNet
52f3f993665865d1e74b24c43bf4a722c470eac1
[ "BSD-2-Clause" ]
null
null
null
tools/run/mrcnnalt.py
MartinPlantinga/TomatoNet
52f3f993665865d1e74b24c43bf4a722c470eac1
[ "BSD-2-Clause" ]
null
null
null
import os from time import localtime, strftime pwd = os.curdir root_dir = pwd + './../' weights_path = '{}data/imagenet_models/VGG16.v2.caffemodel'.format(root_dir) cfg_path = '{}experiments/cfgs/mask_rcnn_alt_opt.yml'.format(root_dir) log_file="{}experiments/logs/mask_rcnn_alt_opt_{}".format(root_dir, strftime("%d-%m-%Y_%H_%M", localtime())) #print log_file exec_log_file = "exec &> >(tee -a \"{}\")".format(log_file) #echo Logging output to "$LOG" #os.system(exec &> >(tee -a "$LOG") exec_python = "python ../train_mask_rcnn_alt_opt.py --gpu 0 --net_name 'VGG16' --weights {} --imdb 'voc_2012_train' --cfg {}".format(weights_path, cfg_path) exec_all = "'/bin/bash -c {}' ; {}".format(exec_log_file, exec_python) #os.system(exec_all) print exec_all os.system(exec_all)
41.736842
158
0.696091
124
793
4.145161
0.459677
0.068093
0.075875
0.081712
0
0
0
0
0
0
0
0.014245
0.114754
793
18
159
44.055556
0.717949
0.121059
0
0
0
0.083333
0.42963
0.222222
0
0
0
0
0
0
null
null
0
0.166667
null
null
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9dd7a2e49e2ed72a4a6612efc5a036e4272aa367
1,325
py
Python
toontown/ai/DistributedTrashcanZeroMgr.py
TheFamiliarScoot/open-toontown
678313033174ea7d08e5c2823bd7b473701ff547
[ "BSD-3-Clause" ]
99
2019-11-02T22:25:00.000Z
2022-02-03T03:48:00.000Z
toontown/ai/DistributedTrashcanZeroMgr.py
TheFamiliarScoot/open-toontown
678313033174ea7d08e5c2823bd7b473701ff547
[ "BSD-3-Clause" ]
42
2019-11-03T05:31:08.000Z
2022-03-16T22:50:32.000Z
toontown/ai/DistributedTrashcanZeroMgr.py
TheFamiliarScoot/open-toontown
678313033174ea7d08e5c2823bd7b473701ff547
[ "BSD-3-Clause" ]
57
2019-11-03T07:47:37.000Z
2022-03-22T00:41:49.000Z
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from toontown.ai import DistributedPhaseEventMgr


class DistributedTrashcanZeroMgr(DistributedPhaseEventMgr.DistributedPhaseEventMgr):
    """Distributed manager that mirrors trashcan-zero event state on the client.

    Forwards phase/running updates received from the server onto the local
    messenger so interested client code can react to them.
    """

    neverDisable = 1
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTrashcanZeroMgr')

    def __init__(self, cr):
        parent_cls = DistributedPhaseEventMgr.DistributedPhaseEventMgr
        parent_cls.__init__(self, cr)
        # Publish ourselves on the client repository for easy lookup.
        cr.trashcanZeroMgr = self

    def announceGenerate(self):
        """Broadcast the current running state once this object is generated."""
        parent_cls = DistributedPhaseEventMgr.DistributedPhaseEventMgr
        parent_cls.announceGenerate(self)
        messenger.send('trashcanZeroIsRunning', [self.isRunning])

    def delete(self):
        """Announce the event as stopped and unhook from the repository."""
        self.notify.debug('deleting trashcanzeromgr')
        messenger.send('trashcanZeroIsRunning', [False])
        parent_cls = DistributedPhaseEventMgr.DistributedPhaseEventMgr
        parent_cls.delete(self)
        if hasattr(self.cr, 'trashcanZeroMgr'):
            del self.cr.trashcanZeroMgr

    def setCurPhase(self, newPhase):
        """Accept a phase change from the server and rebroadcast it locally."""
        parent_cls = DistributedPhaseEventMgr.DistributedPhaseEventMgr
        parent_cls.setCurPhase(self, newPhase)
        messenger.send('trashcanZeroPhase', [newPhase])

    def setIsRunning(self, isRunning):
        """Accept a running-state change and rebroadcast it locally."""
        parent_cls = DistributedPhaseEventMgr.DistributedPhaseEventMgr
        parent_cls.setIsRunning(self, isRunning)
        messenger.send('trashcanZeroIsRunning', [isRunning])
42.741935
87
0.768302
101
1,325
10
0.366337
0.285149
0.10099
0
0
0
0
0
0
0
0
0.00089
0.151698
1,325
30
88
44.166667
0.897687
0
0
0
0
0
0.109434
0.06717
0
0
0
0
0
1
0.208333
false
0
0.125
0
0.458333
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
9dd862d583434b6ed73a9e6519551c5f6c54561e
1,575
py
Python
examples/run_fieldtrip_IF.py
annapasca/ephypype
6dbacdd6913234a28b690b401862ff062accecc7
[ "BSD-3-Clause" ]
18
2018-04-18T12:14:52.000Z
2022-02-25T19:31:44.000Z
examples/run_fieldtrip_IF.py
annapasca/ephypype
6dbacdd6913234a28b690b401862ff062accecc7
[ "BSD-3-Clause" ]
106
2017-12-09T13:34:30.000Z
2022-03-12T01:02:17.000Z
examples/run_fieldtrip_IF.py
annapasca/ephypype
6dbacdd6913234a28b690b401862ff062accecc7
[ "BSD-3-Clause" ]
13
2017-05-28T20:38:56.000Z
2022-03-06T15:58:02.000Z
""" .. _ft_seeg_example: ========================================= Apply bipolar montage to depth electrodes ========================================= This scripts shows a very simple example on how to create an Interface wrapping a desired function of a Matlab toolbox (|FieldTrip|). .. |FieldTrip| raw:: html <a href="http://www.fieldtriptoolbox.org/" target="_blank">FieldTrip</a> The **input** data should be a **.mat** file containing a FieldTrip data struct """ # Authors: Annalisa Pascarella <a.pascarella@iac.cnr.it> # License: BSD (3-clause) import os.path as op import ephypype from ephypype.nodes.FT_tools import Reference from ephypype.datasets import fetch_ieeg_dataset ############################################################################### # Let us fetch the data first. It is around 675 MB download. base_path = op.join(op.dirname(ephypype.__file__), '..', 'examples') data_path = fetch_ieeg_dataset(base_path) ft_path = '/usr/local/MATLAB/R2018a/toolbox/MEEG/fieldtrip-20200327/' refmethod = 'bipolar' channels_name = '{\'RAM*\', \'RHH*\', \'RTH*\', \'ROC*\', \'LAM*\',\'LHH*\', \'LTH*\'}' # noqa # Now we call the interface Reference to apply a bipolar montage to sEEG data reference_if = Reference() reference_if.inputs.data_file = op.join(data_path, 'SubjectUCI29_data.mat') reference_if.inputs.channels = channels_name reference_if.inputs.ft_path = ft_path reference_if.inputs.refmethod = refmethod reference_if.inputs.script = '' out = reference_if.run() print('Rereferenced data saved at {}'.format(out.outputs.data_output))
32.8125
95
0.665397
205
1,575
4.956098
0.55122
0.075787
0.083661
0
0
0
0
0
0
0
0
0.012866
0.111746
1,575
47
96
33.510638
0.713367
0.43619
0
0
0
0
0.180678
0.097867
0
0
0
0
0
1
0
false
0
0.235294
0
0.235294
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9dd8b07faafc812e62e163fe5ae0d1616164fd3e
2,224
py
Python
tree.py
korbi98/TicTacToeGo_Zero
b8ea4562f3ddf914a53fc380f2266f13ab887e04
[ "MIT" ]
null
null
null
tree.py
korbi98/TicTacToeGo_Zero
b8ea4562f3ddf914a53fc380f2266f13ab887e04
[ "MIT" ]
null
null
null
tree.py
korbi98/TicTacToeGo_Zero
b8ea4562f3ddf914a53fc380f2266f13ab887e04
[ "MIT" ]
1
2021-12-20T12:03:49.000Z
2021-12-20T12:03:49.000Z
# Simple tree structure
import numpy as np
import math


class Node:
    '''
    One node of the game tree.

    A node knows its parent, its position on the board, the player to
    move, and bookkeeping for Monte-Carlo search: accumulated reward,
    visit count and the list of child nodes.
    '''

    def __init__(self, parent, boardposition, current_player):
        self.parent = parent
        self.boardposition = boardposition
        self.current_player = current_player
        self.reward = 0
        self.visits = 0
        self.children = []

    def add_child(self, boardposition):
        '''Append a child node for the given board position.'''
        # A child belongs to the opposing player (1 <-> 2).
        self.children.append(Node(self, boardposition, 3 - self.current_player))

    def isExpanded(self):
        '''True once this node has children and all of them were visited.'''
        return self.children and all(c.visits > 0 for c in self.children)

    def getPossibleChildren(self, game_state):
        '''Create one child per empty square; used on the first visit.'''
        for square, cell in enumerate(np.array(game_state).flatten()):
            if cell == 0:
                self.add_child(square)

    def update(self, result):
        '''Fold a playout result into this node's visits and reward.'''
        self.visits += 1
        if not result:
            # A tie (falsy result) contributes no reward.
            return
        # +1 if this node's player won the playout, -1 if it lost.
        self.reward += 1 if self.current_player == result else -1

    def UTC_traverse(self, root):
        '''Return the child maximising the UCT score.'''
        return max(self.children, key=lambda node: node.UCT(root))

    def UCT(self, root):
        '''Upper-confidence-bound score of this node relative to the root.'''
        if self.visits == 0:
            return 0
        exploitation = self.reward / self.visits
        exploration = math.sqrt(math.log(root.visits) / self.visits)
        return exploitation + exploration

    def print(self, root):
        print("Position ", self.boardposition, ", Player ", self.current_player,
              ", Reward ", self.reward, ", Visits ", self.visits,
              ", UTC ", round(self.UCT(root), 3), ", Childcount ", len(self.children))
35.870968
87
0.616007
281
2,224
4.814947
0.338078
0.05765
0.050259
0.026608
0.03252
0
0
0
0
0
0
0.008794
0.284173
2,224
62
88
35.870968
0.84108
0.240108
0
0
0
0
0.03366
0
0
0
0
0
0
1
0.205128
false
0
0.051282
0
0.358974
0.051282
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
9dd8bbfb2717a06b4b3ec45eb064716d069fb7b0
269
py
Python
vibrant_frequencies/cli.py
garstka/vibrant-frequencies
e237bf97089c87ca3e9335ba0d2abd09756b98fc
[ "MIT" ]
2
2019-01-31T15:13:37.000Z
2020-11-19T03:24:12.000Z
vibrant_frequencies/cli.py
garstka/vibrant-frequencies
e237bf97089c87ca3e9335ba0d2abd09756b98fc
[ "MIT" ]
null
null
null
vibrant_frequencies/cli.py
garstka/vibrant-frequencies
e237bf97089c87ca3e9335ba0d2abd09756b98fc
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

"""Console script for vibrant_frequencies."""
import logging

import click

from .prototype import visualize


@click.command()
def main():
    """Run the visualizer with chatty log output suppressed."""
    # Quiet the root logger so only warnings and errors reach the console.
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.WARN)
    visualize()


if __name__ == "__main__":
    main()
14.944444
48
0.67658
30
269
5.766667
0.733333
0
0
0
0
0
0
0
0
0
0
0.004464
0.167286
269
17
49
15.823529
0.767857
0.230483
0
0
0
0
0.039801
0
0
0
0
0
0
1
0.111111
true
0
0.333333
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
2
9dda3faed30d9ee945694fcad8f057ec177bc507
6,568
py
Python
rak_net/protocol/handler.py
L0RD-ZER0/aio-rak-net
0ec0b6ac4daf6a4b146ac94ac2d0313c13975363
[ "MIT" ]
1
2021-12-02T04:37:08.000Z
2021-12-02T04:37:08.000Z
rak_net/protocol/handler.py
L0RD-ZER0/aio-rak-net
0ec0b6ac4daf6a4b146ac94ac2d0313c13975363
[ "MIT" ]
null
null
null
rak_net/protocol/handler.py
L0RD-ZER0/aio-rak-net
0ec0b6ac4daf6a4b146ac94ac2d0313c13975363
[ "MIT" ]
null
null
null
from __future__ import annotations
from typing import TYPE_CHECKING
from .packet import (
    ConnectionRequest,
    ConnectionRequestAccepted,
    NewIncomingConnection,
    OfflinePing,
    OfflinePong,
    OnlinePing,
    OnlinePong,
    OpenConnectionRequest1,
    OpenConnectionReply1,
    OpenConnectionRequest2,
    OpenConnectionReply2,
    IncompatibleProtocolVersion,
)
from .protocol_info import ProtocolInfo
from ..utils import InternetAddress
if TYPE_CHECKING:
    from ..server import Server

__all__ = 'Handler',


class Handler:
    """
    Collection of handler methods, one per inbound RakNet packet type.

    Each handler decodes the raw payload, builds the matching reply
    packet, and returns its encoded bytes.

    :param server: Server this handler operates on
    """

    __slots__ = 'server',

    def __init__(self, server: Server):
        self.server = server

    async def handle_connection_request(self, data: bytes, address: InternetAddress, *, server: Server = None) -> bytes:
        """
        Build the reply to a `Connection-Request`

        :param data: raw payload of the packet
        :param address: :class:`InternetAddress` the packet came from
        :param server: optional server override, defaults to ``self.server``
        :return: encoded `Connection-Request-Accepted` payload
        """
        server = server or self.server
        incoming: ConnectionRequest = ConnectionRequest(data)
        incoming.decode()
        reply: ConnectionRequestAccepted = ConnectionRequestAccepted()
        reply.system_index = 0
        reply.client_address = address
        reply.server_guid = server.guid
        reply.request_timestamp = server.get_time_ms()
        # Placeholder system addresses, as RakNet expects a fixed-size list.
        reply.system_addresses = [InternetAddress("255.255.255.255", 19132)] * 20
        reply.encode()
        return reply.data

    async def handle_connection_request_accepted(self, data: bytes, address: InternetAddress, *, server: Server = None) -> bytes:
        """
        Build the reply to a `Connection-Request-Accepted`

        :param data: raw payload of the packet
        :param address: :class:`InternetAddress` the packet came from
        :param server: optional server override, defaults to ``self.server``
        :return: encoded `New-Incoming-Connection` payload
        """
        server = server or self.server
        incoming: ConnectionRequestAccepted = ConnectionRequestAccepted(data)
        incoming.decode()
        reply: NewIncomingConnection = NewIncomingConnection()
        reply.server_address = address
        reply.system_addresses = incoming.system_addresses
        reply.request_timestamp = incoming.accepted_timestamp
        reply.accepted_timestamp = server.get_time_ms()
        reply.encode()
        return reply.data

    async def handle_offline_ping(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
        """
        Build the reply to an `Offline-Ping`

        :param data: raw payload of the packet
        :param address: :class:`InternetAddress` the packet came from
        :param server: optional server override, defaults to ``self.server``
        :return: encoded `Offline-Pong` payload
        """
        server = server or self.server
        incoming: OfflinePing = OfflinePing(data)
        incoming.decode()
        reply: OfflinePong = OfflinePong()
        reply.magic = ProtocolInfo.MAGIC
        reply.client_timestamp = incoming.client_timestamp
        reply.server_guid = server.guid
        # A server without a configured name advertises an empty string.
        reply.server_name = server.name if hasattr(server, "name") else ""
        reply.encode()
        return reply.data

    async def handle_online_ping(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
        """
        Build the reply to an `Online-Ping`

        :param data: raw payload of the packet
        :param address: :class:`InternetAddress` the packet came from
        :param server: optional server override, defaults to ``self.server``
        :return: encoded `Online-Pong` payload
        """
        server = server or self.server
        incoming: OnlinePing = OnlinePing(data)
        incoming.decode()
        reply: OnlinePong = OnlinePong()
        reply.client_timestamp = incoming.client_timestamp
        reply.server_timestamp = server.get_time_ms()
        reply.encode()
        return reply.data

    async def handle_open_connection_request_1(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
        """
        Build the reply to an `Open-Connection-Request-1`

        :param data: raw payload of the packet
        :param address: :class:`InternetAddress` the packet came from
        :param server: optional server override, defaults to ``self.server``
        :return: encoded reply payload (`Open-Connection-Reply-1` or
            `Incompatible-Protocol-Version` on a version mismatch)
        """
        server = server or self.server
        incoming: OpenConnectionRequest1 = OpenConnectionRequest1(data)
        incoming.decode()
        if incoming.protocol_version == server.protocol_version:
            reply: OpenConnectionReply1 = OpenConnectionReply1()
            reply.magic = ProtocolInfo.MAGIC
            reply.server_guid = server.guid
            reply.use_security = False
            reply.mtu_size = incoming.mtu_size
        else:
            # Protocol mismatch: tell the client which version we speak.
            reply: IncompatibleProtocolVersion = IncompatibleProtocolVersion()
            reply.protocol_version = server.protocol_version
            reply.magic = ProtocolInfo.MAGIC
            reply.server_guid = server.guid
        reply.encode()
        return reply.data

    async def handle_open_connection_request_2(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
        """
        Build the reply to an `Open-Connection-Request-2` and register
        the new connection with the server.

        :param data: raw payload of the packet
        :param address: :class:`InternetAddress` the packet came from
        :param server: optional server override, defaults to ``self.server``
        :return: encoded `Open-Connection-Reply-2` payload
        """
        server = server or self.server
        incoming: OpenConnectionRequest2 = OpenConnectionRequest2(data)
        incoming.decode()
        reply: OpenConnectionReply2 = OpenConnectionReply2()
        reply.magic = ProtocolInfo.MAGIC
        reply.server_guid = server.guid
        reply.client_address = address
        reply.mtu_size = incoming.mtu_size
        reply.use_encryption = False
        reply.encode()
        await server.add_connection(address, incoming.mtu_size)
        return reply.data
40.294479
134
0.676309
711
6,568
6.075949
0.136428
0.095833
0.030556
0.044444
0.664583
0.615509
0.615509
0.588657
0.544444
0.544444
0
0.007262
0.24528
6,568
162
135
40.54321
0.864232
0.0169
0
0.397959
0
0
0.006943
0
0
0
0
0
0
1
0.010204
false
0
0.061224
0
0.153061
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0