Dataset schema (113 columns; in the flattened dump each dtype had slipped onto the following column's line, realigned here):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Between alphanum_fraction and effective sit 82 quality-signal columns: each of the 41 signals appears once with a `_quality_signal` suffix and once as a bare duplicate without it. All `*_quality_signal` columns are float64 except qsc_code_num_words_quality_signal (int64) and qsc_codepython_cate_var_zero_quality_signal (bool); all bare duplicates are int64 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null). The signals are:

qsc_code_: num_words, num_chars, mean_word_length, frac_words_unique, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, frac_chars_dupe_5grams through frac_chars_dupe_10grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert
qsc_codepython_: cate_ast, frac_lines_func_ratio, cate_var_zero, frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print
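Several of the simpler statistics can be recomputed straight from a row's `content` string. The following is a minimal sketch under stated assumptions: the exact tokenizer, normalization, and thresholds of the original pipeline are not documented in this dump, so these are approximations of the like-named columns, not the pipeline's code.

```python
# Approximate a few of the simple per-file statistics from a `content` string.
# Assumption: whitespace tokenization and raw character counts; the original
# pipeline's exact definitions may differ.

def basic_signals(content: str) -> dict:
    lines = content.splitlines()
    words = content.split()
    return {
        "size": len(content.encode("utf-8")),  # matches size / qsc_code_size_file_byte
        "avg_line_length": sum(len(l) for l in lines) / max(len(lines), 1),
        "max_line_length": max((len(l) for l in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
        "qsc_code_num_words": len(words),
        "qsc_code_num_chars": len(content),
        "qsc_code_mean_word_length": sum(len(w) for w in words) / max(len(words), 1),
        "qsc_code_frac_words_unique": len(set(words)) / max(len(words), 1),
        "qsc_code_frac_chars_whitespace": sum(c.isspace() for c in content) / max(len(content), 1),
        "qsc_code_num_lines": len(lines),
    }
```

Feeding any of the record contents below through this and comparing against the stored values is a quick sanity check on the column mapping.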
hexsha: f5301087690900f18790595cf080153f91b40dd0 | size: 954 | ext: py | lang: Python
max_stars: motivation_quote/app.py in lukas-weiss/motivation-quote @ 90c73342a71f6a8f8b5339b5d080d19ac67083b7 | licenses: ["MIT"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
```python
import json
import os.path
import logging
import csv
from random import randint

logger = logging.getLogger()
logger.setLevel(logging.INFO)


def get_quote(file):
    if os.path.exists(file):
        with open(file) as csvfile:
            quotes = list(csv.reader(csvfile, delimiter=';'))
            max_quotes = len(quotes) - 1
            rand_quotes_idx = randint(0, max_quotes)
            logger.debug(quotes[rand_quotes_idx])
            return quotes[rand_quotes_idx]
    else:
        logger.info(file + " not found")


def lambda_handler(event, context):
    # logger.debug(context.aws_request_id)
    quote_entry = get_quote("quotes.csv")
    logger.debug(quote_entry)
    quote = ""
    author = ""
    if quote_entry is not None:
        quote = quote_entry[0]
        author = quote_entry[1]
    return {
        "statusCode": 200,
        "body": json.dumps({
            "quote": quote,
            "author": author
        }),
    }
```
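A quick local invocation sketch, not part of the file: it assumes a `quotes.csv` next to the script with `quote;author` rows, which matches how `get_quote` indexes the parsed CSV.

```python
# Hypothetical local test harness for lambda_handler; quotes.csv is assumed
# to contain semicolon-separated rows like: Stay hungry.;Steve Jobs
with open("quotes.csv", "w") as f:
    f.write("Stay hungry.;Steve Jobs\n")

print(lambda_handler(event={}, context=None))
# -> {'statusCode': 200, 'body': '{"quote": "Stay hungry.", "author": "Steve Jobs"}'}
```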
avg_line_length: 25.105263 | max_line_length: 61 | alphanum_fraction: 0.603774
qsc_code_*_quality_signal: num_words 116 | num_chars 954 | mean_word_length 4.810345 | frac_words_unique 0.456897 | frac_chars_top_2grams 0.089606 | top_3grams 0.069892 | top_4grams 0.0681 | frac_chars_dupe_5grams through dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.010264 | frac_chars_whitespace 0.285115 | size_file_byte 954 | num_lines 37 | num_chars_line_max 62 | num_chars_line_mean 25.783784 | frac_chars_alphabet 0.807918 | frac_chars_comments 0.037736 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.050218 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.0625 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.15625 | frac_lines_simplefunc 0 | score_lines_no_logic 0.28125 | frac_lines_print 0
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f531e1bea64fba94ad609a7c42aeb9cf4d1498ca | size: 3,142 | ext: py | lang: Python
max_stars: tools/extract_textline.py in bitcoder-17/scale-digits-recognition @ b75c658ffdc830784ae4be9c007909e4c8f1d695 | licenses: ["MIT"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
```python
from pathlib import Path
import cv2
import json
import math
import numpy as np
from argparse import ArgumentParser


def distance(p1, p2):
    return math.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)


def order_points(points):
    pts = {}
    for x1, y1 in points:
        count_x_larger = 0
        count_x_smaller = 0
        count_y_larger = 0
        count_y_smaller = 0
        for x2, y2 in points:
            if x1 > x2:
                count_x_larger += 1
            elif x1 < x2:
                count_x_smaller += 1
            if y1 > y2:
                count_y_larger += 1
            elif y1 < y2:
                count_y_smaller += 1
        p = (x1, y1)
        if count_x_larger >= 2 and count_y_larger >= 2:
            pts['br'] = p
        elif count_x_smaller >= 2 and count_y_larger >= 2:
            pts['bl'] = p
        elif count_y_smaller >= 2 and count_x_smaller >= 2:
            pts['tl'] = p
        else:
            pts['tr'] = p
    return [pts['tl'], pts['tr'], pts['br'], pts['bl']]


def get_padding_box(points, x_factor, y_factor):
    tl, tr, br, bl = points
    width = int(np.round(max([distance(tl, tr), distance(bl, br)])))
    height = int(np.round(max([distance(tl, bl), distance(tr, br)])))
    padding_x = x_factor * width
    padding_y = y_factor * height
    points2 = [
        [tl[0] - padding_x, tl[1] - padding_y],
        [tr[0] + padding_x, tr[1] - padding_y],
        [br[0] + padding_x, br[1] + padding_y],
        [bl[0] - padding_x, bl[1] + padding_y],
    ]
    return points2


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('input_dir', type=str,
                        help='Directory where the frame images and the JSON labels are')
    parser.add_argument('output_dir', type=str,
                        help='Directory the textlines will be extracted to')
    parser.add_argument('--ext', type=str, default='png')
    args = parser.parse_args()

    input_dir = Path(args.input_dir)
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    jsons = list(input_dir.glob('*.json'))
    json_path: Path
    for json_path in jsons:
        label_dict = json.load(open(json_path, 'rt'))
        if len(label_dict['shapes']) == 0:
            continue
        frame = cv2.imread(str(json_path.with_suffix(f'.{args.ext}')))
        for i, shape in enumerate(label_dict['shapes']):
            points = order_points(shape['points'])
            tl, tr, br, bl = points
            width = int(np.round(max([distance(tl, tr), distance(bl, br)])))
            height = int(np.round(max([distance(tl, bl), distance(tr, br)])))
            dst = np.array([[0, 0],
                            [width - 1, 0],
                            [width - 1, height - 1],
                            [0, height - 1]], dtype=np.float32)
            M = cv2.getPerspectiveTransform(np.array(points, dtype=np.float32), dst)
            warp = cv2.warpPerspective(frame, M, (width, height))
            output_path = output_dir.joinpath(json_path.stem + f'.{args.ext}')
            cv2.imwrite(str(output_path), warp)
```
avg_line_length: 33.073684 | max_line_length: 85 | alphanum_fraction: 0.54965
qsc_code_*_quality_signal: num_words 433 | num_chars 3,142 | mean_word_length 3.806005 | frac_words_unique 0.263279 | frac_chars_top_2grams 0.025485 | top_3grams 0.031553 | top_4grams 0.031553 | frac_chars_dupe_5grams 0.182039 | dupe_6grams 0.182039 | dupe_7grams 0.182039 | dupe_8grams 0.120146 | dupe_9grams 0.120146 | dupe_10grams 0.120146 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.031496 | frac_chars_whitespace 0.312858 | size_file_byte 3,142 | num_lines 94 | num_chars_line_max 86 | num_chars_line_mean 33.425532 | frac_chars_alphabet 0.73182 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0.076923 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.06429 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.038462 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.076923 | frac_lines_simplefunc 0.012821 | score_lines_no_logic 0.153846 | frac_lines_print 0
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f536be230ab9f47d327f6fa5a8e54f230ab096d9 | size: 1,745 | ext: py | lang: Python
max_stars: chatServer/server.py in RobbeBryssinck/chatApplication @ 628ab6acb2b19d26d3e5c064cbea14747041f43e | licenses: ["MIT"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
```python
import socket
import sys
import os
import optparse
from threading import *


def createServer(ip, port):
    # create a TCP socket
    sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # bind the socket to the port
    server_address = (ip, port)
    print("starting up on {} port {}".format(*server_address))
    sck.bind(server_address)
    # put the socket into server mode
    sck.listen(5)
    return sck


def clientHandler(sck, conn, client, logs):
    # initialise user
    name = conn.recv(1024)  # was recv(2014) in the original, an apparent typo
    # receive data
    while True:
        try:
            data = conn.recv(1024)
            message = name.decode() + ': ' + data.decode() + '\n'
            print(message)
            updateClients(message)
            logs.write(message)
        except:
            message = name.decode() + " closed the connection.\n"
            logs.write(message)
            print(message)
            break
    conn.close()


def updateClients(message):
    for client in clients:
        client.send(bytes(message, 'ASCII'))


def main():
    # option to set port when launching the server
    # add_help_option=False frees up -h, which optparse otherwise reserves for help
    parser = optparse.OptionParser("Usage: python3 server.py -h <server ip> -p <server port>",
                                   add_help_option=False)
    parser.add_option('-p', dest='port', type='int', help="specify server port")
    parser.add_option('-h', dest='ip', type='string', help="specify server ip")
    (options, args) = parser.parse_args()
    port = options.port
    ip = options.ip
    if port == None:
        print(parser.usage)
        exit(0)
    logs = open('./logs.txt', 'a+')
    sck = createServer(ip, port)
    while True:
        # wait for connection
        conn, client = sck.accept()
        clients.append(conn)
        # log connection
        message = client[0] + " connected.\n"
        print(message)
        logs.write(message)
        # start thread
        t = Thread(target=clientHandler, args=(sck, conn, client, logs))
        t.start()


clients = []

if __name__ == '__main__':
    main()
```
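A minimal client sketch for this protocol, based on an assumption read out of `clientHandler`: the first payload a client sends is taken as its name, and every later payload is a chat line broadcast to all connected clients (including the sender).

```python
# Hypothetical test client; host/port match a server started with
#   python3 server.py -h 127.0.0.1 -p 9999
import socket
import time

c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect(("127.0.0.1", 9999))
c.send(b"alice")                # first message registers the name
time.sleep(0.1)                 # avoid TCP coalescing name and message into one recv
c.send(b"hello everyone")       # subsequent messages are broadcast
print(c.recv(1024).decode())    # prints 'alice: hello everyone'
c.close()
```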
avg_line_length: 19.606742 | max_line_length: 91 | alphanum_fraction: 0.676218
qsc_code_*_quality_signal: num_words 240 | num_chars 1,745 | mean_word_length 4.85 | frac_words_unique 0.429167 | frac_chars_top_2grams 0.015464 | top_3grams 0.041237 | top_4grams 0.02921 | frac_chars_dupe_5grams 0.042955 | dupe_6grams through dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.008415 | frac_chars_whitespace 0.182808 | size_file_byte 1,745 | num_lines 88 | num_chars_line_max 92 | num_chars_line_mean 19.829545 | frac_chars_alphabet 0.807854 | frac_chars_comments 0.115186 | cate_xml_start 0 | frac_lines_dupe_lines 0.150943 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.132334 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.075472 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.09434 | frac_lines_simplefunc 0 | score_lines_no_logic 0.188679 | frac_lines_print 0.09434
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f537b763bb0939c0d65ba5d32dd7d3fcdadbcca3 | size: 1,502 | ext: py | lang: Python
max_stars: tests/test_utils_bytes.py in cwichel/embutils @ 188d86d84637088bafef188b3312078048934113 | licenses: ["MIT"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
```python
#!/usr/bin/python
# -*- coding: ascii -*-
"""
Byte utilities testing.

:date: 2021
:author: Christian Wiche
:contact: cwichel@gmail.com
:license: The MIT License (MIT)
"""

import unittest

from embutils.utils import bitmask, reverse_bits, reverse_bytes


# -->> Definitions <<------------------


# -->> Test API <<---------------------
class TestBytes(unittest.TestCase):
    """
    Test byte utilities.
    """
    def test_01_bitmask(self):
        """
        Test bitmask generation.
        """
        # Test bitmask fill
        mask = bitmask(bit=7, fill=True)
        assert mask == 0b11111111

        # Test bitmask
        mask = bitmask(bit=7)
        assert mask == 0b10000000

    def test_02_reverse_bits(self):
        """
        Test bit reverse functionality.
        """
        # Test using fixed size
        rev_bits = reverse_bits(value=0b00101011, size=8)
        assert rev_bits == 0b11010100

        # Test using minimum size
        rev_bits = reverse_bits(value=0b00101011)
        assert rev_bits == 0b110101

    def test_03_reverse_bytes(self):
        """
        Test byte reverse functionality.
        """
        # Test using fixed size
        rev_bytes = reverse_bytes(value=0x00020304, size=4)
        assert rev_bytes == 0x04030200

        # Test using minimum size
        rev_bytes = reverse_bytes(value=0x00020304)
        assert rev_bytes == 0x040302


# -->> Test Execution <<---------------
if __name__ == '__main__':
    unittest.main()
```
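The `embutils.utils` implementations themselves are not part of this record. The following reference implementations are inferred only from the values the tests assert, a sketch rather than the library's actual code, and they satisfy those exact assertions:

```python
# Reference behaviour implied by the asserts above -- NOT the embutils source.

def bitmask(bit: int, fill: bool = False) -> int:
    """All ones up to and including `bit` if fill=True, else only `bit` set."""
    return (1 << (bit + 1)) - 1 if fill else 1 << bit

def reverse_bits(value: int, size: int = None) -> int:
    """Reverse bit order; width defaults to the minimum number of bits."""
    size = value.bit_length() if size is None else size
    return int(format(value, f'0{size}b')[::-1], 2)

def reverse_bytes(value: int, size: int = None) -> int:
    """Reverse byte order; width defaults to the minimum number of bytes."""
    size = max((value.bit_length() + 7) // 8, 1) if size is None else size
    return int.from_bytes(value.to_bytes(size, 'big'), 'little')

assert bitmask(bit=7, fill=True) == 0b11111111
assert bitmask(bit=7) == 0b10000000
assert reverse_bits(value=0b00101011, size=8) == 0b11010100
assert reverse_bits(value=0b00101011) == 0b110101
assert reverse_bytes(value=0x00020304, size=4) == 0x04030200
assert reverse_bytes(value=0x00020304) == 0x040302
```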
avg_line_length: 23.107692 | max_line_length: 63 | alphanum_fraction: 0.581891
qsc_code_*_quality_signal: num_words 159 | num_chars 1,502 | mean_word_length 5.308176 | frac_words_unique 0.427673 | frac_chars_top_2grams 0.052133 | top_3grams 0.033175 | top_4grams 0.035545 | frac_chars_dupe_5grams 0.298578 | dupe_6grams 0.260664 | dupe_7grams 0.260664 | dupe_8grams through dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.092593 | frac_chars_whitespace 0.280959 | size_file_byte 1,502 | num_lines 64 | num_chars_line_max 64 | num_chars_line_mean 23.46875 | frac_chars_alphabet 0.688889 | frac_chars_comments 0.345539 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.009081 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0.043133 | frac_lines_prompt_comments 0 | frac_lines_assert 0.3
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.15 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.1 | frac_lines_simplefunc 0 | score_lines_no_logic 0.3 | frac_lines_print 0
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f53d0274845ff18a273019ee23bb400432511d7c | size: 588 | ext: py | lang: Python
max_stars: utils/tool.py in yongleex/SBCC @ 40f8e67e446fc14fc82ea87f82ee841d62520c71 | licenses: ["MIT"] | count: 4 | events: 2021-09-04T04:02:57.000Z to 2021-12-27T13:27:26.000Z
max_issues: same path/repo/head/licenses | count: 1 | events: 2021-09-10T07:40:36.000Z to 2022-01-02T06:23:12.000Z
max_forks: same path/repo/head/licenses | count: 1 | events: 2021-09-10T07:36:29.000Z to 2021-09-10T07:36:29.000Z
content:
```python
import numpy as np
from scipy.ndimage import maximum_filter


class AttrDict(dict):
    __setattr__ = dict.__setitem__
    __getattr__ = dict.__getitem__


def signal2noise(r_map):
    """Compute the signal-to-noise ratio of correlation plane.
    w*h*c"""
    r = r_map.copy()
    max_r = maximum_filter(r_map, (5, 5, 1))
    ind = max_r > (r_map + 1e-3)
    r[ind] = 0.05
    r = np.reshape(r, (-1, r.shape[-1]))
    r = np.sort(r, axis=0)
    ratio = r[-1, :] / r[-2, :]
    return ratio


def main():
    r = np.random.randn(5, 5, 3)
    signal2noise(r)


if __name__ == '__main__':
    main()
```
avg_line_length: 18.375 | max_line_length: 63 | alphanum_fraction: 0.612245
qsc_code_*_quality_signal: num_words 95 | num_chars 588 | mean_word_length 3.452632 | frac_words_unique 0.547368 | frac_chars_top_2grams 0.04878 | top_3grams 0.030488 | top_4grams 0 | frac_chars_dupe_5grams through dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.039648 | frac_chars_whitespace 0.227891 | size_file_byte 588 | num_lines 31 | num_chars_line_max 64 | num_chars_line_mean 18.967742 | frac_chars_alphabet 0.682819 | frac_chars_comments 0.103742 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.015564 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.105263 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.105263 | frac_lines_simplefunc 0 | score_lines_no_logic 0.421053 | frac_lines_print 0
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f53f1078d0ccf6010a2d5acd1664c6d7881e41c8 | size: 8,584 | ext: py | lang: Python
max_stars: bjtunlp/train.py in bigbosskai/bjtunlp @ 58d8ca53fa1d99df2f47f10a0780619c4cdba22f | licenses: ["MIT"] | count: 1 | events: 2020-12-16T07:18:00.000Z to 2020-12-16T07:18:00.000Z
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: 1 | events: 2022-03-12T16:41:32.000Z to 2022-03-12T16:41:32.000Z
content:
```python
import os
import time
import argparse

from tqdm import tqdm
import torch
from torch import optim
from torch import nn
from fastNLP import BucketSampler
from fastNLP import logger
from fastNLP import DataSetIter
from fastNLP import Tester
from fastNLP import cache_results

from bjtunlp.models import BertParser
from bjtunlp.models.metrics import SegAppCharParseF1Metric, CWSPOSMetric, ParserMetric
from bjtunlp.modules.trianglelr import TriangleLR
from bjtunlp.modules.chart import save_table
from bjtunlp.modules.pipe import CTBxJointPipe
from bjtunlp.modules.word_batch import BatchSampler
from bjtunlp.modules.embedding import ElectraEmbedding


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', type=str,
                        help=r'Whether to use the first-order model or the second-order model, LOC represents the first-order model, CRF2 stands for the second-order model. default:LOC',
                        choices=['LOC', 'CRF2'],
                        default='LOC')
    parser.add_argument('--output', type=str,
                        help=r'The path where the output model is stored. default:./output',
                        default=r'output')
    parser.add_argument('--dataset', type=str,
                        help=r'The data set required for training the joint model, which must include the training set, test set and development set, and the data format is CoNLL format. default:./ctb7',
                        default=r'G:\真正联合\bjtunlp\data\ctb7')
    parser.add_argument('--pretraining', type=str,
                        help='Pre-trained language models Electra downloaded from huggingface. default:./discriminator',
                        default=r'H:\预训练语言模型\哈工大20G语料-Electra\base\discriminator')
    parser.add_argument('--epochs', type=int, help='Number of epochs to train the model. default:15', default=15)
    parser.add_argument('--lr', type=float, help='Learning rate setting. default:2e-5', default=2e-5)
    parser.add_argument('--batch_size', type=int, help='The number of words fed to the model at a time. default:1000',
                        default=1000)
    parser.add_argument('--clip', type=float, help='Value for gradient clipping nn.utils.clip_grad_value_. default:5.0',
                        default=5.0)
    parser.add_argument('--weight_decay', type=float, help='L2 regularization. default:1e-2',
                        default=1e-2)
    parser.add_argument('--device', type=int,
                        help='Whether to use GPU for training, 0 means cuda:0, -1 means cpu. default:0',
                        default=0)
    parser.add_argument('--dropout', type=float, help='dropout. default:0.5', default=0.5)
    parser.add_argument('--arc_mlp_size', type=int,
                        help='The hidden dimensions of predicting the dependency arc. default:500',
                        default=500)
    parser.add_argument('--label_mlp_size', type=int,
                        help='The hidden dimensions of predicting the dependency label. default:100',
                        default=300)
    args = parser.parse_args()
    print(args)

    context_path = os.getcwd()
    save_path = os.path.join(context_path, args.output)
    if not os.path.exists(context_path):
        os.makedirs(context_path)
    model_type = args.model_type
    data_name = args.dataset
    pretraining = args.pretraining
    epochs = args.epochs
    lr = args.lr  # 0.01~0.001
    batch_size = args.batch_size  # 1000
    clip = args.clip
    weight_decay = args.weight_decay
    device = torch.device("cuda:%d" % args.device if (torch.cuda.is_available()) else "cpu")
    dropout = args.dropout  # 0.3~0.6
    arc_mlp_size = args.arc_mlp_size  # 200, 300
    label_mlp_size = args.label_mlp_size

    logger.add_file(save_path + '/joint' + time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())) + '.log',
                    level='INFO')
    # save the hyperparameters to the log (translated from the original Chinese comment)
    logger.info(f'model_type:{model_type}')
    logger.info(f'data_name:{data_name}')
    logger.info(f'pretraining:{pretraining}')
    logger.info(f'epochs:{epochs}')
    logger.info(f'lr:{lr}')
    logger.info(f'batch_size:{batch_size}')
    logger.info(f'clip:{clip}')
    logger.info(f'weight_decay:{weight_decay}')
    logger.info(f'device:{device}')
    logger.info(f'dropout:{dropout}')
    logger.info(f'arc_mlp_size:{arc_mlp_size}')
    logger.info(f'label_mlp_size:{label_mlp_size}')

    cache_name = os.path.split(data_name)[-1]

    @cache_results(save_path + '/caches/{}.pkl'.format(cache_name), _refresh=False)
    def get_data(data_name, pretraining):
        data, special_root = CTBxJointPipe().process_from_file(data_name)
        data.delete_field('bigrams')
        data.delete_field('trigrams')
        data.delete_field('chars')
        data.rename_field('pre_chars', 'chars')
        data.delete_field('pre_bigrams')
        data.delete_field('pre_trigrams')
        embed = ElectraEmbedding(data.get_vocab('chars'), pretraining)
        return data, embed, special_root

    data, embed, special_root = get_data(data_name, pretraining)
    print(data)
    model = BertParser(embed=embed, char_label_vocab=data.get_vocab('char_labels'),
                       num_pos_label=len(data.get_vocab('char_pos')), arc_mlp_size=arc_mlp_size,
                       label_mlp_size=label_mlp_size, dropout=dropout,
                       model=model_type,
                       special_root=special_root,
                       use_greedy_infer=False,
                       )
    metric1 = SegAppCharParseF1Metric(data.get_vocab('char_labels'))
    metric2 = CWSPOSMetric(data.get_vocab('char_labels'), data.get_vocab('char_pos'))
    metric3 = ParserMetric(data.get_vocab('char_labels'))
    metrics = [metric1, metric2, metric3]
    optimizer = optim.AdamW([param for param in model.parameters() if param.requires_grad], lr=lr,
                            weight_decay=weight_decay)
    sampler = BucketSampler(batch_size=4, seq_len_field_name='seq_lens')
    train_batch = DataSetIter(batch_size=4, dataset=data.get_dataset('train'), sampler=sampler,
                              batch_sampler=BatchSampler(data.get_dataset('train'), batch_size, 'seq_lens'))
    scheduler = TriangleLR(optimizer, len(train_batch) * epochs, schedule='linear')

    best_score = 0.
    best_epoch = 0
    table = []
    model = model.to(device)
    for i in range(epochs):
        for batch_x, batch_y in tqdm(train_batch, desc='Epoch: %3d' % i):
            optimizer.zero_grad()
            if args.device >= 0:
                batch_x['chars'] = batch_x['chars'].to(device)
                batch_y['char_heads'] = batch_y['char_heads'].to(device)
                batch_y['char_labels'] = batch_y['char_labels'].to(device)
                batch_y['char_pos'] = batch_y['char_pos'].to(device)
                batch_y['sibs'] = batch_y['sibs'].to(device)
            output = model(batch_x['chars'], batch_y['char_heads'], batch_y['char_labels'],
                           batch_y['sibs'])
            loss = output['loss']
            loss.backward()
            nn.utils.clip_grad_value_(model.parameters(), clip)
            optimizer.step()
            scheduler.step()
        dev_tester = Tester(data.get_dataset('dev'), model, batch_size=8, metrics=metrics, device=device, verbose=0)
        dev_res = dev_tester.test()
        logger.info('Epoch:%3d Dev' % i + dev_tester._format_eval_results(dev_res))
        print('Epoch:%3d Dev' % i + dev_tester._format_eval_results(dev_res))
        test_tester = Tester(data.get_dataset('test'), model, batch_size=8, metrics=metrics, device=device, verbose=0)
        test_res = test_tester.test()
        logger.info('Epoch:%3d Test' % i + test_tester._format_eval_results(test_res))
        print('Epoch:%3d Test' % i + test_tester._format_eval_results(test_res))
        if dev_res['SegAppCharParseF1Metric']['u_f1'] > best_score:
            best_score = dev_res['SegAppCharParseF1Metric']['u_f1']
            best_epoch = i
            torch.save(model, save_path + '/joint.model')
        table.append([dev_res, test_res])

    print('best performance on test dataset related to the development set %d' % best_epoch)
    print('Save the model in this directory :%s' % save_path)
    logger.info('best performance on test dataset related to the development set %d' % best_epoch)
    logger.info('Save the model in this directory :%s' % save_path)
    logger.info(str(table[best_epoch]))
    save_table(table, save_path + '/results.csv')


if __name__ == '__main__':
    main()
```
avg_line_length: 48.224719 | max_line_length: 203 | alphanum_fraction: 0.65028
qsc_code_*_quality_signal: num_words 1,128 | num_chars 8,584 | mean_word_length 4.750887 | frac_words_unique 0.215426 | frac_chars_top_2grams 0.031722 | top_3grams 0.041239 | top_4grams 0.017914 | frac_chars_dupe_5grams 0.234372 | dupe_6grams 0.168688 | dupe_7grams 0.125023 | dupe_8grams 0.115693 | dupe_9grams 0.115693 | dupe_10grams 0.115693 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.015115 | frac_chars_whitespace 0.229264 | size_file_byte 8,584 | num_lines 177 | num_chars_line_max 204 | num_chars_line_mean 48.497175 | frac_chars_alphabet 0.794891 | frac_chars_comments 0.005009 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0.019231 | frac_chars_string_length 0.242882 | frac_chars_long_word_length 0.040187 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.012821 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.121795 | frac_lines_simplefunc 0 | score_lines_no_logic 0.141026 | frac_lines_print 0.038462
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f53f3f14419ce7e5f5fb052bfc8906e374ee8971 | size: 7,978 | ext: py | lang: Python
max_stars: archived/functions/sync_elasticache/redis/LR_sync_redis_model_reuse.py in DS3Lab/LambdaML @ 0afca7819e08632ba116fec8e102084e4040a47a | licenses: ["Apache-2.0"] | count: 23 | events: 2021-05-17T09:24:24.000Z to 2022-01-29T18:40:44.000Z
max_issues: same path/repo/head/licenses | count: 2 | events: 2021-05-17T16:15:12.000Z to 2021-07-20T09:11:22.000Z
max_forks: same path/repo/head/licenses | count: 3 | events: 2021-05-17T09:31:53.000Z to 2021-12-02T16:29:59.000Z
content:
```python
import time

import numpy as np  # added: np is used below but was not imported in the original
import torch
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler

from archived.elasticache import redis_init
# Note: hset_object, hget_object, hcounter, merge_w_b_grads, put_merged_w_b_grads
# and get_merged_w_b_grads are called below but never imported in this file;
# they presumably live elsewhere in the LambdaML package.
from archived.s3.get_object import get_object
from archived.old_model import LogisticRegression
from data_loader.libsvm_dataset import DenseDatasetWithLines

# lambda setting
redis_location = "test.fifamc.ng.0001.euc1.cache.amazonaws.com"
grad_bucket = "tmp-grads"
model_bucket = "tmp-updates"
local_dir = "/tmp"
w_prefix = "w_"
b_prefix = "b_"
w_grad_prefix = "w_grad_"
b_grad_prefix = "b_grad_"

# algorithm setting
learning_rate = 0.1
batch_size = 100
num_epochs = 2
validation_ratio = .2
shuffle_dataset = True
random_seed = 42

endpoint = redis_init(redis_location)


def handler(event, context):
    start_time = time.time()
    bucket = event['bucket']
    key = event['name']
    num_features = event['num_features']
    num_classes = event['num_classes']
    print('bucket = {}'.format(bucket))
    print('key = {}'.format(key))
    key_splits = key.split("_")
    worker_index = int(key_splits[0])
    num_worker = int(key_splits[1])

    # read file(dataset) from s3
    file = get_object(bucket, key).read().decode('utf-8').split("\n")
    print("read data cost {} s".format(time.time() - start_time))

    parse_start = time.time()
    dataset = DenseDatasetWithLines(file, num_features)
    preprocess_start = time.time()
    print("libsvm operation cost {}s".format(parse_start - preprocess_start))

    # Creating data indices for training and validation splits:
    dataset_size = len(dataset)
    print("dataset size = {}".format(dataset_size))
    indices = list(range(dataset_size))
    split = int(np.floor(validation_ratio * dataset_size))
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Creating PT data samplers and loaders:
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
    validation_loader = torch.utils.data.DataLoader(dataset,
                                                    batch_size=batch_size,
                                                    sampler=valid_sampler)
    print("preprocess data cost {} s".format(time.time() - preprocess_start))

    model = LogisticRegression(num_features, num_classes)

    # Loss and Optimizer
    # Softmax is internally computed.
    # Set parameters to be updated.
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    # Training the Model
    for epoch in range(num_epochs):
        for batch_index, (items, labels) in enumerate(train_loader):
            print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index))
            batch_start = time.time()
            items = Variable(items.view(-1, num_features))
            labels = Variable(labels)

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = model(items)
            loss = criterion(outputs, labels)
            loss.backward()
            print("forward and backward cost {} s".format(time.time() - batch_start))

            w_grad = model.linear.weight.grad.data.numpy()
            b_grad = model.linear.bias.grad.data.numpy()
            print("w_grad before merge = {}".format(w_grad[0][0:5]))
            print("b_grad before merge = {}".format(b_grad))

            # synchronization starts from that every worker writes their gradients of this batch and epoch
            sync_start = time.time()
            hset_object(endpoint, grad_bucket, w_grad_prefix + str(worker_index), w_grad.tobytes())
            hset_object(endpoint, grad_bucket, b_grad_prefix + str(worker_index), b_grad.tobytes())

            # merge gradients among files
            merge_start = time.time()
            file_postfix = "{}_{}".format(epoch, batch_index)
            if worker_index == 0:
                merge_start = time.time()
                w_grad_merge, b_grad_merge = \
                    merge_w_b_grads(endpoint,
                                    grad_bucket, num_worker, w_grad.dtype,
                                    w_grad.shape, b_grad.shape,
                                    w_grad_prefix, b_grad_prefix)
                print("model average time = {}".format(time.time() - merge_start))
                # possible rewrite the file before being accessed. wait until anyone finishes accessing.
                put_merged_w_b_grads(endpoint, model_bucket,
                                     w_grad_merge, b_grad_merge,
                                     w_grad_prefix, b_grad_prefix)
                hset_object(endpoint, model_bucket, "epoch", epoch)
                hset_object(endpoint, model_bucket, "index", batch_index)
                # delete_expired_w_b(endpoint,
                #                    model_bucket, epoch, batch_index, w_grad_prefix, b_grad_prefix)
                model.linear.weight.grad = Variable(torch.from_numpy(w_grad_merge))
                model.linear.bias.grad = Variable(torch.from_numpy(b_grad_merge))
            else:
                # wait for flag to access
                while hget_object(endpoint, model_bucket, "epoch") != None:
                    if int(hget_object(endpoint, model_bucket, "epoch")) == epoch \
                            and int(hget_object(endpoint, model_bucket, "index")) == batch_index:
                        break
                    time.sleep(0.01)
                w_grad_merge, b_grad_merge = get_merged_w_b_grads(endpoint, model_bucket,
                                                                  w_grad.dtype, w_grad.shape, b_grad.shape,
                                                                  w_grad_prefix, b_grad_prefix)
                hcounter(endpoint, model_bucket, "counter")  # flag it if it's accessed.
                print("number of access at this time = {}".format(int(hget_object(endpoint, model_bucket, "counter"))))
                model.linear.weight.grad = Variable(torch.from_numpy(w_grad_merge))
                model.linear.bias.grad = Variable(torch.from_numpy(b_grad_merge))

            print("w_grad after merge = {}".format(model.linear.weight.grad.data.numpy()[0][:5]))
            print("b_grad after merge = {}".format(model.linear.bias.grad.data.numpy()))
            print("synchronization cost {} s".format(time.time() - sync_start))

            optimizer.step()
            print("batch cost {} s".format(time.time() - batch_start))
            if (batch_index + 1) % 10 == 0:
                print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
                      % (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size, loss.data))

    """
    if worker_index == 0:
        while sync_counter(endpoint, bucket, num_workers):
            time.sleep(0.001)
        clear_bucket(endpoint, model_bucket)
        clear_bucket(endpoint, grad_bucket)
    """
    # Test the Model
    correct = 0
    total = 0
    for items, labels in validation_loader:
        items = Variable(items.view(-1, num_features))
        outputs = model(items)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()
    print('Accuracy of the model on the %d test samples: %d %%' % (len(val_indices), 100 * correct / total))

    end_time = time.time()
    print("elapsed time = {} s".format(end_time - start_time))
```
avg_line_length: 44.322222 | max_line_length: 120 | alphanum_fraction: 0.587741
qsc_code_*_quality_signal: num_words 918 | num_chars 7,978 | mean_word_length 4.876906 | frac_words_unique 0.227669 | frac_chars_top_2grams 0.023453 | top_3grams 0.046683 | top_4grams 0.033505 | frac_chars_dupe_5grams 0.29551 | dupe_6grams 0.261782 | dupe_7grams 0.173554 | dupe_8grams 0.112799 | dupe_9grams 0.112799 | dupe_10grams 0.097387 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.008867 | frac_chars_whitespace 0.307345 | size_file_byte 7,978 | num_lines 179 | num_chars_line_max 121 | num_chars_line_mean 44.569832 | frac_chars_alphabet 0.801303 | frac_chars_comments 0.083354 | cate_xml_start 0 | frac_lines_dupe_lines 0.11811 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.093768 | frac_chars_long_word_length 0.006377 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.007874 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.062992 | frac_lines_simplefunc 0 | score_lines_no_logic 0.070866 | frac_lines_print 0.149606
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f5401cd673d6e1e3eddd77c34fed0869702ad889 | size: 2,346 | ext: py | lang: Python
max_stars: src/backend/common/manipulators/team_manipulator.py in ofekashery/the-blue-alliance @ df0e47d054161fe742ac6198a6684247d0713279 | licenses: ["MIT"] | count: 266 | events: 2015-01-04T00:10:48.000Z to 2022-03-28T18:42:05.000Z
max_issues: same path/repo/head/licenses | count: 2,673 | events: 2015-01-01T20:14:33.000Z to 2022-03-31T18:17:16.000Z
max_forks: same path/repo/head/licenses | count: 230 | events: 2015-01-04T00:10:48.000Z to 2022-03-26T18:12:04.000Z
content:
```python
from typing import List

from backend.common.cache_clearing import get_affected_queries
from backend.common.manipulators.manipulator_base import ManipulatorBase
from backend.common.models.cached_model import TAffectedReferences
from backend.common.models.team import Team


class TeamManipulator(ManipulatorBase[Team]):
    """
    Handle Team database writes.
    """

    @classmethod
    def getCacheKeysAndQueries(
        cls, affected_refs: TAffectedReferences
    ) -> List[get_affected_queries.TCacheKeyAndQuery]:
        return get_affected_queries.team_updated(affected_refs)

    # The block below is commented-out legacy Python 2 code, preserved in a
    # string literal in the original file.
    """
    @classmethod
    def postDeleteHook(cls, teams):
        # To run after the team has been deleted.
        for team in teams:
            SearchHelper.remove_team_location_index(team)

    @classmethod
    def postUpdateHook(cls, teams, updated_attr_list, is_new_list):
        # To run after models have been updated
        for (team, updated_attrs) in zip(teams, updated_attr_list):
            if 'city' in updated_attrs or 'state_prov' in updated_attrs or \
                    'country' in updated_attrs or 'postalcode' in updated_attrs:
                try:
                    LocationHelper.update_team_location(team)
                except Exception, e:
                    logging.error("update_team_location for {} errored!".format(team.key.id()))
                    logging.exception(e)
                try:
                    SearchHelper.update_team_location_index(team)
                except Exception, e:
                    logging.error("update_team_location_index for {} errored!".format(team.key.id()))
                    logging.exception(e)
        cls.createOrUpdate(teams, run_post_update_hook=False)
    """

    @classmethod
    def updateMerge(
        cls, new_model: Team, old_model: Team, auto_union: bool = True
    ) -> Team:
        cls._update_attrs(new_model, old_model, auto_union)

        # Take the new tpid and tpid_year iff the year is newer than or equal to the old one
        if (
            new_model.first_tpid_year is not None
            and new_model.first_tpid_year >= old_model.first_tpid_year
        ):
            old_model.first_tpid_year = new_model.first_tpid_year
            old_model.first_tpid = new_model.first_tpid
            old_model._dirty = True

        return old_model
```
avg_line_length: 37.238095 | max_line_length: 101 | alphanum_fraction: 0.656436
qsc_code_*_quality_signal: num_words 280 | num_chars 2,346 | mean_word_length 5.246429 | frac_words_unique 0.346429 | frac_chars_top_2grams 0.038121 | top_3grams 0.066712 | top_4grams 0.061266 | frac_chars_dupe_5grams 0.208305 | dupe_6grams 0.19401 | dupe_7grams 0.19401 | dupe_8grams 0.19401 | dupe_9grams 0.19401 | dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0 | frac_chars_whitespace 0.272379 | size_file_byte 2,346 | num_lines 62 | num_chars_line_max 102 | num_chars_line_mean 37.83871 | frac_chars_alphabet 0.860574 | frac_chars_comments 0.047741 | cate_xml_start 0 | frac_lines_dupe_lines 0.083333 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.083333 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.208333 | frac_lines_simplefunc 0.041667 | score_lines_no_logic 0.416667 | frac_lines_print 0
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f5450958d50c031030e18504e081e98ce995e8e8 | size: 3,680 | ext: py | lang: Python
max_stars: measures/over_under_exposure_measure/over_under_exposure_measure.py in HensoldtOptronicsCV/ImageQualityAssessment @ 7bb3af2cd20a32415966304c8fa3acb77c54f85d | licenses: ["MIT"] | count: 8 | events: 2020-06-12T12:49:19.000Z to 2021-04-27T12:10:49.000Z
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: 5 | events: 2020-04-18T11:30:47.000Z to 2022-03-04T07:05:21.000Z
content:
```python
# MIT License
#
# Copyright (c) 2020 HENSOLDT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Python implementation of the under/over-exposure measure. We focus on simplicity and readability rather than efficiency.
#
# This code is related to the paper
# M. Teutsch, S. Sedelmaier, S. Moosbauer, G. Eilertsen, T. Walter,
# "An Evaluation of Objective Image Quality Assessment for Thermal Infrared Video Tone Mapping", IEEE CVPR Workshops, 2020.
#
# Please cite the paper if you use the code for your evaluations.

# This measure was originally proposed here:
# G. Eilertsen, R. Mantiuk, J. Unger, "A comparative review of tone-mapping algorithms for high dynamic range video", Eurographics, 2017.

import numpy as np
import cv2


## Calculate the over- and under-exposure measure (number of over- and under-exposed pixels) for one given tone mapped LDR image.
# @param image_ldr Low Dynamic Range image (processed image after tone mapping).
def number_of_over_and_under_exposures_pixels(image_ldr):
    # calculate exposure measure for one given frame
    # calculate histogram of the image
    hist, bins = np.histogram(image_ldr, 256, [0, 255])

    # fraction of under-exposed pixels (darkest 2% of the intensity range)
    under_exp_pix = sum(hist[0:int(255 * 0.02)]) / sum(hist) * 100
    # fraction of over-exposed pixels (brightest 5% of the intensity range)
    over_exp_pix = sum(hist[int(255 * 0.95):]) / sum(hist) * 100

    return over_exp_pix, under_exp_pix


## Calculate over- and under-exposure measure for all (already tone mapped) images in given path.
# @param images_ldr_path Directory path that contains the tone mapped images of one sequence.
def calculate_over_and_under_exposure_measure(images_ldr_path):
    sequence_length = len(images_ldr_path)
    if sequence_length == 0:
        raise ValueError('List of LDR image paths must not be empty.')

    under_exp_pix = 0
    over_exp_pix = 0
    for image_ldr_path in images_ldr_path:
        print(".", end='', flush=True)  # show progress
        # read tone mapped (TM) image as grayscale image
        image_ldr = cv2.imread(str(image_ldr_path), cv2.IMREAD_GRAYSCALE)
        curr_over_exp_pix, curr_under_exp_pix = number_of_over_and_under_exposures_pixels(image_ldr)
        # fraction of under-exposed pixels
        under_exp_pix += curr_under_exp_pix
        # fraction of over-exposed pixels
        over_exp_pix += curr_over_exp_pix

    # calculate average of over- and under-exposed pixels for this sequence
    over_exposure = over_exp_pix / sequence_length
    under_exposure = under_exp_pix / sequence_length
    print()  # newline after progress dots

    return over_exposure, under_exposure
```
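Typical driving code for the measure above might look like this; a sketch in which the directory layout and file extension are assumptions:

```python
# Hypothetical usage: score one tone-mapped sequence stored as PNGs.
from pathlib import Path

frames = sorted(Path("sequence_tm").glob("*.png"))  # assumed directory of LDR frames
over, under = calculate_over_and_under_exposure_measure(frames)
print(f"over-exposed: {over:.2f}%  under-exposed: {under:.2f}%")
```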
avg_line_length: 41.818182 | max_line_length: 137 | alphanum_fraction: 0.741848
qsc_code_*_quality_signal: num_words 548 | num_chars 3,680 | mean_word_length 4.844891 | frac_words_unique 0.406934 | frac_chars_top_2grams 0.031638 | top_3grams 0.031638 | top_4grams 0.021092 | frac_chars_dupe_5grams 0.165348 | dupe_6grams 0.152919 | dupe_7grams 0.112241 | dupe_8grams 0.089642 | dupe_9grams 0.032392 | dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.01485 | frac_chars_whitespace 0.194837 | size_file_byte 3,680 | num_lines 87 | num_chars_line_max 138 | num_chars_line_mean 42.298851 | frac_chars_alphabet 0.881201 | frac_chars_comments 0.655978 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.035333 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.086957 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.086957 | frac_lines_simplefunc 0 | score_lines_no_logic 0.26087 | frac_lines_print 0.086957
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f54590a9d9506eac6f07374f1bb10c88ce804b14 | size: 2,567 | ext: py | lang: Python
max_stars: tests/test_cascade.py in mathDR/jax-pilco @ c6c75cd8d43ba894d8f1da2cf6b7c0eea5e43527 | licenses: ["BSD-3-Clause"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
```python
from pilco.models.pilco import PILCO
import jax.numpy as jnp
import numpy as np
import objax
import os
import oct2py
import logging

oc = oct2py.Oct2Py(logger=oct2py.get_log())
oc.logger = oct2py.get_log("new_log")
oc.logger.setLevel(logging.INFO)
dir_path = os.path.dirname(os.path.realpath("__file__")) + "/tests/Matlab Code"
oc.addpath(dir_path)


def test_cascade():
    objax.random.Generator(0)
    d = 2  # State dimension
    k = 1  # Controller's output dimension
    b = 100
    horizon = 10
    e = jnp.array(
        [[10.0]]
    )  # Max control input. Set too low can lead to Cholesky failures.

    # Training Dataset
    X0 = objax.random.uniform((b, d + k))
    A = objax.random.uniform((d + k, d))
    Y0 = jnp.sin(X0).dot(A) + 1e-3 * (objax.random.uniform((b, d)) - 0.5)
    pilco = PILCO((X0, Y0))
    pilco.controller.max_action = e

    pilco.optimize_models(restarts=5)
    pilco.optimize_policy(restarts=5)

    # Generate input
    m = objax.random.uniform((1, d))
    s = objax.random.uniform((d, d))
    s = s.dot(s.T)  # Make s positive semidefinite
    M, S, reward = pilco.predict(m, s, horizon)

    # convert data to the struct expected by the MATLAB implementation
    policy = oct2py.io.Struct()
    policy.p = oct2py.io.Struct()
    policy.p.w = np.array(pilco.controller.W)
    policy.p.b = np.array(pilco.controller.b).T
    policy.maxU = e

    # convert data to the struct expected by the MATLAB implementation
    lengthscales = np.stack(
        [np.array(model.kernel.lengthscale) for model in pilco.mgpr.models]
    )
    variance = np.stack(
        [np.array(model.kernel.variance) for model in pilco.mgpr.models]
    )
    noise = np.stack(
        [np.array(model.likelihood.variance) for model in pilco.mgpr.models]
    )
    hyp = np.log(
        np.hstack((lengthscales, np.sqrt(variance[:, None]), np.sqrt(noise[:, None])))
    ).T

    dynmodel = oct2py.io.Struct()
    dynmodel.hyp = hyp
    dynmodel.inputs = X0
    dynmodel.targets = Y0

    plant = oct2py.io.Struct()
    plant.angi = np.zeros(0)
    plant.angi = np.zeros(0)
    plant.poli = np.arange(d) + 1
    plant.dyni = np.arange(d) + 1
    plant.difi = np.arange(d) + 1

    # Call function in octave
    M_mat, S_mat = oc.pred(
        policy, plant, dynmodel, m.T, s, horizon, nout=2, verbose=True
    )

    # Extract only last element of the horizon
    M_mat = M_mat[:, -1]
    S_mat = S_mat[:, :, -1]

    assert jnp.allclose(M[0], M_mat, rtol=1e-2)
    assert jnp.allclose(S, S_mat, rtol=1e-2)


if __name__ == "__main__":
    test_cascade()
```
avg_line_length: 27.602151 | max_line_length: 86 | alphanum_fraction: 0.638878
qsc_code_*_quality_signal: num_words 389 | num_chars 2,567 | mean_word_length 4.138817 | frac_words_unique 0.359897 | frac_chars_top_2grams 0.040994 | top_3grams 0.055901 | top_4grams 0.026087 | frac_chars_dupe_5grams 0.261491 | dupe_6grams 0.180124 | dupe_7grams 0.109317 | dupe_8grams 0.068323 | dupe_9grams 0.068323 | dupe_10grams 0.068323 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.023606 | frac_chars_whitespace 0.224386 | size_file_byte 2,567 | num_lines 92 | num_chars_line_max 87 | num_chars_line_mean 27.902174 | frac_chars_alphabet 0.785033 | frac_chars_comments 0.1418 | cate_xml_start 0 | frac_lines_dupe_lines 0.029412 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.018704 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.029412
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.014706 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.102941 | frac_lines_simplefunc 0 | score_lines_no_logic 0.117647 | frac_lines_print 0
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f547d48b9bf65696e52de1543f4c4b442a9e0501 | size: 2,042 | ext: py | lang: Python
max_stars: python/general-python/create-replica-and-download/createReplicaAndDownload.py in claudeshyaka-esri/developer-support @ 016940d74f92a78f362900ab5329aa88c27d0a43 | licenses: ["Apache-2.0"] | count: 272 | events: 2015-02-11T16:26:39.000Z to 2022-03-31T08:47:33.000Z
max_issues: same path/repo/head/licenses | count: 254 | events: 2015-02-11T01:12:35.000Z to 2021-04-22T22:14:20.000Z
max_forks: same path/repo/head/licenses | count: 211 | events: 2015-02-10T00:09:07.000Z to 2022-02-24T12:27:40.000Z
content:
```python
import urllib, urllib2, json, time, os

username = "username"  #CHANGE
password = "password"  #CHANGE
replicaURL = "feature service url/FeatureServer/createReplica"  #CHANGE
replicaLayers = [0]  #CHANGE
replicaName = "replicaTest"  #CHANGE


def sendRequest(request):
    response = urllib2.urlopen(request)
    readResponse = response.read()
    jsonResponse = json.loads(readResponse)
    return jsonResponse


print("Generating token")
url = "https://arcgis.com/sharing/rest/generateToken"
data = {'username': username,
        'password': password,
        'referer': "https://www.arcgis.com",
        'f': 'json'}
request = urllib2.Request(url, urllib.urlencode(data))
jsonResponse = sendRequest(request)
token = jsonResponse['token']

print("Creating the replica")
data = {'f': 'json',
        'replicaName': replicaName,
        'layers': replicaLayers,
        'returnAttachments': 'true',
        'returnAttachmentsDatabyURL': 'false',
        'syncModel': 'none',
        'dataFormat': 'filegdb',
        'async': 'true',
        'token': token}
request = urllib2.Request(replicaURL, urllib.urlencode(data))
jsonResponse = sendRequest(request)
print(jsonResponse)

print("Pinging the server")
responseUrl = jsonResponse['statusUrl']
url = "{}?f=json&token={}".format(responseUrl, token)
request = urllib2.Request(url)
jsonResponse = sendRequest(request)
while not jsonResponse.get("status") == "Completed":
    time.sleep(5)
    request = urllib2.Request(url)
    jsonResponse = sendRequest(request)

userDownloads = os.environ['USERPROFILE'] + "\\Downloads"
print("Downloading the replica. In case this fails note that the replica URL is: \n")
jres = jsonResponse['resultUrl']
url = "{0}?token={1}".format(jres, token)
print(url)
f = urllib2.urlopen(url)
with open(userDownloads + "\\" + os.path.basename(jres), "wb") as local_file:
    local_file.write(f.read())
print("\n Finished!")
```
avg_line_length: 34.610169 | max_line_length: 85 | alphanum_fraction: 0.642018
qsc_code_*_quality_signal: num_words 203 | num_chars 2,042 | mean_word_length 6.448276 | frac_words_unique 0.463054 | frac_chars_top_2grams 0.068755 | top_3grams 0.064171 | top_4grams 0.055004 | frac_chars_dupe_5grams 0.157372 | dupe_6grams 0.157372 | dupe_7grams 0.082506 | dupe_8grams through dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.006888 | frac_chars_whitespace 0.217924 | size_file_byte 2,042 | num_lines 58 | num_chars_line_max 86 | num_chars_line_mean 35.206897 | frac_chars_alphabet 0.812774 | frac_chars_comments 0.014691 | cate_xml_start 0 | frac_lines_dupe_lines 0.117647 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.261086 | frac_chars_long_word_length 0.028401 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.019608 | cate_var_zero false | frac_lines_pass 0.039216 | frac_lines_import 0.019608 | frac_lines_simplefunc 0 | score_lines_no_logic 0.058824 | frac_lines_print 0.137255
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f54e716dfa472cc32b79479172fc0cb1532d563d | size: 1,028 | ext: py | lang: Python
max_stars: setup.py in henryk/byro-cnss @ 77cc4d34a521879f9f225b473964b7384db306b1 | licenses: ["Apache-2.0"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
```python
import os
from distutils.command.build import build

from django.core import management
from setuptools import find_packages, setup

try:
    with open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8') as f:
        long_description = f.read()
except:
    long_description = ''


class CustomBuild(build):
    def run(self):
        management.call_command('compilemessages', verbosity=1)
        build.run(self)


cmdclass = {
    'build': CustomBuild
}

setup(
    name='byro-cnss',
    version='0.0.1',
    description='Byro plugin for CNSS (Clausewitz-Netzwerk für Strategische Studien e.V.)',
    long_description=long_description,
    url='https://github.com/henryk/byro-cnss',
    author='Henryk Plötz',
    author_email='henryk@ploetzli.ch',
    license='Apache Software License',
    install_requires=[],
    packages=find_packages(exclude=['tests', 'tests.*']),
    include_package_data=True,
    cmdclass=cmdclass,
    entry_points="""
[byro.plugin]
byro_cnss=byro_cnss:ByroPluginMeta
""",
)
```
avg_line_length: 23.363636 | max_line_length: 92 | alphanum_fraction: 0.696498
qsc_code_*_quality_signal: num_words 127 | num_chars 1,028 | mean_word_length 5.496063 | frac_words_unique 0.622047 | frac_chars_top_2grams 0.08596 | top_3grams 0 | top_4grams 0 | frac_chars_dupe_5grams through dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.005848 | frac_chars_whitespace 0.168288 | size_file_byte 1,028 | num_lines 43 | num_chars_line_max 93 | num_chars_line_mean 23.906977 | frac_chars_alphabet 0.810526 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.263619 | frac_chars_long_word_length 0.033074 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.029412 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.117647 | frac_lines_simplefunc 0 | score_lines_no_logic 0.176471 | frac_lines_print 0
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f54f18f6eb1da6e577537fa0c7b336cc4d1057b5 | size: 2,181 | ext: py | lang: Python
max_stars: utils/tensor_utils_test.py in zhuchen03/federated @ 6bbcdcb856759aa29daa9a510e7d5f34f6915010 | licenses: ["Apache-2.0"] | count: 2 | events: 2021-10-19T13:55:11.000Z to 2021-11-11T11:26:05.000Z
max_issues: same path/repo/head/licenses | count: 2 | events: 2021-11-10T20:22:35.000Z to 2022-02-10T04:44:40.000Z
max_forks: same path/repo/head/licenses | count: 1 | events: 2021-03-09T09:48:56.000Z to 2021-03-09T09:48:56.000Z
content:
```python
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import tensorflow as tf

from utils import tensor_utils


class TensorUtilsTest(tf.test.TestCase):

  def test_zero_all_if_any_non_finite(self):

    def expect_ok(structure):
      with tf.Graph().as_default():
        result, error = tensor_utils.zero_all_if_any_non_finite(structure)
        with self.session() as sess:
          result, error = sess.run((result, error))
        try:
          tf.nest.map_structure(np.testing.assert_allclose, result, structure)
        except AssertionError:
          self.fail('Expected to get input {} back, but instead got {}'.format(
              structure, result))
        self.assertEqual(error, 0)

    expect_ok([])
    expect_ok([(), {}])
    expect_ok(1.1)
    expect_ok([1.0, 0.0])
    expect_ok([1.0, 2.0, {'a': 0.0, 'b': -3.0}])

    def expect_zeros(structure, expected):
      with tf.Graph().as_default():
        result, error = tensor_utils.zero_all_if_any_non_finite(structure)
        with self.session() as sess:
          result, error = sess.run((result, error))
        try:
          tf.nest.map_structure(np.testing.assert_allclose, result, expected)
        except AssertionError:
          self.fail('Expected to get zeros, but instead got {}'.format(result))
        self.assertEqual(error, 1)

    expect_zeros(np.inf, 0.0)
    expect_zeros((1.0, (2.0, np.nan)), (0.0, (0.0, 0.0)))
    expect_zeros((1.0, (2.0, {
        'a': 3.0,
        'b': [[np.inf], [np.nan]]
    })), (0.0, (0.0, {
        'a': 0.0,
        'b': [[0.0], [0.0]]
    })))


if __name__ == '__main__':
  tf.test.main()
```
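The contract the test pins down: `zero_all_if_any_non_finite` returns the structure unchanged with error flag 0 when every leaf is finite, and an all-zeros structure with error flag 1 otherwise. A NumPy-level reference of that contract, a sketch rather than the `utils.tensor_utils` source:

```python
# Reference semantics implied by the test above -- NOT the actual implementation.
import numpy as np
import tensorflow as tf

def zero_all_if_any_non_finite_ref(structure):
    leaves = tf.nest.flatten(structure)
    if all(np.all(np.isfinite(x)) for x in leaves):
        return structure, 0
    zeroed = tf.nest.map_structure(
        lambda x: np.zeros_like(np.asarray(x, dtype=float)), structure)
    return zeroed, 1
```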
avg_line_length: 32.552239 | max_line_length: 79 | alphanum_fraction: 0.64099
qsc_code_*_quality_signal: num_words 318 | num_chars 2,181 | mean_word_length 4.261006 | frac_words_unique 0.367925 | frac_chars_top_2grams 0.023616 | top_3grams 0.019926 | top_4grams 0.01476 | frac_chars_dupe_5grams 0.37786 | dupe_6grams 0.368266 | dupe_7grams 0.339483 | dupe_8grams 0.278967 | dupe_9grams 0.253875 | dupe_10grams 0.253875 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.030806 | frac_chars_whitespace 0.226043 | size_file_byte 2,181 | num_lines 66 | num_chars_line_max 80 | num_chars_line_mean 33.045455 | frac_chars_alphabet 0.771919 | frac_chars_comments 0.262265 | cate_xml_start 0 | frac_lines_dupe_lines 0.285714 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.065204 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.142857
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.071429 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.071429 | frac_lines_simplefunc 0 | score_lines_no_logic 0.166667 | frac_lines_print 0
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f54f50b36cac1b6f41d6778991e01f0570bbafab | size: 3,426 | ext: py | lang: Python
max_stars: autonmap/__main__.py in zeziba/AUTONMAP @ 50a2ae5f0731bc919ccb8978c619d1432b447286 | licenses: ["Apache-2.0"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
```python
#!/usr/bin/env python3
import logging.handlers
import sys
from sys import argv, modules
from os.path import join

from autonmap import cron_scheduler
from autonmap import launch_client
from autonmap import launch_server
from autonmap.server import server_config as sconfig

"""
This module allows autonmap to interact with the server and client process to
perform the tasks each is assigned.
"""

LOG_FILE = "/tmp/autonmap.log"
LOGGING_LEVEL = logging.INFO

logger = logging.getLogger(__name__)
logger.setLevel(LOGGING_LEVEL)
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE, when='midnight', backupCount=3)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)


class Log(object):
    def __init__(self, log, level):
        self.logger = log
        self.level = level

    def write(self, message):
        if message.rstrip() != "":
            self.logger.log(self.level, message.rstrip())

    def flush(self):
        pass


def main():
    """
    Main routine
    :return: None
    """
    if len(argv) > 1:
        print("Automated nMap Server/Client Manager")
        if argv[1] == 'cron':
            cron_scheduler.main()
        elif argv[1] == "update":
            if len(argv) == 3:
                file_location = join(sconfig.get_base(), "work.txt")
                if str(argv[2]).lower() == "delete":
                    with open(file_location, "w") as file:
                        pass  # This empties the file of all contents
                else:
                    with open(argv[2], "r") as infile:
                        with open(file_location, "w+") as outfile:
                            subnets = set()
                            for in_line in infile:
                                subnets.add(in_line)
                            for out_line in outfile:
                                subnets.add(out_line)
                            outfile.seek(0)
                            outfile.truncate()
                            for item in subnets:
                                outfile.write("{}\n".format(item))
        elif len(argv) == 3:
            if argv[2] in ['start', 'stop', 'update', 'report']:
                if argv[1] == 'server':
                    sys.stdout = Log(log=logger, level=logging.INFO)
                    sys.stderr = Log(log=logger, level=logging.ERROR)
                    launch_server.main(argv[2])
                elif argv[1] == 'client':
                    sys.stdout = Log(log=logger, level=logging.INFO)
                    sys.stderr = Log(log=logger, level=logging.ERROR)
                    launch_client.main(argv[2])
                else:
                    print("Invalid arguments")
            else:
                print("Invalid arguments")
    else:
        print("Usage: {} {} {}".format("python3 -m autonmap",
                                       "client|server|update", "start<client>|stop|report|update|"
                                                               "location<update>|delete<update>"))
        print("Usage: {} {}".format("python3 -m autonmap", "cron"))
        print("\t{} {}".format("python3 -m autonmap", "update ~/workfile.txt"))
        print("Client script is located at: \n\t\t{}".format(modules[launch_client.__name__]))
        print("The log is located in /tmp/autonmap.log")


if __name__ == "__main__":
    main()
```
avg_line_length: 35.6875 | max_line_length: 98 | alphanum_fraction: 0.537069
qsc_code_*_quality_signal: num_words 376 | num_chars 3,426 | mean_word_length 4.787234 | frac_words_unique 0.343085 | frac_chars_top_2grams 0.033333 | top_3grams 0.026667 | top_4grams 0.037778 | frac_chars_dupe_5grams 0.205556 | dupe_6grams 0.181111 | dupe_7grams through dupe_10grams 0.09 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.008441 | frac_chars_whitespace 0.342966 | size_file_byte 3,426 | num_lines 95 | num_chars_line_max 99 | num_chars_line_mean 36.063158 | frac_chars_alphabet 0.791204 | frac_chars_comments 0.025102 | cate_xml_start 0 | frac_lines_dupe_lines 0.166667 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.151032 | frac_chars_long_word_length 0.020013 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.055556 | cate_var_zero false | frac_lines_pass 0.027778 | frac_lines_import 0.111111 | frac_lines_simplefunc 0 | score_lines_no_logic 0.180556 | frac_lines_print 0.111111
bare qsc_* duplicates: all 0 (frac_words_unique, frac_lines_string_concat null) | effective: 1 | hits: 0

hexsha: f54ff4d5dcb3a333a55f6c56d21b89f6d29ae597 | size: 6,166 | ext: py | lang: Python
max_stars: src/logic_gradient.py in Einzberg/BattlesnakeFun @ 4276144c3ccfab66e7c9df4717681e305861f76a | licenses: ["MIT"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
# import random
# from typing import List, Dict
import numpy as np
# import matplotlib.pyplot as plt
def get_info() -> dict:
"""
This controls your Battlesnake appearance and author permissions.
For customization options, see https://docs.battlesnake.com/references/personalization
TIP: If you open your Battlesnake URL in browser you should see this data.
"""
return {
"apiversion": "1",
"author": "Mex", # TODO: Your Battlesnake Username
"color": "#888889", # TODO: Personalize
"head": "silly", # TODO: Personalize
"tail": "curled", # TODO: Personalize
}
# Globals
food_weight = 9
snake_weight = -9
snake_head_weight = -2
wall_weight = -9
board_centre = 1
board_x = None
board_y = None
def gkern(l=10, scale=4):
"""\
creates gaussian kernel with side length `l` and a sigma of `sig`
"""
sig = (l-1)/3
ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))
kernel = np.outer(gauss, gauss)
return scale * kernel / np.max(kernel)
def centre_grad(data: dict) -> np.array:
board_w = data["board"]["width"]
board_h = data["board"]["height"]
gradient_board = gkern(max(board_w, board_h), board_centre)
return gradient_board
def populate_food(board: np.array, data: dict):
for food in data['board']['food']:
food_x, food_y = food['x'], food['y']
kern_size = max(board.shape[0], board.shape[1])
kernel = gkern(kern_size*2 + 1, 1)
mid = kern_size + 1
x_min = mid - food_x
x_max = mid + board.shape[0] - food_x
y_min = mid - food_y
y_max = mid + board.shape[0] - food_y
board += kernel[x_min:x_max, y_min:y_max]*food_weight
def populate_other_snakes(board: np.array, data: dict):
for snake in data['board']['snakes']:
snake_body = snake['body']
for ele in snake_body:
if ele == snake['head']:
if ele == data['you']['head']:
board[ele['x'], ele['y']] = snake_weight
elif snake['length'] < data['you']['length']:
continue
else:
board[ele['x'], ele['y']] = snake_weight
                # cells a snake head can reach next turn are dangerous; guard
                # the indices so heads on the border neither wrap to -1 nor
                # raise an IndexError
                for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                    nx, ny = ele['x'] + dx, ele['y'] + dy
                    if 0 <= nx < board.shape[0] and 0 <= ny < board.shape[1]:
                        board[nx, ny] += snake_head_weight
else:
board[ele['x'], ele['y']] = snake_weight
def follow_global_max(head: dict, board: np.array) -> str:
global_max = np.unravel_index(np.argmax(board), board.shape)
directions = {
"up": (0,1),
"down": (0,-1),
"left": (-1,0),
"right": (1,0)
}
direction = ""
distance = 10000
for item in directions.items():
curr_dist = (head['x'] + item[1][0] - global_max[0])**2 + (head['y'] + item[1][1] - global_max[1])**2
print(curr_dist, item[0])
if curr_dist < distance:
distance = curr_dist
direction = item[0]
return direction
def follow_grad(head: dict, board: np.array) -> str:
directions = {
"up": (0,1),
"down": (0,-1),
"left": (-1,0),
"right": (1,0)
}
direction = ""
max_score = 0
for item in directions.items():
curr_score = board[head['x'] + item[1][0] + 1, head['y'] + item[1][1] + 1]
if curr_score > max_score:
max_score = curr_score
direction = item[0]
return direction
def choose_move(data: dict) -> str:
    # NOTE: these are locals that shadow the module-level board_x/board_y
    # globals, which are never actually updated
    board_y = data['board']['height']
    board_x = data['board']['width']
board = centre_grad(data)
# print(f'GRADIENT ARRAY: {array_of_arrays}')
populate_other_snakes(board, data)
board = np.pad(board, 1, 'constant', constant_values=wall_weight)
populate_food(board, data)
    # +1 converts the head into the padded board's coordinate frame, so the
    # distance comparison matches the indices returned by np.argmax
    head = data['you']['head']
    direction = follow_global_max({'x': head['x'] + 1, 'y': head['y'] + 1}, board)
# direction = follow_grad(array_of_arrays)
print(f'GOING THIS DIRECTION: {direction}')
return direction
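# Hedged walkthrough (added for illustration): choose_move layers three
# signals onto one scalar field -- a centre-seeking gradient, negative snake
# weights, and a wall border from np.pad -- then greedily steps towards the
# global maximum. It can be exercised with the sample request below.
def _demo_choose_move():
    move = choose_move(data)  # `data` is the sample request defined below
    assert move in ("up", "down", "left", "right")
    return move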
data = {
"turn": 14,
"board": {
"height": 11,
"width": 11,
"food": [
{"x": 5, "y": 5},
{"x": 9, "y": 0},
{"x": 2, "y": 6}
],
"hazards": [
{"x": 3, "y": 2}
],
"snakes": [
{
"id": "snake-508e96ac-94ad-11ea-bb37",
"name": "My Snake",
"health": 54,
"body": [
{"x": 0, "y": 0},
{"x": 1, "y": 0},
{"x": 2, "y": 0}
],
"latency": "111",
"head": {"x": 0, "y": 0},
"length": 3,
"shout": "why are we shouting??",
"squad": "",
"customizations":{
"color":"#FF0000",
"head":"pixel",
"tail":"pixel"
}
},
{
"id": "snake-b67f4906-94ae-11ea-bb37",
"name": "Another Snake",
"health": 16,
"body": [
{"x": 5, "y": 4},
{"x": 5, "y": 3},
{"x": 6, "y": 3},
{"x": 6, "y": 2}
],
"latency": "222",
"head": {"x": 5, "y": 4},
"length": 4,
"shout": "I'm not really sure...",
"squad": "",
"customizations":{
"color":"#26CF04",
"head":"silly",
"tail":"curled"
}
}
]
},
"you": {
"id": "snake-508e96ac-94ad-11ea-bb37",
"name": "My Snake",
"health": 54,
"body": [
{"x": 0, "y": 0},
{"x": 1, "y": 0},
{"x": 2, "y": 0}
],
"latency": "111",
"head": {"x": 0, "y": 0},
"length": 3,
"shout": "why are we shouting??",
"squad": "",
"customizations":{
"color":"#FF0000",
"head":"pixel",
"tail":"pixel"
}
}
}
if False:  # manual debugging block; unlike choose_move it populates the food before padding
board = centre_grad(data)
board_x, board_y = 11, 11
populate_other_snakes(board, data)
populate_food(board, data)
board = np.pad(board, 1, 'constant', constant_values=snake_weight)
# plt.imshow(np.rot90(np.fliplr(board)), interpolation='none', origin="lower")
# plt.show()
| 28.155251 | 105 | 0.509569 | 787 | 6,166 | 3.875476 | 0.236341 | 0.026557 | 0.020656 | 0.019672 | 0.342623 | 0.308197 | 0.224918 | 0.217049 | 0.198689 | 0.198689 | 0 | 0.043478 | 0.306195 | 6,166 | 218 | 106 | 28.284404 | 0.669472 | 0.111255 | 0 | 0.39779 | 0 | 0 | 0.142463 | 0.016013 | 0 | 0 | 0 | 0.004587 | 0 | 1 | 0.044199 | false | 0 | 0.005525 | 0 | 0.082873 | 0.01105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f5538c72ced0bc74b5e82bee2c3ce5f0a35952cd | 11,836 | py | Python | nuclear/help/help.py | igrek51/glue | 6726ba977a21e58b354a5c97f68639f84184be7a | [ "MIT" ] | 4 | 2019-07-04T20:41:06.000Z | 2020-04-23T18:17:33.000Z | nuclear/help/help.py | igrek51/cliglue | 6726ba977a21e58b354a5c97f68639f84184be7a | [ "MIT" ] | null | null | null | nuclear/help/help.py | igrek51/cliglue | 6726ba977a21e58b354a5c97f68639f84184be7a | [ "MIT" ] | null | null | null |
import os
import sys
from dataclasses import dataclass, field
from typing import List, Set, Optional
from nuclear.builder.rule import PrimaryOptionRule, ParameterRule, FlagRule, CliRule, SubcommandRule, \
PositionalArgumentRule, ManyArgumentsRule, DictionaryRule, ValueRule
from nuclear.parser.context import RunContext
from nuclear.parser.keyword import format_var_names, format_var_name
from nuclear.parser.parser import Parser
from nuclear.parser.transform import filter_rules
from nuclear.parser.value import generate_value_choices
from nuclear.version import __version__
@dataclass
class _OptionHelp(object):
cmd: str
help: str
parent: '_OptionHelp' = None
rule: SubcommandRule = None
    subrules: List[CliRule] = field(default_factory=list)
internal_options = {'--autocomplete', '--install-bash', '--install-autocomplete'}
def print_help(rules: List[CliRule], app_name: str, version: str, help: str, subargs: List[str], hide_internal: bool):
helps = generate_help(rules, app_name, version, help, subargs, hide_internal)
print('\n'.join(helps))
def print_usage(rules: List[CliRule]):
all_rules, available_subcommands, precommands = help_context(rules, [])
pos_arguments = filter_rules(all_rules, PositionalArgumentRule)
many_args = filter_rules(all_rules, ManyArgumentsRule)
has_commands = bool(filter_rules(available_subcommands, SubcommandRule))
command_name = shell_command_name()
app_bin_prefix = ' '.join([command_name] + precommands)
usage = generate_usage(app_bin_prefix, has_commands, have_rules_options(all_rules), many_args, pos_arguments)
how_to_help = f'Run "{command_name} --help" for more information.'
print('\n'.join([f'Usage: {usage}', how_to_help]))
def generate_help(rules: List[CliRule], app_name: str, version: str, help: str, subargs: List[str],
hide_internal: bool) -> List[str]:
all_rules, available_subcommands, precommands = help_context(rules, subargs)
return generate_subcommand_help(all_rules, app_name, version, help,
precommands, available_subcommands, hide_internal)
def help_context(rules, subargs):
available_subcommands = filter_rules(rules, SubcommandRule)
run_context: Optional[RunContext] = Parser(rules, dry=True).parse_args(subargs)
all_rules: List[CliRule] = run_context.active_rules
active_subcommands: List[SubcommandRule] = run_context.active_subcommands
precommands: List[str] = [_subcommand_short_name(rule) for rule in active_subcommands]
if active_subcommands:
available_subcommands = filter_rules(active_subcommands[-1].subrules, SubcommandRule)
return all_rules, available_subcommands, precommands
def generate_subcommand_help(
all_rules: List[CliRule],
app_name: str,
version: str,
help: str,
precommands: List[str],
subcommands: List[SubcommandRule],
hide_internal: bool,
) -> List[str]:
pos_arguments = filter_rules(all_rules, PositionalArgumentRule)
many_args = filter_rules(all_rules, ManyArgumentsRule)
pos_args_helps: List[_OptionHelp] = _generate_pos_args_helps(pos_arguments, many_args)
options: List[_OptionHelp] = _generate_options_helps(all_rules, hide_internal)
commands: List[_OptionHelp] = _generate_commands_helps(subcommands)
out = []
app_info = app_help_info(app_name, help, version)
if app_info:
out.append(app_info + '\n')
app_bin_prefix = ' '.join([shell_command_name()] + precommands)
out.append('Usage:')
out.append(generate_usage(app_bin_prefix, bool(commands), have_rules_options(all_rules), many_args, pos_arguments))
if pos_args_helps:
out.append('\nArguments:')
__helpers_output(pos_args_helps, out)
if options:
out.append('\nOptions:')
__helpers_output(options, out)
if commands:
out.append('\nCommands:')
__helpers_output(commands, out)
out.append(f'\nRun "{app_bin_prefix} COMMAND --help" for more information on a command.')
return out
def app_help_info(app_name: str, help: str, version: str) -> Optional[str]:
info = app_name_version(app_name, version)
return ' - '.join(filter(bool, [info, help]))
def app_name_version(app_name, version):
infos = []
if app_name:
infos += [app_name]
if version:
version = _normalized_version(version)
infos += [version]
if infos:
infos += [f'(nuclear v{__version__})']
return ' '.join(infos)
def generate_usage(app_bin_prefix, has_commands: bool, has_options: bool, many_args, pos_arguments) -> str:
usage_syntax: str = app_bin_prefix
if has_commands:
usage_syntax += ' [COMMAND]'
if has_options:
usage_syntax += ' [OPTIONS]'
usage_syntax += usage_positional_arguments(pos_arguments)
usage_syntax += usage_many_arguments(many_args)
return usage_syntax
def __helpers_output(commands, out):
padding = _max_name_width(commands)
for helper in commands:
name_padded = helper.cmd.ljust(padding)
if helper.help:
for idx, line in enumerate(helper.help.splitlines()):
if idx == 0:
out.append(f' {name_padded} - {line}')
else:
out.append(' ' * (2 + padding + 3) + line)
else:
out.append(f' {name_padded}')
def print_version(app_name: str, version: str):
print(app_name_version(app_name, version))
def _normalized_version(version: str) -> str:
if version.startswith('v'):
return version
return f'v{version}'
def _max_name_width(helps: List[_OptionHelp]) -> int:
return max(map(lambda h: len(h.cmd), helps))
def _generate_pos_args_helps(
pos_arguments: List[PositionalArgumentRule],
many_args: List[ManyArgumentsRule]
) -> List[_OptionHelp]:
return [_pos_arg_help(rule) for rule in pos_arguments] + \
[_many_args_help(rule) for rule in many_args]
def _generate_options_helps(rules: List[CliRule], hide_internal: bool) -> List[_OptionHelp]:
# filter non-empty
return list(filter(lambda o: o, [_generate_option_help(rule, hide_internal) for rule in rules]))
def _generate_option_help(rule: CliRule, hide_internal: bool) -> Optional[_OptionHelp]:
if isinstance(rule, PrimaryOptionRule):
return _primary_option_help(rule, hide_internal)
elif isinstance(rule, FlagRule):
return _flag_help(rule)
elif isinstance(rule, ParameterRule):
return _parameter_help(rule)
elif isinstance(rule, DictionaryRule):
return _dictionary_help(rule)
return None
def _generate_commands_helps(rules: List[CliRule], parent: _OptionHelp = None, subrules: List[CliRule] = None
) -> List[_OptionHelp]:
commands: List[_OptionHelp] = []
for rule in filter_rules(rules, SubcommandRule):
subsubrules = (subrules or []) + rule.subrules
helper = _subcommand_help(rule, parent, subsubrules)
if rule.run or rule.help:
commands.append(helper)
commands.extend(_generate_commands_helps(rule.subrules, helper, subsubrules))
return commands
def _subcommand_help(rule: SubcommandRule, parent: _OptionHelp, subrules: List[CliRule]) -> _OptionHelp:
pos_args = filter_rules(subrules, PositionalArgumentRule)
many_args = filter_rules(subrules, ManyArgumentsRule)
cmd = _subcommand_prefix(parent) + '|'.join(sorted_keywords(rule.keywords))
cmd += usage_positional_arguments(pos_args)
cmd += usage_many_arguments(many_args)
return _OptionHelp(cmd, rule.help, parent=parent, rule=rule, subrules=subrules)
def _subcommand_prefix(helper: _OptionHelp) -> str:
if not helper:
return ''
return _subcommand_prefix(helper.parent) + '|'.join(sorted_keywords(helper.rule.keywords)) + ' '
def _primary_option_help(rule: PrimaryOptionRule, hide_internal: bool) -> Optional[_OptionHelp]:
if hide_internal:
for keyword in rule.keywords:
if keyword in internal_options:
return None
cmd = ', '.join(sorted_keywords(rule.keywords))
pos_args = filter_rules(rule.subrules, PositionalArgumentRule)
all_args = filter_rules(rule.subrules, ManyArgumentsRule)
cmd += usage_positional_arguments(pos_args)
cmd += usage_many_arguments(all_args)
return _OptionHelp(cmd, rule.help)
def _flag_help(rule: FlagRule) -> _OptionHelp:
cmd = ', '.join(sorted_keywords(rule.keywords))
return _OptionHelp(cmd, rule.help)
def _parameter_help(rule: ParameterRule) -> _OptionHelp:
cmd = ', '.join(sorted_keywords(rule.keywords)) + ' ' + _param_display_name(rule)
default_value = display_default_value(rule.default)
choices_help = display_choices_help(rule)
help_text = join_nonempty_lines(rule.help, default_value, choices_help)
return _OptionHelp(cmd, help_text)
def _dictionary_help(rule: DictionaryRule) -> _OptionHelp:
cmd = ', '.join(sorted_keywords(rule.keywords)) + ' KEY VALUE'
return _OptionHelp(cmd, rule.help)
def _pos_arg_help(rule: PositionalArgumentRule) -> _OptionHelp:
cmd = display_positional_argument(rule)
default_value = display_default_value(rule.default)
choices_help = display_choices_help(rule)
help_text = join_nonempty_lines(rule.help, default_value, choices_help)
return _OptionHelp(cmd, help_text)
def _many_args_help(rule: ManyArgumentsRule) -> _OptionHelp:
cmd = display_many_arguments(rule)
choices_help = display_choices_help(rule)
help_text = join_nonempty_lines(rule.help, choices_help)
return _OptionHelp(cmd, help_text)
def _param_display_name(rule: ParameterRule) -> str:
if rule.name:
return format_var_name(rule.name).upper()
else:
# get name from the longest keyword
names: Set[str] = format_var_names(rule.keywords)
        return max(names, key=len).upper()
def _argument_var_name(rule: PositionalArgumentRule) -> str:
return format_var_name(rule.name).upper()
def _subcommand_short_name(rule: SubcommandRule) -> str:
return next(iter(rule.keywords))
def sorted_keywords(keywords: Set[str]) -> List[str]:
# shortest keywords first, then alphabetically
return sorted(keywords, key=lambda k: (len(k), k))
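# Added examples (not part of the original module) of the keyword ordering:
def _demo_sorted_keywords():
    assert sorted_keywords({'--help', '-h'}) == ['-h', '--help']  # shortest first
    assert sorted_keywords({'-b', '-a'}) == ['-a', '-b']          # then alphabetical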
def display_positional_argument(rule: PositionalArgumentRule) -> str:
var_name = _argument_var_name(rule)
if rule.required:
return f' {var_name}'
else:
return f' [{var_name}]'
def display_many_arguments(rule: ManyArgumentsRule) -> str:
arg_name = rule.name.upper()
if rule.count_min():
return f' {arg_name}...'
else:
return f' [{arg_name}...]'
def usage_positional_arguments(rules: List[PositionalArgumentRule]) -> str:
return ''.join([display_positional_argument(rule) for rule in rules])
def usage_many_arguments(rules: List[ManyArgumentsRule]) -> str:
return ''.join([display_many_arguments(rule) for rule in rules])
def shell_command_name():
_, command = os.path.split(sys.argv[0])
if command == '__main__.py':
return sys.modules['__main__'].__package__
return command
def have_rules_options(rules: List[CliRule]) -> bool:
return bool(filter_rules(rules, FlagRule, ParameterRule, DictionaryRule, PrimaryOptionRule))
def display_default_value(default) -> Optional[str]:
if default is None:
return None
return 'Default: ' + str(default)
def display_choices_help(rule: ValueRule) -> Optional[str]:
choices = generate_value_choices(rule)
if not choices or not rule.strict_choices:
return None
return 'Choices: ' + ', '.join(choices)
def join_nonempty_lines(*lines: str) -> str:
return '\n'.join(filter(lambda t: t is not None, lines))
| 35.22619 | 119 | 0.710037 | 1,447 | 11,836 | 5.508639 | 0.124395 | 0.020073 | 0.016058 | 0.008155 | 0.304981 | 0.233973 | 0.176766 | 0.144524 | 0.12558 | 0.12558 | 0 | 0.000518 | 0.184099 | 11,836 | 335 | 120 | 35.331343 | 0.824894 | 0.008026 | 0 | 0.141667 | 0 | 0 | 0.039278 | 0.001874 | 0 | 0 | 0 | 0 | 0 | 1 | 0.154167 | false | 0 | 0.045833 | 0.041667 | 0.416667 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f553c00e89c0f5a71a1f1863c8dfb6394c78b550 | 1,997 | py | Python | Apps/Engines/Nuke/NukeTools_1.01/Python/LookAt.py | geoffroygivry/CyclopsVFX-Unity | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [ "MIT" ] | 17 | 2017-06-27T04:14:42.000Z | 2022-03-07T03:37:44.000Z | Apps/Engines/Nuke/NukeTools_1.01/Python/LookAt.py | geoffroygivry/Cyclops-VFX | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [ "MIT" ] | 2 | 2017-06-14T04:17:51.000Z | 2018-08-23T20:12:44.000Z | Apps/Engines/Nuke/NukeTools_1.01/Python/LookAt.py | geoffroygivry/CyclopsVFX-Unity | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [ "MIT" ] | 2 | 2019-03-18T06:18:33.000Z | 2019-08-14T21:07:53.000Z |
#The MIT License (MIT)
#
#Copyright (c) 2015 Geoffroy Givry
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import nuke
LookAtName = "LookAt"
def panelLookAt():
p = nuke.Panel("Look At Panel")
p.addSingleLineInput("LookAt Name:", LookAtName)
p.addButton("Cancel")
p.addButton("OK")
result = p.show()
nameLookAt = p.value("LookAt Name:")
EXpX = 'degrees(atan2(%s.translate.y-translate.y,sqrt(pow(%s.translate.x-translate.x,2)+pow(%s.translate.z-translate.z,2))))' % (nameLookAt, nameLookAt, nameLookAt)
EXpY = '%s.translate.z-this.translate.z >= 0 ? 180+degrees(atan2(%s.translate.x-translate.x,%s.translate.z-translate.z)):180+degrees(atan2(%s.translate.x-translate.x,%s.translate.z-translate.z))' % (nameLookAt, nameLookAt, nameLookAt, nameLookAt, nameLookAt)
nuke.nodes.Axis(name=nameLookAt)
for n in nuke.selectedNodes():
n['rotate'].setExpression(EXpX, 0)
n['rotate'].setExpression(EXpY, 1)
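# For reference (added, not from the original tool): the two expression
# strings above encode a standard look-at; the same math in plain Python,
# free of any Nuke API, looks roughly like this:
import math

def _look_at_rotation(src, dst):
    """Return (rx, ry) in degrees so that +z at `src` points towards `dst`."""
    dx, dy, dz = dst[0] - src[0], dst[1] - src[1], dst[2] - src[2]
    rx = math.degrees(math.atan2(dy, math.hypot(dx, dz)))  # mirrors EXpX
    ry = 180 + math.degrees(math.atan2(dx, dz))            # mirrors EXpY
    return rx, ry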
| 46.44186 | 263 | 0.725588 | 288 | 1,997 | 5.03125 | 0.475694 | 0.060732 | 0.030366 | 0.045549 | 0.10766 | 0.078675 | 0.078675 | 0.078675 | 0.078675 | 0.078675 | 0 | 0.010837 | 0.168252 | 1,997 | 43 | 264 | 46.44186 | 0.861529 | 0.530796 | 0 | 0 | 0 | 0.133333 | 0.417143 | 0.336 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f5553f600d9e51ffdced6978931c7ede4d5b363d | 7,458 | py | Python | src/extract_features.py | AymericBebert/MusicLearning | 8fbc931330029baa8ae9cfcfa20c79e41b5eca8f | [ "MIT" ] | null | null | null | src/extract_features.py | AymericBebert/MusicLearning | 8fbc931330029baa8ae9cfcfa20c79e41b5eca8f | [ "MIT" ] | null | null | null | src/extract_features.py | AymericBebert/MusicLearning | 8fbc931330029baa8ae9cfcfa20c79e41b5eca8f | [ "MIT" ] | null | null | null |
#!/usr/bin/env python3
# -*-coding:utf-8-*-
"""
This module is used to extract features from the data
"""
import numpy as np
from scipy.fftpack import fft
from scipy.fftpack.realtransforms import dct
import python_speech_features
eps = 0.00000001
def file_length(soundParams):
"""Returns the file length, in seconds"""
return soundParams[3] / soundParams[2]
def zcr(frame):
"""Computes zero crossing rate of frame"""
count = len(frame)
countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2
return countZ / (count - 1)
def energy(frame):
"""Computes signal energy of frame"""
return np.sum(frame ** 2) / len(frame)
def energy_entropy(frame, numOfShortBlocks=10):
"""Computes entropy of energy"""
tfe = np.sum(frame ** 2) # total frame energy
L = len(frame)
subWinLength = int(np.floor(L / numOfShortBlocks))
if L != subWinLength * numOfShortBlocks:
frame = frame[0:subWinLength * numOfShortBlocks]
    # subWindows has shape [subWinLength x numOfShortBlocks]: one sub-window per column
subWindows = frame.reshape(subWinLength, numOfShortBlocks, order='F').copy()
# Compute normalized sub-frame energies:
s = np.sum(subWindows ** 2, axis=0) / (tfe + eps)
# Compute entropy of the normalized sub-frame energies:
entropy = -1 * np.sum(s * np.log2(s + eps))
return entropy
def spectral_centroid_and_spread(X, fs):
"""Computes spectral centroid of frame (given abs(FFT))"""
ind = (np.arange(1, len(X) + 1)) * (fs/(2.0 * len(X)))
Xt = X.copy()
Xt = Xt / Xt.max()
NUM = np.sum(ind * Xt)
DEN = np.sum(Xt) + eps
C = (NUM / DEN) # Centroid
S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN) # Spread
# Normalize:
C = C / (fs / 2.0)
S = S / (fs / 2.0)
return (C, S)
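# Added sanity check (not in the original module): a spectrum with all its
# energy in one bin must have its centroid at that bin's frequency and a
# near-zero spread.
def _demo_centroid():
    fs = 8000
    X = np.zeros(100)
    X[49] = 1.0  # ind[49] = 50 * fs / (2 * 100) = 2000 Hz
    C, S = spectral_centroid_and_spread(X, fs)
    assert abs(C - 0.5) < 1e-6  # 2000 Hz normalised by fs/2 = 4000 Hz
    assert S < 1e-3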
def avg_mfcc(sound_obj, avg=True):
"""Extract the MFCC from the sound object"""
soundD = sound_obj["sound"] # raw data
sr = sound_obj["params"][2] # samplerate
# nf = sound_obj["params"][3] # nframes
all_mfcc = python_speech_features.mfcc(soundD, samplerate=sr, winlen=0.025, winstep=1)
if avg:
return np.mean(all_mfcc, axis=0)
return all_mfcc
def mfcc_init_filter_banks(fs, nfft):
"""Computes the triangular filterbank for MFCC computation"""
# filter bank params:
lowfreq = 133.33
linsc = 200/3.
logsc = 1.0711703
numLinFiltTotal = 13
numLogFilt = 27
# Total number of filters
nFiltTotal = numLinFiltTotal + numLogFilt
# Compute frequency points of the triangle:
freqs = np.zeros(nFiltTotal+2)
freqs[:numLinFiltTotal] = lowfreq + np.arange(numLinFiltTotal) * linsc
freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** np.arange(1, numLogFilt + 3)
heights = 2./(freqs[2:] - freqs[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = np.zeros((nFiltTotal, nfft))
nfreqs = np.arange(nfft) / (1. * nfft) * fs
for i in range(nFiltTotal):
lowTrFreq = freqs[i]
cenTrFreq = freqs[i+1]
highTrFreq = freqs[i+2]
        # dtype=int: the np.int alias was removed in NumPy 1.24
        lid = np.arange(np.floor(lowTrFreq * nfft / fs) + 1, np.floor(cenTrFreq * nfft / fs) + 1, dtype=int)
        lslope = heights[i] / (cenTrFreq - lowTrFreq)
        rid = np.arange(np.floor(cenTrFreq * nfft / fs) + 1, np.floor(highTrFreq * nfft / fs) + 1, dtype=int)
rslope = heights[i] / (highTrFreq - cenTrFreq)
fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
return fbank, freqs
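# Added illustration: the bank holds 13 linear + 27 logarithmic = 40
# triangular filters, each one a row over the nfft frequency bins.
def _demo_fbank():
    fbank, freqs = mfcc_init_filter_banks(fs=16000, nfft=512)
    assert fbank.shape == (40, 512)
    assert len(freqs) == 42  # 40 triangles need 42 edge frequencies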
def mfcc(X, fbank, nceps=13):
"""Computes the MFCCs of a frame, given the fft mag"""
mspec = np.log10(np.dot(X, fbank.T)+eps)
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:nceps]
return ceps
def extract_all_features0(sound_obj):
"""Extract the features from the sound object"""
# fl = file_length(sound_obj["params"])
test_mfcc_avg = avg_mfcc(sound_obj)
# return np.concatenate(([fl], test_mfcc_avg))
return test_mfcc_avg
def features_labels0():
"""Give a name to each feature"""
return ["mfcc{}".format(i) for i in range(13)]
def extract_all_features(sound_obj, wins=None, steps=None):
"""Extract the features from the sound object"""
sr = sound_obj["params"][2] # samplerate
nbs = sound_obj["params"][3] # number of samples
if wins is None:
wins = int(0.050 * sr)
if steps is None:
steps = int(nbs/15 - wins)
# Signal normalization
signal = sound_obj["sound"]
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (np.abs(signal)).max()
signal = (signal - DC) / (MAX + 0.0000000001)
N = len(signal) # total number of samples
curPos = steps // 2 # skip the very beginning
nFFT = wins // 2
# compute the triangular filter banks used in the mfcc calculation
#[fbank, _] = mfcc_init_filter_banks(sr, nFFT)
totalNumOfFeatures = 5 + 13
stFeatures = []
while curPos + wins - 1 < N: # for each short-term window until the end of signal
x = signal[curPos:curPos+wins] # get current window
curPos = curPos + steps # update window position
X = abs(fft(x)) # get fft magnitude
X = X[0:nFFT] # normalize fft
X = X / len(X)
curFV = np.zeros(totalNumOfFeatures)
curFV[0] = zcr(x) # zero crossing rate
curFV[1] = energy(x) # short-term energy
curFV[2] = energy_entropy(x) # short-term entropy of energy
[curFV[3], curFV[4]] = spectral_centroid_and_spread(X, sr) # spectral centroid and spread
# curFV[5] = stSpectralEntropy(X) # spectral entropy
# curFV[6] = stSpectralFlux(X, Xprev) # spectral flux
# curFV[7] = stSpectralRollOff(X, 0.90, sr) # spectral rolloff
# curFV[numOfTimeSpectralFeatures:numOfTimeSpectralFeatures+nceps, 0] = stMFCC(X, fbank, nceps).copy() # MFCCs
#
# chromaNames, chromaF = stChromaFeatures(X, sr, nChroma, nFreqsPerChroma)
# curFV[numOfTimeSpectralFeatures + nceps: numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF
# curFV[numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF.std()
#curFV[5:18] = mfcc(X, fbank, 13)
#curFV[0:13] = mfcc(X, fbank, 13)
curFV[5:18] = python_speech_features.mfcc(x, samplerate=sr, winlen=wins/sr, winstep=steps/sr)
# TEMP
#curFV = python_speech_features.mfcc(signal, samplerate=sr, winlen=wins, winstep=steps).T
stFeatures.append(curFV)
# stFeatures = np.array(stFeatures)
stFeatures = np.concatenate(stFeatures, 0).flatten()
#stFeatures = np.mean(stFeatures, axis=0)
# stFeatures = python_speech_features.mfcc(signal, samplerate=sr, winlen=wins/sr, winstep=steps/sr)
# stFeatures = np.mean(stFeatures, axis=0)
return stFeatures
# sound_obj2 = sound_obj.copy()
# sound_obj2["sound"] = signal
#
# # fl = file_length(sound_obj["params"])
# test_mfcc_avg = avg_mfcc(sound_obj2)
# # return np.concatenate(([fl], test_mfcc_avg))
# return test_mfcc_avg
def features_labels():
"""Give a name to each feature"""
return ["zrc", "energy", "en_ent", "centr", "spread"] + ["mfcc{}".format(i) for i in range(13)]
| 34.850467 | 121 | 0.616519 | 972 | 7,458 | 4.659465 | 0.248971 | 0.022963 | 0.018547 | 0.021197 | 0.210863 | 0.163612 | 0.121881 | 0.093619 | 0.071097 | 0.048134 | 0 | 0.027274 | 0.252749 | 7,458 | 213 | 122 | 35.014085 | 0.785394 | 0.349021 | 0 | 0.018519 | 0 | 0 | 0.015241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.037037 | 0 | 0.268519 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f5592d87345b5a481da2afaed4ea4665c57dc09d | 2,435 | py | Python | tools/blender/io_export_curve.py | waskosky/patches | f80a33eb6fd029b905aca55894ec7a7526b89042 | [ "MIT" ] | 187 | 2015-09-21T15:08:57.000Z | 2017-07-31T08:01:22.000Z | tools/blender/io_export_curve.py | waskosky/patches | f80a33eb6fd029b905aca55894ec7a7526b89042 | [ "MIT" ] | 1,533 | 2015-09-15T23:49:33.000Z | 2017-08-01T08:52:00.000Z | tools/blender/io_export_curve.py | waskosky/patches | f80a33eb6fd029b905aca55894ec7a7526b89042 | [ "MIT" ] | 52 | 2015-10-11T10:42:50.000Z | 2017-07-16T22:31:42.000Z |
# Part of the Engi-WebGL suite.
from bpy.props import *
from bpy_extras.io_utils import ExportHelper
from mathutils import *
from functools import reduce
import os, sys, os.path, bpy, bmesh, math, struct, base64, itertools
bl_info = {
'name': 'Curve Export (.json)',
'author': 'Lasse Nielsen',
'version': (0, 2),
'blender': (2, 72, 0),
'location': 'File > Export > Curve (.json)',
'description': 'Curve Export (.json)',
'category': 'Import-Export'
}
# Compress number representation to save as much space as possible.
def cnr(n):
s = '%.4f' % n
while s[-1] == '0':
s = s[:-1]
if s[-1] == '.':
s = s[:-1]
return s
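# Added sanity check (not part of the original exporter): cnr() trims the
# fixed 4-decimal rendering down to its shortest equivalent form.
def _demo_cnr():
    assert cnr(1.5) == '1.5'         # trailing zeros dropped
    assert cnr(2.0) == '2'           # the bare decimal point goes too
    assert cnr(3.14159) == '3.1416'  # still rounded to 4 decimals first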
def format_stream(ident, id, s):
return '%s%s: [%s]' % (ident, id, ','.join(map(cnr, s)))
class EngiCurveExporter(bpy.types.Operator, ExportHelper):
bl_idname = 'curve.json'
bl_label = 'Export Curve (.json)'
bl_options = {'PRESET'}
filename_ext = ".json"
filter_glob = StringProperty(default="*.json", options={'HIDDEN'})
#filepath = StringProperty()
filename = StringProperty()
directory = StringProperty()
# Black Magic...
check_extension = True
def execute(self, context):
filename = os.path.splitext(self.filename)[0]
filename = filename + '.json'
# Check for a valid selection. We expect a single object of type 'CURVE'.
if bpy.context.active_object.type != 'CURVE':
print('The current selection is invalid. Please select a single curve to export.')
return {'FINISHED'}
spline = bpy.context.active_object.data.splines[0]
points = spline.points
json = '{\n'
json += '\t"count": ' + str(len(points)) + ',\n'
x_stream = []
y_stream = []
z_stream = []
for point in points:
x_stream.append(point.co[0])
y_stream.append(point.co[1])
z_stream.append(point.co[2])
json += format_stream('\t', '"x"', x_stream) + ',\n'
json += format_stream('\t', '"y"', y_stream) + ',\n'
json += format_stream('\t', '"z"', z_stream) + '\n'
json += '}'
with open(self.directory + filename, 'w') as out:
out.write(json)
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(EngiCurveExporter.bl_idname, text="Curve (.json)")
def register():
bpy.utils.register_class(EngiCurveExporter)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_class(EngiCurveExporter)
bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == '__main__':
register()
| 24.59596 | 85 | 0.657906 | 334 | 2,435 | 4.664671 | 0.416168 | 0.023107 | 0.048139 | 0.057766 | 0.089859 | 0.089859 | 0.05905 | 0.05905 | 0 | 0 | 0 | 0.009425 | 0.172074 | 2,435 | 98 | 86 | 24.846939 | 0.763393 | 0.085832 | 0 | 0.060606 | 0 | 0 | 0.17027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0.015152 | 0.378788 | 0.015152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f55a03a501c8713245dc76b3760e3ffdd100d23e | 1,857 | py | Python | third_party/conan/recipes/libprotobuf-mutator/conanfile.py | tufeigunchu/orbit | 407354cf7c9159ff7e3177c603a6850b95509e3a | [ "BSD-2-Clause" ] | 1,847 | 2020-03-24T19:01:42.000Z | 2022-03-31T13:18:57.000Z | third_party/conan/recipes/libprotobuf-mutator/conanfile.py | tufeigunchu/orbit | 407354cf7c9159ff7e3177c603a6850b95509e3a | [ "BSD-2-Clause" ] | 1,100 | 2020-03-24T19:41:13.000Z | 2022-03-31T14:27:09.000Z | third_party/conan/recipes/libprotobuf-mutator/conanfile.py | tufeigunchu/orbit | 407354cf7c9159ff7e3177c603a6850b95509e3a | [ "BSD-2-Clause" ] | 228 | 2020-03-25T05:32:08.000Z | 2022-03-31T11:27:39.000Z |
from conans import ConanFile, CMake, tools
class LibprotobufMutatorConan(ConanFile):
name = "libprotobuf-mutator"
version = "20200506"
license = "Apache-2.0"
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
exports_sources = "patches/*",
build_requires = "protoc_installer/3.9.1@bincrafters/stable",
options = { "fPIC" : [True, False] }
default_options = { "fPIC" : True }
short_paths = True
def configure(self):
if self.settings.os == "Windows":
del self.options.fPIC
def source(self):
tools.get(**self.conan_data["sources"][self.version])
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
def requirements(self):
self.requires("lzma_sdk/19.00@orbitdeps/stable")
self.requires("zlib/1.2.11")
self.requires("protobuf/3.9.1@bincrafters/stable")
def build(self):
self._source_subfolder = self.conan_data["source_subfolder"][self.version]
cmake = CMake(self)
cmake.definitions["LIB_PROTO_MUTATOR_TESTING"] = False
cmake.definitions["CMAKE_CXX_FLAGS"] = "-fPIE"
cmake.definitions["CMAKE_C_FLAGS"] = "-fPIE"
cmake.configure(source_folder=self._source_subfolder)
cmake.build()
def package(self):
self.copy("*.h", dst="include",
src="{}/src".format(self._source_subfolder))
self.copy("*.h", dst="include/port",
src="{}/port".format(self._source_subfolder))
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.pdb", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libdirs = ["lib"]
self.cpp_info.libs = ["protobuf-mutator-libfuzzer", "protobuf-mutator"]
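# Hedged usage note (added): a recipe like this is typically exported and
# built with the Conan 1.x client, e.g.
#   conan create . orbitdeps/stable
# (the orbitdeps/stable user/channel is an assumption, mirroring the
# lzma_sdk/19.00@orbitdeps/stable reference above)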
| 36.411765 | 82 | 0.622509 | 221 | 1,857 | 5.081448 | 0.411765 | 0.066785 | 0.067676 | 0.061443 | 0.186109 | 0.106857 | 0.048085 | 0 | 0 | 0 | 0 | 0.016563 | 0.219709 | 1,857 | 50 | 83 | 37.14 | 0.758454 | 0 | 0 | 0 | 0 | 0 | 0.217555 | 0.084006 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.02381 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f55c8c9f40e1cf4319ff4ee1c9422d7c3883f725 | 524 | py | Python | animation/common.py | codyly/locomotion-by-mann | 89139466829ef7802bf645f865e335d4cda444e4 | [ "MIT" ] | null | null | null | animation/common.py | codyly/locomotion-by-mann | 89139466829ef7802bf645f865e335d4cda444e4 | [ "MIT" ] | null | null | null | animation/common.py | codyly/locomotion-by-mann | 89139466829ef7802bf645f865e335d4cda444e4 | [ "MIT" ] | null | null | null |
import numpy as np
VEC_FORWARD = np.array([0, 0, 1])
VEC_UP = np.array([0, 1, 0])
VEC_RIGHT = np.array([1, 0, 0])
STYLE_NOMOVE = np.array([1, 0, 0, 0, 0, 0])
STYLE_TROT = np.array([0, 1, 0, 0, 0, 0])
STYLE_JUMP = np.array([0, 0, 1, 0, 0, 0])
STYLE_SIT = np.array([0, 0, 0, 1, 0, 0])
STYLE_STAND = np.array([0, 0, 0, 0, 1, 0])
STYLE_LAY = np.array([0, 0, 0, 0, 0, 1])
NUM_STYLES = 6
SYS_FREQ = 60
DURATION = 9
NUM_QUERIES = SYS_FREQ * DURATION
MOCAP_SAMPLE_PATH = "animation/data/mocap-sample.txt"
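# Added sketch: the STYLE_* vectors are one-hot rows, so a full 9-second
# query at 60 Hz can be assembled by stacking NUM_QUERIES copies of a style.
def _demo_style_buffer():
    buf = np.tile(STYLE_TROT, (NUM_QUERIES, 1))
    assert buf.shape == (NUM_QUERIES, NUM_STYLES)  # (540, 6)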
| 23.818182 | 54 | 0.593511 | 106 | 524 | 2.792453 | 0.311321 | 0.148649 | 0.121622 | 0.081081 | 0.398649 | 0.074324 | 0 | 0 | 0 | 0 | 0 | 0.118644 | 0.211832 | 524 | 21 | 55 | 24.952381 | 0.598063 | 0 | 0 | 0 | 0 | 0 | 0.06163 | 0.06163 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f55e3e29a41fea6104e2a766525f7a160ac34c13 | 5,900 | py | Python | Kinematic/forward.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | [ "MIT" ] | null | null | null | Kinematic/forward.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | [ "MIT" ] | null | null | null | Kinematic/forward.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | [ "MIT" ] | null | null | null |
import numpy as np
from Optimizer.path import get_x_substeps
from Kinematic import frames, chain as kc
def initialize_frames(shape, robot, mode='hm'):
return frames.initialize_frames(shape=shape + (robot.n_frames,), n_dim=robot.n_dim, mode=mode)
def initialize_frames_jac(shape, robot, mode='hm'):
f = initialize_frames(shape=shape, robot=robot, mode=mode)
j = frames.initialize_frames(shape=shape + (robot.n_dof, robot.n_frames), n_dim=robot.n_dim, mode='zero')
return f, j
# General
def get_frames(q, robot):
return robot.get_frames(q)
def get_frames_jac(*, q, robot):
return robot.get_frames_jacs(q=q)
def get_x_frames(*, q, robot):
return robot.get_frames(q=q)[..., :-1, -1]
def frames2pos(f, frame_idx, rel_pos):
return (f[:, :, frame_idx, :, :] @ rel_pos[:, :, np.newaxis])[..., :-1, 0]
def frames2spheres(f, robot):
"""
x_spheres (n_samples, n_wp, n_links, n_dim)
"""
return frames2pos(f, frame_idx=robot.spheres_frame_idx, rel_pos=robot.spheres_position)
def frames2spheres_jac(f, j, robot):
"""
x_spheres (n_samples, n_wp, n_spheres, n_dim)
dx_dq (n_samples, n_wp, n_dof, n_spheres, n_dim)
"""
x_spheres = frames2spheres(f=f, robot=robot)
dx_dq = (j[:, :, :, robot.spheres_frame_idx, :, :] @ robot.spheres_position[:, :, np.newaxis])[..., :-1, 0]
return x_spheres, dx_dq
def get_x_spheres(q, robot, return_frames2=False):
f = robot.get_frames(q=q)
x_spheres = frames2spheres(f=f, robot=robot)
if return_frames2:
return f, x_spheres
else:
return x_spheres
def get_x_spheres_jac(*, q, robot, return_frames2=False):
f, j = robot.get_frames_jac(q=q)
x_spheres, dx_dq = frames2spheres_jac(f=f, j=j, robot=robot)
if return_frames2:
return (f, j), (x_spheres, dx_dq)
else:
return x_spheres, dx_dq
def get_x_spheres_substeps(*, q, robot, n_substeps, return_frames2=False):
q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
return get_x_spheres(q=q_ss, robot=robot, return_frames2=return_frames2)
def get_x_spheres_substeps_jac(*, q, robot, n_substeps, return_frames2=False):
q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
return get_x_spheres_jac(q=q_ss, robot=robot, return_frames2=return_frames2)
def get_frames_substeps(*, q, robot, n_substeps):
q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
return get_frames(q=q_ss, robot=robot)
def get_frames_substeps_jac(*, q, robot, n_substeps):
q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
return get_frames_jac(q=q_ss, robot=robot)
# nfi - next frame index
# iff - influence frame frame
# Helper
# Combine fun
def create_frames_dict(f, nfi):
"""
Create a dict to minimize the calculation of unnecessary transformations between the frames
The value to the key 0 holds all transformations form the origin to the whole chain.
Each next field holds the transformation from the current frame to all frames to come.
The calculation happens from back to front, to save some steps
# 0 1 2 3 4
# F01
# F02 F12
# F03 F13 F23
# F04 F14 F24 F34
# F05 F15 F25 F35 F45
"""
n_frames = f.shape[-3]
d = {}
for i in range(n_frames - 1, -1, -1):
nfi_i = nfi[i]
if nfi_i == -1:
d[i] = f[..., i:i + 1, :, :]
elif isinstance(nfi_i, (list, tuple)):
d[i] = np.concatenate([
f[..., i:i + 1, :, :],
f[..., i:i + 1, :, :] @ np.concatenate([d[j] for j in nfi_i], axis=-3)],
axis=-3)
else:
d[i] = np.concatenate([f[..., i:i + 1, :, :],
f[..., i:i + 1, :, :] @ d[nfi_i]], axis=-3)
return d
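# Added sketch of the dictionary layout for a simple serial chain
# (frame 0 -> 1 -> 2) built from identity transforms; d[0] stacks the
# transforms from the origin to every frame, matching the docstring's table.
def _demo_frames_dict():
    f = np.tile(np.eye(4), (3, 1, 1))          # 3 frames, homogeneous 4x4
    d = create_frames_dict(f, nfi=[1, 2, -1])  # next-frame index per frame
    assert d[0].shape == (3, 4, 4)  # origin -> {F01, F02, F03}
    assert d[2].shape == (1, 4, 4)  # the last frame only maps itself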
def combine_frames(f, prev_frame_idx):
for i, pfi in enumerate(prev_frame_idx[1:], start=1):
f[..., i, :, :] = f[..., pfi, :, :] @ f[..., i, :, :]
def combine_frames_jac(j, d, robot):
jf_all, jf_first, jf_last = kc.__get_joint_frame_indices_first_last(jfi=robot.joint_frame_idx)
pfi_ = robot.prev_frame_idx[jf_first]
joints_ = np.arange(robot.n_dof)[pfi_ != -1]
jf_first_ = jf_first[pfi_ != -1]
pfi_ = pfi_[pfi_ != -1]
# Previous to joint frame
# j(b)__a_b = f__a_b * j__b
j[..., joints_, jf_first_, :, :] = (d[0][..., pfi_, :, :] @ j[..., joints_, jf_first_, :, :])
# After
for i in range(robot.n_dof):
jf_inf_i = robot.joint_frame_influence[i, :]
jf_inf_i[:jf_last[i] + 1] = False
nfi_i = robot.next_frame_idx[jf_last[i]]
# Handle joints which act on multiple frames
if jf_first[i] != jf_last[i]:
for kk, fj_cur in enumerate(jf_all[i][:-1]):
jf_next = jf_all[i][kk + 1]
jf_next1 = jf_next - 1
if jf_next - fj_cur > 1:
j[..., i, fj_cur + 1:jf_next, :, :] = (j[..., i, fj_cur:fj_cur + 1, :, :] @
d[robot.next_frame_idx[fj_cur]][..., :jf_next - fj_cur - 1, :, :])
j[..., i, jf_next, :, :] = ((j[..., i, jf_next1, :, :] @ d[robot.next_frame_idx[jf_next1]][..., 0, :, :]) +
(d[0][..., jf_next1, :, :] @ j[..., i, jf_next, :, :]))
# j(b)__a_c = j__a_b * f__b_c
if isinstance(nfi_i, (list, tuple)):
            # completed the trailing `:, :` slice (the original dropped one axis)
            # and renamed the comprehension variable so it cannot be confused with `j`
            j[..., i, jf_inf_i, :, :] = (j[..., i, jf_last[i]:jf_last[i] + 1, :, :] @
                                         np.concatenate([d[k] for k in nfi_i], axis=-3))
elif nfi_i != -1:
j[..., i, jf_inf_i, :, :] = (j[..., i, jf_last[i]:jf_last[i] + 1, :, :] @ d[nfi_i])
| 33.908046 | 132 | 0.597119 | 919 | 5,900 | 3.545158 | 0.162133 | 0.039288 | 0.01504 | 0.014733 | 0.499079 | 0.40884 | 0.372621 | 0.305095 | 0.26949 | 0.232044 | 0 | 0.021622 | 0.247458 | 5,900 | 173 | 133 | 34.104046 | 0.712162 | 0.134068 | 0 | 0.141304 | 0 | 0 | 0.001596 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.184783 | false | 0 | 0.032609 | 0.054348 | 0.402174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f560897ff46b99cf1a7890d1251f2fa26c8a2e3a | 977 | py | Python | dnslookup.py | r1nzler/dnslookup | 74613614b694602244582bfd555ffd8a5dea8bff | [ "MIT" ] | null | null | null | dnslookup.py | r1nzler/dnslookup | 74613614b694602244582bfd555ffd8a5dea8bff | [ "MIT" ] | null | null | null | dnslookup.py | r1nzler/dnslookup | 74613614b694602244582bfd555ffd8a5dea8bff | [ "MIT" ] | null | null | null |
import dns.resolver
import dns.ipv4
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', "--list", help="List of dns names you want IP's for")
parser.add_argument('-o', "--output", help="Output file to save list")
args = parser.parse_args()
ip_list = []  # was `[...]`, which creates a list containing Ellipsis
if args.list:
    subs = open(args.list, 'r', newline='')  # only open the file once a list was given
    for host in subs:
        host = host.strip()
        # str.strip() removes *characters*, not prefixes, so peel the URL
        # schemes off explicitly instead
        for prefix in ('https://', 'http://'):
            if host.startswith(prefix):
                host = host[len(prefix):]
# print(host)
try:
            i = dns.resolver.query(host, 'A')
#print(i.rrset.items[0])
for item in i:
                if item not in ip_list:
ip_list.append(item)
print(item)
        except Exception:
            # hosts that fail to resolve are skipped
            pass
if args.output:
file = open(args.output, "w")
for p in ip_list:
file.write(str(p))
file.write("\n")
file.close()
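# Added usage sketch (hedged): resolve every host in a newline-separated
# list and write the unique IPs to a file, e.g.
#   python3 dnslookup.py --list subdomains.txt --output ips.txt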
| 27.914286 | 79 | 0.518936 | 126 | 977 | 3.968254 | 0.452381 | 0.048 | 0.078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003096 | 0.338792 | 977 | 34 | 80 | 28.735294 | 0.770898 | 0.0348 | 0 | 0 | 0 | 0 | 0.105319 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.107143 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f5609c24bd958aa1dc8093dff8643942d2269130 | 8,416 | py | Python | eval/report.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | [ "MIT" ] | 1 | 2021-03-03T06:52:50.000Z | 2021-03-03T06:52:50.000Z | eval/report.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | [ "MIT" ] | 1 | 2021-03-05T09:36:50.000Z | 2021-03-08T12:02:53.000Z | eval/report.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | [ "MIT" ] | 1 | 2021-03-03T06:57:02.000Z | 2021-03-03T06:57:02.000Z |
import pandas
import numpy as np
import math
import os
import sys
import re
from utils import *
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
percentiles = [ 10, 25, 50, 75, 90, 95, 99, 99.9 ]
DATA_FOLDER = DIR_PATH + '/data'
def getResult(trial_string, thread, client_num=2):
print("thread: {}".format(thread))
datas = []
end_times = []
for i in range(1, client_num+1):
first_start_time = math.inf
last_start_time = 0
first_end_time = math.inf
last_end_time = 0
trial_name = DIR_PATH + '/./trials/{}-{}-{}'.format(i, trial_string, thread)
lats_folder = trial_name + '/cobra/lats'
if not os.path.exists(lats_folder):
continue
files = os.listdir(lats_folder)
for fname in files:
fpath = lats_folder + '/' + fname
data = pandas.read_csv(fpath, sep=' ').values
start_time = np.min(data[:, 0])
end_time = np.max(data[:, 1])
first_start_time = min(first_start_time, start_time)
last_start_time = max(last_start_time, start_time)
first_end_time = min(first_end_time, end_time)
last_end_time = max(last_end_time, end_time)
end_times.append(first_end_time - first_start_time)
data -= first_start_time
datas.append(data)
print("{}: start time gap: {}, end time gap: {}".format(i, (last_start_time - first_start_time) / 1e9,
(last_end_time - first_end_time) / 1e9))
print("total end time gap of all clients: {}s".format((max(end_times)-min(end_times))/1e9))
    count_start = 0
    count_end = min(end_times)  # the original subtracted a literal 0 here
    count_time = count_end - count_start
print("total time: {}s".format((last_end_time - first_start_time)/1e9))
print("counted time: {}s".format(count_time/1e9))
res = []
res.append(thread)
lats = []
before_trimming = 0
for data in datas:
before_trimming += data.shape[0]
data = data[np.where(data[:,1] > count_start)]
data = data[np.where(data[:,1] < count_end)]
lats += (data[:,1]-data[:,0]).tolist()
print("Data size before trimming: {}, after trimming: {}".format(before_trimming, len(lats)))
tps = len(lats)/count_time*1e9
res.append(tps)
print('TPS: {}'.format(tps))
lats = np.array(lats)
lats.sort()
print('Latencies:')
for per in percentiles:
latency_value = np.percentile(lats, per)/1e6
print('{}%(ms) : {}'.format(per, latency_value))
res.append(latency_value)
# plt.hist(lats[:-int(0.001*len(lats))], bins="auto")
# plt.show()
return res
def get_report(trial_string, client_num):
thread_tps_lats = []
threads = {}
dir_names = os.listdir('trials')
for s in dir_names:
if '-'+trial_string+'-' in s:
threads[int(s.split('-')[-1])] = True
if len(threads.keys()) == 0:
return
for thread in sorted(threads.keys()):
res = getResult(trial_string, thread, client_num)
thread_tps_lats.append(res)
df = pandas.DataFrame(thread_tps_lats)
if not os.path.exists(DATA_FOLDER):
os.makedirs(DATA_FOLDER)
fname = DATA_FOLDER + '/{}.data'.format(trial_string)
df.to_csv(fname, sep=' ', header=['#thread', 'tps']+percentiles, index=False, float_format="%.5f")
printG("FINISHED: " + trial_string)
def get_network_old(fname):
net_thpt_rx = []
net_thpt_tx = []
with open(fname) as f:
for sline in f:
line = sline.split()
net_thpt_tx.append(float(line[1]))
net_thpt_rx.append(float(line[2]))
net_thpt_rx = np.array(net_thpt_rx)
net_thpt_tx = np.array(net_thpt_tx)
net_thpt_rx.sort()
net_thpt_tx.sort()
# print('receive peak: {}, send peak: {}'.format(net_thpt_rx[-1], net_thpt_tx[-1]))
    # average the top 10% of samples; the original `int(len(...) * 100 / 30)`
    # exceeded the array length, and the `-1` end bound dropped the peak sample
    top10p = max(1, len(net_thpt_rx) // 10)
    avg_rx = net_thpt_rx[-top10p:].mean()
    avg_tx = net_thpt_tx[-top10p:].mean()
# print('avg of top 10% rx: {}'.format(avg_rx))
# print('avg of top 10% tx: {}'.format(avg_tx))
return avg_rx, avg_tx
def get_num_op(trial_string):
threads = {}
dir_names = os.listdir('trials')
for s in dir_names:
if trial_string in s:
threads[int(s.split('-')[-1])] = True
if len(threads.keys()) == 0:
printB('not found: ' + trial_string)
return
thread = 24
trial_name = DIR_PATH + '/./trials/{}-{}-{}/client.txt'.format(1, trial_string, thread)
result = ''
with open(trial_name) as f:
for line in f:
if re.search(r'NumOp: [0-9]+', line):
result = line
break
result = result.split()[1]
return result
def get_network(fname):
lines = []
with open(fname) as f:
for sline in f:
line = sline.split()
lines.append(line)
rx = int(lines[2][4]) - int(lines[0][4])
tx = int(lines[3][4]) - int(lines[1][4])
return (rx, tx)
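# Added illustration (hedged): get_network reads four whitespace-separated
# rows whose fifth column looks like a cumulative byte counter -- rx before,
# tx before, rx after, tx after -- and reports the two deltas.
def _demo_get_network(tmp_path='/tmp/netstats-demo.log'):
    with open(tmp_path, 'w') as f:
        f.write('eth0 a b c 1000\n'   # rx counter at start
                'eth0 a b c 2000\n'   # tx counter at start
                'eth0 a b c 1500\n'   # rx counter at end
                'eth0 a b c 2600\n')  # tx counter at end
    assert get_network(tmp_path) == (500, 600)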
def get_trace_size(trial_string):
threads = {}
dir_names = os.listdir('trials')
for s in dir_names:
if trial_string in s:
threads[int(s.split('-')[-1])] = True
if len(threads.keys()) == 0:
printB('not found: ' + trial_string)
return
thread = 24
trial_name = DIR_PATH + '/./trials/{}-{}-{}/client.txt'.format(1, trial_string, thread)
result = ''
with open(trial_name) as f:
for line in f:
if re.search(r'SizeOfTrace: [0-9]+', line):
result = line
break
result = result.split()[1]
return result
def main():
if len(sys.argv) == 1:
databases = ['rocksdb', 'postgres', 'google']
workload = 'cheng'
inst_level = 'cloud'
for database in databases:
for contention in ['low', 'high']:
for workload in ['cheng', 'tpcc', 'twitter', 'ycsb', 'rubis']:
for inst_level in ['no', 'ww', 'cloud', 'cloudnovnofz', 'cloudnofz', 'local']:
trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
get_report(trial_string, 10 if database == 'postgres' else 1)
elif sys.argv[1] == 'net':
database = 'postgres'
workloads = ['cheng', 'ycsb', 'twitter', 'rubis', 'tpcc']
inst_levels = ['no', 'local']
result_str = 'workload ' + ' '.join(inst_levels) + '\n'
for contention in ['low']:
for workload in workloads:
result_row = workload
for inst_level in inst_levels:
trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
thread = 24
rx, tx = get_network('netstats/netstats-'+trial_string + '-{}.log'.format(thread))
print('{}-{}: {}, {}'.format(workload, inst_level, rx, tx))
result_row += ' {}'.format(tx)
result_str += result_row + '\n'
print(result_str)
return
    elif sys.argv[1] == 'numop':
        database = 'rocksdb'  # assumed default: the original referenced `database` without defining it in this branch
        inst_levels = ['no', 'cloud', 'ww']
result_str = 'workload ' + ' '.join(inst_levels) + '\n'
for contention in ['low']:
for workload in ['cheng', 'tpcc', 'twitter', 'ycsb', 'rubis']:
result_str += workload
for inst_level in inst_levels:
trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
numop = get_num_op(trial_string)
result_str += ' {}'.format(numop)
result_str += '\n'
print(result_str)
elif sys.argv[1] == 'tracesize':
database = 'rocksdb'
inst_levels = ['cloud', 'ww']
result_str = 'workload ' + ' '.join(inst_levels) + '\n'
for contention in ['low']:
for workload in ['cheng', 'ycsb', 'twitter', 'rubis', 'tpcc']:
result_str += workload
for inst_level in inst_levels:
trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
numop = get_trace_size(trial_string)
result_str += ' {}'.format(numop)
result_str += '\n'
print(result_str)
if __name__ == "__main__":
main()
| 33.52988 | 110 | 0.557153 | 1,067 | 8,416 | 4.182755 | 0.182755 | 0.056688 | 0.016133 | 0.016133 | 0.434013 | 0.38203 | 0.348868 | 0.337665 | 0.313018 | 0.313018 | 0 | 0.017935 | 0.291112 | 8,416 | 250 | 111 | 33.664 | 0.730137 | 0.028042 | 0 | 0.358209 | 0 | 0 | 0.096403 | 0.007096 | 0.014925 | 0 | 0 | 0 | 0 | 1 | 0.034826 | false | 0 | 0.034826 | 0 | 0.114428 | 0.079602 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f560efe52fd0d8fc1e6638e6bf52578a71fd2927 | 1,821 | py | Python | platypush/backend/foursquare.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [ "MIT" ] | 228 | 2018-01-30T11:17:09.000Z | 2022-03-24T11:22:26.000Z | platypush/backend/foursquare.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [ "MIT" ] | 167 | 2017-12-11T19:35:38.000Z | 2022-03-27T14:45:30.000Z | platypush/backend/foursquare/__init__.py | BlackLight/runbullet | 8d26c8634d2677b4402f0a21b9ab8244b44640db | [ "MIT" ] | 16 | 2018-05-03T07:31:56.000Z | 2021-12-05T19:27:37.000Z |
from typing import Optional
from platypush.backend import Backend
from platypush.context import get_plugin
from platypush.message.event.foursquare import FoursquareCheckinEvent
class FoursquareBackend(Backend):
"""
This backend polls for new check-ins on the user's Foursquare account and triggers an event when a new check-in
occurs.
Requires:
* The :class:`platypush.plugins.foursquare.FoursquarePlugin` plugin configured and enabled.
Triggers:
- :class:`platypush.message.event.foursquare.FoursquareCheckinEvent` when a new check-in occurs.
"""
_last_created_at_varname = '_foursquare_checkin_last_created_at'
def __init__(self, poll_seconds: Optional[float] = 60.0, *args, **kwargs):
"""
:param poll_seconds: How often the backend should check for new check-ins (default: one minute).
"""
super().__init__(*args, poll_seconds=poll_seconds, **kwargs)
self._last_created_at = None
def __enter__(self):
self._last_created_at = int(get_plugin('variable').get(self._last_created_at_varname).
output.get(self._last_created_at_varname) or 0)
self.logger.info('Started Foursquare backend')
def loop(self):
checkins = get_plugin('foursquare').get_checkins().output
if not checkins:
return
last_checkin = checkins[0]
last_checkin_created_at = last_checkin.get('createdAt', 0)
if self._last_created_at and last_checkin_created_at <= self._last_created_at:
return
self.bus.post(FoursquareCheckinEvent(checkin=last_checkin))
self._last_created_at = last_checkin_created_at
get_plugin('variable').set(**{self._last_created_at_varname: self._last_created_at})
# vim:sw=4:ts=4:et:
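# Hedged configuration sketch (added): a backend like this is normally
# enabled from the platypush config.yaml along these lines --
#
#   backend.foursquare:
#       poll_seconds: 60
#
# (the exact key naming is an assumption based on the module name above)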
| 34.358491 | 115 | 0.697968 | 229 | 1,821 | 5.235808 | 0.366812 | 0.105088 | 0.119266 | 0.127606 | 0.100083 | 0.080067 | 0 | 0 | 0 | 0 | 0 | 0.005563 | 0.210324 | 1,821 | 52 | 116 | 35.019231 | 0.828234 | 0.250412 | 0 | 0.083333 | 0 | 0 | 0.073563 | 0.02682 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.166667 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f560ffe95556ccc11b3d6d39837b76f47f81ba08 | 2,980 | py | Python | src/data/make_dataset.py | karsti11/caffe_bar_sales_analysis | f7001bbf2d09c1ceeb8aef35322652a8495949ed | [ "MIT" ] | null | null | null | src/data/make_dataset.py | karsti11/caffe_bar_sales_analysis | f7001bbf2d09c1ceeb8aef35322652a8495949ed | [ "MIT" ] | null | null | null | src/data/make_dataset.py | karsti11/caffe_bar_sales_analysis | f7001bbf2d09c1ceeb8aef35322652a8495949ed | [ "MIT" ] | null | null | null |
import os
import time
import pandas as pd
from src.utils import get_project_root
from src.data.item_names_replacement import REPLACE_DICT1  # the original imported REPLACE_DICT1 twice
YEARS = [str(x) for x in list(range(2013,2021))]
ROOT_DIR = get_project_root()
def string_to_float(number):
#Custom function for converting 'sales_value' column to float
#because of faulty data. 28 rows have eg. '400.200.000.000.000.000'
    try:
        return float(number)
    except (ValueError, TypeError):  # narrowed from a bare except
        return 0.5
def load_data(data_abs_path: str) -> pd.DataFrame:
"""Load raw data
Parameters:
-----------
data_abs_path: absolute path of csv data
Returns:
--------
data_df: raw data dataframe
"""
data_df = pd.read_csv(data_abs_path)
data_df.sales_datetime = pd.to_datetime(data_df.sales_datetime, format='%Y-%m-%d', utc=True)
data_df.set_index('sales_datetime', inplace=True)
return data_df
def arrange_data(data_df):
# Drop unnecessary columns -> no known meaning
data_df.drop(labels=[4,10,11], axis=1, inplace=True)
data_df.columns = ['bar_name', 'number2', 'feature1', 'sales_datetime', 'feature2',
'item_name', 'item_class', 'sales_qty', 'feature3', 'sales_value']
#data_df.sales_value=data_df.sales_value.apply(lambda x: string_to_float(x))
data_df.sales_datetime = pd.to_datetime(data_df.sales_datetime, utc=True)
data_df.set_index('sales_datetime', inplace=True)
data_df['item_price'] = abs(data_df['sales_value']/data_df['sales_qty'])
return data_df
def load_dataset():
columns_to_keep = ['item_name', 'sales_qty', 'sales_value', 'item_price']
all_data_df = pd.DataFrame(columns = columns_to_keep)
for year in YEARS:
start_time = time.time()
filename = os.path.join(ROOT_DIR, f'data/raw/{year}_eKasa_RECEIPT_ENTRIES.csv')
df = pd.read_csv(filename,
delimiter=';',
header=None,
converters={12: string_to_float})
data_df = arrange_data(df)
        all_data_df = pd.concat([all_data_df, data_df[columns_to_keep]])  # DataFrame.append was removed in pandas 2.0
print("Dataframe shape: ",df.shape)
#print("Dataframe head: ",df.head())
end_time = time.time()
print("Time (s): ", end_time-start_time)
print(f"{year} done.")
all_data_df.sales_qty = all_data_df.sales_qty.astype('int64')
    all_data_df.item_name.replace(to_replace=REPLACE_DICT1, inplace=True)  # the original applied this same mapping twice in a row
all_data_df.index.name = 'sales_date'
all_data_daily_sales = all_data_df.groupby(['item_name', pd.Grouper(freq='D')]).agg({'sales_qty':'sum',
'item_price': 'mean',
'sales_value': 'sum'}).reset_index()
print(all_data_daily_sales)
return all_data_daily_sales
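# Added mini-example (hedged) of the daily aggregation performed above:
# pd.Grouper(freq='D') buckets each item's rows per calendar day, summing
# quantities and values while averaging the price.
def _demo_daily_grouping():
    idx = pd.to_datetime(['2020-01-01 09:00', '2020-01-01 17:00',
                          '2020-01-02 10:00'], utc=True)
    df = pd.DataFrame({'item_name': ['coffee'] * 3,
                       'sales_qty': [2, 3, 1],
                       'item_price': [1.0, 1.0, 1.2],
                       'sales_value': [2.0, 3.0, 1.2]}, index=idx)
    daily = df.groupby(['item_name', pd.Grouper(freq='D')]).agg(
        {'sales_qty': 'sum', 'item_price': 'mean', 'sales_value': 'sum'}).reset_index()
    assert daily['sales_qty'].tolist() == [5, 1]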
| 40.27027 | 125 | 0.632215 | 412 | 2,980 | 4.262136 | 0.322816 | 0.102506 | 0.062642 | 0.04328 | 0.249431 | 0.216401 | 0.207859 | 0.177107 | 0.177107 | 0.177107 | 0 | 0.021343 | 0.245302 | 2,980 | 74 | 126 | 40.27027 | 0.759449 | 0.137248 | 0 | 0.12 | 0 | 0 | 0.13568 | 0.016218 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.1 | 0 | 0.28 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f565e620ce2b4fec57d532c3907bb966211865f1 | 5,858 | py | Python | hard-gists/5181631/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [ "Apache-2.0" ] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/5181631/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [ "Apache-2.0" ] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/5181631/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [ "Apache-2.0" ] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z |
import os, time, random
from collections import defaultdict
from System import Console, ConsoleColor, ConsoleKey
from System.Threading import Thread, ThreadStart
class Screen(object):
red = ConsoleColor.Red; green = ConsoleColor.Green; blue = ConsoleColor.Blue;black = ConsoleColor.Black
dimension = (21,39)
def __update_input(self):
mapping = defaultdict(lambda: None,
{ConsoleKey.A:Snake.left,ConsoleKey.J:Snake.left, ConsoleKey.LeftArrow:Snake.left,
ConsoleKey.D:Snake.right,ConsoleKey.L:Snake.right,ConsoleKey.RightArrow:Snake.right,
ConsoleKey.W:Snake.up,ConsoleKey.I:Snake.up,ConsoleKey.UpArrow:Snake.up,
ConsoleKey.S:Snake.down,ConsoleKey.K:Snake.down,ConsoleKey.DownArrow:Snake.down})
while True: self.last_input = mapping[Console.ReadKey(True).Key]
def __init__(self):
self.last_input = None; self.__input_update_thread = Thread(ThreadStart(self.__update_input)); self.__input_update_thread.Start()
os.system("cls") # os.system("clear")
Console.Title = "Snake by LuYU426"
# The next line needed to be commented out on Unix-like systems. However before running, the console needs to be adjusted accordingly
Console.CursorVisible = False; Console.WindowWidth = 80; Console.WindowHeight = 25;Console.BufferHeight = Console.WindowHeight; Console.BufferWidth = Console.WindowWidth
for i in range(0,24):
for j in range(0, 80):
if i == 0 or j == 0: self.__show(j, i, Screen.black, "#")
elif i == 22 or j == 79: self.__show(j, i, Screen.black,"#")
else: self.__show(j, i, Screen.black," ")
def __show(self,left,top,color,content): Console.CursorLeft = left; Console.CursorTop = top; Console.BackgroundColor = color; Console.Write(content)
def show_score(self,score): self.__show(3,23,Screen.black,"Score: {0}".format(score))
def color(self, position, width, height, color):
for row in range(position[0], position[0] + height):
for col in range(position[1], position[1] + width):
self.__show(col * 2 + 1,row + 1,color," ")
class GameLogic(object):
def update(self, screen, snake, fruit, stats):
stats.increase_score()
screen.show_score(stats.current_score)
update_result = snake.update(screen.last_input,fruit.current_position)
if update_result[0] == False: return True
if update_result[1] == True: return False
if update_result[2][0] < 0 or update_result[2][1] < 0: return False
if update_result[2][0] >= Screen.dimension[0] or update_result[2][1] >= Screen.dimension[1]: return False
screen.color(update_result[2],1,1,screen.green)
if update_result[3] is None:
fruit.reset_position()
while snake.position_in_buffer(fruit.current_position): fruit.reset_position()
screen.color(fruit.current_position,1,1,screen.red)
stats.increase_level()
else: screen.color(update_result[3],1,1,screen.black)
return True
def end(self): screen.color((0,0),39,21,Screen.blue)
class Snake(object):
up = 0x00; down = 0x01; left = 0x10; right = 0x11
def __init__(self):
self.__buffer = list(); self.__current_time_slice = 0
self.__buffer = [[Screen.dimension[0]//2 + 1, Screen.dimension[1]//2 + 1]]  # floor division keeps grid coordinates integral under Python 3 as well
self.__current_direction = Snake.up
def __current_speed(self):
_s = 8 - len(self.__buffer)//2
return 1 if _s < 1 else _s
def position_in_buffer(self, fruit_pos):
for item in self.__buffer:
if item == fruit_pos:
return True
return False
# returns [whether_need_update_screen(bool), whether_fail(bool), head_pos_to_draw(x,y), tail_pos_to_remove(x,y)]
def update(self, direction, fruit_pos):
self.__current_time_slice += 1
self.__current_time_slice %= self.__current_speed()
if self.__current_time_slice != 0: return [False, False]
if direction is None: direction = self.__current_direction
if direction ^ self.__current_direction == 0x01: direction = self.__current_direction
self.__current_direction = direction; candidate = [0, 0]; head = self.__buffer[len(self.__buffer) - 1]
candidate[0] = head[0] + 1 if self.__current_direction == Snake.down else head[0] - 1 if self.__current_direction == Snake.up else head[0]
candidate[1] = head[1] + 1 if self.__current_direction == Snake.right else head[1] - 1 if self.__current_direction == Snake.left else head[1]
if self.position_in_buffer(candidate): return [True, True]
if candidate == fruit_pos: self.__buffer.append(candidate); return [True, False, candidate, None]
else:
self.__buffer.append(candidate); tail = self.__buffer[0]; self.__buffer.remove(tail)
return [True, False, candidate, tail]
class Fruit(object):
def __init__(self): self.reset_position()
@property
def current_position(self): return self.__position
def reset_position(self): self.__position = [random.randint(0,Screen.dimension[0]-1),random.randint(0,Screen.dimension[1]-1)]
class Statistics(object):
def __init__(self): self.current_score = 0; self.__level = 0
def increase_score(self): self.current_score += 1
def increase_level(self): self.__level += 1; self.current_score += pow(2,self.__level-1)
if __name__ == "__main__":
screen = Screen(); logic = GameLogic(); stats = Statistics(); fruit = Fruit(); snake = Snake()
while snake.position_in_buffer(fruit.current_position): fruit.reset_position()
screen.color(fruit.current_position,1,1,screen.red)
while logic.update(screen, snake, fruit, stats): time.sleep(0.05)
logic.end()
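# The reversal check in Snake.update relies on the direction encoding above
# (up=0x00, down=0x01, left=0x10, right=0x11): opposite directions differ only
# in the low bit, so `direction ^ current == 0x01` detects a 180-degree turn.
# A standalone check of that property (plain Python, no .NET required):
for a, b in [(0x00, 0x01), (0x10, 0x11)]:  # (up, down) and (left, right)
    assert a ^ b == 0x01  # opposite pairs XOR to 0x01 and are therefore ignored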
| 59.171717
| 177
| 0.669
| 787
| 5,858
| 4.740788
| 0.205845
| 0.050121
| 0.048244
| 0.033503
| 0.187617
| 0.132672
| 0.106674
| 0.0922
| 0.056821
| 0.056821
| 0
| 0.026788
| 0.209799
| 5,858
| 99
| 178
| 59.171717
| 0.779218
| 0.044725
| 0
| 0.087912
| 0
| 0
| 0.007688
| 0
| 0
| 0
| 0.003576
| 0
| 0
| 1
| 0.186813
| false
| 0
| 0.043956
| 0.010989
| 0.43956
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f56710ff85a90ed722496b29dbe8a6afdffc8f9d
| 2,291
|
py
|
Python
|
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Program to build a graph based on dense input features (embeddings).
This is a wrapper around the `nsl.tools.build_graph` API. See its documentation
for more details.
USAGE:
`python graph_builder.py` [*flags*] *input_features.tfr... output_graph.tsv*
For details about this program's flags, run `python graph_builder.py --help`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from neural_structured_learning.tools import graph_builder_lib
import tensorflow as tf
def _main(argv):
"""Main function for running the graph_builder program."""
flag = flags.FLAGS
flag.showprefixforinfo = False
if len(argv) < 3:
raise app.UsageError(
'Invalid number of arguments; expected 2 or more, got %d' %
(len(argv) - 1))
graph_builder_lib.build_graph(argv[1:-1], argv[-1], flag.similarity_threshold,
flag.id_feature_name,
flag.embedding_feature_name)
if __name__ == '__main__':
flags.DEFINE_string(
'id_feature_name', 'id',
"""Name of the singleton bytes_list feature in each input Example
whose value is the Example's ID.""")
flags.DEFINE_string(
'embedding_feature_name', 'embedding',
"""Name of the float_list feature in each input Example
whose value is the Example's (dense) embedding.""")
flags.DEFINE_float(
'similarity_threshold', 0.8,
"""Lower bound on the cosine similarity required for an edge
to be created between two nodes.""")
# Ensure TF 2.0 behavior even if TF 1.X is installed.
tf.compat.v1.enable_v2_behavior()
app.run(_main)
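# An illustrative invocation, assuming `embeddings.tfr` holds tf.train.Example
# records with the default `id` and `embedding` features (file names are
# placeholders, not part of the original program):
#
#   python graph_builder.py --similarity_threshold=0.9 embeddings.tfr graph.tsv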
| 34.19403
| 80
| 0.717154
| 334
| 2,291
| 4.757485
| 0.48503
| 0.03776
| 0.030208
| 0.020138
| 0.06545
| 0.06545
| 0.06545
| 0.06545
| 0.06545
| 0.06545
| 0
| 0.011438
| 0.198603
| 2,291
| 66
| 81
| 34.712121
| 0.854031
| 0.430816
| 0
| 0.066667
| 0
| 0
| 0.136743
| 0.022965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.233333
| 0
| 0.266667
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f56a3c3291794639e68ab580cfe7cfde7175ba0c
| 11,672
|
py
|
Python
|
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
import torch
from torch.utils import data
import sys
from sklearn.utils import shuffle
import numpy as np
import argparse
import matplotlib.pyplot as plt
class UserSet(data.Dataset):
def __init__(self, path, tsplit, idim=100, seed=0, Nsongs=180198, pc_split=0.1, tag2vector_path=""):
"""
path : str
path + fname of the user-playcounts list
the file has the index of the songs listened by each user
idim : int
maximum number of songs per user in items
>95% of users have listened less than 100 songs
tsplit : str
type of dataset: 'train', 'val', 'test'
seed : int
Seed used for the pcounts splitting
Nsongs : int
Number of different songs
pc_split : float
Percentage of the val and test set
(pc_split=1 corresponds to 100%)
"""
# LOAD DATA
self.path = path
self.pcounts = torch.load(self.path) #list
self.tsplit = tsplit
self.pc_split = pc_split
self.idim = idim
self.len = len(self.pcounts)
self.index1 = int(self.len*(1 - 2*pc_split))
self.index2 = int(self.len*(1 - pc_split))
self.seed = seed
self.Nsongs = Nsongs
# SPLIT DATASET
if self.tsplit == "train":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[:self.index1]
self.len = len(self.pcounts)
elif self.tsplit == "val":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index1:self.index2]
self.len = len(self.pcounts)
elif self.tsplit == "test":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index2:]
self.len = len(self.pcounts)
else:
print("ERROR: split options = 'train', 'val', 'test'. \n", self.tsplit)
self.len = None
self.pcounts = None
sys.exit(1)  # non-zero exit status signals the error
return
def __len__(self):
return self.len
def __getitem__(self, idx):  # given a user index, returns two vectors of the listened songs
user = shuffle(self.pcounts[idx])
idx_inp = np.random.randint(1, min(len(user)-1, self.idim))
idx_out = np.random.randint(idx_inp + 1, min(len(user) + 1, idx_inp + self.idim))
#INP PER EMBEDDING (song ID and -1)
inp = -torch.ones(self.idim, dtype=torch.long)
inp[range(idx_inp)] = torch.LongTensor(user[:idx_inp])
#OUT (one-hot vector)
out = torch.zeros(self.Nsongs, dtype=torch.long)
out[user[idx_inp:idx_out]] = torch.ones(len(user[idx_inp:idx_out]), dtype=torch.long)
return inp, out
def get_tags(self, Nusers=0, Ntags=1):
return torch.randint(Ntags, (Nusers, 1)).squeeze(1)
class EmbSet(data.Dataset):
def __init__(self, path, tsplit, idim=100, seed=0, Nsongs=180198, pc_split=0.1):
"""
See UserSet
This dataset is for flows.
"""
self.path = path
self.pcounts = torch.load(self.path) #list
self.tsplit = tsplit
self.pc_split = pc_split
self.idim = idim
self.len = len(self.pcounts)
self.index1 = int(self.len*(1 - 2*pc_split))
self.index2 = int(self.len*(1 - pc_split))
self.seed = seed
self.Nsongs = Nsongs
# SPLIT DATASET
if self.tsplit == "train":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[:self.index1]
self.len = len(self.pcounts)
elif self.tsplit == "val":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index1:self.index2]
self.len = len(self.pcounts)
elif self.tsplit == "test":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index2:]
self.len = len(self.pcounts)
else:
print("ERROR: split options = 'train', 'val', 'test'. \n", self.tsplit)
self.len = None
self.pcounts = None
sys.exit(1)  # non-zero exit status signals the error
return
def __len__(self):
return self.len
def __getitem__(self, idx):  # given a user index, returns two vectors of the listened songs
user = shuffle(self.pcounts[idx])
idx_inp = np.random.randint(1, min(len(user)-1, self.idim))
idx_out = np.random.randint(idx_inp + 1, min(len(user) + 1, idx_inp + self.idim))
#INP
inp_idim = -torch.ones(self.idim, dtype=torch.long)
inp_idim[range(idx_inp)] = torch.LongTensor(user[:idx_inp])
inp_idx = torch.zeros(self.Nsongs, dtype=torch.long)
inp_idx[user[:idx_inp]] = torch.ones(len(user[:idx_inp]), dtype=torch.long)
#OUT
out_idim = -torch.ones(self.idim, dtype=torch.long)
out_idim[range(idx_out - idx_inp)] = torch.LongTensor(user[idx_inp:idx_out])
out_idx = torch.zeros(self.Nsongs, dtype=torch.long)
out_idx[user[idx_inp:idx_out]] = torch.ones(len(user[idx_inp:idx_out]), dtype=torch.long)
return inp_idim, inp_idx, out_idim, out_idx
class PostSet(data.Dataset):
"""
Loads dataset for predict created by get_PostSet().
"""
def __init__(self, calculate=False, metadata_path="results/metadata", metadata_name="opt_tags", bias_top=1, bias_normal=1):
if calculate:
get_TestSetPredict()
self.data = torch.load(metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
self.len = len(self.data)
self.path = metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal)
return
def __len__(self):
return self.len
def __getitem__(self, idx):
return self.data[idx]
def get_PostSet(pcounts_name = "opt_pcounts", pcounts_path = "results/metadata",
pc_split=0.1, seed = 0,
metadata_name = "opt_tags", metadata_path = "results/metadata",
bias_top=1, bias_normal=1):
"""
ONLY VALID FOR METADATA THAT IS A LIST FOR EACH SONG
"""
# LOAD PCOUNTS AND METADATA
pcounts = torch.load(pcounts_path + "/" + pcounts_name) #list
index2 = int(len(pcounts)*(1 - pc_split))
pcounts = shuffle(pcounts, random_state=seed)[index2:] # Test partition
metadata, meta = torch.load(metadata_path + "/" + metadata_name)
Nclasses = len(meta)
meta2idx = {meta[i]:i for i in range(Nclasses)}
idx2meta = {i:meta[i] for i in range(Nclasses)}
# CHANGE METADATA
print("Metadata2num and opt_pcounts to dict...")
idx_metadata = {} # same as metadata but using the index of meta2idx
for i in range(len(metadata)):
if metadata[i] == -1:
idx_metadata[i] = -1
else:
idx_metadata[i] = [meta2idx[m] for m in metadata[i]]
dict_pcounts = {}
for i in range(len(pcounts)):
dict_pcounts[i] = pcounts[i]
# USER META COUNT
print("Before filtering users without metadata,", len(pcounts))
user2class_counts = {}
total = len(dict_pcounts)
for b, user in enumerate(list(dict_pcounts.keys())):
print(" {0:0.3f}% \r".format((b+1.)*100./total), end="")
class_counts = torch.zeros(Nclasses)
for song in dict_pcounts[user]:
if idx_metadata[song] != -1:
class_counts[idx_metadata[song]] += 1
if (class_counts != 0).any():
user2class_counts[user] = class_counts.data.tolist()
else:
del dict_pcounts[user]
# GET TOP CLASS
print("After filtering users without metadata,", len(user2class_counts), len(dict_pcounts))
user2topclass = {}
for user in user2class_counts.keys():
user2topclass[user] = idx2meta[torch.argmax(torch.tensor(user2class_counts[user])).data.tolist()]
# SPLIT INTO [SONGS, TOP CLASS SONGS, TOP TAG]
user2topsongs = {}
user2normalsongs = {}
total = len(dict_pcounts)
for b, user in enumerate(dict_pcounts.keys()):
print(" {0:0.3f}%\r".format((b+1.)/total*100), end="")
top = []
normal = []
Ntop = 0
for song in dict_pcounts[user]:
if metadata[song] != -1:
if (user2topclass[user] in metadata[song]) and Ntop<100:
top += [song]
Ntop += 1
else:
normal += [song]
else:
normal += [song]
user2topsongs[user] = top
user2normalsongs[user] = normal
# DELETE USERS (BIAS_TOP, BIAS_NORMAL)
predict_dataset = []
for b, user in enumerate(dict_pcounts.keys()):
print(" {0:0.3f}%\r".format((b+1.)/total*100), end="")
if len(user2topsongs[user]) >= bias_top and len(user2normalsongs[user]) >= bias_normal:
predict_dataset += [[user2normalsongs[user], user2topsongs[user], user2topclass[user]]]
print("# Users (after deleting top<{}, inp<{}): ".format(bias_top, bias_normal), len(predict_dataset))
torch.save(predict_dataset, metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
return
def get_topclass2Ntopclass(bias_top=1, bias_normal=1, metadata_path="results/metadata", metadata_name="opt_tags"):
print("Calculating topclass2Ntopclass...")
PostSet = torch.load(metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
topclass2Ntopclass = {}
for b, (inp, out, c) in enumerate(PostSet):
if c not in list(topclass2Ntopclass.keys()): topclass2Ntopclass[c] = 0
topclass2Ntopclass[c] += 1
torch.save(topclass2Ntopclass, metadata_path + "/topclass2Ntopclass_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
return
def get_class2song(metadata_path="results/metadata", metadata_name="opt_tags"):
print("Calculating class2song...")
metadata, meta = torch.load(metadata_path + "/" + metadata_name)
class2song = {c:[] for c in meta}
total = len(metadata)
for i in range(total):
print(" {0:0.3f}%\r".format((i+1.)/total*100), end="")
if metadata[i] == -1: continue
for c in metadata[i]:
class2song[c] += [i]
torch.save(class2song, metadata_path + "/{}2song".format(metadata_name))
return
def get_class2vector(metadata_path="results/metadata", metadata_name="opt_tags", Nsongs=180198):
print("Calculating get_class2vector...")
class2song = torch.load(metadata_path + "/{}2song".format(metadata_name))
_, meta = torch.load(metadata_path + "/" + metadata_name) # for idx2meta
Nclasses = len(meta)
meta2idx = {meta[i]:i for i in range(Nclasses)}
idx2meta = {i:meta[i] for i in range(Nclasses)}
total = len(class2song)
class2vector = torch.zeros(total,Nsongs).long()
for i in range(total):
print(" {0:0.3f}%\r".format((i+1.)/total*100), end="")
class2vector[i][class2song[idx2meta[i]]] = 1
torch.save(class2vector, metadata_path + "/{}2vector".format(metadata_name))
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bias_top', type=int, default=1, help="Minimum number of songs in user_topsongs for a user to be taken into account")
parser.add_argument('--bias_normal', type=int, default=1, help="Minimum number of songs in user_normalsongs for a user to be taken into account")
parser.add_argument('--Nsongs', type=int, default=180198, help="Number of different songs")
parser.add_argument('--metadata_name', type=str, default="opt_tags", help="Name of the metadata to use")
parser.add_argument('--metadata_path', type=str, default="results/metadata", help="Path of the metadata to use")
parser.add_argument('--pcounts_name', type=str, default="opt_pcounts", help="Name of the pcounts to use")
parser.add_argument('--pcounts_path', type=str, default="results/metadata", help="Path of the pcounts to use")
parser.add_argument('--TODO', nargs='+', type=str, default=["all"], help="Things to calculate")
args = parser.parse_args()
if args.TODO == ["all"]: args.TODO = ["postset", "topclass2Ntopclass", "class2song", "class2vector"]
print("METADATA: {}\nBIAS TOP: {}\nBIAS NORMAL: {}\n".format(args.metadata_name, args.bias_top, args.bias_normal))
if "postset" in args.TODO:
get_PostSet(bias_normal=args.bias_normal, bias_top=args.bias_top, metadata_name=args.metadata_name, metadata_path=args.metadata_path, pcounts_name=args.pcounts_name, pcounts_path=args.pcounts_path)
if "topclass2Ntopclass" in args.TODO:
get_topclass2Ntopclass(bias_normal=args.bias_normal, bias_top=args.bias_top, metadata_name=args.metadata_name, metadata_path=args.metadata_path)
if "class2song" in args.TODO:
get_class2song(metadata_name=args.metadata_name, metadata_path=args.metadata_path)
if "class2vector" in args.TODO:
get_class2vector(metadata_name=args.metadata_name, metadata_path=args.metadata_path, Nsongs=args.Nsongs)
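# A minimal usage sketch of UserSet (kept as a comment; the pcounts path and
# batch size are illustrative, assuming a torch-saved list of per-user song
# index lists as described in the docstring above):
# train_set = UserSet("results/metadata/opt_pcounts", tsplit="train")
# loader = data.DataLoader(train_set, batch_size=64, shuffle=True)
# inp, out = next(iter(loader))  # inp: (64, idim) IDs padded with -1; out: (64, Nsongs) multi-hot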
| 36.936709
| 199
| 0.706306
| 1,731
| 11,672
| 4.60312
| 0.125361
| 0.035894
| 0.011295
| 0.015813
| 0.554342
| 0.53627
| 0.515813
| 0.500753
| 0.417043
| 0.40989
| 0
| 0.020367
| 0.146076
| 11,672
| 315
| 200
| 37.053968
| 0.779071
| 0.10598
| 0
| 0.432432
| 0
| 0
| 0.123779
| 0.004886
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063063
| false
| 0
| 0.031532
| 0.022523
| 0.171171
| 0.067568
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f56aef37015ae46f5772b8eb36d680a12e113fe7
| 892
|
py
|
Python
|
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
import json
from typing import List
from LocationObject import LocationObject
def parse(file_path: str) -> List[LocationObject]:
with open(file_path, "r") as file:
data = json.loads(file.read().replace("\n", ""))
locations: List[LocationObject] = []
for obj in data:  # `obj` avoids shadowing the built-in `object`
city = obj["City"]
code = obj["PostalCode"]
street = obj["Street"]
streetNum = str(obj["StreetNumber"])
openTime = obj["OpenTime"]
closeTime = obj["CloseTime"]
location = LocationObject(city, code, street, streetNum, openTime, closeTime)
locations.append(location)
uniqueLocations = []
for i in locations:
if i not in uniqueLocations:
uniqueLocations.append(i)
return uniqueLocations
# add geocoding for each location
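# The parser above expects a JSON array of objects with these keys; a minimal
# illustrative input (the values below are made up):
# [
#   {"City": "Bratislava", "PostalCode": "81101", "Street": "Hlavna",
#    "StreetNumber": 5, "OpenTime": "08:00", "CloseTime": "20:00"}
# ]
# locations = parse("locations.json")  # returns de-duplicated LocationObjects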
| 27.030303
| 89
| 0.602018
| 88
| 892
| 6.079545
| 0.477273
| 0.029907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.298206
| 892
| 32
| 90
| 27.875
| 0.854633
| 0.034753
| 0
| 0
| 0
| 0
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.142857
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f56b9c719e339cbfa0c390fd236dda0208636e27
| 7,786
|
py
|
Python
|
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | null | null | null |
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | 16
|
2020-09-05T16:03:40.000Z
|
2022-03-19T17:42:05.000Z
|
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from sqlalchemy.orm import sessionmaker
import nfp.servicos.model as tables
from nfp import CONEXAO
class ControleExecucao(object):
uri = ''
tarefa = None
tarefa_nova = False
engine = CONEXAO
def configurar_base_de_dados(self):
self.DBSession = sessionmaker(bind=self.engine)
if os.path.isfile(self.uri):
if not self.engine.dialect.has_table(self.engine, self.table_name):
print('Table {} does not exist yet. Creating table...'.format(self.table_name))
base = tables.Base
base.metadata.create_all(self.engine)
return
print('Database does not exist yet. Creating...')
base = tables.Base
base.metadata.create_all(self.engine)
print('using database: ' + self.uri)
def get_tarefa(self, tarefa_id):
session = self.DBSession()
tarefa = tables.Tarefa
query = session.query(tarefa).filter(
tarefa.id == tarefa_id,
)
registro = query.first()
return registro
def atualizar_colunas_tabela(self):
colunas = self.localizar_colunas_faltantes()
if not colunas:
return
session = self.DBSession()
for coluna, tipo in colunas.items():
session.execute('ALTER TABLE %s ADD COLUMN %s %s' % (self.table_name, coluna, tipo))
session.commit()
def localizar_colunas_faltantes(self):
tabela = self.table_name
session = self.DBSession()
result = session.execute("SELECT name FROM PRAGMA_TABLE_INFO('%s')" % (tabela))
colunas_bd = set()
for coluna in result.fetchall():
colunas_bd.add(coluna[0])
mapper = self.model.__mapper__.columns
colunas = set()
colunas_dic = {}
for column in mapper:
colunas.add(column.name)
colunas_dic[column.name] = str(column.type)
diferencas = list(colunas - colunas_bd)
if diferencas:
retorno = {}
for diferenca in diferencas:
retorno[diferenca] = colunas_dic[diferenca]
return retorno
return None
def extrair_dados_tarefa(self, tarefa_id):
session = self.DBSession()
execucao = self.model
# look up a started task
filtro = [execucao.tarefa_id == tarefa_id]
query = session.query(execucao).filter(
*filtro,
)
registros = query.all()
if not registros:
return None
colunas = [column.name for column in self.model.__mapper__.columns]
remover = ['id', 'tarefa_id', 'inicio', 'fim']
for item in remover:
try:
colunas.remove(item)
except Exception:
pass
linhas = [
[getattr(valor, column.name)
for column in self.model.__mapper__.columns
if not(column.name in remover)]
for valor in registros]
return [colunas] + linhas
def contador_processos_tarefa(self, tarefa_id):
session = self.DBSession()
execucao = self.model
query = session.query(execucao).filter(
execucao.tarefa_id == tarefa_id
)
registros = query.all()
executadas = [reg.fim for reg in registros if reg.fim is not None]
ex = len(executadas)
# ex += 1
tot = len(registros)
return ex, tot
def finalizar_tarefa(self):
session = self.DBSession()
tarefa = tables.Tarefa
robo = self.table_name
# look up the started task
query = session.query(tarefa).filter(
tarefa.inicio.isnot(None),
tarefa.fim.is_(None),
tarefa.robo == robo,
)
registro = query.first()
if not registro:
return None
# record the completion of the task
# registro = tarefa()
registro.robo = robo
registro.fim = datetime.now()
session.add(registro)
session.commit()
return registro
def limpar_tabela(self, tabela):
session = self.DBSession()
session.execute('''DELETE FROM {}'''.format(tabela))
session.commit()
def reativar_tarefa(self, tarefa_id):
session = self.DBSession()
tarefa = tables.Tarefa
query = session.query(tarefa).filter(
tarefa.id == tarefa_id,
)
registro = query.first()
registro.fim = None
session.commit()
return True
def selecionar_execucao(self, tarefa_id):
session = self.DBSession()
execucao = self.model
tarefa = tables.Tarefa
# look up a started task
query = session.query(execucao).filter(
execucao.inicio.isnot(None),
execucao.fim.is_(None),
execucao.tarefa_id == tarefa_id
).join(tarefa).filter(tarefa.fim.is_(None))
registro = query.first()
if registro:
return registro
# look up the first free task
query = session.query(execucao).filter(
execucao.inicio.is_(None),
execucao.tarefa_id == tarefa_id
).join(tarefa).filter(tarefa.fim.is_(None))
registro = query.first()
# if none is free, return nothing
if not registro:
return None
# mark the free task that was found as started
registro.inicio = datetime.now()
session.add(registro)
session.commit()
return registro
def selecionar_tarefa_ativa(self, criar_nova=False):
session = self.DBSession()
tarefa = tables.Tarefa
robo = self.table_name
# look up a started task
query = session.query(tarefa).filter(
tarefa.inicio.isnot(None),
tarefa.fim.is_(None),
tarefa.robo == robo,
)
registro = query.first()
if registro:
self.tarefa_nova = False
return registro
# record the task entry, marking it as started
if criar_nova:
registro = tarefa()
registro.robo = robo
registro.inicio = datetime.now()
session.add(registro)
session.commit()
self.tarefa_nova = True
return registro
return None
def selecionar_ultima_tarefa_finalizada(self):
session = self.DBSession()
tarefa = tables.Tarefa
robo = self.table_name
# look up the last finished task
query = session.query(tarefa).filter(
tarefa.inicio.isnot(None),
tarefa.fim.isnot(None),
tarefa.robo == robo,
).order_by(tarefa.fim.desc())
return query.first()
def __del__(self):
del self.DBSession
# ---------------- Module functions ------
def selecionar_ultima_tarefa_remota_finalizada(tarefa_remota_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.selecionar_ultima_tarefa_remota_finalizada(tarefa_remota_id)
def get_id_tarefa_remota(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.get_id_tarefa_remota(tarefa_id)
def get_tarefa(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.get_tarefa(tarefa_id)
def reativar_tarefa(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.reativar_tarefa(tarefa_id)
# ----------------------------------------
if __name__ == "__main__":
pass
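# A minimal usage sketch of the module-level helpers above (the task id is
# illustrative, not from the original file):
# tarefa = get_tarefa(1)
# if tarefa is not None and tarefa.fim is not None:
#     reativar_tarefa(1)  # clears `fim` so the task can be picked up again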
| 32.041152
| 96
| 0.595171
| 852
| 7,786
| 5.278169
| 0.183099
| 0.042695
| 0.048922
| 0.021348
| 0.500778
| 0.480987
| 0.448521
| 0.409162
| 0.38648
| 0.320881
| 0
| 0.00037
| 0.305934
| 7,786
| 242
| 97
| 32.173554
| 0.831791
| 0.058053
| 0
| 0.509901
| 0
| 0
| 0.034158
| 0.003143
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084158
| false
| 0.009901
| 0.024752
| 0
| 0.242574
| 0.014851
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f56e6fbda99325c6509cd93be29f620a11819e74
| 2,887
|
py
|
Python
|
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
"""Adoption application."""
from flask import Flask, request, redirect, render_template
from models import db, connect_db, Pets
from wtforms import StringField, IntegerField, TextAreaField, BooleanField
from wtforms.validators import DataRequired, InputRequired, AnyOf, URL, NumberRange
from flask_wtf import FlaskForm
from petfunctions import get_random_pet
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///adopt'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
connect_db(app)
db.create_all()
from flask_debugtoolbar import DebugToolbarExtension
app.config['SECRET_KEY'] ='SOSECRET'
debug=DebugToolbarExtension(app)
class AddPetForm(FlaskForm):
"""Form class for adding a pet"""
name = StringField('Pet Name')
#make this a dropdown (species)
species = StringField('Pet Species',validators=[InputRequired(),AnyOf(['dog','cat','porcupine','pickle'])])
photo_url = StringField('Pet Photo Url',validators=[InputRequired(),URL()])
age = IntegerField('Pet Age',validators=[InputRequired(), NumberRange(0, 30, "Age must be between 0 and 30")])
notes = TextAreaField('Notes')
class EditPetForm(FlaskForm):
""""Form class for editing pets"""
photo_url = StringField('Pet Photo Url',validators=[InputRequired(),URL()])
notes = TextAreaField('Notes')
available = BooleanField('Available')
@app.route('/')
def pet_list():
"""Display a homepage of pets we can adopt"""
pets = Pets.query.all()
pet_name,pet_age,pet_url = get_random_pet()
return render_template('index.html',pets=pets,pet_name=pet_name,pet_age=pet_age,pet_url=pet_url)
@app.route('/add', methods=['GET','POST'])
def add_pet_form():
"""Add pet to adoption database form"""
form = AddPetForm()
if form.validate_on_submit():
name = form.data['name']
species = form.data['species']
photo_url = form.data['photo_url']
age = form.data['age']
notes = form.data['notes']
pet = Pets(name=name,
species=species,
photo_url=photo_url,
age=age,
notes=notes,
)
db.session.add(pet)
db.session.commit()
return redirect('/')
else:
return render_template('add_pet_form.html',form=form)
@app.route('/<int:pet_id>', methods=['GET','POST'])
def pet_page(pet_id):
"""Display pet details and a form to edit pet"""
pet = Pets.query.get_or_404(pet_id)
form = EditPetForm(obj=pet)
if form.validate_on_submit():
pet.photo_url = form.data['photo_url']
pet.notes = form.data['notes']
pet.available = form.data['available']
db.session.commit()
return redirect(f'/{pet_id}')
else:
return render_template('pet_details.html',pet=pet, form=form)
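# A development-server launch sketch, assuming a local PostgreSQL database
# named `adopt` exists and models.py / petfunctions.py are importable:
#
#   FLASK_APP=app.py FLASK_ENV=development flask run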
| 29.459184
| 114
| 0.66505
| 362
| 2,887
| 5.146409
| 0.303867
| 0.042941
| 0.030596
| 0.022544
| 0.180354
| 0.085883
| 0.060118
| 0.060118
| 0.060118
| 0
| 0
| 0.003886
| 0.197783
| 2,887
| 97
| 115
| 29.762887
| 0.800518
| 0.078282
| 0
| 0.16129
| 0
| 0
| 0.137262
| 0.020152
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.112903
| 0
| 0.403226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f56f1c7317138379cc46e4bc9738fe0615922706
| 17,810
|
py
|
Python
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 69
|
2019-02-25T00:17:53.000Z
|
2022-03-31T17:26:48.000Z
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 68
|
2018-07-20T09:01:01.000Z
|
2022-03-31T16:28:36.000Z
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 24
|
2018-10-02T04:32:10.000Z
|
2021-11-10T08:24:17.000Z
|
"""
Utilities for (weighted) bootstrap resampling applied to geoscientific point-data.
"""
import numpy as np
import pandas as pd
from .meta import subkwargs
from .spatial import great_circle_distance, _get_sqare_grid_segment_indicies
from .log import Handle
logger = Handle(__name__)
try:
import sklearn
HAVE_SKLEARN = True
except ImportError:
msg = "scikit-learn not installed"
logger.warning(msg)
HAVE_SKLEARN = False
def _segmented_univariate_distance_matrix(
A, B, distance_metric, dtype="float32", segs=10
):
"""
A method to generate a point-to-point distance matrix in segments to be softer
on memory requirements yet retain precision (e.g. beyond a few thousand points).
Parameters
-----------
A, B : :class:`numpy.ndarray`
Numpy arrays with positions of points.
distance_metric
Callable function f(a, b) from which to derive a distance metric.
dtype : :class:`str` | :class:`numpy.dtype`
Data type to use for the matrix.
segs : :class:`int`
Number of segments to split the matrix into (note that this will effectively
be squared - i.e. 10 -> 100 individual segments).
Returns
-------
dist : :class:`numpy.ndarray`
2D point-to-point distance matrix.
"""
max_size = np.max([a.shape[0] for a in [A, B]])
dist = np.zeros((max_size, max_size), dtype=dtype) # full matrix
# note that this could be parallelized; the calculations are independent
for ix_s, ix_e, iy_s, iy_e in _get_sqare_grid_segment_indicies(max_size, segs):
dist[ix_s:ix_e, iy_s:iy_e] = distance_metric(
A[ix_s:ix_e][:, np.newaxis], B[iy_s:iy_e][np.newaxis, :],
)
return dist
def univariate_distance_matrix(a, b=None, distance_metric=None):
"""
Get a distance matrix for a single column or array of values (here used for ages).
Parameters
-----------
a, b : :class:`numpy.ndarray`
Points or arrays to calculate distance between. If only one array is
specified, a full distance matrix (i.e. calculate a point-to-point distance
for every combination of points) will be returned.
distance_metric
Callable function f(a, b) from which to derive a distance metric.
Returns
-------
:class:`numpy.ndarray`
2D distance matrix.
"""
if distance_metric is None:
distance_metric = lambda a, b: np.abs(a - b)
a = np.atleast_1d(np.array(a).astype(float))  # np.float was removed in NumPy 1.24
full_matrix = False
if b is not None:
# a second set of points is specified; the return result will be 1D
b = np.atleast_1d(np.array(b).astype(float))
else:
# generate a full point-to-point matrix for a single set of points
full_matrix = True
b = a.copy()
return _segmented_univariate_distance_matrix(a, b, distance_metric)
def get_spatiotemporal_resampling_weights(
df,
spatial_norm=1.8,
temporal_norm=38,
latlong_names=["Latitude", "Longitude"],
age_name="Age",
max_memory_fraction=0.25,
normalized_weights=True,
**kwargs
):
"""
Takes a dataframe with lat, long and age and returns a sampling weight for each
sample which is essentially the inverse of the mean distance to other samples.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe to calculate weights for.
spatial_norm : :class:`float`
Normalising constant for spatial measures (1.8 arc degrees).
temporal_norm : :class:`float`
Normalising constant for temporal measures (38 Mya).
latlong_names : :class:`list`
List of column names referring to latitude and longitude.
age_name : :class:`str`
Column name corresponding to geological age or time.
max_memory_fraction : :class:`float`
Constraint to switch to calculating mean distances where :code:`matrix=True`
and the distance matrix requires more than a specified fraction of total
available physical memory. This is passed on to
:func:`~pyrolite.util.spatial.great_circle_distance`.
normalized_weights : :class:`bool`
Whether to renormalise weights to unity.
Returns
--------
weights : :class:`numpy.ndarray`
Sampling weights.
Notes
------
This function is equivalent to Eq(1) from Keller and Schoene:
.. math::
W_i \\propto 1 \\Big / \\sum_{j=1}^{n} \\Big ( \\frac{1}{((z_i - z_j)/a)^2 + 1} + \\frac{1}{((t_i - t_j)/b)^2 + 1} \\Big )
"""
weights = pd.Series(index=df.index, dtype="float")
z = great_circle_distance(
df[[*latlong_names]],
absolute=False,
max_memory_fraction=max_memory_fraction,
**subkwargs(kwargs, great_circle_distance)
) # angular distances
_invnormdistances = np.zeros_like(z)
# where the distances are zero, these weights will go to inf
# instead we replace with the smallest non-zero distance/largest non-inf
# inverse weight
norm_inverse_distances = 1.0 / ((z / spatial_norm) ** 2 + 1)
norm_inverse_distances[~np.isfinite(norm_inverse_distances)] = 1
_invnormdistances += norm_inverse_distances
# ages - might want to split this out as optional for spatial resampling only?
t = univariate_distance_matrix(df[age_name])
norm_inverse_time = 1.0 / ((t / temporal_norm) ** 2 + 1)
norm_inverse_time[~np.isfinite(norm_inverse_time)] = 1
_invnormdistances += norm_inverse_time
weights = 1.0 / np.sum(_invnormdistances, axis=0)
if normalized_weights:
weights = weights / weights.sum()
return weights
def add_age_noise(
df,
min_sigma=50,
noise_level=1.0,
age_name="Age",
age_uncertainty_name="AgeUncertainty",
min_age_name="MinAge",
max_age_name="MaxAge",
):
"""
Add gaussian noise to a series of geological ages based on specified uncertainties
or age ranges.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe with age data within which to look up the age name and add noise.
min_sigma : :class:`float`
Minimum uncertainty to be considered for adding age noise.
noise_level : :class:`float`
Scaling of the noise added to the ages. By default the uncertainties are unscaled,
but where age uncertainties are specified at the one standard deviation level
this can be used to expand the range of noise added (e.g. to 2SD).
age_name : :class:`str`
Column name for absolute ages.
age_uncertainty_name : :class:`str`
Name of the column specifying absolute age uncertainties.
min_age_name : :class:`str`
Name of the column specifying minimum absolute ages (used where uncertainties
are otherwise unspecified).
max_age_name : :class:`str`
Name of the column specifying maximum absolute ages (used where uncertainties
are otherwise unspecified).
Returns
--------
df : :class:`pandas.DataFrame`
Dataframe with noise-modified ages.
Notes
------
This modifies the dataframe which is input - be aware of this if using outside
of the bootstrap resampling for which this was designed.
"""
# try and get age uncertainty
try:
age_uncertainty = df[age_uncertainty_name]
except KeyError:
# otherwise get age min age max
# get age uncertainties
age_uncertainty = (
np.abs(df[max_age_name] - df[min_age_name]) / 2
) # half bin width
age_uncertainty[
~np.isfinite(age_uncertainty) | (age_uncertainty < min_sigma)
] = min_sigma  # parentheses needed: `|` binds more tightly than `<`
# generate gaussian age noise
age_noise = np.random.randn(df.index.size) * age_uncertainty.values
age_noise *= noise_level # scale the noise
# add noise to ages
df[age_name] += age_noise
return df
def spatiotemporal_bootstrap_resample(
df,
columns=None,
uncert=None,
weights=None,
niter=100,
categories=None,
transform=None,
boostrap_method="smooth",
add_gaussian_age_noise=True,
metrics=["mean", "var"],
default_uncertainty=0.02,
relative_uncertainties=True,
noise_level=1,
age_name="Age",
latlong_names=["Latitude", "Longitude"],
**kwargs
):
"""
Resample and aggregate metrics from a dataframe, optionally aggregating by a given
set of categories. Formulated specifically for dealing with resampling to address
uneven sampling density in space and particularly geological time.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe to resample.
columns : :class:`list`
Columns to provide bootstrap resampled estimates for.
uncert : :class:`float` | :class:`numpy.ndarray` | :class:`pandas.Series` | :class:`pandas.DataFrame`
Fractional uncertainties for the dataset.
weights : :class:`numpy.ndarray` | :class:`pandas.Series`
Array of weights for resampling, if precomputed.
niter : :class:`int`
Number of resampling iterations. This will be the minimum index size of the output
metric dataframes.
categories : :class:`list` | :class:`numpy.ndarray` | :class:`pandas.Series`
List of sample categories to group the outputs by, which has the same size as the
dataframe index.
transform
Callable function to transform input data prior to aggregation functions. Note
that the outputs will need to be inverse-transformed.
boostrap_method : :class:`str`
Which method to use to add gaussian noise to the input dataset parameters.
add_gaussian_age_noise : :class:`bool`
Whether to add gaussian noise to the input dataset ages, where present.
metrics : :class:`list`
List of metrics to use for dataframe aggregation.
default_uncertainty : :class:`float`
Default (fractional) uncertainty where uncertainties are not given.
relative_uncertainties : :class:`bool`
Whether uncertainties are relative (:code:`True`, i.e. fractional proportions
of parameter values), or absolute (:code:`False`)
noise_level : :class:`float`
Multiplier for the random gaussian noise added to the dataset and ages.
age_name : :class:`str`
Column name for geological age.
latlong_names : :class:`list`
Column names for latitude and longitude, or equivalent orthogonal spherical
spatial measures.
Returns
--------
:class:`dict`
Dictionary of aggregated Dataframe(s) indexed by statistical metrics. If
categories are specified, the dataframe(s) will have a hierarchical index of
:code:`categories, iteration`.
"""
# uncertainty management ###########################################################
uncertainty_type = None
if uncert is not None:
if isinstance(uncert, float):
uncertainty_type = "0D" # e.g. 2%
elif isinstance(uncert, (list, pd.Series)) or (
isinstance(uncert, np.ndarray) and np.array(uncert).ndim < 2
):
uncertainty_type = "1D" # e.g. [0.5%, 1%, 2%]
# shape should be equal to parameter column number
elif isinstance(uncert, (pd.DataFrame)) or (
isinstance(uncert, np.ndarray) and np.array(uncert).ndim >= 2
):
uncertainty_type = "2D" # e.g. [[0.5%, 1%, 2%], [1.5%, 0.6%, 1.7%]]
# shape should be equal to parameter column number by rows
else:
raise NotImplementedError("Unknown format for uncertainties.")
# weighting ########################################################################
# generate some weights for resampling - here addressing specifically spatial
# and temporal resampling
if weights is None:
weights = get_spatiotemporal_resampling_weights(
df,
age_name=age_name,
latlong_names=latlong_names,
**subkwargs(kwargs, get_spatiotemporal_resampling_weights)
)
# to efficiently manage categories we can make sure we have an iterable here
if categories is not None:
if isinstance(categories, (list, tuple, pd.Series, np.ndarray)):
pass
elif isinstance(categories, str) and categories in df.columns:
categories = df[categories]
else:
msg = "Categories unrecognized"
raise NotImplementedError(msg)
# column selection #################################################################
# get the subset of parameters to be resampled, removing spatial and age names
# and only taking numeric data
subset = columns or [
c
for c in df.columns
if c not in [*[i for i in df.columns if age_name in i], *latlong_names]  # unpack so age columns are excluded individually
and np.issubdtype(df.dtypes[c], np.number)
]
# resampling #######################################################################
def _metric_name(metric):
return repr(metric).replace("'", "")
metric_data = {_metric_name(metric): [] for metric in metrics}
# samples are independent, so this could be processed in parallel
for repeat in range(niter):
# take a new sample with replacement equal in size to the original dataframe
smpl = df.sample(weights=weights, frac=1, replace=True)
# whether to specifically add noise to the geological ages
# note that the metadata around age names are passed through to this function
# TODO: Update to have external disambiguation of ages/min-max ages,
# and just pass an age series to this function.
if add_gaussian_age_noise:
smpl = add_age_noise(
smpl,
min_sigma=50,
age_name=age_name,
noise_level=noise_level,
**subkwargs(kwargs, add_age_noise)
)
# transform the parameters to be estimated before adding parameter noise?
if transform is not None:
smpl[subset] = smpl[subset].apply(transform, axis="index")
# whether to add parameter noise, and if so which method to use?
# TODO: Update the naming of this? this is only one part of the bootstrap process
if boostrap_method is not None:
# try to get uncertainties for the data, otherwise use standard deviations?
if boostrap_method.lower() == "smooth":
# add random noise within uncertainty bounds
# this is essentially smoothing
# consider modulating the noise model using the covariance structure?
# this could be done by individual group to preserve varying covariances
# between groups?
if uncert is None:
noise = (
smpl[subset].values
* default_uncertainty
* np.random.randn(*smpl[subset].shape)
) * noise_level
else:
noise = np.random.randn(*smpl[subset].shape) * noise_level
if uncertainty_type in ["0D", "1D"]:
# this should work if a float or series is passed
noise *= uncert
else:
# need to get indexes of the sample to look up uncertainties
# need to extract indexes for the uncertainties, which might be arrays
arr_idxs = df.index.take(smpl.index).values
noise *= uncert[arr_idxs, :]
if relative_uncertainties:
noise *= smpl[subset].values
smpl[subset] += noise
elif (boostrap_method.upper() == "GP") or (
"process" in bootstrap_method.lower()
):
# gaussian process regression to adapt to covariance matrix
msg = "Gaussian Process boostrapping not yet implemented."
raise NotImplementedError(msg)
else:
msg = "Bootstrap method {} not recognised.".format(boostrap_method)
raise NotImplementedError(msg)
# whether to independently estimate metric values for individual categories?
# TODO: Should the categories argument be used to generate individual
# bootstrap resampling processes?
if categories is not None:
for metric in metrics:
metric_data[_metric_name(metric)].append(
smpl[subset].groupby(categories).agg(metric)
)
else: # generate the metric summaries for the overall dataset
for metric in metrics:
metric_data[_metric_name(metric)].append(smpl[subset].agg(metric))
# where the whole dataset is presented
if categories is not None:
# the dataframe will be indexed by iteration of the bootstrap
return {
metric: pd.concat(data, keys=range(niter), names=["Iteration"])
.swaplevel(0, 1)
.sort_index()
for metric, data in metric_data.items()
}
else:
# the dataframe will be indexed by categories and iteration
# TODO: add iteration level to this index?
return {metric: pd.DataFrame(data) for metric, data in metric_data.items()}
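# A toy invocation sketch on synthetic data (the values below are made up
# purely to exercise the function; column names follow the defaults above):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    df = pd.DataFrame({
        "Latitude": rng.uniform(-60, 60, 50),
        "Longitude": rng.uniform(-180, 180, 50),
        "Age": rng.uniform(0, 400, 50),           # Mya
        "AgeUncertainty": np.full(50, 10.0),      # 1SD, Mya
        "SiO2": rng.uniform(45, 75, 50),          # an illustrative measurement
    })
    out = spatiotemporal_bootstrap_resample(df, columns=["SiO2"], niter=10)
    print(out["mean"].describe())                 # bootstrap distribution of the mean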
| 40.022472
| 131
| 0.614935
| 2,161
| 17,810
| 4.961592
| 0.216104
| 0.013057
| 0.012684
| 0.006995
| 0.181309
| 0.13822
| 0.100634
| 0.079276
| 0.042529
| 0.035068
| 0
| 0.006952
| 0.289276
| 17,810
| 444
| 132
| 40.112613
| 0.840101
| 0.488995
| 0
| 0.204082
| 0
| 0
| 0.039005
| 0
| 0
| 0
| 0
| 0.002252
| 0
| 1
| 0.030612
| false
| 0.005102
| 0.035714
| 0.005102
| 0.102041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f570043bcd7ec43faf876327124a5a21c6d01798
| 1,809
|
py
|
Python
|
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from task import SequenceLearning
sns.set(style='white', palette='colorblind', context='poster')
np.random.seed(0)
'''how to use'''
# init
n_param, n_branch = 16, 4
pad_len = 0
n_parts = 2
n_samples = 256
p_rm_ob_enc = 0
p_rm_ob_rcl = 0
n_rm_fixed = False
task = SequenceLearning(
n_param, n_branch, pad_len=pad_len,
p_rm_ob_enc=p_rm_ob_enc,
p_rm_ob_rcl=p_rm_ob_rcl,
n_rm_fixed=n_rm_fixed,
)
# take sample
X, Y = task.sample(n_samples, to_torch=False)
print(f'X shape = {np.shape(X)}, n_example x time x x-dim')
print(f'Y shape = {np.shape(Y)}, n_example x time x y-dim')
'''visualize the sample'''
# pick a sample
i = 0
x, y = X[i], Y[i]
cmap = 'bone'
x_split = np.split(x, (n_param, n_param + n_branch), axis=1)
mat_list = x_split + [y]
f, axes = plt.subplots(
2, 4, figsize=(14, 11), sharey=True,
gridspec_kw={
'width_ratios': [n_param, n_branch, n_param, n_branch],
'height_ratios': [n_param, n_param]
},
)
title_list = ['Observed feature', 'Observed value',
'Queried feature', 'Queried value']
ylabel_list = ['Part one', 'Part two']
for i, mat in enumerate(mat_list):
[mat_p1, mat_p2] = np.split(mat, [n_param], axis=0)
axes[0, i].imshow(mat[:n_param, :], cmap=cmap)
axes[1, i].imshow(mat[n_param:, :], cmap=cmap)
axes[0, i].set_title(title_list[i], fontname='Helvetica')
axes[0, i].set_xticks([])
for i in [1, 3]:
axes[1, i].set_xticks(range(n_branch))
axes[1, i].set_xticklabels(i for i in np.arange(4) + 1)
for i in range(2):
axes[i, 0].set_yticks(np.arange(0, n_param, 5))
axes[i, 0].set_ylabel(ylabel_list[i], fontname='Helvetica')
f.tight_layout()
f.savefig('examples/figs/stimulus-rep.png', dpi=100, bbox_inches='tight')
| 28.265625
| 74
| 0.666667
| 331
| 1,809
| 3.425982
| 0.347432
| 0.063492
| 0.04321
| 0.057319
| 0.092593
| 0.067901
| 0.067901
| 0.049383
| 0
| 0
| 0
| 0.026631
| 0.169707
| 1,809
| 63
| 75
| 28.714286
| 0.728362
| 0.016584
| 0
| 0
| 0
| 0
| 0.159261
| 0.017311
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078431
| 0
| 0.078431
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f571719391b271f64aa33623e91452b85398b280
| 704
|
py
|
Python
|
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | null | null | null |
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | 1
|
2021-06-13T18:08:50.000Z
|
2021-06-13T18:08:50.000Z
|
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Custom exceptions
"""
from __future__ import annotations
__all__ = [
"AlreadyRegistered",
"ConsumerError",
"EventBusError",
"ProducerError",
"UnknownEvent",
]
class EventBusError(Exception):
"""
Base of exceptions raised by the bus.
"""
class UnknownEvent(EventBusError):
"""
Raised when a receiver is created for an event the bus does not recognize.
"""
class AlreadyRegistered(EventBusError):
"""
Raised when an event is registered more than once to the bus.
"""
class ProducerError(EventBusError):
"""
Raised during production of an event.
"""
class ConsumerError(EventBusError):
"""
Raised during consumption of an event.
"""
| 16.761905
| 79
| 0.661932
| 71
| 704
| 6.450704
| 0.535211
| 0.165939
| 0.048035
| 0.10917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238636
| 704
| 41
| 80
| 17.170732
| 0.854478
| 0.382102
| 0
| 0
| 0
| 0
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f572b933b1b5aed70aca3d4ac6ade4a2e8fe1e58
| 9,580
|
py
|
Python
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | 11
|
2020-11-01T11:35:30.000Z
|
2022-03-30T02:19:52.000Z
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | 8
|
2020-12-13T12:17:38.000Z
|
2021-12-21T21:04:27.000Z
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | null | null | null |
from sparse_ct.tool import plot_grid
from sparse_ct.data import image_to_sparse_sinogram
from sparse_ct.reconstructor_2d import (
IRadonReconstructor,
SartReconstructor,
SartTVReconstructor,
DgrReconstructor,
SartBM3DReconstructor)
import logging
logging.basicConfig(
filename='dgr_example_32_35.log',
filemode='a',
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.DEBUG
)
def test(fname, label, n_proj=32, noise_pow=25.0):
dgr_iter = 4000
lr = 0.01
net = 'skip'
noise_std = 1./100
gt, sinogram, theta, FOCUS = image_to_sparse_sinogram(fname,
channel=1, n_proj=n_proj, size=512,
angle1=0.0, angle2=180.0, noise_pow=noise_pow)
logging.warning('Starting')
logging.warning('fname: %s %s',label, fname)
logging.warning('n_proj: %s', n_proj)
logging.warning('noise_pow: %s', noise_pow)
logging.warning('dgr_n_iter: %s', dgr_iter)
logging.warning('dgr_lr: %s', lr)
logging.warning('dgr_net: %s', net)
logging.warning('dgr_noise_std: %s', noise_std)
recons = [
IRadonReconstructor('FBP'),
SartReconstructor('SART', sart_n_iter=40, sart_relaxation=0.15),
SartTVReconstructor('SART+TV',
sart_n_iter=40, sart_relaxation=0.15,
tv_weight=0.5, tv_n_iter=100),
SartBM3DReconstructor('SART+BM3D',
sart_n_iter=40, sart_relaxation=0.15,
bm3d_sigma=0.5),
DgrReconstructor('DIP_1.00_0.00_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=1.0,
w_perceptual_loss=0.0,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.99_0.01_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.99,
w_perceptual_loss=0.01,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.90_0.10_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.90,
w_perceptual_loss=0.10,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.50_0.50_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.5,
w_perceptual_loss=0.5,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.10_0.90_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.10,
w_perceptual_loss=0.90,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.01_0.99_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.01,
w_perceptual_loss=0.99,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.00_1.00_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.0,
w_perceptual_loss=1.0,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.99_0.00_0.01_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.99,
w_perceptual_loss=0.0,
w_tv_loss=0.01
),
DgrReconstructor('DIP_0.90_0.00_0.10_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.9,
w_perceptual_loss=0.0,
w_tv_loss=0.1
),
DgrReconstructor('DIP_0.50_0.00_0.50_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.5,
w_perceptual_loss=0.0,
w_tv_loss=0.5
),
DgrReconstructor('DIP_0.10_0.00_0.90_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.1,
w_perceptual_loss=0.0,
w_tv_loss=0.9
),
DgrReconstructor('DIP_0.01_0.00_0.99_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.01,
w_perceptual_loss=0.0,
w_tv_loss=0.99
),
DgrReconstructor('DIP_0.00_0.00_1.0_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.00,
w_perceptual_loss=0.0,
w_tv_loss=1.0
),
DgrReconstructor('DIP_0.33_0.33_0.33_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.33,
w_perceptual_loss=0.33,
w_tv_loss=0.33
),
DgrReconstructor('DIP_0.8_0.10_0.10_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.8,
w_perceptual_loss=0.1,
w_tv_loss=0.1
),
DgrReconstructor('DIP_0.98_0.01_0.01_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.98,
w_perceptual_loss=0.01,
w_tv_loss=0.01
),
DgrReconstructor('DIP_0.10_0.80_0.10_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.10,
w_perceptual_loss=0.80,
w_tv_loss=0.10
),
DgrReconstructor('DIP_0.01_0.98_0.01_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.01,
w_perceptual_loss=0.98,
w_tv_loss=0.01
),
]
img_sart_bm3d = recons[3].calc(sinogram, theta)
imgs = []
for recon in recons:
if type(recon) == DgrReconstructor:
recon.set_for_metric(gt, img_sart_bm3d, FOCUS=FOCUS, log_dir='../log/dip')
imgs.append(recon.calc(sinogram))
mse, psnr, ssim = recon.eval(gt)
recon.save_result()
logstr = "{}: MSE:{:.5f} PSNR:{:.5f} SSIM:{:.5f}".format(
recon.name, mse, psnr, ssim
)
logging.info(logstr)
plot_grid([gt] + imgs,
FOCUS=FOCUS, save_name=label+'.png', dpi=500)
logging.warning('Done. Results saved as %s', label+'.png')
if __name__ == "__main__":
test("../data/shepp_logan.jpg", "shepp_logan_32_35", n_proj=32, noise_pow=35.0)
test("../data/ct2.jpg", "ct2_32_35", n_proj=32, noise_pow=35.0)
test("../data/ct1.jpg", "ct1_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004013_02_01_119.png", "LoDoPaB1_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004017_01_01_151.png", "LoDoPaB2_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004028_01_04_109.png", "LoDoPaB3_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004043_01_01_169.png", "LoDoPaB4_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004049_04_01_062.png", "LoDoPaB5_32_35", n_proj=32, noise_pow=35.0)
| 38.167331
| 93
| 0.423173
| 1,101
| 9,580
| 3.333333
| 0.131698
| 0.069482
| 0.029428
| 0.034332
| 0.571662
| 0.5297
| 0.528338
| 0.528338
| 0.435967
| 0.394823
| 0
| 0.113128
| 0.482359
| 9,580
| 250
| 94
| 38.32
| 0.626941
| 0
| 0
| 0.537445
| 0
| 0
| 0.106934
| 0.066416
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004405
| false
| 0
| 0.017621
| 0
| 0.022026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5738865aace2f3446a95a35c7f51b460031ae67
| 1,607
|
py
|
Python
|
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
fruit = input()
day_of_the_week = input()
quantity = float(input())
price = 0
if fruit == 'banana' or \
fruit == 'apple' or \
fruit == 'orange' or \
fruit == 'grapefruit' or \
fruit == 'kiwi' or \
fruit == 'pineapple' or \
fruit == 'grapes':
if day_of_the_week == 'Monday' or day_of_the_week == 'Tuesday' or \
day_of_the_week == 'Wednesday' or \
day_of_the_week == 'Thursday' or \
day_of_the_week == 'Friday':
if fruit == 'banana':
price = 2.50
elif fruit == 'apple':
price = 1.20
elif fruit == 'orange':
price = 0.85
elif fruit == 'grapefruit':
price = 1.45
elif fruit == 'kiwi':
price = 2.70
elif fruit == 'pineapple':
price = 5.50
elif fruit == 'grapes':
price = 3.85
total_price = quantity * price
print(f'{total_price:.2f}')
elif day_of_the_week == 'Saturday' or day_of_the_week == 'Sunday':
if fruit == 'banana':
price = 2.70
elif fruit == 'apple':
price = 1.25
elif fruit == 'orange':
price = 0.90
elif fruit == 'grapefruit':
price = 1.60
elif fruit == 'kiwi':
price = 3
elif fruit == 'pineapple':
price = 5.60
elif fruit == 'grapes':
price = 4.20
total_price = quantity * price
print(f'{total_price:.2f}')
else:
print('error')
else:
print('error')
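The same branching logic collapses into a price table keyed by fruit and day group; a behaviourally equivalent, table-driven sketch of the code above:

# Equivalent table-driven version of the nested conditionals above.
WEEKDAYS = {'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'}
WEEKEND = {'Saturday', 'Sunday'}
PRICES = {  # (weekday price, weekend price)
    'banana': (2.50, 2.70), 'apple': (1.20, 1.25),
    'orange': (0.85, 0.90), 'grapefruit': (1.45, 1.60),
    'kiwi': (2.70, 3.00), 'pineapple': (5.50, 5.60),
    'grapes': (3.85, 4.20),
}

fruit = input()
day = input()
quantity = float(input())

if fruit in PRICES and day in WEEKDAYS | WEEKEND:
    price = PRICES[fruit][0 if day in WEEKDAYS else 1]
    print(f'{quantity * price:.2f}')
else:
    print('error')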
| 21.716216
| 71
| 0.47542
| 184
| 1,607
| 4
| 0.255435
| 0.146739
| 0.086957
| 0.130435
| 0.516304
| 0.111413
| 0.111413
| 0.111413
| 0.111413
| 0
| 0
| 0.044103
| 0.393279
| 1,607
| 73
| 72
| 22.013699
| 0.710769
| 0
| 0
| 0.461538
| 0
| 0
| 0.144368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f573e98c3617ee161a5bc2f46171d1b7f2905fc3
| 1,368
|
py
|
Python
|
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
import unittest
from trajectories.dynamic_time_warper import *
from trajectories.trajectory import Trajectory
from trajectories.point import Point
class TestDTW(unittest.TestCase):
def test_1D_DTW(self):
t1 = [1,2,2,10,2,1]
t2 = [3,3,5,5,2]
self.assertEqual(45, dtw(t1, t2, -1, metricI))
self.assertEqual(0, dtw(t1, t1, -1, metricI))
t1 = Trajectory([Point([1]),Point([2]),Point([2]),Point([10]),Point([2]),Point([1])])
t2 = Trajectory([Point([3]),Point([3]),Point([5]),Point([5]),Point([2])])
self.assertEqual(45, dtw(t1, t2, -1, metricD))
self.assertEqual(0, dtw(t1, t1, -1, metricD))
def test_DTWI(self):
p1 = Point([-7, -4])
p2 = Point([5, 6])
p3 = Point([3, 4])
p4 = Point([-3, 5])
t1 = Trajectory([p1, p2])
t2 = Trajectory([p3, p4])
self.assertEqual(45, dtwI(t1, t2))
t1 = Trajectory([p1, p2, p3, p4])
self.assertEqual(0, dtwI(t1, t1))
    def test_DTWD(self):
p1 = Point([-7, -4])
p2 = Point([5, 6])
p3 = Point([3, 4])
p4 = Point([-3, 5])
t1 = Trajectory([p1, p2])
t2 = Trajectory([p3, p4])
self.assertEqual(45, dtwD(t1, t2))
t1 = Trajectory([p1, p2, p3, p4])
self.assertEqual(0, dtwD(t1, t1))
if __name__ == '__main__':
unittest.main()
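For reference, a textbook dynamic-programming DTW sketch. The window semantics (-1 taken to mean "no band constraint", mirroring the tests above) and the metric are assumptions; the expected value 45 shows the repository's dtw uses its own metric, so this is illustrative, not a drop-in replacement.

# Textbook DTW between two sequences under a pairwise metric.
def dtw_sketch(a, b, window=-1, metric=lambda x, y: abs(x - y)):
    n, m = len(a), len(b)
    INF = float('inf')
    D = [[INF] * (m + 1) for _ in range(n + 1)]
    D[0][0] = 0.0
    for i in range(1, n + 1):
        lo, hi = (1, m) if window < 0 else (max(1, i - window), min(m, i + window))
        for j in range(lo, hi + 1):
            cost = metric(a[i - 1], b[j - 1])
            D[i][j] = cost + min(D[i - 1][j], D[i][j - 1], D[i - 1][j - 1])
    return D[n][m]

# dtw_sketch([1, 2, 2, 10, 2, 1], [3, 3, 5, 5, 2]) gives 12.0 with |x - y|;
# the 45 asserted above implies a different metric in the repository.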
| 33.365854
| 93
| 0.54386
| 195
| 1,368
| 3.74359
| 0.215385
| 0.164384
| 0.093151
| 0.087671
| 0.490411
| 0.490411
| 0.490411
| 0.424658
| 0.353425
| 0.353425
| 0
| 0.106046
| 0.262427
| 1,368
| 40
| 94
| 34.2
| 0.617443
| 0
| 0
| 0.388889
| 0
| 0
| 0.005848
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.083333
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5792851b55e8b741f344366679574e04969bc93
| 1,022
|
py
|
Python
|
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | 1
|
2022-02-08T19:35:22.000Z
|
2022-02-08T19:35:22.000Z
|
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | null | null | null |
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | null | null | null |
import datetime
from ..databases.postgresql import session
from ..models.bookmark_model import Bookmark
# Select one
async def select_one(user_id: int, post_id: int):
bookmark = session.query(Bookmark).filter(Bookmark.user_id == user_id, Bookmark.post_id == post_id).first()
return bookmark
# Insert
async def insert(user_id: int, post_id: int):
bookmark = Bookmark(
user_id=user_id,
post_id=post_id,
)
session.add(bookmark)
session.commit()
session.close()
# Update
async def update(user_id: int, post_id: int, deleted_at: str):
bookmark = session.query(Bookmark).filter(Bookmark.user_id == user_id, Bookmark.post_id == post_id).first()
bookmark.updated_at = datetime.datetime.now()
bookmark.deleted_at = deleted_at
session.commit()
session.close()
# Count by post id
async def countByPostId(post_id: int):
    num_bookmarks = session.query(Bookmark).filter(Bookmark.post_id == post_id, Bookmark.deleted_at.is_(None)).count()
return num_bookmarks
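A hedged sketch of how a route layer might drive this repository — the import path, route, and toggle semantics are assumptions, not part of this file:

# Hypothetical caller: toggle a bookmark for (user_id, post_id).
import datetime

from fastapi import APIRouter

from ..repositories import bookmark_repository as repo

router = APIRouter()

@router.post('/posts/{post_id}/bookmark')
async def toggle_bookmark(post_id: int, user_id: int):
    bookmark = await repo.select_one(user_id, post_id)
    if bookmark is None:
        await repo.insert(user_id, post_id)
        return {'bookmarked': True}
    # soft-delete by stamping deleted_at (the repo stores it as a string)
    await repo.update(user_id, post_id, str(datetime.datetime.now()))
    return {'bookmarked': False}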
| 31.9375
| 116
| 0.720157
| 143
| 1,022
| 4.937063
| 0.258741
| 0.110482
| 0.056657
| 0.067989
| 0.405099
| 0.311615
| 0.286119
| 0.223796
| 0.223796
| 0.223796
| 0
| 0
| 0.167319
| 1,022
| 31
| 117
| 32.967742
| 0.829612
| 0.040117
| 0
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f57b07d03e45e8f7fc9d99adb6fc72590a4d7edd
| 3,326
|
py
|
Python
|
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
#
# User management application
#
"""
六、用python写一个cgi程序,功能如下:
1. 查询用户 (get)
2. 创建用户 (post)
3. 修改用户 (post)
4. 删除用户 (post)
要点:
1. 通过变量 REQUEST_METHOD 来判断是get还是post
2. 通过变量 QUERY_STRING 来判断是创建还是修改还是删除
3. 通过subprocess.getoutput, 或者os.system 来运行shell命令
4. 相关命令如下:
查用户:grep ^root /etc/passwd
加用户:useradd user-name
改用户:usermod user-name
删用户:userdel user-name
"""
import os
import sys
import subprocess as sub
def response(headers, body):
for h in headers:
print(h)
print()
for b in body:
sys.stdout.write(b)
def get_user_info(params_str, headers):
if params_str:
params = dict(p.split('=') for p in params_str.split('&'))
else:
params = {}
name = params.get('name')
if not name:
headers.append('Status: 400 BAD_REQUEST')
return response(headers, ['name is required'])
info = read_user_info(name)
if not info:
headers.append('Status: 200 OK')
return response(headers, ['name %s not exists' % name])
body = []
body.append('name: %s\n' % info['name'])
body.append('uid: %s\n' % info['uid'])
body.append('gid: %s\n' % info['gid'])
body.append('comment: %s\n' % info['comment'])
body.append('home: %s\n' % info['home'])
body.append('shell: %s\n' % info['shell'])
return response(headers, body)
def read_user_info(name):
"""从系统的用户数据库 /etc/passwd 中读取指定用户的基本信息,返回字典"""
db = '/etc/passwd'
info = [line.split(':') for line in open(db).read().splitlines()]
user_info = [i for i in info if i[0] == name]
    if not user_info:  # user not found
return
user_info = user_info[0]
colnames = ('name', 'password', 'uid', 'gid', 'comment', 'home', 'shell')
return dict(zip(colnames, user_info))
def alter_user(headers):
data = sys.stdin.read().strip()
if data:
params = dict(p.split('=') for p in data.split('&'))
else:
headers.append('Status: 400 BAD_REQUEST')
return response(headers, ['invalid parameters'])
kind = params['kind'] # add? delete? modify?
if kind == 'add':
cmd = ['useradd', params['name']]
elif kind == 'delete':
cmd = ['userdel', '-r', params['name']]
elif kind == 'mod':
        # only the user's comment field can be modified for now; extend later
name = params['name']
comment = params['comment']
cmd = ['usermod', '-c', comment, name]
else:
headers.append('Status: 400 BAD_REQUEST')
return response(headers, ['operation %s not supported' % kind])
    # run the external user-management command
    # temporary change: run the command with sudo
cmd.insert(0, 'sudo')
cmd = ' '.join(cmd)
code, out = sub.getstatusoutput(cmd)
if code == 0:
headers.append('Status: 200 OK')
return response(headers, ['operation success'])
else:
headers.append('Status: 200 OK')
return response(headers, ['failed: %s' % out])
if __name__ == '__main__':
headers = []
headers.append('Content-Type: text/plain')
if os.getenv('REQUEST_METHOD') == 'GET':
params = os.getenv('QUERY_STRING', '')
get_user_info(params, headers)
elif os.getenv('REQUEST_METHOD') == 'POST':
alter_user(headers)
else:
headers.append('Status: 405 METHOD_NOT_ALLOWED')
response(headers, [])
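Since the script dispatches entirely on CGI environment variables, it can be exercised locally without a web server; a small driver sketch (the script path 'uman.py' is an assumption):

# Hypothetical local driver: fake the CGI environment, capture stdout.
import os
import subprocess

env = dict(os.environ, REQUEST_METHOD='GET', QUERY_STRING='name=root')
out = subprocess.run(['python3', 'uman.py'], env=env,
                     capture_output=True, text=True)
print(out.stdout)  # headers, a blank line, then the user-info body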
| 27.262295
| 77
| 0.591401
| 422
| 3,326
| 4.575829
| 0.334123
| 0.069912
| 0.068876
| 0.047644
| 0.179182
| 0.179182
| 0.179182
| 0.156396
| 0.086484
| 0.059037
| 0
| 0.013638
| 0.250451
| 3,326
| 121
| 78
| 27.487603
| 0.760931
| 0.164762
| 0
| 0.141026
| 0
| 0
| 0.192963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0.025641
| 0.038462
| 0
| 0.205128
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f57c524ea058c9eaac99f335f5d9b80e94762f25
| 2,024
|
py
|
Python
|
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
import random
# Parameters
states_num: int = 900
trans_per_state: int = 3
transitions_num: int = trans_per_state * states_num
num_non_zero_start_probs: int = 2
emit_range: int = 20
file_name: str = "random_" + \
str(states_num) + "_" + str(transitions_num) + "_" + \
str(emit_range) + "_" + str(num_non_zero_start_probs) + ".chmm"
# Implicit parameter for probabilities generation
rng_range: int = 100
def generate_probability_list(length: int) -> list:
# Fill list with random values, then divide all elements to sum of probs,
# so sum(probs) == 1
probs: list = []
for _ in range(length):
probs.append(random.randrange(rng_range))
sum_of_list: int = sum(probs)
    # Cast to strings truncated to 6 characters, i.e. roughly 4 decimal places
probs = list(
map(lambda x: str(float(x) / sum_of_list)[:6], probs))
return probs
# Generation
with open(file_name, 'w') as f:
f.write(str(states_num) + '\n')
# Start probabilities pairs info
start_probs: list = generate_probability_list(num_non_zero_start_probs)
f.write(str(num_non_zero_start_probs) + '\n')
for i in range(num_non_zero_start_probs):
f.write(str(i) + ' ' + start_probs[i] + '\n')
# Emissions probabilities for each state
f.write(str(emit_range) + '\n')
for _ in range(states_num):
emit_probs: list = generate_probability_list(emit_range)
emit_str: str = ' '.join(emit_probs) + '\n'
f.write(emit_str)
# Transitions info
f.write(str(transitions_num) + '\n')
for src in range(states_num):
used_dst: list = []
for _ in range(trans_per_state):
dst: int = random.randrange(states_num)
while (dst in used_dst):
dst = random.randrange(states_num)
used_dst.append(dst)
trans_probs: list = generate_probability_list(trans_per_state)
for i in range(trans_per_state):
f.write(str(src) + ' ' + str(used_dst[i]) +
' ' + trans_probs[i] + '\n')
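The generated .chmm layout is: state count; start-probability count and one "index prob" pair per line; emission range followed by one emission row per state; then the transition count and one "src dst prob" line per transition. A reader sketch under that assumption:

# Hypothetical reader for the .chmm layout produced above.
def read_chmm(path: str):
    with open(path) as f:
        lines = iter(f.read().split('\n'))
        states = int(next(lines))
        n_start = int(next(lines))
        start = [next(lines).split() for _ in range(n_start)]
        emit_range = int(next(lines))
        emissions = [next(lines).split() for _ in range(states)]
        n_trans = int(next(lines))
        trans = [next(lines).split() for _ in range(n_trans)]
    return states, start, emit_range, emissions, trans

Note that because the writer truncates each probability string to 6 characters, a row read back may not sum to exactly 1.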
| 32.126984
| 77
| 0.64081
| 289
| 2,024
| 4.211073
| 0.273356
| 0.059162
| 0.044371
| 0.061627
| 0.211175
| 0.082991
| 0.047658
| 0.047658
| 0
| 0
| 0
| 0.009766
| 0.241107
| 2,024
| 62
| 78
| 32.645161
| 0.782552
| 0.148221
| 0
| 0
| 0
| 0
| 0.019837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.02439
| 0
| 0.073171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f580e360a82ba7dad75ab77286f0111cf9d43ab3
| 392
|
py
|
Python
|
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
import socketio
sio = socketio.Client()
@sio.event
def connect():
print('connection established')
@sio.event
def my_message(data):
print('message received with ', data)
sio.emit('my response', {'response': 'my response'})
@sio.event
def disconnect():
print('disconnected from server')
sio.connect('http://localhost:5000')
sio.wait()
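The client above registers a my_message handler, so a matching server must emit that event; a minimal counterpart sketch assuming python-socketio with eventlet (not part of this file):

# Hypothetical matching server: greets each client with 'my_message'.
import eventlet
import socketio

sio = socketio.Server()
app = socketio.WSGIApp(sio)

@sio.event
def connect(sid, environ):
    sio.emit('my_message', {'hello': 'world'}, to=sid)

if __name__ == '__main__':
    eventlet.wsgi.server(eventlet.listen(('', 5000)), app)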
| 17.818182
| 57
| 0.660714
| 47
| 392
| 5.489362
| 0.531915
| 0.093023
| 0.127907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.193878
| 392
| 21
| 58
| 18.666667
| 0.803797
| 0
| 0
| 0.333333
| 0
| 0
| 0.320755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.133333
| 0
| 0.333333
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5869e041f8cfc604cdaeae8bc529488e18f09e4
| 3,812
|
py
|
Python
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 103
|
2015-03-28T14:32:44.000Z
|
2021-03-31T08:20:24.000Z
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 6
|
2016-05-17T13:31:56.000Z
|
2020-11-13T17:19:19.000Z
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 106
|
2015-05-10T14:29:06.000Z
|
2021-07-13T08:19:19.000Z
|
import os
import zarr
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import tqdm, trange
class FaceDataset(Dataset):
def __init__(self, path, transforms=None):
self.path = path
self.keys = ('images', 'labels')
assert os.path.exists(path), 'file `{}` not exists!'.format(path)
with zarr.LMDBStore(path) as store:
zarr_db = zarr.group(store=store)
self.num_examples = zarr_db['labels'].shape[0]
self.datasets = None
if transforms is None:
transforms = {
'labels': lambda v: torch.tensor(v, dtype=torch.long),
'images': lambda v: torch.tensor((v - 127.5)/127.5, dtype=torch.float32)
}
self.transforms = transforms
def __len__(self):
return self.num_examples
def __getitem__(self, idx):
if self.datasets is None:
store = zarr.LMDBStore(self.path)
zarr_db = zarr.group(store=store)
self.datasets = {key: zarr_db[key] for key in self.keys}
items = []
for key in self.keys:
item = self.datasets[key][idx]
if key in self.transforms:
item = self.transforms[key](item)
items.append(item)
return items
class Model(nn.Module):
def __init__(self, input_size=96 * 96 * 3, output_size=126,
hidden_size=25):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=6, stride=2, padding=2),
nn.BatchNorm2d(16), nn.ReLU(), nn.MaxPool2d(
kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=6, stride=2, padding=2),
nn.BatchNorm2d(32), nn.ReLU(), nn.MaxPool2d(
kernel_size=2, stride=2))
self.fc = nn.Linear(6 * 6 * 32, output_size)
self.criteria = nn.CrossEntropyLoss()
def forward(self, inputs):
outputs = self.layer1(inputs)
outputs = self.layer2(outputs)
outputs = outputs.reshape(outputs.size(0), -1)
outputs = self.fc(outputs)
return outputs
def main(batch_size=64, epochs=50):
data_train = FaceDataset('data/anime_faces/train.lmdb')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loader = DataLoader(data_train, batch_size=batch_size, num_workers=10)
model = Model()
model.to(device)
model.train()
optim = torch.optim.Adam(model.parameters(), lr=0.001)
for epoch in trange(epochs):
t = tqdm(loader)
for i, (images, labels) in enumerate(t):
images = images.to(device)
labels = labels.to(device)
optim.zero_grad()
logits = model(images)
loss = model.criteria(logits, labels)
loss.backward()
optim.step()
predicts = torch.argmax(F.softmax(logits, dim=1), dim=1)
accuracy = (predicts == labels).to(torch.float32).mean()
t.set_postfix(
epoch=epoch, i=i, loss=loss.item(), accuracy=accuracy.item())
data_val = FaceDataset('data/anime_faces/val.lmdb')
val_loader = DataLoader(data_val, batch_size=batch_size, num_workers=0)
total = len(data_val)
total_correct = 0
model.eval()
for images, labels in val_loader:
images = images.to(device)
labels = labels.to(device)
logits = model(images)
predicts = torch.argmax(F.softmax(logits, dim=1), dim=1)
correct = (predicts == labels).sum()
total_correct += correct.item()
print('Val accuracy = {}'.format(total_correct / total))
if __name__ == '__main__':
main()
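FaceDataset assumes an LMDB-backed zarr group with 'images' and 'labels' arrays; a writer sketch under that assumption (shapes, dtypes, and chunking are guesses from the transforms and the 96x96 model input, and zarr.LMDBStore is the zarr v2 API):

# Hypothetical builder for the LMDB-backed zarr store FaceDataset reads.
import zarr

def build_store(path, images, labels):
    # images: (N, 3, 96, 96) uint8, labels: (N,) int -- assumed layout
    store = zarr.LMDBStore(path)
    root = zarr.group(store=store, overwrite=True)
    root.array('images', images, chunks=(64, 3, 96, 96))
    root.array('labels', labels, chunks=(1024,))
    store.close()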
| 33.147826
| 88
| 0.597587
| 485
| 3,812
| 4.564948
| 0.294845
| 0.020325
| 0.012195
| 0.01626
| 0.248419
| 0.195122
| 0.169828
| 0.143631
| 0.107498
| 0.072267
| 0
| 0.027646
| 0.278856
| 3,812
| 114
| 89
| 33.438596
| 0.777737
| 0
| 0
| 0.12766
| 0
| 0
| 0.035414
| 0.013641
| 0
| 0
| 0
| 0
| 0.010638
| 1
| 0.06383
| false
| 0
| 0.085106
| 0.010638
| 0.202128
| 0.010638
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f586db857714c3a406cc8d011335a90b361a86d4
| 1,066
|
py
|
Python
|
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | null | null | null |
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | 2
|
2021-03-31T19:47:28.000Z
|
2021-06-08T20:39:41.000Z
|
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | null | null | null |
import os
import scrapy
from pepper.items import PepperItem
class PepperSpider(scrapy.Spider):
name = 'pepper'
start_urls = ['https://blog.drpepper.com.br']
def parse(self, response):
images = response.xpath(
'.//img[contains(@class,"size-full")]'
)
images += response.xpath(
'.//img[contains(@class,"alignnone")]'
)
images += response.xpath(
'.//img[contains(@src,"/tirinhas/")]'
)
images = set(images)
for img in images:
link = img.xpath('./@src').get()
yield PepperItem(
name=os.path.basename(link),
description=img.xpath('./parent::p/text()').get(),
link=link,
image_urls=[link],
)
current_page = response.xpath('//span[@class="page-numbers current"]')
next_page = current_page.xpath('./parent::li/following-sibling::li[1]/a/@href').get()
if next_page:
yield scrapy.Request(next_page, callback=self.parse)
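Because PepperItem carries image_urls, downloading the strips presumably goes through Scrapy's ImagesPipeline; the settings fragment below is an assumption, not shown in this file:

# Hypothetical settings.py fragment enabling image downloads for
# the image_urls field populated above.
ITEM_PIPELINES = {
    'scrapy.pipelines.images.ImagesPipeline': 1,
}
IMAGES_STORE = 'downloads'  # local folder for the fetched strips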
| 28.052632
| 93
| 0.541276
| 113
| 1,066
| 5.044248
| 0.513274
| 0.091228
| 0.1
| 0.115789
| 0.175439
| 0.122807
| 0
| 0
| 0
| 0
| 0
| 0.001339
| 0.29925
| 1,066
| 37
| 94
| 28.810811
| 0.761714
| 0
| 0
| 0.068966
| 0
| 0
| 0.231707
| 0.167917
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5885ba233a8e2203989f8de45355db074bbea32
| 4,334
|
py
|
Python
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 5
|
2015-03-12T00:36:33.000Z
|
2022-02-24T16:41:25.000Z
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 133
|
2016-02-03T23:54:45.000Z
|
2022-03-30T21:33:58.000Z
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 6
|
2015-01-07T23:21:15.000Z
|
2017-12-07T08:26:33.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
import simplejson as json
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server.org_filters import SearchFilterChain
def spot_with_noise_level(name, noise_level):
"""Create a spot with the given noise level"""
spot = Spot.objects.create(name=name)
spot.spotextendedinfo_set.create(key='noise_level',
value=noise_level)
return spot
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SEARCH_FILTERS=(
'spotseeker_server.org_filters.uw_search.Filter',))
class UWNoiseLevelTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.silent_spot = spot_with_noise_level('Silent Spot', 'silent')
cls.quiet_spot = spot_with_noise_level('Quiet Spot', 'quiet')
cls.moderate_spot = spot_with_noise_level('Moderate', 'moderate')
cls.variable_spot = spot_with_noise_level('Var Spot', 'variable')
@classmethod
def tearDownClass(cls):
Spot.objects.all().delete()
def get_spots_for_noise_levels(self, levels):
"""Do a search for spots with particular noise levels"""
c = self.client
response = c.get('/api/v1/spot',
{'extended_info:noise_level': levels},
content_type='application/json')
return json.loads(response.content)
def assertResponseSpaces(self, res_json, spaces):
"""
Assert that a particular decoded response contains exactly the same
spaces as 'spaces'.
"""
def sortfunc(spot_dict):
return spot_dict['id']
expected_json = [spot.json_data_structure() for spot in spaces]
expected_json.sort(key=sortfunc)
res_json.sort(key=sortfunc)
self.assertEqual(expected_json, res_json)
def test_only_silent(self):
"""Searching for silent should return only silent"""
        SearchFilterChain._load_filters()  # make sure the UW filters are loaded
res_json = self.get_spots_for_noise_levels(['silent'])
self.assertResponseSpaces(res_json, [self.silent_spot])
def test_uw_only_quiet(self):
"""Quiet should return both a quiet spot and variable"""
        SearchFilterChain._load_filters()  # make sure the UW filters are loaded
res_json = self.get_spots_for_noise_levels(['quiet'])
expected = [self.quiet_spot, self.variable_spot]
self.assertResponseSpaces(res_json, expected)
def test_uw_only_moderate(self):
"""Moderate should return moderate and variable"""
        SearchFilterChain._load_filters()  # make sure the UW filters are loaded
res_json = self.get_spots_for_noise_levels(['moderate'])
expected = [self.moderate_spot, self.variable_spot]
self.assertResponseSpaces(res_json, expected)
def test_uw_silent_and_quiet(self):
"""Silent+quiet should give everything but moderate"""
        SearchFilterChain._load_filters()  # make sure the UW filters are loaded
res_json = self.get_spots_for_noise_levels(['silent', 'quiet'])
expected = [self.quiet_spot, self.silent_spot, self.variable_spot]
self.assertResponseSpaces(res_json, expected)
def test_uw_silent_and_moderate(self):
"""Silent+moderate should give everything but quiet"""
        SearchFilterChain._load_filters()  # make sure the UW filters are loaded
res_json = self.get_spots_for_noise_levels(['silent', 'moderate'])
expected = [self.silent_spot, self.moderate_spot, self.variable_spot]
self.assertResponseSpaces(res_json, expected)
def test_uw_all_three(self):
"""All 3 should give everything"""
        SearchFilterChain._load_filters()  # make sure the UW filters are loaded
query = ['silent', 'quiet', 'moderate']
res_json = self.get_spots_for_noise_levels(query)
expected = [self.silent_spot,
self.quiet_spot,
self.moderate_spot,
self.variable_spot]
self.assertResponseSpaces(res_json, expected)
| 42.910891
| 79
| 0.682972
| 527
| 4,334
| 5.358634
| 0.220114
| 0.037181
| 0.027266
| 0.03966
| 0.415368
| 0.360836
| 0.342422
| 0.342422
| 0.330737
| 0.330737
| 0
| 0.002384
| 0.225658
| 4,334
| 100
| 80
| 43.34
| 0.839094
| 0.171435
| 0
| 0.185714
| 0
| 0
| 0.07631
| 0.028474
| 0
| 0
| 0
| 0
| 0.114286
| 1
| 0.171429
| false
| 0
| 0.085714
| 0.014286
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f58db2e3a8108081fdad6ca36c2b07a1f84d614d
| 1,476
|
py
|
Python
|
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | 1
|
2021-06-30T10:34:38.000Z
|
2021-06-30T10:34:38.000Z
|
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | null | null | null |
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | null | null | null |
from math import floor
class Integer:
def __init__(self, value):
self.value = value
@classmethod
def from_float(cls, float_value):
if isinstance(float_value, float):
return cls(floor(float_value))
else:
return 'value is not a float'
@classmethod
def from_roman(cls, value):
try:
roman_nums = list(value)
translate = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
arabic_nums = [translate[r] for r in roman_nums]
arabic_sum = sum(
val if val >= next_val else -val
for val, next_val in zip(arabic_nums[:-1], arabic_nums[1:])
) + arabic_nums[-1]
return cls(int(arabic_sum))
except Exception:
pass
@classmethod
def from_string(cls, value):
if isinstance(value, str):
try:
return cls(int(value))
except Exception:
return 'wrong type'
else:
return 'wrong type'
def add(self, num):
if isinstance(num, Integer):
return self.value + getattr(num, 'value')
else:
return 'number should be an Integer instance'
    def __repr__(self):
        return str(self.value)
first_num = Integer(10)
second_num = Integer.from_roman("IV")
print(Integer.from_float("2.6"))
print(Integer.from_string(2.6))
print(first_num.add(second_num))
| 25.448276
| 89
| 0.554201
| 185
| 1,476
| 4.254054
| 0.378378
| 0.045743
| 0.068615
| 0.043202
| 0.041931
| 0.041931
| 0
| 0
| 0
| 0
| 0
| 0.025484
| 0.335366
| 1,476
| 57
| 90
| 25.894737
| 0.776758
| 0
| 0
| 0.272727
| 0
| 0
| 0.063008
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0.022727
| 0.022727
| 0.022727
| 0.386364
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f58e82435946520f98ad569c02443f0eda8332d6
| 1,988
|
py
|
Python
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 6
|
2017-04-06T02:55:16.000Z
|
2020-01-27T05:14:12.000Z
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 13
|
2016-09-12T14:24:22.000Z
|
2021-10-22T01:19:43.000Z
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 1
|
2016-09-12T14:01:49.000Z
|
2016-09-12T14:01:49.000Z
|
import json
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
def get_sti():
# https://github.com/hongtaocai/googlefinance
    return '<a href="https://chart.finance.yahoo.com/t?s=%5eSTI&lang=en-SG&region=SG&width=300&height=180" >'
def get_fx():
url = 'https://eservices.mas.gov.sg/api/action/datastore/search.json?resource_id=95932927-c8bc-4e7a-b484-68a66a24edfe&limit=1&sort=end_of_day%20desc'
request = requests.get(url)
data = json.loads(request.text)
result_today = data['result']['records'][0]
AUD = 1/float(result_today['aud_sgd'])*1
CNY = 1/float(result_today['cny_sgd_100'])*100
HKD = 1/float(result_today['hkd_sgd_100'])*100
EUR = 1/float(result_today['eur_sgd'])*1
JPY = 1/float(result_today['jpy_sgd_100'])*100
MYR = 1/float(result_today['myr_sgd_100'])*100
THB = 1/float(result_today['thb_sgd_100'])*100
TWD = 1/float(result_today['twd_sgd_100'])*100
USD = 1/float(result_today['usd_sgd'])*1
VND = 1/float(result_today['vnd_sgd_100'])*100
list_curr = {'AUD': AUD, 'CNY':CNY, 'HKD':HKD, 'EUR':EUR, 'JPY':JPY,
'MYR':MYR, 'THB':THB, 'TWD':TWD, 'USD':USD, 'VND':VND}
text_final = '<b>Latest SGD End of Day Rates ' + result_today['end_of_day'] + '</b>\n\n'
for key in sorted(list_curr.keys()):
text_final += key + " " + str(round(list_curr[key], 3)) + " = 1 SGD \n"
return text_final
def get_sibor():
# Connect to Source
url = 'http://www.moneysmart.sg/home-loan/sibor-trend'
data = urlopen(url)
soup = BeautifulSoup(data, 'html.parser')
# Find latest Result
result = soup.findAll("div", {"class" : "sibor-sor-table"})
result = result[0].findAll("td")
result = result[1:]
text_final = '<b>Latest SIBOR Rates</b>\n\n'
name = result[0:][::2]
rate = result[1:][::2]
for i in range(0, 4):
text_final += name[i].get_text() + " - " + rate[i].get_text() + "\n"
return text_final
| 33.694915
| 153
| 0.639336
| 311
| 1,988
| 3.932476
| 0.379421
| 0.107931
| 0.098119
| 0.139002
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059291
| 0.177062
| 1,988
| 58
| 154
| 34.275862
| 0.688264
| 0.040241
| 0
| 0.05
| 0
| 0.05
| 0.291645
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.1
| 0.025
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5931d77f9a036d1b90d5e9b889749394d2eff5e
| 1,124
|
py
|
Python
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | 1
|
2021-05-31T09:40:19.000Z
|
2021-05-31T09:40:19.000Z
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | 1
|
2021-06-30T14:35:22.000Z
|
2021-06-30T14:35:22.000Z
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | null | null | null |
from joblib import Memory
cachedir = "cache"
memory = Memory(cachedir, verbose=10)
# @memory.cache
def filter_ff_stories(books, max_rating, min_words, max_words, min_chapters, max_chapters, max_books):
print("filtering ff stories")
ratings = {"K":1, "K+":2, "T":3, "M":4, "MA":5 }
rating_number = ratings[max_rating]
delete_ids = []
for bookid, book in enumerate(books):
if bookid % 1000 == 0:
print(f"filtering book {bookid} now")
removal = False
if book["Language"] != "English":
removal = True
if ratings[book["Rating"]] > rating_number:
removal = True
words = int(book["Words"].replace(",",""))
if not (min_words <= words <= max_words):
removal = True
chapters = int(book["Chapters"].replace(",",""))
if not (min_chapters <= chapters <= max_chapters):
removal = True
if removal:
delete_ids.append(bookid)
for bookid in reversed(delete_ids):
del books[bookid]
books = books[:max_books]
return books
| 25.545455
| 102
| 0.572954
| 133
| 1,124
| 4.699248
| 0.413534
| 0.0704
| 0.0416
| 0.048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.295374
| 1,124
| 44
| 103
| 25.545455
| 0.77399
| 0.011566
| 0
| 0.142857
| 0
| 0
| 0.085586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.107143
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f594558a69e840af8885fc68a994d40b44b65eaf
| 1,169
|
py
|
Python
|
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
import os
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
resnet_18_default = 224
def _get_dataset(resize=resnet_18_default):
transform = transforms.Compose(
[transforms.Resize(resize),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root=dir_path, train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root=dir_path, train=False, download=True, transform=transform)
return trainset, testset
def _get_classes():
return ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def get_dataloader(train=True, batch_size=16):
animal_indices = [2, 3, 4, 5, 6, 7]
#animal_sampler = SubsetRandomSampler(animal_indices)
if train:
return DataLoader(_get_dataset()[0], batch_size)
else:
return DataLoader(_get_dataset()[1], batch_size)
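The commented-out sampler hints at restricting training to the six animal classes, but animal_indices holds class labels, not dataset sample indices; wiring it up correctly would look roughly like this sketch (get_animal_dataloader is a hypothetical helper reusing the module's imports):

# Hypothetical wiring for the commented-out sampler: translate the
# animal *class labels* into dataset *sample indices* first.
def get_animal_dataloader(batch_size=16):
    trainset, _ = _get_dataset()
    animal_classes = {2, 3, 4, 5, 6, 7}  # bird, cat, deer, dog, frog, horse
    # trainset.targets holds the integer class label of each sample
    indices = [i for i, label in enumerate(trainset.targets)
               if label in animal_classes]
    sampler = SubsetRandomSampler(indices)
    return DataLoader(trainset, batch_size, sampler=sampler)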
| 33.4
| 107
| 0.718563
| 151
| 1,169
| 5.390728
| 0.437086
| 0.014742
| 0.018428
| 0.02457
| 0.117936
| 0.117936
| 0.117936
| 0.014742
| 0.014742
| 0.014742
| 0
| 0.033333
| 0.153122
| 1,169
| 34
| 108
| 34.382353
| 0.788889
| 0.044482
| 0
| 0
| 0
| 0
| 0.035874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.230769
| 0.038462
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5970041908938ed814405d6c8377946dc2070bf
| 3,680
|
py
|
Python
|
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
from scipy import io
import numpy as np
import random
import tensorflow as tf
class_num = 10
image_size = 32
img_channels = 3
def OneHot(label,n_classes):
label=np.array(label).reshape(-1)
label=np.eye(n_classes)[label]
return label
def prepare_data():
classes = 10
data1 = io.loadmat('./data/train_32x32.mat')
data2 = io.loadmat('./data/test_32x32.mat')
data3 = io.loadmat('./data/extra_32x32.mat')
train_data = data1['X']
train_labels = data1['y']
test_data = data2['X']
test_labels = data2['y']
extra_data = data3['X']
extra_labels = data3['y']
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
extra_data = extra_data.astype('float32')
train_data = np.transpose(train_data, (3, 0, 1, 2))
test_data = np.transpose(test_data, (3, 0, 1, 2))
extra_data = np.transpose(extra_data, (3, 0, 1, 2))
train_labels[train_labels == 10] = 0
test_labels[test_labels == 10] = 0
extra_labels[extra_labels == 10] = 0
train_labels = train_labels[:, 0]
test_labels = test_labels[:, 0]
extra_labels = extra_labels[:, 0]
train_labels = OneHot(train_labels, classes)
test_labels = OneHot(test_labels, classes)
extra_labels = OneHot(extra_labels, classes)
# truncate the train data and test data
train_data = train_data[0:50000,:,:,:]
train_labels = train_labels[0:50000,:]
test_data = test_data[0:10000,:,:,:]
test_labels = test_labels[0:10000,:]
# train_data = np.concatenate((train_data,extra_data),axis=0)
# train_labels = np.concatenate((train_labels,extra_labels),axis=0)
print('Train data:', train_data.shape, ', Train labels:', train_labels.shape)
print('Test data:', test_data.shape, ', Test labels:', test_labels.shape)
return train_data, train_labels, test_data, test_labels
def _random_crop(batch, crop_shape, padding=None):
oshape = np.shape(batch[0])
if padding:
oshape = (oshape[0] + 2 * padding, oshape[1] + 2 * padding)
new_batch = []
npad = ((padding, padding), (padding, padding), (0, 0))
for i in range(len(batch)):
new_batch.append(batch[i])
if padding:
new_batch[i] = np.lib.pad(batch[i], pad_width=npad,
mode='constant', constant_values=0)
nh = random.randint(0, oshape[0] - crop_shape[0])
nw = random.randint(0, oshape[1] - crop_shape[1])
new_batch[i] = new_batch[i][nh:nh + crop_shape[0],
nw:nw + crop_shape[1]]
return new_batch
def _random_flip_leftright(batch):
for i in range(len(batch)):
if bool(random.getrandbits(1)):
batch[i] = np.fliplr(batch[i])
return batch
def color_preprocessing(x_train, x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
    # per-channel zero-mean / unit-std normalization; train and test each use their own statistics
    for ch in range(img_channels):
        x_train[:, :, :, ch] = (x_train[:, :, :, ch] - np.mean(x_train[:, :, :, ch])) / np.std(x_train[:, :, :, ch])
        x_test[:, :, :, ch] = (x_test[:, :, :, ch] - np.mean(x_test[:, :, :, ch])) / np.std(x_test[:, :, :, ch])
return x_train, x_test
def data_augmentation(batch):
batch = _random_flip_leftright(batch)
batch = _random_crop(batch, [32, 32], 4)
return batch
| 32.857143
| 108
| 0.6
| 534
| 3,680
| 3.902622
| 0.170412
| 0.048944
| 0.020154
| 0.042226
| 0.144434
| 0.018234
| 0
| 0
| 0
| 0
| 0
| 0.04681
| 0.216304
| 3,680
| 112
| 109
| 32.857143
| 0.675798
| 0.045924
| 0
| 0.075949
| 0
| 0
| 0.04675
| 0.018529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075949
| false
| 0
| 0.050633
| 0
| 0.202532
| 0.025316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5a27b850295f14cce9d9e2cff15b6524fbbecf8
| 4,562
|
py
|
Python
|
cogs/automod.py
|
ZeroTwo36/midna
|
f78591baacdd32386d9155cb7728de7384016361
|
[
"MIT"
] | 1
|
2022-01-18T09:53:34.000Z
|
2022-01-18T09:53:34.000Z
|
cogs/automod.py
|
ZeroTwo36/midna
|
f78591baacdd32386d9155cb7728de7384016361
|
[
"MIT"
] | null | null | null |
cogs/automod.py
|
ZeroTwo36/midna
|
f78591baacdd32386d9155cb7728de7384016361
|
[
"MIT"
] | null | null | null |
import discord as nextcord
import asyncio
from discord.ext import commands
import json
import time
import typing
def log(*,text):
...
class AutoMod(commands.Cog):
def __init__(self,bot):
self.bot=bot
self._cd = commands.CooldownMapping.from_cooldown(5, 5, commands.BucketType.member) # Change accordingly
def get_ratelimit(self, message: nextcord.Message) -> typing.Optional[int]:
"""Returns the ratelimit left"""
bucket = self._cd.get_bucket(message)
return bucket.update_rate_limit()
@commands.Cog.listener()
async def on_message(self,message):
        if message.author.bot:
            return
with open("config.json") as f:
config = json.load(f)
if message.content == message.content.upper():
print("ALL CAPS")
if config[str(message.guild.id)]["ancap"] == True and not str(message.channel.id) in config[str(message.guild.id)]["whitelists"]:
await message.delete()
await message.author.send("Please don't spam Capital letters")
ratelimit = self.get_ratelimit(message)
if ratelimit is None:
...
else:
role = nextcord.utils.get(message.guild.roles,name="MUTED (By Midna)")
if not role:
role = await message.guild.create_role(name="MUTED (By Midna)",permissions=nextcord.Permissions(send_messages=False,read_messages=True))
await role.edit(position=2)
for c in message.guild.categories:
await c.set_permissions(role,send_messages=False)
await message.author.add_roles(role)
embed = nextcord.Embed(title="🔇 Member silenced | 2m")
embed.add_field(name="Reason",value="Message Spam")
embed.set_footer(text=f'{message.author} | {message.author.id}')
await message.channel.send(embed=embed)
await asyncio.sleep(120)
await message.author.remove_roles(role)
@commands.command()
async def anticaps(self,ctx,enabled:bool=False):
with open("config.json") as f:
config = json.load(f)
config[str(ctx.guild.id)]["ancap"] = enabled
with open("config.json","w+") as f:
json.dump(config,f)
embed = nextcord.Embed(color=nextcord.Color.green())
embed.description = f':white_check_mark: Anti Caps is now set to {enabled}!'
await ctx.send(embed=embed)
@commands.command(help="Open the Lockdown")
@commands.has_permissions(manage_channels=True)
async def openlockdown(self,ctx):
with open("config.json") as f:
config = json.load(f)
await ctx.channel.edit(slowmode_delay=0)
await ctx.send("This channel is no longer under lockdown")
@commands.command(help="Starts a Lockdown in the current channel")
@commands.has_permissions(manage_channels=True)
async def lockdown(self,ctx):
with open("config.json") as f:
config = json.load(f)
await ctx.channel.edit(slowmode_delay=config[str(ctx.guild.id)]["emergencyLock"])
await ctx.send("This channel is now under lockdown")
@commands.command(help="Set the Rate Limit, a channel will be put into upon being spammed")
@commands.has_permissions(manage_channels=True)
async def emratelimit(self,ctx,rate=60):
with open("config.json") as f:
config = json.load(f)
config[str(ctx.guild.id)]["emergencyLock"] = rate
with open("config.json","w+") as f:
json.dump(config,f)
embed = nextcord.Embed(color=nextcord.Color.green())
embed.description = f':white_check_mark: Emergency Member Rate limit is now set to {rate}!'
await ctx.send(embed=embed)
@commands.command(help="The Threshold of how many messages a user can send before its detected as spam")
@commands.has_permissions(manage_channels=True)
async def empanicrate(self,ctx,rate=5):
with open("config.json") as f:
config = json.load(f)
config[str(ctx.guild.id)]["panicRate"] = rate
with open("config.json","w+") as f:
json.dump(config,f)
embed = nextcord.Embed(color=nextcord.Color.green())
embed.description = f':white_check_mark: Emergency Member Rate limit is now set to {rate}!'
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(AutoMod(bot))
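The cog reads several per-guild keys from config.json; judging by the lookups above, an entry has roughly this shape (a reconstruction from the code, not a documented schema):

# Hypothetical per-guild entry in config.json, reconstructed from
# the keys the cog reads and writes above.
example_config = {
    "123456789012345678": {          # guild id as a string key
        "ancap": True,               # anti-caps toggle
        "whitelists": ["234567890123456789"],  # exempt channel ids
        "emergencyLock": 60,         # slowmode delay used by lockdown
        "panicRate": 5,              # written by empanicrate; the cooldown itself is hardcoded (5, 5)
    }
}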
| 38.016667
| 153
| 0.622534
| 583
| 4,562
| 4.806175
| 0.283019
| 0.053533
| 0.044968
| 0.057816
| 0.450749
| 0.411492
| 0.377587
| 0.377587
| 0.287652
| 0.287652
| 0
| 0.003257
| 0.259754
| 4,562
| 119
| 154
| 38.336134
| 0.826177
| 0.010083
| 0
| 0.355556
| 0
| 0
| 0.17627
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.066667
| 0
| 0.133333
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5a40afb92b821bdbd1bca8cea58ac0b9702d2e6
| 960
|
py
|
Python
|
task07.py
|
G00387867/pands-problems
|
01db5fd26eb0327f6f61da7e06dfe1f2b9f0333c
|
[
"MIT"
] | null | null | null |
task07.py
|
G00387867/pands-problems
|
01db5fd26eb0327f6f61da7e06dfe1f2b9f0333c
|
[
"MIT"
] | null | null | null |
task07.py
|
G00387867/pands-problems
|
01db5fd26eb0327f6f61da7e06dfe1f2b9f0333c
|
[
"MIT"
] | null | null | null |
# Adam
# A program that reads in a text
# file and outputs the number of e's it contains
# The program takes the filename from
# an argument on the command line.
# I found information on this website:
# https://www.sanfoundry.com/python-program-read-file-counts-number/
#fname = input("Enter file name: ")
#l = input("Enter letter to be searched: ")
#e = 0
#with open(fname, "r") as f:
#for line in f:
#words = line.split()
#for i in words:
#for letter in i:
#if(letter == e):
#e = e+1
#print("Occurences of the letter: ")
#print(e)
# The requirement for this assignment is to only print
# the occurrence count of the letter E.
fname = input("Enter file name: ")
e = 0
with open(fname, "r") as f:
for line in f:
words = line.split()
for i in words:
for letter in i:
if(letter == "e"):
e = e+1
print(e)
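The header comment asks for the filename as a command-line argument, while the script prompts via input(); an equivalent argv-based variant matching the stated requirement:

# Variant that takes the filename from the command line, as the
# header comment describes; counts the same letters as the loop above.
import sys

def count_letter(fname, letter='e'):
    with open(fname) as f:
        return f.read().count(letter)

if __name__ == '__main__':
    print(count_letter(sys.argv[1]))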
| 20.869565
| 68
| 0.558333
| 143
| 960
| 3.748252
| 0.447552
| 0.014925
| 0.05597
| 0.070896
| 0.395522
| 0.309701
| 0.309701
| 0.309701
| 0.309701
| 0.309701
| 0
| 0.006211
| 0.329167
| 960
| 45
| 69
| 21.333333
| 0.826087
| 0.596875
| 0
| 0
| 0
| 0
| 0.052342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5a59287ceaf7b3b0006e335abd2aae06f9ad302
| 3,936
|
py
|
Python
|
texext/tests/test_tinypages.py
|
effigies/texext
|
545ecf3715ab43bfb95859861fbb17af1fef512d
|
[
"BSD-2-Clause"
] | null | null | null |
texext/tests/test_tinypages.py
|
effigies/texext
|
545ecf3715ab43bfb95859861fbb17af1fef512d
|
[
"BSD-2-Clause"
] | null | null | null |
texext/tests/test_tinypages.py
|
effigies/texext
|
545ecf3715ab43bfb95859861fbb17af1fef512d
|
[
"BSD-2-Clause"
] | null | null | null |
""" Tests for tinypages build using sphinx extensions """
from os.path import (join as pjoin, dirname, isdir)
import sphinx
SPHINX_ge_1p5 = sphinx.version_info[:2] >= (1, 5)
from sphinxtesters import PageBuilder
HERE = dirname(__file__)
PAGES = pjoin(HERE, 'tinypages')
from texext.tests.test_plotdirective import format_math_block
def _pdiff(str1, str2):
# For debugging
from difflib import ndiff
print(''.join(ndiff(str1.splitlines(True), str2.splitlines(True))))
class TestTinyPages(PageBuilder):
# Test build and output of tinypages project
page_source_template = PAGES
def test_some_math(self):
assert isdir(self.out_dir)
assert isdir(self.doctree_dir)
doctree = self.get_doctree('some_math')
assert len(doctree.document) == 1
tree_str = self.doctree2str(doctree)
if SPHINX_ge_1p5:
back_ref = (
'<paragraph>Refers to equation at '
'<pending_xref refdoc="some_math" refdomain="math" '
'refexplicit="False" reftarget="some-label" '
'reftype="eq" refwarn="True">'
'<literal classes="xref eq">some-label</literal>'
'</pending_xref>')
else:
back_ref=(
'<paragraph>Refers to equation at '
'<eqref docname="some_math" '
'target="some-label">(?)</eqref>')
expected = (
'<title>Some math</title>\n'
'<paragraph>Here <math latex="a = 1"/>, except '
'<title_reference>$b = 2$</title_reference>.</paragraph>\n'
'<paragraph>Here <math latex="c = 3"/>, except '
'<literal>$d = 4$</literal>.</paragraph>\n'
'<literal_block xml:space="preserve">'
'Here $e = 5$</literal_block>\n'
'<bullet_list bullet="*">'
'<list_item>'
'<paragraph>'
'A list item containing\n'
'<math latex="f = 6"/> some mathematics.'
'</paragraph>'
'</list_item>'
'<list_item>'
'<paragraph>'
'A list item containing '
'<literal>a literal across\nlines</literal> '
'and also <math latex="g = 7"/> some mathematics.'
'</paragraph>'
'</list_item>'
'</bullet_list>\n'
+ format_math_block('some_math', "10 a + 2 b + q") +
'\n<paragraph>More text</paragraph>\n'
'<target refid="equation-some-label"/>\n'
+ format_math_block(
'some_math', "5 a + 3 b",
label='some-label',
number='1',
ids='equation-some-label') +
'\n<paragraph>Yet more text</paragraph>\n'
+ format_math_block(
"some_math", latex="5 w + 3 x") + '\n' +
r'<paragraph>Math with <math latex="\beta"/> a backslash.'
'</paragraph>\n'
'<paragraph>' # What happens to backslashes?
'A protected whitespace with <math latex="dollars"/>.'
'</paragraph>\n'
'<paragraph>'
'Some * asterisks *. <math latex="dollars"/>. '
r'A line break. Protected \ backslash. '
'Protected n in <math latex="a"/> line.</paragraph>\n'
# Do labels get set as targets?
+ back_ref +
'.</paragraph>')
assert tree_str == expected
class TestTopLevel(TestTinyPages):
# Test we can import math_dollar with just `texext`
@classmethod
def modify_source(cls):
conf_fname = pjoin(cls.page_source, 'conf.py')
with open(conf_fname, 'rt') as fobj:
contents = fobj.read()
contents = contents.replace("'texext.mathcode',\n", "")
contents = contents.replace("'texext.math_dollar'", "'texext'")
with open(conf_fname, 'wt') as fobj:
fobj.write(contents)
| 37.485714
| 71
| 0.54497
| 430
| 3,936
| 4.85814
| 0.386047
| 0.038775
| 0.028722
| 0.022978
| 0.154141
| 0.101484
| 0.067018
| 0
| 0
| 0
| 0
| 0.01037
| 0.314024
| 3,936
| 104
| 72
| 37.846154
| 0.763333
| 0.055132
| 0
| 0.206897
| 0
| 0
| 0.401295
| 0.057713
| 0
| 0
| 0
| 0
| 0.045977
| 1
| 0.034483
| false
| 0
| 0.057471
| 0
| 0.126437
| 0.011494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5a8efb033fff75dd7f358a028f0ce20386e8ec9
| 3,708
|
py
|
Python
|
core.py
|
marcolcl/django-toolkit
|
f425cccb6f55f3afce4326e7e79770e5c36c9646
|
[
"MIT"
] | 1
|
2021-04-07T14:25:01.000Z
|
2021-04-07T14:25:01.000Z
|
core.py
|
marcolcl/django-toolkit
|
f425cccb6f55f3afce4326e7e79770e5c36c9646
|
[
"MIT"
] | 5
|
2021-03-30T14:08:53.000Z
|
2021-09-22T19:29:42.000Z
|
core.py
|
marcolcl/django-toolkit
|
f425cccb6f55f3afce4326e7e79770e5c36c9646
|
[
"MIT"
] | null | null | null |
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.http import HttpRequest
from rest_framework.exceptions import NotFound
from rest_framework.test import APIRequestFactory
from rest_framework.views import exception_handler, APIView
from typing import List, TypeVar
logger = logging.getLogger(__name__)
T = TypeVar('T')
NON_CLONEABLE_MODELS: List[str] = [
'User',
]
@transaction.atomic
def clone_instance(instance: T) -> T:
"""
Clone any django model instance and its related instances recursively
Ignore many-to-many or one-to-many relationship (reverse foreign key)
Also ignore user model
ref:
https://docs.djangoproject.com/en/2.2/ref/models/fields/#attributes-for-fields-with-relations
https://github.com/jackton1/django-clone/blob/master/model_clone/mixins/clone.py
"""
# initialize a new instance
cloned_instance = instance.__class__()
fields = instance._meta.get_fields()
for field in fields:
# only clone one-to-one or forward foreign key relationship
# ignore many-to-many or reverse foreign key relationship
if field.one_to_one or field.many_to_one:
_related = getattr(instance, field.name)
# skip if related instance is None
if _related is None:
continue
# use the same reference for non-cloneable related models
if field.related_model.__name__ in NON_CLONEABLE_MODELS:
setattr(cloned_instance, field.name, _related)
else:
_cloned_related = clone_instance(_related)
setattr(cloned_instance, field.name, _cloned_related)
# simply copy the value for those non-relation fields
if not field.is_relation:
_value = getattr(instance, field.name)
setattr(cloned_instance, field.name, _value)
# set primary key as None to save a new record in DB
cloned_instance.pk = None
cloned_instance.save()
return cloned_instance
def exception_logging_handler(exc: Exception, context: dict):
"""
Intercept DRF error handler to log the error message
Update the REST_FRAMEWORK setting in settings.py to use this handler
REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'core.exception_logging_handler',
}
"""
logger.warning(exc)
# translate uncaught Django ObjectDoesNotExist exception to NotFound
if isinstance(exc, ObjectDoesNotExist):
logger.error(f'uncaught ObjectDoesNotExist error: {exc} - {context}')
exc = NotFound(str(exc))
# follow DRF default exception handler
response = exception_handler(exc, context)
return response
def make_drf_request(request: HttpRequest = None, headers: dict = None):
"""
The request object made by APIRequestFactory is `WSGIRequest` which
doesn't have `.query_params` or `.data` method as recommended by DRF.
It only gets "upgraded" to DRF `Request` class after passing through
the `APIView`, which invokes `.initialize_request` internally.
This helper method uses a dummy API view to return a DRF `Request`
object for testing purpose.
Ref:
https://stackoverflow.com/questions/28421797/django-rest-framework-apirequestfactory-request-object-has-no-attribute-query-p
https://github.com/encode/django-rest-framework/issues/3608
"""
class DummyView(APIView):
pass
if request is None:
# use a default request
request = APIRequestFactory().get('/')
drf_request = DummyView().initialize_request(request)
if headers:
drf_request.headers = headers
return drf_request
| 33.107143
| 128
| 0.702805
| 462
| 3,708
| 5.502165
| 0.365801
| 0.035799
| 0.033438
| 0.030685
| 0.049567
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005189
| 0.220334
| 3,708
| 111
| 129
| 33.405405
| 0.874092
| 0.423948
| 0
| 0
| 0
| 0
| 0.028813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0.020408
| 0.163265
| 0
| 0.306122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5aa196ccf6037cd4fcdad669c9f9252c8778f6e
| 436
|
py
|
Python
|
atcoder/past/past201912_f.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1
|
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
atcoder/past/past201912_f.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
atcoder/past/past201912_f.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
S = input()
arr = []
now = []
counter = 0
for s in S:
now.append(s.lower())
if s.isupper():
if counter == 0:
counter += 1
else:
arr.append(''.join(now))
now = []
counter = 0
arr.sort()
for word in arr:
for i, s in enumerate(word):
if i == 0 or i == len(word) - 1:
print(s.upper(), end='')
else:
print(s, end='')
print()
| 19.818182
| 40
| 0.428899
| 59
| 436
| 3.169492
| 0.40678
| 0.128342
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023077
| 0.40367
| 436
| 21
| 41
| 20.761905
| 0.696154
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
190f3d0f2aa0d41a590c2d4d36fe77e3833762f3
| 2,171
|
py
|
Python
|
setup.py
|
biodatageeks/pysequila
|
2fb3b83f008e6b7f874648ea02e7ca307d8519d3
|
[
"Apache-2.0"
] | 1
|
2020-10-14T23:02:04.000Z
|
2020-10-14T23:02:04.000Z
|
setup.py
|
biodatageeks/pysequila
|
2fb3b83f008e6b7f874648ea02e7ca307d8519d3
|
[
"Apache-2.0"
] | 9
|
2020-11-07T23:33:28.000Z
|
2021-12-13T09:22:07.000Z
|
setup.py
|
biodatageeks/pysequila
|
2fb3b83f008e6b7f874648ea02e7ca307d8519d3
|
[
"Apache-2.0"
] | 1
|
2020-11-07T22:35:40.000Z
|
2020-11-07T22:35:40.000Z
|
# -*- coding: utf-8 -*-
"""setup.py"""
import os
import sys

from setuptools import setup
from setuptools.command.test import test as TestCommand


class Tox(TestCommand):
    user_options = [('tox-args=', 'a', 'Arguments to pass to tox')]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = None

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        import tox
        import shlex
        if self.tox_args:
            errno = tox.cmdline(args=shlex.split(self.tox_args))
        else:
            errno = tox.cmdline(self.test_args)
        sys.exit(errno)


def read_content(filepath):
    with open(filepath) as fobj:
        return fobj.read()


classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: Implementation :: CPython",
    "Programming Language :: Python :: Implementation :: PyPy",
]

long_description = (
    read_content("README.rst") +
    read_content(os.path.join("docs/source", "CHANGELOG.rst")))

requires = ['setuptools', 'typeguard==2.5.0', 'pyspark==3.0.1', 'findspark']

extras_require = {
    'reST': ['Sphinx'],
}
if os.environ.get('READTHEDOCS', None):
    extras_require['reST'].append('recommonmark')

setup(name='pysequila',
      version=os.getenv('VERSION', '0.1.0'),
      description='An SQL-based solution for large-scale genomic analysis',
      long_description=long_description,
      long_description_content_type='text/x-rst',
      author='biodatageeks',
      author_email='team@biodatageeks.org',
      url='https://pysequila.biodatageeks.org',
      classifiers=classifiers,
      packages=['pysequila'],
      data_files=[],
      install_requires=requires,
      include_package_data=True,
      extras_require=extras_require,
      tests_require=['tox'],
      cmdclass={'test': Tox},)
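With the Tox command registered under cmdclass, the test suite can be driven through setuptools; a usage sketch (the flag name follows from the user_options definition above):

    # run the full tox suite
    python setup.py test
    # forward arguments to tox, e.g. run a single environment
    python setup.py test --tox-args="-e py37"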
| 28.194805
| 76
| 0.64947
| 247
| 2,171
| 5.582996
| 0.493927
| 0.068891
| 0.090645
| 0.037708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008182
| 0.211884
| 2,171
| 76
| 77
| 28.565789
| 0.797779
| 0.014279
| 0
| 0
| 0
| 0
| 0.305061
| 0.009841
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0.016949
| 0.101695
| 0
| 0.220339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1911d18a99f00abe9dd822c30eace393500445cb
| 7,785
|
py
|
Python
|
tictactoe.py
|
smrsassa/tic-tac-toe-pygame
|
36f738fb94a3a138ef2aa21d409558e3d1680526
|
[
"MIT"
] | 1
|
2019-10-21T18:19:12.000Z
|
2019-10-21T18:19:12.000Z
|
tictactoe.py
|
smrsassa/tic-tac-toe-pygame
|
36f738fb94a3a138ef2aa21d409558e3d1680526
|
[
"MIT"
] | null | null | null |
tictactoe.py
|
smrsassa/tic-tac-toe-pygame
|
36f738fb94a3a138ef2aa21d409558e3d1680526
|
[
"MIT"
] | null | null | null |
import pygame
import random
from time import sleep
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
pygame.init()
largura = 320
altura = 320
fundo = pygame.display.set_mode((largura, altura))
pygame.display.set_caption("TicTacToe")
def texto(msg, cor, tam, x, y):
fonte = pygame.font.SysFont(None, tam)
texto1 = fonte.render(msg, True, cor)
fundo.blit(texto1, [x, y])
def circulo(centro):
if centro == 0 or centro == 1 or centro == 2:
if centro == 0:
centro = 53*(centro+1)
if centro == 1:
centro = 53*(centro+2)
if centro == 2:
centro = 53*(centro+3)
pos_circulo = (centro, 53)
if centro == 3 or centro == 4 or centro == 5:
if centro == 3:
centro = 53*(centro-2)
if centro == 4:
centro = 53*(centro-1)
if centro == 5:
centro = 53*centro
pos_circulo = (centro, 160)
if centro == 6 or centro == 7 or centro == 8:
if centro == 6:
centro = 53*(centro-5)
if centro == 7:
centro = 53*(centro-4)
if centro == 8:
centro = 53*(centro-3)
pos_circulo = (centro, 266)
pygame.draw.circle(fundo, black, pos_circulo, 30)
def cruz(cruzx, cruzy):
pygame.draw.line(fundo, black, (cruzx, cruzy), (cruzx+35, cruzy+35))
pygame.draw.line(fundo, black, (cruzx+35, cruzy), ( cruzx, cruzy+35))
def cerca():
pygame.draw.line(fundo, black,(106, 0), (106, altura))
pygame.draw.line(fundo, black,(212, 0), (212, altura))
pygame.draw.line(fundo, black,(0, 106), (largura, 106))
pygame.draw.line(fundo, black,(0, 212), (largura, 212))
def endgame():
    """Check all winning lines; set the end-of-game flags on a win or a draw."""
    global fimdejogo
    global resultado
    global trava
    # the eight winning lines: three rows, three columns, two diagonals
    linhas = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
              (0, 3, 6), (1, 4, 7), (2, 5, 8),
              (0, 4, 8), (2, 4, 6)]
    for i, j, k in linhas:
        if matriz[i] != 0 and matriz[i] == matriz[j] == matriz[k]:
            fimdejogo = True
            trava = False
            # matriz holds 1 for the player and 2 for the computer,
            # which are exactly the values resultado uses
            resultado = matriz[i]
    # draw: board full and nobody has won
    vaziu = 0
    for c in range(0, len(matriz)):
        if matriz[c] == 0:
            vaziu += 1
    if vaziu == 0:
        if resultado != 1 and resultado != 2:
            fimdejogo = True
            resultado = 3
vaziu = 0
game = True
fimdejogo = False
evento = True
trava = True
resultado = 0
mousex = -1
mousey = 0
fundo.fill(white)
cerca()
matriz = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pygame.display.update()
while game:
while fimdejogo:
sleep(0.5)
fundo.fill(white)
texto('Fim de Jogo', red, 50, 65, 30)  # "Game Over"
if resultado == 1:
texto('Vitoria!!!', black, 30, 70, 80)  # "Victory!!!"
if resultado == 3:
texto('Velha', black, 30, 70, 80)  # "Velha" is the Brazilian name for a tic-tac-toe draw
if resultado == 2:
texto('Derrota!!', black, 30, 70, 80)  # "Defeat!!"
pygame.draw.rect(fundo, black, [45, 120, 135, 27])
texto('Continuar(C)', white, 30, 50, 125)  # "Continue (C)"
pygame.draw.rect(fundo, black, [190, 120, 75, 27])
texto('Sair(S)', white, 30, 195, 125)  # "Quit (S)"
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
game = False
fimdejogo = False
trava = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_c:
game = True
fimdejogo = False
evento = True
trava = True
resultado = 0
mousex = -1
mousey = 0
fundo.fill(white)
cerca()
matriz = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pygame.display.update()
if event.key == pygame.K_s:
game = False
fimdejogo = False
evento = False
trava = False
while evento:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game = False
evento = False
trava = False
if event.type == pygame.MOUSEBUTTONDOWN:
mousex = pygame.mouse.get_pos()[0]
mousey = pygame.mouse.get_pos()[1]
evento = False
evento = True
if mousex < 106 and mousey < 106 and mousex != -1 and matriz[0] == 0:
cruz(35, 35)
matriz[0] = 1
if mousex < 212 and mousex > 106 and mousey < 106 and matriz[1] == 0:
cruz(141, 35)
matriz[1] = 1
if mousex < 320 and mousex > 212 and mousey < 106 and matriz[2] == 0:
cruz(247, 35)
matriz[2] = 1
if mousex < 106 and mousey > 106 and mousey < 212 and matriz[3] == 0:
cruz(35, 141)
matriz[3] = 1
if mousex < 212 and mousex > 106 and mousey < 212 and mousey > 106 and matriz[4] == 0:
cruz(141, 141)
matriz[4] = 1
if mousex < 320 and mousex > 212 and mousey < 212 and mousey > 106 and matriz[5] == 0:
cruz(247, 141)
matriz[5] = 1
if mousex < 106 and mousey < 320 and mousey > 212 and matriz[6] == 0:
cruz(35, 247)
matriz[6] = 1
if mousex < 212 and mousex > 106 and mousey < 320 and mousey > 212 and matriz[7] == 0:
cruz(141, 247)
matriz[7] = 1
if mousex < 320 and mousex > 212 and mousey < 320 and mousey > 212 and matriz[8] == 0:
cruz(247, 247)
matriz[8] = 1
endgame()
pygame.display.update()
sleep(0.5)
if trava:
while True:
jogada = random.randint(0, 8)
if matriz[jogada] == 0:
circulo(jogada)
matriz[jogada] = 2
break
else:
if 0 in matriz:
jogada = random.randint(0, 8)
else:
break
endgame()
pygame.display.update()
pygame.display.update()
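The computer's move above retries random indices until an empty cell turns up; an equivalent, shorter form (a sketch, not the author's code; `livres` is a new helper name, everything else is already defined in this file) would pick directly from the empty cells:

    # illustrative alternative to the random-retry loop
    livres = [i for i, v in enumerate(matriz) if v == 0]  # empty cells
    if livres:
        jogada = random.choice(livres)
        circulo(jogada)
        matriz[jogada] = 2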
| 31.26506
| 116
| 0.491715
| 1,032
| 7,785
| 3.699612
| 0.119186
| 0.096647
| 0.044526
| 0.012572
| 0.608172
| 0.536407
| 0.452331
| 0.322944
| 0.295443
| 0.248298
| 0
| 0.111527
| 0.382659
| 7,785
| 248
| 117
| 31.391129
| 0.682896
| 0
| 0
| 0.451327
| 0
| 0
| 0.008092
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022124
| false
| 0
| 0.013274
| 0
| 0.035398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
191359000d3e32159cc42150dd476b64da855e66
| 5,794
|
py
|
Python
|
pyec/distribution/bayes/parser.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | 2
|
2015-03-16T21:18:27.000Z
|
2017-10-09T19:59:24.000Z
|
pyec/distribution/bayes/parser.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | null | null | null |
pyec/distribution/bayes/parser.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Parse .net format for Bayes nets and return a bayes net
"""
from pyec.config import Config
from pyec.distribution.bayes.net import *
from pyec.distribution.bayes.structure.proposal import StructureProposal
class BayesParser(object):
def __init__(self):
self.variables = {}
self.indexMap = {}
self.revIndexMap = {}
self.index = 0
def processLine(self, line):
line = line.strip()
if line == "":
return None
line = line[1:-1]
parts = line.split(" ")
if parts[0] == "var":
name = parts[1].strip("' ")
vals = " ".join(parts[2:])
vals = vals.strip("'()").split(" ")
vals = [v.strip("() \t\r\n") for v in vals]
vals = [v for v in vals if v != ""]
self.variables[name] = {'vals':vals, 'parents':None, 'cpt':None}
self.indexMap[name] = self.index
self.revIndexMap[self.index] = name
self.index += 1
elif parts[0] == "parents":
name = parts[1].strip("'").strip()
parts2 = line.split("'(")
if len(parts2) == 1:
parts2 = line.split("(")
parstr = parts2[1]
cptstr = "(".join(parts2[2:])
else:
parstr = parts2[1]
cptstr = parts2[2]
parents = parstr.strip(") \n").split(" ")
parents = [parent for parent in parents if parent != ""]
sortedParents = sorted(parents, key=lambda x: self.indexMap[x])
self.variables[name]['parents'] = sortedParents
cpt = {}
if len(parents) == 0:
vals = cptstr[:-2].strip("( )\r\n\t").split(" ")
vals = array([float(v) for v in vals][:-1])
cpt[""] = vals
else:
rows = cptstr[:-2].split("((")
for row in rows:
row = row.strip(") \r\n\t")
if row == "": continue
cfg, vals = row.split(")")
keys = [c for c in cfg.split(" ") if c != ""]
keyStr = [[]] * len(parents)
for j, key in enumerate(keys):
options = self.variables[parents[j]]['vals']
idx = options.index(key) + 1
keyStr[sortedParents.index(parents[j])] = idx
keyStr = ",".join([str(i) for i in array(keyStr)])
vals = vals.strip().split(" ")
vals = array([float(v) for v in vals][:-1])
cpt[keyStr] = vals
self.variables[name]['cpt'] = cpt
else:
return False
def parse(self, fname):
f = open(fname)
totalLine = ""
done = False
for line in f:
totalLine += line
lefts = len(totalLine.split("("))
rights = len(totalLine.split(")"))
if lefts == rights:
self.processLine(totalLine)
totalLine = ""
categories = [[]] * self.index
for name, idx in self.indexMap.items():  # dict.items(); iteritems() was Python 2 only
categories[idx] = self.variables[name]['vals']
cfg = Config()
cfg.numVariables = len(self.variables)
cfg.variableGenerator = MultinomialVariableGenerator(categories)
cfg.randomizer = MultinomialRandomizer()
cfg.sampler = DAGSampler()
cfg.structureGenerator = StructureProposal(cfg)
net = BayesNet(cfg)
for variable in net.variables:
variable.tables = self.variables[self.revIndexMap[variable.index]]['cpt']
#print names[variable.index], self.variables[self.revIndexMap[variable.index]]['parents']
variable.known = [self.indexMap[parent] for parent in self.variables[self.revIndexMap[variable.index]]['parents']]
variable.known = sorted(variable.known)
variable.parents = dict([(i, net.variables[i]) for i in variable.known])
net.dirty = True
net.computeEdgeStatistics()
"""
for variable in net.variables:
print "(var ", self.revIndexMap[variable.index], " (", " ".join(variable.categories[variable.index]), "))"
for variable in net.variables:
print "(parents ", self.revIndexMap[variable.index], " (", " ".join([self.revIndexMap[i] for i in variable.known]), ") "
for key, val in variable.tables.iteritems():
if key == "":
expanded = ""
else:
cfg = array([int(num) for num in key.split(",")])
expanded = " ".join(self.variables[self.revIndexMap[variable.known[k]]]['vals'][c-1] for k,c in enumerate(cfg))
total = val.sum()
vals = " ".join([str(i) for i in val])
print "((", expanded, ") ", vals, (1. - total), ")"
print ")"
"""
return net
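A minimal usage sketch (the `.net` file name is a placeholder, not a file shipped with the project):

    parser = BayesParser()
    net = parser.parse("alarm.net")  # placeholder path to a Bayes net in .net format
    print(len(net.variables))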
| 43.893939
| 460
| 0.576458
| 688
| 5,794
| 4.848837
| 0.287791
| 0.042866
| 0.041367
| 0.041966
| 0.158873
| 0.107614
| 0.056954
| 0.056954
| 0.056954
| 0.020384
| 0
| 0.008017
| 0.28961
| 5,794
| 132
| 461
| 43.893939
| 0.802478
| 0.197791
| 0
| 0.097826
| 0
| 0
| 0.028067
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032609
| false
| 0
| 0.032609
| 0
| 0.108696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
191840622ba4f376a7f93c8724514c6d2f52d3bb
| 1,393
|
py
|
Python
|
africa/views.py
|
Mutugiii/Pinstagram
|
40436facfb068eea135c6dffcdaf85028ff803c1
|
[
"MIT"
] | null | null | null |
africa/views.py
|
Mutugiii/Pinstagram
|
40436facfb068eea135c6dffcdaf85028ff803c1
|
[
"MIT"
] | 6
|
2021-03-30T13:09:41.000Z
|
2021-09-08T01:50:42.000Z
|
africa/views.py
|
Mutugiii/Pinstagram
|
40436facfb068eea135c6dffcdaf85028ff803c1
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from .models import Location, Category, Image


def index(request):
    '''Main view function for the start page'''
    images = Image.get_images()
    template = loader.get_template('index.html')
    context = {
        'images': images,
    }
    return HttpResponse(template.render(context, request))


def search(request):
    '''View function to search images by category'''
    template = loader.get_template('search.html')
    if 'image' in request.GET and request.GET['image']:
        search_category = request.GET['image']
        searched_images = Image.search_images(search_category)
        context = {
            'message': search_category,
            'images': searched_images,
        }
        return HttpResponse(template.render(context, request))
    else:
        # no (or empty) category supplied
        message = 'The category does not exist!'
        return render(request, 'search.html', {'message': message})


def locations(request, region):
    '''View function to sort images based on location'''
    template = loader.get_template('location.html')
    region_images = Image.filter_by_location(region)
    context = {
        'images': region_images,
    }
    return HttpResponse(template.render(context, request))
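A sketch of the URL wiring these views imply (the route names and the app-level urls.py layout are assumptions, not taken from the repository):

    # africa/urls.py (hypothetical)
    from django.urls import path
    from . import views

    urlpatterns = [
        path('', views.index, name='index'),
        path('search/', views.search, name='search'),
        path('location/<str:region>/', views.locations, name='locations'),
    ]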
| 30.955556
| 67
| 0.65542
| 153
| 1,393
| 5.875817
| 0.30719
| 0.03337
| 0.05673
| 0.083426
| 0.173526
| 0.173526
| 0.173526
| 0
| 0
| 0
| 0
| 0
| 0.233309
| 1,393
| 44
| 68
| 31.659091
| 0.84176
| 0.08112
| 0
| 0.257143
| 0
| 0
| 0.114715
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.114286
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1918493233bb0f6b63771c2685165671159e3808
| 509
|
py
|
Python
|
src/chapter4/exercise6.py
|
group6BCS1/BCS-2021
|
272b1117922163cde03901cfdd82f8e0cfab9a67
|
[
"MIT"
] | null | null | null |
src/chapter4/exercise6.py
|
group6BCS1/BCS-2021
|
272b1117922163cde03901cfdd82f8e0cfab9a67
|
[
"MIT"
] | null | null | null |
src/chapter4/exercise6.py
|
group6BCS1/BCS-2021
|
272b1117922163cde03901cfdd82f8e0cfab9a67
|
[
"MIT"
] | null | null | null |
x = input("Enter hours: ")
y = input("Enter rate: ")


def compute_pay(hours, rate):
    """Compute gross pay with time-and-a-half for hours over 40.
    The try block returns an error message instead of crashing
    when the inputs are not numeric."""
    try:
        hours = float(hours)
        rate = float(rate)
        if hours <= 40:
            pay = hours * rate
        else:
            pay = 40 * rate + (hours - 40) * 1.5 * rate
        return pay
    except ValueError:
        return "INVALID ENTRY"


pay = compute_pay(x, y)
print(pay)
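A worked example of the overtime rule: 45 hours at rate 10 pays 40 * 10 for the regular hours plus 5 * 1.5 * 10 overtime:

    >>> compute_pay(45, 10)
    475.0
    >>> compute_pay('abc', 10)
    'INVALID ENTRY'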
| 23.136364
| 63
| 0.563851
| 71
| 509
| 4.014085
| 0.535211
| 0.077193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028818
| 0.318271
| 509
| 21
| 64
| 24.238095
| 0.792507
| 0.200393
| 0
| 0
| 0
| 0
| 0.090452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0
| 0
| 0.2
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1918ecc1cb7ed0d73d2876e4710c8c0ffca95358
| 557
|
py
|
Python
|
phone_numbers.py
|
EdilOndong/beginner_code
|
13b05afb25ec2ba4396f5fbe751febe7cb4bdabb
|
[
"Unlicense"
] | 1
|
2021-09-19T13:33:33.000Z
|
2021-09-19T13:33:33.000Z
|
phone_numbers.py
|
EdilOndong/beginner_code
|
13b05afb25ec2ba4396f5fbe751febe7cb4bdabb
|
[
"Unlicense"
] | null | null | null |
phone_numbers.py
|
EdilOndong/beginner_code
|
13b05afb25ec2ba4396f5fbe751febe7cb4bdabb
|
[
"Unlicense"
] | null | null | null |
import phonenumbers
from phonenumbers import geocoder, carrier


def get_information_about_number(phone_number):
    # the second argument to parse() is a default region code (e.g. "GB"),
    # only needed when the number lacks a "+<country code>" prefix
    number = phonenumbers.parse(phone_number, None)
    phone_location = geocoder.description_for_number(number, "en")
    phone_carrier = carrier.name_for_number(number, "en")
    print("The location of this phone number is " + str(phone_location)
          + " and the phone carrier is " + phone_carrier)


if __name__ == '__main__':
    numbers = input("Please enter the target number: ")
    get_information_about_number(numbers)
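A usage sketch (the number is the standard libphonenumber documentation example; the printed location and carrier depend on the metadata bundled with the library, and the carrier string is empty for many landlines):

    get_information_about_number("+44 20 8366 1177")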
| 39.785714
| 125
| 0.732496
| 69
| 557
| 5.565217
| 0.449275
| 0.09375
| 0.098958
| 0.130208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174147
| 557
| 14
| 126
| 39.785714
| 0.834783
| 0
| 0
| 0
| 0
| 0
| 0.201835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
191b02340ae1fb3a92d5b7d4ecfd3b82e78caed3
| 3,494
|
py
|
Python
|
src/templates/camera.py
|
coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros
|
f6d3e162888bd79d59b771c82ff028df0f70ae11
|
[
"MIT"
] | 8
|
2019-06-04T16:21:07.000Z
|
2021-09-05T07:24:20.000Z
|
src/templates/camera.py
|
coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros
|
f6d3e162888bd79d59b771c82ff028df0f70ae11
|
[
"MIT"
] | null | null | null |
src/templates/camera.py
|
coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros
|
f6d3e162888bd79d59b771c82ff028df0f70ae11
|
[
"MIT"
] | 1
|
2019-06-21T14:37:18.000Z
|
2019-06-21T14:37:18.000Z
|
# TODO: 1. Add indicator that node should be run by python
# the line above indicates that python is responsible for running this node
import os
import csv
import rospy
import numpy as np
import pygame
from utilities import pipline
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image

# set image resolution
RESOLUTION_X = 640
RESOLUTION_Y = 480


# python class definition
class CameraTester(object):
    # python constructor definition
    def __init__(self):
        self.start_time = None
        self.image = None
        self.got_image = False
        self.init_pygame()
        self.bridge = CvBridge()
        # TODO: 2. Init node - give the node a unique name - overwritten from launch file
        # wait until the master node is initialized and record the start time
        self.wait_master_initialization()
        # TODO: 3. Subscribe to the ROS bridge camera topic and provide callback
        # wait till we get the first image
        self.wait_initialization()
        # run node infinite loop
        self.loop()

    # TODO: 4. Write callback method for the subscriber

    # init pygame window to display images
    def init_pygame(self):
        pygame.init()
        pygame.display.set_caption("Camera images")
        self.screen = pygame.display.set_mode([RESOLUTION_X, RESOLUTION_Y])

    # wait until the master node is initialized and record the start time
    def wait_master_initialization(self):
        while not self.start_time and not rospy.is_shutdown():
            self.start_time = rospy.Time.now().to_nsec()
        if not rospy.is_shutdown():
            rospy.loginfo('CameraTester: Ros master initialized.')

    # wait till we get the first image
    def wait_initialization(self):
        # define sleep rate for the loop
        rate = rospy.Rate(10)
        # wait till the image gets initialized
        while not rospy.is_shutdown() and not self.got_image:
            rate.sleep()
        if not rospy.is_shutdown():
            rospy.loginfo('CameraTester: Connected to vehicle - got camera images')

    # main node loop
    def loop(self):
        # define loop rate in Hz
        rate = rospy.Rate(20)
        while not rospy.is_shutdown():
            if self.image is not None:
                # process stored image and display it in pygame window
                self.process_frame()
                # update pygame window
                pygame.display.flip()
            # wait 1/20 sec
            rate.sleep()

    # convert open cv image to pygame image and display
    def process_frame(self):
        # we need to convert the image as it uses the BGR color scheme and is flipped
        frame = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        frame = np.rot90(frame)
        frame = np.flip(frame, 0)
        # TODO: 5. Add sample image processing - for example filters useful to lanes detection - uncomment line below
        # frame = pipline(frame)
        frame = pygame.surfarray.make_surface(frame)
        self.screen.blit(frame, (0, 0))
        return


# python way to indicate what to do if this file is run as executable rather than imported as library
if __name__ == '__main__':
    try:
        # create CameraTester instance and initiate loop sequence
        CameraTester()
    except rospy.ROSInterruptException:
        # catch and log ROS errors
        rospy.logerr('Could not start camera tester node.')
        pass
    finally:
        pygame.quit()
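One possible way to fill in TODO items 2-4 (a sketch: the node name and camera topic are assumptions typical of CARLA ROS-bridge setups, not values taken from this repository):

    # inside __init__, at TODO 2/3:
    rospy.init_node('camera_tester', anonymous=True)
    rospy.Subscriber('/carla/ego_vehicle/camera/rgb/front/image_color', Image, self.on_image)

    # TODO 4: the subscriber callback
    def on_image(self, msg):
        try:
            self.image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
            self.got_image = True
        except CvBridgeError as e:
            rospy.logerr('CameraTester: %s', e)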
| 30.920354
| 121
| 0.642244
| 450
| 3,494
| 4.891111
| 0.388889
| 0.020445
| 0.022717
| 0.040891
| 0.122672
| 0.101772
| 0.080872
| 0.080872
| 0.040891
| 0
| 0
| 0.010922
| 0.292501
| 3,494
| 113
| 122
| 30.920354
| 0.87945
| 0.360904
| 0
| 0.068966
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0.00885
| 0
| 1
| 0.103448
| false
| 0.017241
| 0.155172
| 0
| 0.293103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
191ea83d06729e5bde9055413df2bd0a44ff8fe7
| 2,669
|
py
|
Python
|
plugins/beacon/alerta_beacon.py
|
ernadhalilovic/alerta-contrib
|
e12b5cf1e7f5913f641758032ca0d426c7eb8a08
|
[
"MIT"
] | null | null | null |
plugins/beacon/alerta_beacon.py
|
ernadhalilovic/alerta-contrib
|
e12b5cf1e7f5913f641758032ca0d426c7eb8a08
|
[
"MIT"
] | null | null | null |
plugins/beacon/alerta_beacon.py
|
ernadhalilovic/alerta-contrib
|
e12b5cf1e7f5913f641758032ca0d426c7eb8a08
|
[
"MIT"
] | null | null | null |
import logging
import os
import json
import traceback

import requests

try:
    from alerta.plugins import app  # alerta >= 5.0
except ImportError:
    from alerta.app import app  # alerta < 5.0
from alerta.plugins import PluginBase

LOG = logging.getLogger('alerta.plugins.beacon')

BEACON_HEADERS = {
    'Content-Type': 'application/json'
}

BEACON_SEND_ON_ACK = os.environ.get('BEACON_SEND_ON_ACK') or app.config.get('BEACON_SEND_ON_ACK', False)
BEACON_SEVERITY_MAP = app.config.get('BEACON_SEVERITY_MAP', {})
BEACON_DEFAULT_SEVERITY_MAP = {'security': '#000000',       # black
                               'critical': '#FF0000',       # red
                               'major': '#FFA500',          # orange
                               'minor': '#FFFF00',          # yellow
                               'warning': '#1E90FF',        # blue
                               'informational': '#808080',  # gray
                               'debug': '#808080',          # gray
                               'trace': '#808080',          # gray
                               'ok': '#00CC00'}             # green


class ServiceIntegration(PluginBase):

    def __init__(self, name=None):
        # override user-defined severities
        self._severities = BEACON_DEFAULT_SEVERITY_MAP
        self._severities.update(BEACON_SEVERITY_MAP)
        super(ServiceIntegration, self).__init__(name)

    def pre_receive(self, alert):
        return alert

    def post_receive(self, alert):
        return

    def status_change(self, alert, status, text, **kwargs):
        BEACON_WEBHOOK_URL = self.get_config('BEACON_WEBHOOK_URL', type=str, **kwargs)
        # if BEACON_SEND_ON_ACK == False or status not in ['ack', 'assign']:
        #     return
        LOG.debug('Beacon alert: %s', alert)
        LOG.debug('Beacon status: %s', status)
        LOG.debug('Beacon text: %s', text)
        LOG.debug('Beacon kwargs: %s', kwargs)
        payload = dict()
        try:
            payload['severity'] = alert.severity
            payload['status'] = status
            payload['environment'] = alert.environment
            payload['event'] = alert.event
            payload['id'] = alert.id
            payload['tags'] = alert.tags
            LOG.debug('Beacon payload: %s', payload)
        except Exception as e:
            LOG.error('Exception formatting payload: %s\n%s' % (e, traceback.format_exc()))
            return
        try:
            r = requests.post(BEACON_WEBHOOK_URL,
                              data=json.dumps(payload), headers=BEACON_HEADERS, timeout=2)
        except Exception as e:
            raise RuntimeError("Beacon connection error: %s" % e)
        LOG.debug('Beacon response: %s\n%s' % (r.status_code, r.text))
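The plugin reads its settings from environment variables or the alertad config (see the app.config and get_config lookups above); an illustrative snippet with made-up values:

    # alertad.conf (illustrative)
    PLUGINS = ['beacon']
    BEACON_WEBHOOK_URL = 'https://hooks.example.com/beacon'
    BEACON_SEND_ON_ACK = True
    BEACON_SEVERITY_MAP = {'critical': '#B22222'}  # merged over the defaults above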
| 35.118421
| 104
| 0.573248
| 291
| 2,669
| 5.092784
| 0.381443
| 0.032389
| 0.05668
| 0.040486
| 0.064103
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024285
| 0.305732
| 2,669
| 75
| 105
| 35.586667
| 0.775499
| 0.067066
| 0
| 0.122807
| 0
| 0
| 0.18101
| 0.008485
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.140351
| 0.035088
| 0.280702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
192369f557f40b35dc6e1a446089e36a7716438d
| 488
|
py
|
Python
|
discovery-provider/src/models/reward_manager.py
|
AudiusProject/audius-protocol
|
0315c31402121b24faa039e93cea8869d5b80743
|
[
"Apache-2.0"
] | 429
|
2019-08-14T01:34:07.000Z
|
2022-03-30T06:31:38.000Z
|
discovery-provider/src/models/reward_manager.py
|
AudiusProject/audius-protocol
|
0315c31402121b24faa039e93cea8869d5b80743
|
[
"Apache-2.0"
] | 998
|
2019-08-14T01:52:37.000Z
|
2022-03-31T23:17:22.000Z
|
discovery-provider/src/models/reward_manager.py
|
AudiusProject/audius-protocol
|
0315c31402121b24faa039e93cea8869d5b80743
|
[
"Apache-2.0"
] | 73
|
2019-10-04T04:24:16.000Z
|
2022-03-24T16:27:30.000Z
|
from sqlalchemy import (
    Column,
    Integer,
    String,
    DateTime,
)

from .models import Base


class RewardManagerTransaction(Base):
    __tablename__ = "reward_manager_txs"

    signature = Column(String, nullable=False, primary_key=True)
    slot = Column(Integer, nullable=False)
    created_at = Column(DateTime, nullable=False)

    def __repr__(self):
        return (
            f"<RewardManagerTransaction "
            f"signature={self.signature}, "
            f"slot={self.slot}, "
            f"created_at={self.created_at}>"
        )
| 25.684211
| 64
| 0.719262
| 54
| 488
| 6.240741
| 0.537037
| 0.115727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170082
| 488
| 19
| 65
| 25.684211
| 0.832099
| 0
| 0
| 0
| 0
| 0
| 0.03681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.111111
| 0.055556
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1924e772ac06a1b05910f40c7a40911d19ba34ea
| 2,326
|
py
|
Python
|
plugins/roll.py
|
Cyame/OkayuTweetBot
|
5ca257f2faa622f5b88cecc95522f2114e5717fc
|
[
"MIT"
] | 3
|
2020-04-10T16:47:25.000Z
|
2020-05-17T14:44:47.000Z
|
plugins/roll.py
|
Cyame/OkayuTweetBot
|
5ca257f2faa622f5b88cecc95522f2114e5717fc
|
[
"MIT"
] | null | null | null |
plugins/roll.py
|
Cyame/OkayuTweetBot
|
5ca257f2faa622f5b88cecc95522f2114e5717fc
|
[
"MIT"
] | 1
|
2020-04-12T09:38:22.000Z
|
2020-04-12T09:38:22.000Z
|
from nonebot import on_command, CommandSession, permission as perm
import asyncio
import traceback
from helper import getlogger, msgSendToBot, CQsessionToStr, data_read, data_save
from module.roll import match_roll

logger = getlogger(__name__)
__plugin_name__ = 'ROLL骰'
__plugin_usage__ = r"""
roll command
"""


# pre-check shared by all commands
def headdeal(session: CommandSession):
    if session.event['message_type'] == "group" and session.event.sub_type != 'normal':
        return False
    return True


# the on_command decorator declares the function as a command handler
@on_command('roll', aliases=['掷骰', '掷骰子', '骰子'], only_to_me=False)
async def roll(session: CommandSession):
    if not headdeal(session):
        return
    stripped_arg = session.current_arg_text.strip()
    logger.info(CQsessionToStr(session))
    event = session.event
    nick = event['user_id']
    if hasattr(event, 'sender'):
        if 'card' in event.sender and event['sender']['card'] != '':
            nick = event['sender']['card']
        elif 'nickname' in event.sender and event['sender']['nickname'] != '':
            nick = event['sender']['nickname']
    # split an optional comment from the dice formula ("comment#formula")
    res = stripped_arg.split('#', 1)
    # compose the comment part of the reply
    addmsg = ''
    if len(res) == 2:
        stripped_arg = res[1]
        if len(res[0]) > 25:
            addmsg = "---{0}---\n".format(res[0])
        else:
            addmsg = res[0] + '#'
    # defaults
    if stripped_arg == '':
        stripped_arg = '1d100<50'
    elif stripped_arg[:1] in ('<', '>', '!'):
        stripped_arg = '1d100' + stripped_arg
    elif stripped_arg.isdecimal():
        stripped_arg = '1d100<' + stripped_arg
    try:
        msg = match_roll(nick, stripped_arg)
        if msg == '':
            await session.send('参数不正确')  # "invalid arguments"
            return
    except:
        s = traceback.format_exc(limit=10)
        logger.error(s)
        await session.send("内部错误!")  # "internal error!"
        return
    await session.send(addmsg + msg)


@on_command('rollhelp', aliases=['掷骰帮助', '掷骰子帮助', '骰子帮助', '骰娘帮助'], only_to_me=False)
async def rollhelp(session: CommandSession):
    if not headdeal(session):
        return
    # help text (in Chinese): usage of "!roll <args>"; no argument defaults to
    # 1d100>50; on 1d100 a 1-5 is a critical success and 96-100 a critical
    # failure; supported operators are >, <, >=, <=, !=, =, +, -, *, /
    msg = '--掷骰帮助--' + "\n"
    msg = msg + '!roll 参数' + "\n"
    msg = msg + '无参默认为1d100>50' + "\n"
    msg = msg + '1d100固定1-5大成功,96-100大失败' + "\n"
    msg = msg + '支持符号>,<,>=,<=,!=,=,+,-,*,/' + "\n"
    msg = msg + "代码主体来自:https://github.com/akrisrn/dice"
    await session.send(msg)
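An illustrative interaction, following the defaulting rules above (the exact reply text comes from module.roll's match_roll and is not shown here):

    # user: !roll          -> rolled as '1d100<50' (the default)
    # user: !roll 30       -> rolled as '1d100<30' (a bare number becomes a threshold)
    # user: !roll >70      -> rolled as '1d100>70' (a bare comparison gets the default dice)
    # user: !roll luck#2d6 -> 'luck#' is echoed back in front of the 2d6 result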
| 32.305556
| 87
| 0.602322
| 282
| 2,326
| 4.812057
| 0.407801
| 0.097273
| 0.025792
| 0.01916
| 0.179808
| 0.140015
| 0.06927
| 0
| 0
| 0
| 0
| 0.024144
| 0.234308
| 2,326
| 72
| 88
| 32.305556
| 0.737788
| 0.019347
| 0
| 0.095238
| 0
| 0
| 0.138779
| 0.02152
| 0.015873
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.079365
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
192a9867b561e4cc653889667cda0bafef034b8e
| 4,706
|
py
|
Python
|
Main/APIUsagePatternSearcher.py
|
SMAT-Lab/APIMatchmaker
|
0cc5c68f7f2aba570ad4c583bbc5ec757158c676
|
[
"MIT"
] | null | null | null |
Main/APIUsagePatternSearcher.py
|
SMAT-Lab/APIMatchmaker
|
0cc5c68f7f2aba570ad4c583bbc5ec757158c676
|
[
"MIT"
] | null | null | null |
Main/APIUsagePatternSearcher.py
|
SMAT-Lab/APIMatchmaker
|
0cc5c68f7f2aba570ad4c583bbc5ec757158c676
|
[
"MIT"
] | null | null | null |
# coding:utf-8
import re
from Helper.common import *
class APIUsagePatternSearcher:
def __init__(self, OPTIONS, custom_args, numOfRecs):
self.OPTIONS = OPTIONS
self.custom_args = custom_args
self.numOfRecs = numOfRecs
def searchAPIUsagePatterns(self):
# Collect in allProjects the method invocations for every training project
allProjects = {} # Map<String, Map<String, Set<String>>>
# note: only the most similar projects are considered
trainingProjects = getFileList_from_txt(self.custom_args['Training_Set'])
testingProjects = self.getProjectNames(self.custom_args['Training_Set_filtered'])
for trainingProject in trainingProjects:
# projectMIs - Map<String, Set<String>> projectMIs
projectMIs = self.getProjectDetails(self.OPTIONS.presolve, trainingProject)
allProjects[trainingProject] = projectMIs
# For every testingPro, collect the Jaccard distance
# between the recommendations and the actual invocations
for testingPro in testingProjects:
results = {} # Map<String, Float>
# ordered lists
recommendations = []
testingInvocations = self.getTestingInvocations(self.custom_args['Test_Set'], testingPro)
# Searching API usage pattern for testingPro
# add also the testing invocation(s)
for invocation in testingInvocations:
recommendations.append(invocation)
recommendations.extend(self.readRecommendationFile(self.custom_args['RECOMMENDATION_PATH'], testingPro))
for project in allProjects:
methodInvocations = allProjects[project]
for declaration in methodInvocations:
invocations = methodInvocations[declaration]
allMIs = set()
# Md in training projects
s_train = len(invocations)
# Recommendations in the test project
s_test = len(recommendations)
short_len = min(s_train, s_test)
for i in range(short_len):
allMIs.add(recommendations[i])
size1 = len(invocations.intersection(allMIs))
size2 = len(invocations.union(allMIs))
if size1:
jaccard = (1.0 * size1) / size2
results[project + "#" + declaration] = jaccard
jaccard_sim_list = dict2sortedlist(results)
numOfRecs = self.numOfRecs
if len(jaccard_sim_list) > numOfRecs:
jaccard_sim_list = jaccard_sim_list[:numOfRecs]
headings = ["Project#Declaration", "Jaccard Similarity"]
writeScores(self.custom_args['OUTPUT_PATH'], testingPro, jaccard_sim_list, headings)
def readRecommendationFile(self, path, project):
ret = []
filename = os.path.join(path, project + ".csv")
with open(filename, "r") as fr:
reader = csv.reader(fr)
headings = next(reader)
for line in reader:
mi = line[0]
ret.append(mi)
return ret
def getTestingInvocations(self, path, project):
ret = []
filename = os.path.join(path, project + ".csv")
with open(filename, "r") as fr:
reader = csv.reader(fr)
headings = next(reader)
for line in reader:
md = line[0].strip('\"[] ')
string = line[1].strip('\"[] ')
pattern = r'(<.*?>)'
mi = re.findall(pattern, string)
ret = mi
return ret
def getProjectDetails(self, path, project):
# return a Map<String, Set<String>>
methodInvocations = {}
filename = os.path.join(path, project + ".csv")
with open(filename, "r") as fr:
reader = csv.reader(fr)
headings = next(reader)
for line in reader:
md = line[0].strip('\"[] ')
string = line[1].strip('\"[] ')
pattern = r'(<.*?>)'
mi = re.findall(pattern, string)
mi = set(mi)
if md in methodInvocations:
methodInvocations[md] = methodInvocations[md].union(mi)
else:
methodInvocations[md] = mi
return methodInvocations
def getProjectNames(self, path):
names = []
files = getFileList(path, ".csv")
for file in files:
names.append(os.path.split(file)[-1][:-4])
return names
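searchAPIUsagePatterns ranks declarations by the Jaccard similarity |A ∩ B| / |A ∪ B| between a declaration's invocation set and the truncated recommendation list; a small worked example:

    # A = {'m1', 'm2', 'm3'}, B = {'m2', 'm3', 'm4'}
    # |A ∩ B| = 2, |A ∪ B| = 4  ->  jaccard = 2 / 4 = 0.5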
| 35.923664
| 116
| 0.563536
| 444
| 4,706
| 5.891892
| 0.279279
| 0.030581
| 0.03211
| 0.020642
| 0.196865
| 0.177752
| 0.177752
| 0.177752
| 0.177752
| 0.177752
| 0
| 0.005175
| 0.342966
| 4,706
| 130
| 117
| 36.2
| 0.84088
| 0.112622
| 0
| 0.306818
| 0
| 0
| 0.038989
| 0.005054
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.022727
| 0
| 0.147727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
192b90d17689e6aeda21369042966d2de1a7f460
| 335
|
py
|
Python
|
Beginner/Ambiguous Permutations (PERMUT2)/permutation.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 127
|
2020-10-13T18:04:35.000Z
|
2022-02-17T10:56:27.000Z
|
Beginner/Ambiguous Permutations (PERMUT2)/permutation.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 132
|
2020-10-13T18:06:53.000Z
|
2021-10-17T18:44:26.000Z
|
Beginner/Ambiguous Permutations (PERMUT2)/permutation.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 364
|
2020-10-13T18:04:52.000Z
|
2022-03-04T14:34:53.000Z
|
# A permutation is "ambiguous" when it equals its own inverse,
# i.e. arr[arr[i] - 1] == i + 1 for every position i (values are 1-based).
while True:
    n = int(input())
    if n == 0:
        break
    else:
        arr = input().split()
        check = True
        for i in range(n):
            if int(arr[int(arr[i]) - 1]) != i + 1:
                check = False
        if check:
            print('ambiguous')
        else:
            print('not ambiguous')
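A worked example: for `2 1 3` the inverse permutation is again `2 1 3` (positions 1 and 2 swap, 3 is fixed), so it is ambiguous, while `2 3 1` inverts to `3 1 2`:

    # input:          output:
    # 3
    # 2 1 3           ambiguous
    # 3
    # 2 3 1           not ambiguous
    # 0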
| 23.928571
| 51
| 0.402985
| 39
| 335
| 3.461538
| 0.538462
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01676
| 0.465672
| 335
| 14
| 52
| 23.928571
| 0.73743
| 0
| 0
| 0.142857
| 0
| 0
| 0.065476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
192bad6eff2c66e4ca11db59cd7ea795ca554716
| 2,140
|
py
|
Python
|
src/voiceassistant/integrations/respeaker.py
|
vadimtitov/voice-assistant
|
9ed6a799f44d5a546eb712195e3e84e6ff10d2fa
|
[
"Apache-2.0"
] | 1
|
2021-12-19T14:59:31.000Z
|
2021-12-19T14:59:31.000Z
|
src/voiceassistant/integrations/respeaker.py
|
vadimtitov/voice-assistant
|
9ed6a799f44d5a546eb712195e3e84e6ff10d2fa
|
[
"Apache-2.0"
] | 3
|
2021-09-16T20:47:58.000Z
|
2021-12-19T02:45:59.000Z
|
src/voiceassistant/integrations/respeaker.py
|
vadimtitov/voice-assistant
|
9ed6a799f44d5a546eb712195e3e84e6ff10d2fa
|
[
"Apache-2.0"
] | null | null | null |
"""Add-On functions for speech interface."""
from __future__ import annotations
from typing import TYPE_CHECKING, List
from voiceassistant.addons.create import Addon, CoreAttribute, addon_begin, addon_end
from voiceassistant.exceptions import IntegrationError
from .base import Integration
if TYPE_CHECKING:
from voiceassistant.core import VoiceAssistant
try:
from pixel_ring import pixel_ring
from pixel_ring import apa102_pixel_ring
if isinstance(pixel_ring, apa102_pixel_ring.PixelRing):
print("Found ReSpeaker 4 Mic Array")
from gpiozero import LED
power = LED(5)
power.on()
pixel_ring.change_pattern("echo")
class PixelRingState:
"""Host pixel ring states."""
off = 0
speak = 1
think = 2
pixel_ring.off()
ring_state = PixelRingState.off
except Exception as e:
raise IntegrationError(f"No ReSpeaker Microphone detected or not able to connect: {e}") from e
class RespeakerMicrophoneArray(Integration):
"""Respeaker Microphone Array integration."""
name = "respeaker"
def __init__(self, vass: VoiceAssistant) -> None:
"""Init."""
pass
@property
def addons(self) -> List[Addon]:
"""Get addons."""
return [processing_starts, processing_ends, tts_starts, tts_ends]
@addon_begin(CoreAttribute.SPEECH_PROCESSING)
def processing_starts(vass: VoiceAssistant) -> None:
"""Do before NLP starts."""
pixel_ring.speak()
global ring_state
ring_state = PixelRingState.speak
@addon_end(CoreAttribute.SPEECH_PROCESSING)
def processing_ends(vass: VoiceAssistant) -> None:
"""Do when NLP ends."""
pixel_ring.off()
global ring_state
ring_state = PixelRingState.off
@addon_begin(CoreAttribute.SPEECH_OUTPUT)
def tts_starts(vass: VoiceAssistant) -> None:
"""Do before voice output starts."""
pixel_ring.think()
@addon_end(CoreAttribute.SPEECH_OUTPUT)
def tts_ends(vass: VoiceAssistant) -> None:
"""Do when voice output ends."""
if ring_state == PixelRingState.speak:
pixel_ring.speak()
else:
pixel_ring.off()
| 24.883721
| 98
| 0.700935
| 255
| 2,140
| 5.686275
| 0.368627
| 0.086897
| 0.075862
| 0.066207
| 0.246897
| 0.146207
| 0
| 0
| 0
| 0
| 0
| 0.006463
| 0.204673
| 2,140
| 85
| 99
| 25.176471
| 0.845476
| 0.101869
| 0
| 0.18
| 0
| 0
| 0.053305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0.02
| 0.18
| 0
| 0.44
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
192bfb70b6700b39e9f6c097fb207ffc155ff246
| 4,602
|
py
|
Python
|
src/driving_curriculum/agents/neural_networks/tf/tf_novelty_detector.py
|
takeitallsource/pac-simulator
|
2c00d878047ec4a0247167e8a7de5aec8b474086
|
[
"MIT"
] | 1
|
2018-07-14T07:09:23.000Z
|
2018-07-14T07:09:23.000Z
|
src/driving_curriculum/agents/neural_networks/tf/tf_novelty_detector.py
|
takeitallsource/pac-simulator
|
2c00d878047ec4a0247167e8a7de5aec8b474086
|
[
"MIT"
] | null | null | null |
src/driving_curriculum/agents/neural_networks/tf/tf_novelty_detector.py
|
takeitallsource/pac-simulator
|
2c00d878047ec4a0247167e8a7de5aec8b474086
|
[
"MIT"
] | null | null | null |
from math import cos, sin
import numpy as np
import tensorflow as tf
from .....simulator import Agent
# from simulator import Agent
tf.set_random_seed(1234)
class TensorflowNoveltyDetector(Agent):
def execute(self, action):
raise NotImplementedError()
def __init__(self, world, learning=True, x=0.0, y=0.0, theta=0.0, v=0.0, checkpoint_file=None):
Agent.__init__(self, world, x, y, theta, v)
self.state_tensor = None
self.action_tensor = None
self.encoder_model = None
self.optimization_algorithm = None
self.loss_function = None
self.last_loss = None
self.tf_session = tf.InteractiveSession()
self.tf_checkpoint = checkpoint_file
self.tf_saver = None
self.summary_merge = None
self.summary_writer = None
self.global_step = tf.Variable(0, trainable=False, name='global_step')
self.learning_tensor = tf.placeholder(dtype=tf.bool, name='learning')
self.learning = learning
def is_learning(self):
return self.learning
def exploit(self, state, action, horizon=1):
feed_dict = dict()
feed_dict[self.state_tensor] = [state]
if action is not None:
feed_dict[self.action_tensor] = [action]
model, loss = self.tf_session.run(
fetches=[
self.encoder_model,
self.loss_function
],
feed_dict=feed_dict
)
return model, loss
def explore(self, state, horizon=1):
pass
def learn(self, state, action):
feed_dict = dict()
feed_dict[self.state_tensor] = [state]
feed_dict[self.learning_tensor] = self.learning
if action is not None:
feed_dict[self.action_tensor] = [action]
summary, step, _, learning_loss, _ = self.tf_session.run(
fetches=[
self.summary_merge,
self.global_step,
self.optimization_algorithm,
self.loss_function,
self.encoder_model
],
feed_dict=feed_dict
)
self.summary_writer.add_summary(summary, step)
self.last_loss = learning_loss
return learning_loss
def commit(self):
self.tf_saver.save(self.tf_session, self.tf_checkpoint, global_step=self.global_step)
def architecture(self):
raise NotImplementedError()
def train(self, state_dims, action_dims, storage_location):
if not self.encoder_model:
self._state_action_tensors(state_dims, action_dims)
self.encoder_model, self.loss_function = self.architecture()
self.optimization_algorithm = self.get_optimizer(self.loss_function)
self.tf_session.run(tf.global_variables_initializer())
tf.train.global_step(self.tf_session, self.global_step)
self.summary_merge = tf.summary.merge_all()
self.last_loss = float('inf')
self.tf_checkpoint = tf.train.latest_checkpoint(storage_location)
self.tf_saver = tf.train.Saver(filename='model')
if self.tf_checkpoint:
self.tf_saver.restore(self.tf_session, self.tf_checkpoint)
else:
self.tf_checkpoint = storage_location + 'model'
self.summary_writer = tf.summary.FileWriter(storage_location, self.tf_session.graph)
def test(self, state_dims, action_dims, storage_location):
if not self.encoder_model:
self._state_action_tensors(state_dims, action_dims)
self.encoder_model, self.loss_function = self.architecture()
self.tf_session.run(tf.global_variables_initializer())
self.tf_checkpoint = tf.train.latest_checkpoint(storage_location)
self.tf_saver = tf.train.Saver()
if self.tf_checkpoint:
self.tf_saver.restore(self.tf_session, self.tf_checkpoint)
else:
print("NO TRAINING!")
def _state_action_tensors(self, input_shape=(None, 1), output_shape=(1, 1)):
if len(input_shape) == 3:
input_shape = (1, input_shape[0], input_shape[1], input_shape[2])
with tf.name_scope('data'):
self.state_tensor = tf.placeholder(dtype=tf.float32, shape=input_shape, name='state')
if output_shape:
self.action_tensor = tf.placeholder(dtype=tf.float32, shape=output_shape, name='action')
tf.summary.image('state', self.state_tensor, 1)
def get_optimizer(self, loss):
raise NotImplementedError()
| 37.112903
| 100
| 0.634941
| 562
| 4,602
| 4.948399
| 0.197509
| 0.053937
| 0.046746
| 0.035958
| 0.401654
| 0.377202
| 0.356706
| 0.307084
| 0.276879
| 0.248112
| 0
| 0.008331
| 0.269665
| 4,602
| 123
| 101
| 37.414634
| 0.819101
| 0.005867
| 0
| 0.326733
| 0
| 0
| 0.013995
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118812
| false
| 0.009901
| 0.039604
| 0.009901
| 0.19802
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
192c65ff044acb45e1b0a8921920efeebef0c02a
| 4,093
|
py
|
Python
|
setup.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 10
|
2015-11-19T12:39:50.000Z
|
2021-02-21T20:15:29.000Z
|
setup.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 109
|
2015-06-15T05:03:33.000Z
|
2018-01-14T10:18:48.000Z
|
setup.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 8
|
2015-07-29T04:18:27.000Z
|
2018-11-02T17:15:40.000Z
|
from __future__ import print_function
import os
import sys
from errno import ENOENT
from os.path import dirname, abspath, join, isdir
from setuptools import setup, find_packages
from distutils.command.upload import upload
from pywincffi import __version__
try:
WindowsError
except NameError:
WindowsError = OSError
try:
with open("README.rst") as readme:
long_description = readme.read()
except (OSError, IOError, WindowsError) as error:
if error.errno == ENOENT:
long_description = ""
else:
raise
requirements = [
"cffi>=1.6.0",
"six"
]
ROOT = dirname(abspath(__file__))
DISTS = join(ROOT, "dist")
class AppVeyorArtifactUpload(upload):
"""
A subclass of the normal upload command which
"""
def run(self):
if not isdir(DISTS):
print("%s does not exist" % DISTS, file=sys.stderr)
sys.exit(1)
# Clean out everything in dist/* first. This ensures that
# if we have local files they'll be replaced by the artifacts
# that we're downloading.
for root, dirs, files in os.walk(DISTS):
for name in files:
os.remove(join(root, name))
from pywincffi.dev.release import AppVeyor
appveyor = AppVeyor()
for artifact in appveyor.artifacts(directory=DISTS):
extension = artifact.path.split(".")[-1]
if extension not in ("whl", "zip", "msi", "exe"):
continue
for root, dirs, files in os.walk(DISTS):
for filename in files:
if filename.endswith(".zip"):
command = "sdist"
pyversion = "source"
elif filename.endswith(".whl"):
command = "bdist_wheel"
_, _, pyversion, _, _ = filename.rstrip(".whl").split("-")
pyversion = ".".join(list(pyversion.lstrip("cp")))
elif filename.endswith(".msi"):
command = "bdist_msi"
pyversion = \
filename.rstrip(".msi").split("-")[-1].lstrip("py")
elif filename.endswith(".exe"):
command = "bdist_wininst"
raise NotImplementedError(
"Don't have `pyversion` implemented for %r" % filename)
else:
print(
"Unknown file type: %r" % filename.split(".")[-1],
file=sys.stderr)
sys.exit(1)
filename = join(root, filename)
self.upload_file(command, pyversion, filename)
setup_keywords = dict(
name="pywincffi",
version=".".join(map(str, __version__)),
cmdclass={
"upload_from_appveyor": AppVeyorArtifactUpload
},
packages=find_packages(
include=("pywincffi*", )
),
include_package_data=True,
author="Oliver Palmer",
author_email="oliverpalmer@opalmer.com",
url="http://github.com/opalmer/pywincffi",
description="A Python library which wraps Windows functions using CFFI",
long_description=long_description,
setup_requires=requirements,
install_requires=requirements,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Environment :: Win32 (MS Windows)",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries"
]
)
# Only add cffi_modules if we're running on Windows. Otherwise
# things like the documentation build, which can run on Linux, may
# not work.
if os.name == "nt":
setup_keywords.update(
cffi_modules=["pywincffi/core/dist.py:_ffi"]
)
setup(**setup_keywords)
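A quick illustration of why suffix stripping needs slicing rather than str.rstrip, which removes any trailing characters from the given set and only sometimes happens to give the right answer:

    >>> "pywincffi-0.5.0-cp27-none-win32.whl".rstrip(".whl")
    'pywincffi-0.5.0-cp27-none-win32'
    >>> "mywheel.whl".rstrip(".whl")
    'mywhee'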
| 31.728682
| 79
| 0.583435
| 424
| 4,093
| 5.528302
| 0.476415
| 0.025597
| 0.042662
| 0.013652
| 0.045222
| 0.045222
| 0.027304
| 0.027304
| 0.027304
| 0
| 0
| 0.004912
| 0.303689
| 4,093
| 128
| 80
| 31.976563
| 0.817544
| 0.07916
| 0
| 0.08
| 0
| 0
| 0.219317
| 0.013607
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01
| false
| 0
| 0.09
| 0
| 0.11
| 0.03
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
192f1d7e5401a66f3ca654feee18cca382797d01
| 2,941
|
py
|
Python
|
generate.py
|
fnrcum/dungeon_generator
|
7f5d1bd1b612f66e39f2782eac6fcd40abe7f7f0
|
[
"MIT"
] | null | null | null |
generate.py
|
fnrcum/dungeon_generator
|
7f5d1bd1b612f66e39f2782eac6fcd40abe7f7f0
|
[
"MIT"
] | null | null | null |
generate.py
|
fnrcum/dungeon_generator
|
7f5d1bd1b612f66e39f2782eac6fcd40abe7f7f0
|
[
"MIT"
] | null | null | null |
import random

from helpers import Leaf, Rect, RoomList
from renderer import MapRenderer
from typing import List, Any


class BSPTree:
    def __init__(self):
        self.level: List = []
        self.room: object = None
        self._leafs: List = []
        self.MAX_LEAF_SIZE: int = 32
        self.ROOM_MAX_SIZE: int = 20
        self.ROOM_MIN_SIZE: int = 6

    def generateLevel(self, map_width: int, map_height: int, room_list: RoomList):
        # Creates an empty 2D array or clears existing array
        self.level = [["#"
                       for y in range(map_height)]
                      for x in range(map_width)]

        rootLeaf = Leaf(0, 0, map_width, map_height)
        self._leafs.append(rootLeaf)

        split_successfully = True
        # loop through all leaves until they can no longer split successfully
        while split_successfully:
            split_successfully = False
            for l in self._leafs:
                if (l.child_1 is None) and (l.child_2 is None):
                    if (l.width > self.MAX_LEAF_SIZE or
                            (l.height > self.MAX_LEAF_SIZE) or
                            (random.random() > 0.7)):
                        if l.split_leaf():  # try to split the leaf
                            self._leafs.append(l.child_1)
                            self._leafs.append(l.child_2)
                            split_successfully = True

        rootLeaf.createRooms(self, room_list)
        return self.level

    def createRoom(self, room: Rect):
        # set all tiles within a rectangle to walkable
        for x in range(room.x1 + 1, room.x2):
            for y in range(room.y1 + 1, room.y2):
                self.level[x][y] = " "

    def createHall(self, room1: Rect, room2: Rect):
        # connect two rooms by hallways
        x1, y1 = room1.get_wall()
        x2, y2 = room2.get_wall()
        # 50% chance that a tunnel will start horizontally
        if random.randint(0, 1) == 1:
            self.createHorTunnel(x1, x2, y1)
            self.createVirTunnel(y1, y2, x2)
        else:  # else it starts vertically
            self.createVirTunnel(y1, y2, x1)
            self.createHorTunnel(x1, x2, y2)

    def createHorTunnel(self, x1: int, x2: int, y: int):
        _x1, _x2, _y = int(x1), int(x2), int(y)
        for x in range(min(_x1, _x2), max(_x1, _x2) + 1):
            # use != for string comparison; `is not` tests identity
            if self.level[x][_y] != " ":
                self.level[x][_y] = "c"

    def createVirTunnel(self, y1: int, y2: int, x: int):
        _y1, _y2, _x = int(y1), int(y2), int(x)
        for y in range(min(_y1, _y2), max(_y1, _y2) + 1):
            if self.level[_x][y] != " ":
                self.level[_x][y] = "c"


room_list = RoomList()
tree = BSPTree().generateLevel(64, 128, room_list)
MapRenderer(tree).render_map()
print(room_list.get_rooms()[5].get_random_point_in_room())
| 35.011905
| 82
| 0.550493
| 398
| 2,941
| 3.899497
| 0.298995
| 0.05799
| 0.045103
| 0.049613
| 0.132732
| 0.055412
| 0.055412
| 0.055412
| 0.055412
| 0.055412
| 0
| 0.037988
| 0.33764
| 2,941
| 83
| 83
| 35.433735
| 0.758727
| 0.112547
| 0
| 0.033898
| 0
| 0
| 0.002309
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0
| 0.067797
| 0
| 0.20339
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19340df43351011be81d21c8afe59df9e5f9d483
| 1,821
|
py
|
Python
|
Sensors/MaxSonarTTY.py
|
paceaux/pi-projects
|
c9eb1f138868d41f8304c4251382cc4a6d161ba8
|
[
"MIT"
] | null | null | null |
Sensors/MaxSonarTTY.py
|
paceaux/pi-projects
|
c9eb1f138868d41f8304c4251382cc4a6d161ba8
|
[
"MIT"
] | null | null | null |
Sensors/MaxSonarTTY.py
|
paceaux/pi-projects
|
c9eb1f138868d41f8304c4251382cc4a6d161ba8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Filename: maxSonarTTY.py
# Reads serial data from Maxbotix ultrasonic rangefinders
# Gracefully handles most common serial data glitches
# Use as an importable module with "import MaxSonarTTY"
# Returns an integer value representing distance to target in millimeters
from time import time
from serial import Serial


class MaxSonarTTY:
    """Uses the MaxSonar TTY serial output to find a range."""

    def __init__(self, serialDevice="/dev/ttyS0", maxWait=3):
        self.serialDevice = serialDevice
        self.maxWait = maxWait
        print('self.device', self.serialDevice)
        print('self.maxWait', self.maxWait)

    def measure(self):
        print("serialDevice", self.serialDevice)
        ser = Serial(self.serialDevice, 9600, 8, 'N', 1, timeout=1)
        timeStart = time()
        valueCount = 0
        print(ser)
        while time() < timeStart + self.maxWait:
            if ser.inWaiting():
                print("it's waiting")
                bytesToRead = ser.inWaiting()
                valueCount += 1
                if valueCount < 2:  # 1st reading may be partial number; throw it out
                    continue
                testData = ser.read(bytesToRead)
                if not testData.startswith(b'R'):
                    # data received did not start with R
                    continue
                try:
                    sensorData = testData.decode('utf-8').lstrip('R')
                except UnicodeDecodeError:
                    # data received could not be decoded properly
                    continue
                try:
                    mm = int(sensorData)
                except ValueError:
                    # value is not a number
                    continue
                ser.close()
                return mm
        # timed out without a valid reading
        ser.close()
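A usage sketch (the device path and timeout mirror the constructor defaults; measure() returns None when it times out, as the fall-through above shows):

    sensor = MaxSonarTTY("/dev/ttyS0", maxWait=3)
    mm = sensor.measure()
    if mm is not None:
        print("distance: %d mm" % mm)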
| 33.109091
| 84
| 0.556837
| 186
| 1,821
| 5.430108
| 0.553763
| 0.079208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012965
| 0.364635
| 1,821
| 54
| 85
| 33.722222
| 0.859983
| 0.253707
| 0
| 0.228571
| 0
| 0
| 0.048399
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.057143
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1935e935a94ea899193f63cf6a01d898e2f578ec
| 2,577
|
py
|
Python
|
tests/output/TestFile.py
|
dstore-dbap/LumberMill
|
b7cbadc209a83386871735b8ad88b61da917a6ab
|
[
"Apache-2.0"
] | 15
|
2015-12-14T19:07:28.000Z
|
2022-02-28T13:32:11.000Z
|
tests/output/TestFile.py
|
dstore-dbap/LumberMill
|
b7cbadc209a83386871735b8ad88b61da917a6ab
|
[
"Apache-2.0"
] | null | null | null |
tests/output/TestFile.py
|
dstore-dbap/LumberMill
|
b7cbadc209a83386871735b8ad88b61da917a6ab
|
[
"Apache-2.0"
] | 4
|
2017-02-08T10:49:55.000Z
|
2019-03-19T18:47:46.000Z
|
import sys
import os
import io
import gzip
import mock
import tempfile
import lumbermill.utils.DictUtils as DictUtils
from tests.ModuleBaseTestCase import ModuleBaseTestCase
from lumbermill.output import File
class TestFile(ModuleBaseTestCase):
def setUp(self):
super(TestFile, self).setUp(File.File(mock.Mock()))
def getTempFileName(self):
temp_file = tempfile.NamedTemporaryFile()
temp_file_name = temp_file.name
temp_file.close()
return temp_file_name
def deleteTempFile(self, temp_file_name):
try:
os.remove(temp_file_name)
except:
etype, evalue, etb = sys.exc_info()
self.logger.error('Could no delete temporary file %s. Excpeption: %s, Error: %s.' % (temp_file_name, etype, evalue))
sys.exit(255)
def inflateGzipData(self, data):
buffer = io.BytesIO(data)
compressor = gzip.GzipFile(mode='rb', fileobj=buffer)
try:
inflated_data = str(compressor.read(), "utf-8")
except:
inflated_data = None
return inflated_data
def test(self):
temp_file_name = self.getTempFileName()
self.test_object.configure({'file_name': temp_file_name,
'store_interval_in_secs': 1})
self.checkConfiguration()
event = DictUtils.getDefaultEventDict({'data': 'One thing is for sure; a sheep is not a creature of the air.'})
self.test_object.receiveEvent(event)
self.test_object.shutDown()
with open(temp_file_name) as temp_file:
for line in temp_file:
self.assertEqual(line.rstrip(), event['data'])
self.deleteTempFile(temp_file_name)
def testGzipCompression(self):
temp_file_name = self.getTempFileName()
self.test_object.configure({'file_name': temp_file_name,
'store_interval_in_secs': 1,
'compress': 'gzip'})
self.checkConfiguration()
event = DictUtils.getDefaultEventDict({'data': 'One thing is for sure; a sheep is not a creature of the air.'})
self.test_object.receiveEvent(event)
self.test_object.shutDown()
with open("%s.gz" % temp_file_name, "rb") as temp_file:
for line in temp_file:
defalted_data = self.inflateGzipData(line)
self.assertIsNotNone(defalted_data)
self.assertEqual(defalted_data.rstrip(), event['data'])
self.deleteTempFile("%s.gz" % temp_file_name)
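A minimal way to run this test case directly, assuming the LumberMill package layout above is importable:

import unittest

suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestFile)
unittest.TextTestRunner(verbosity=2).run(suite)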
| 37.347826
| 128
| 0.629414
| 299
| 2,577
| 5.244147
| 0.324415
| 0.102041
| 0.107143
| 0.040816
| 0.451531
| 0.369898
| 0.369898
| 0.369898
| 0.335459
| 0.335459
| 0
| 0.003203
| 0.273186
| 2,577
| 69
| 129
| 37.347826
| 0.833956
| 0
| 0
| 0.305085
| 0
| 0
| 0.11249
| 0.017067
| 0
| 0
| 0
| 0
| 0.050847
| 1
| 0.101695
| false
| 0
| 0.152542
| 0
| 0.305085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19368974491b6add6e004f6c293a6ac67a000708
| 4,517
|
py
|
Python
|
user_scripts/decode_logits.py
|
DavidHribek/pero-ocr
|
8d274282813878b3e31dd560563a36b3f02e5c33
|
[
"BSD-3-Clause"
] | null | null | null |
user_scripts/decode_logits.py
|
DavidHribek/pero-ocr
|
8d274282813878b3e31dd560563a36b3f02e5c33
|
[
"BSD-3-Clause"
] | null | null | null |
user_scripts/decode_logits.py
|
DavidHribek/pero-ocr
|
8d274282813878b3e31dd560563a36b3f02e5c33
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import pickle
import time
import sys
from safe_gpu.safe_gpu import GPUOwner
from pero_ocr.decoding import confusion_networks
from pero_ocr.decoding.decoding_itf import prepare_dense_logits, construct_lm, get_ocr_charset, BLANK_SYMBOL
import pero_ocr.decoding.decoders as decoders
from pero_ocr.transcription_io import save_transcriptions
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-j', '--ocr-json', help='Path to OCR config', required=True)
parser.add_argument('-k', '--beam-size', type=int, help='Width of the beam')
parser.add_argument('-l', '--lm', help='File with a language model')
parser.add_argument('--insertion-bonus', type=float, help='Flat bonus for every letter introduced in transcription')
parser.add_argument('--lm-scale', type=float, default=1.0, help='Weight of the language model score')
parser.add_argument('-g', '--greedy', action='store_true', help='Decode with a greedy decoder')
parser.add_argument('--eval', action='store_true', help='Turn dropouts and batchnorms to eval mode')
parser.add_argument('--use-gpu', action='store_true', help='Make the decoder utilize a GPU')
parser.add_argument('--report-eta', action='store_true', help='Keep updating stdout with ETA')
parser.add_argument('--model-eos', action='store_true', help='Have the decoder model the end of sentences')
parser.add_argument('-i', '--input', help='Pickled dictionary with names and sparse logits', required=True)
parser.add_argument('-b', '--best', help='Where to store 1-best output', required=True)
parser.add_argument('-p', '--confidence', help='Where to store posterior probability of the 1-best', required=True)
parser.add_argument('-d', '--cn-best', help='Where to store 1-best from confusion network')
args = parser.parse_args()
return args
class Reporter:
def __init__(self, stream=sys.stdout, nop=False):
self.nop = nop
self.last_len = None
self.stream = stream
def report(self, msg):
if self.nop:
return
self.stream.write('\r')
self.stream.write(msg)
if self.last_len and len(msg) < self.last_len:
self.stream.write(' ' * (self.last_len-len(msg)))
self.last_len = len(msg)
def clear(self):
if self.nop:
return
self.stream.write('\r\n')
def main(args):
print(args)
ocr_engine_chars = get_ocr_charset(args.ocr_json)
lm = None  # keep `lm` defined on the greedy path; it is checked below
if args.greedy:
decoder = decoders.GreedyDecoder(ocr_engine_chars + [BLANK_SYMBOL])
else:
if args.lm:
lm = construct_lm(args.lm)
else:
lm = None
decoder = decoders.CTCPrefixLogRawNumpyDecoder(
ocr_engine_chars + [BLANK_SYMBOL],
k=args.beam_size,
lm=lm,
lm_scale=args.lm_scale,
use_gpu=args.use_gpu,
insertion_bonus=args.insertion_bonus,
)
if lm and args.eval:
lm.eval()
with open(args.input, 'rb') as f:
complete_input = pickle.load(f)
names = complete_input['names']
logits = complete_input['logits']
decodings = {}
confidences = {}
if args.cn_best:
cn_decodings = {}
t_0 = time.time()
reporter = Reporter(nop=not args.report_eta)
for i, (name, sparse_logits) in enumerate(zip(names, logits)):
time_per_line = (time.time() - t_0) / (i+1)
nb_lines_ahead = len(names) - (i+1)
reporter.report('Processing {} [{}/{}, {:.2f}s/line, ETA {:.2f}s]'.format(name, i+1, len(names), time_per_line, time_per_line*nb_lines_ahead))
dense_logits = prepare_dense_logits(sparse_logits)
if args.greedy:
boh = decoder(dense_logits)
else:
boh = decoder(dense_logits, args.model_eos)
one_best = boh.best_hyp()
decodings[name] = one_best
confidences[name] = boh.confidence()
if args.cn_best:
cn = confusion_networks.produce_cn_from_boh(boh)
cn_decodings[name] = confusion_networks.best_cn_path(cn)
reporter.clear()
save_transcriptions(args.best, decodings)
with open(args.confidence, 'w') as f:
for name in decodings:
f.write('{} {:.3f}\n'.format(name, confidences[name]))
if args.cn_best:
save_transcriptions(args.cn_best, cn_decodings)
if __name__ == "__main__":
args = parse_arguments()
gpu_owner = GPUOwner()
main(args)
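A quick sketch of the Reporter's padding behavior, exercising only the class defined above:

r = Reporter()
r.report('processing line 10/100')
r.report('done')  # shorter message: padded with spaces so the previous text is fully overwritten
r.clear()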
| 34.480916
| 150
| 0.651317
| 611
| 4,517
| 4.621931
| 0.276596
| 0.044618
| 0.084278
| 0.03364
| 0.190864
| 0.093484
| 0.093484
| 0.052408
| 0.030453
| 0
| 0
| 0.003977
| 0.220722
| 4,517
| 130
| 151
| 34.746154
| 0.798295
| 0.004649
| 0
| 0.12
| 0
| 0
| 0.169967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.09
| 0
| 0.18
| 0.01
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1936ec832cd585d63cf22975d9e0473abed83035
| 1,608
|
py
|
Python
|
PiCN/Layers/ICNLayer/ContentStore/ContentStoreMemoryExact.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
PiCN/Layers/ICNLayer/ContentStore/ContentStoreMemoryExact.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | 5
|
2020-07-15T09:01:42.000Z
|
2020-09-28T08:45:21.000Z
|
PiCN/Layers/ICNLayer/ContentStore/ContentStoreMemoryExact.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
""" An in-memory content store with exact matching"""
import time
from PiCN.Packets import Content, Name
from PiCN.Layers.ICNLayer.ContentStore import BaseContentStore, ContentStoreEntry
class ContentStoreMemoryExact(BaseContentStore):
""" A in memory Content Store using exact matching"""
def __init__(self, cs_timeout: int = 10):
BaseContentStore.__init__(self, cs_timeout=cs_timeout)
def find_content_object(self, name: Name) -> ContentStoreEntry:
for c in self._container:
if c.content.name == name: #and c.content.name_payload == name_payload:
return c
return None
def add_content_object(self, content: Content, static: bool=False):
for c in self._container:
if content == c.content:
return
self._container.append(ContentStoreEntry(content, static=static))
def remove_content_object(self, name: Name):
rem = self.find_content_object(name)
if rem is not None:
self._container.remove(rem)
def update_timestamp(self, cs_entry: ContentStoreEntry):
self._container.remove(cs_entry)
cs_entry.timestamp = time.time()
self._container.append(cs_entry)
def ageing(self):
cur_time = time.time()
remove = []
for cs_entry in self._container:
if cs_entry.static is True:
continue
if cs_entry.timestamp + self._cs_timeout < cur_time:
remove.append(cs_entry)
for cs_entry in remove:
self.remove_content_object(cs_entry.content.name)
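A usage sketch; the Content constructor arguments here are an assumption about PiCN's API:

cs = ContentStoreMemoryExact(cs_timeout=10)
content = Content(Name("/demo/data"), "payload")    # assumed Content(name, payload) signature
cs.add_content_object(content)
entry = cs.find_content_object(Name("/demo/data"))  # exact-match lookup
cs.ageing()                                         # evicts non-static entries older than cs_timeout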
| 34.212766
| 83
| 0.65796
| 196
| 1,608
| 5.168367
| 0.285714
| 0.069102
| 0.0385
| 0.050346
| 0.090819
| 0.041461
| 0
| 0
| 0
| 0
| 0
| 0.001676
| 0.258085
| 1,608
| 46
| 84
| 34.956522
| 0.847443
| 0.085199
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.088235
| 0
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
193ab624d131e849acb875b0bc59e01faf091e1d
| 279
|
py
|
Python
|
texaslan/slack/pipelines/on_success.py
|
hsmeans/texaslan.org
|
a981e7835381e77320e39536a619981ba9d03451
|
[
"MIT"
] | 2
|
2018-02-06T06:24:03.000Z
|
2018-03-20T03:32:13.000Z
|
texaslan/slack/pipelines/on_success.py
|
hsmeans/texaslan.org
|
a981e7835381e77320e39536a619981ba9d03451
|
[
"MIT"
] | 32
|
2017-02-21T20:01:43.000Z
|
2020-02-08T21:52:16.000Z
|
texaslan/slack/pipelines/on_success.py
|
hsmeans/texaslan.org
|
a981e7835381e77320e39536a619981ba9d03451
|
[
"MIT"
] | 6
|
2017-03-21T21:16:40.000Z
|
2020-02-08T20:46:20.000Z
|
from django_slack_oauth.models import SlackOAuthRequest
def register_token(request, api_data):
SlackOAuthRequest.objects.create(
associated_user=request.user,
access_token=api_data.pop('access_token'),
extras=api_data
)
return request, api_data
| 27.9
| 55
| 0.749104
| 34
| 279
| 5.852941
| 0.617647
| 0.140704
| 0.140704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175627
| 279
| 10
| 56
| 27.9
| 0.865217
| 0
| 0
| 0
| 0
| 0
| 0.042857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
193cb661b098c4c5b452e6a65209cb9479f364c3
| 4,326
|
py
|
Python
|
get_git/github_client.py
|
alanahanson/get-git
|
a3b078a64ce8f4bb7103fcd46a0eee80cd35f87c
|
[
"MIT"
] | null | null | null |
get_git/github_client.py
|
alanahanson/get-git
|
a3b078a64ce8f4bb7103fcd46a0eee80cd35f87c
|
[
"MIT"
] | null | null | null |
get_git/github_client.py
|
alanahanson/get-git
|
a3b078a64ce8f4bb7103fcd46a0eee80cd35f87c
|
[
"MIT"
] | null | null | null |
import os
from get_git.utils import make_request
GH_URL = 'https://api.github.com/graphql'
TOKEN=os.environ.get('GH_API_TOKEN')
class GithubClient:
def __init__(self, username):
self.username = username
def get_user(self):
query = """
{user(login:"%s") {
starredRepositories { totalCount }
followers { totalCount }
repositories(first:100) {
totalDiskUsage
totalCount
pageInfo { hasNextPage, endCursor }
nodes {
id
name
isFork
primaryLanguage { name }
issues(states:OPEN) { totalCount }
commitComments { totalCount }
watchers { totalCount }
stargazers { totalCount }
languages(first:100) {
totalCount
pageInfo { hasNextPage, endCursor }
nodes { name }
}
repositoryTopics(first:100) {
totalCount
pageInfo { hasNextPage, endCursor }
nodes {
topic { name }
}
}
}
}
}
}
""" % (self.username)
return make_request(GH_URL, 'post', token=TOKEN, data=query)
def _get_additional_repos(self, cursor=None):
query = """
{user(login:"%s") {
repositories(first:100%s) {
pageInfo { hasNextPage, endCursor }
nodes {
id
name
isFork
primaryLanguage { name }
issues(states:OPEN) { totalCount }
commitComments { totalCount }
watchers { totalCount }
stargazers { totalCount }
repositoryTopics(first:100) {
totalCount
pageInfo { hasNextPage, endCursor }
nodes {
topic { name }
}
}
}
}
}
}
""" % (self.username, self._add_cursor(cursor))
return make_request(GH_URL, 'post', token=TOKEN, data=query)
def _get_additional_topics(self, repo_name, cursor=None):
query = """
{repository(owner:"%s", name:"%s") {
repositoryTopics(first:100%s) {
pageInfo { hasNextPage, endCursor }
nodes {
topic { name }
}
}
}
}
""" % (self.username, repo_name, self._add_cursor(cursor))
return make_request(GH_URL, 'post', token=TOKEN, data=query)
def _add_cursor(self, cursor):
if cursor:
return f',after:"{cursor}"'
return ''
def _get_all_repos(self, repo_data):
repos = repo_data['nodes']
while repo_data['pageInfo']['hasNextPage']:
cursor = repo_data['pageInfo']['endCursor']
next_page = self._get_additional_repos(cursor=cursor)
repos.extend(next_page['data']['user']['repositories']['nodes'])
repo_data['pageInfo'] = next_page['data']['user']['repositories']['pageInfo']
return repos
def _get_all_topics(self, repo_name, topic_data):
topics = topic_data['nodes']
while topic_data['pageInfo']['hasNextPage']:
cursor = topic_data['pageInfo']['endCursor']
next_page = self._get_additional_topics(repo_name, cursor=cursor)
topics.extend(next_page['data']['repository']['repositoryTopics']['nodes'])
topic_data['pageInfo'] = next_page['data']['repository']['repositoryTopics']['pageInfo']
return topics
def get_data(self):
result = self.get_user()['data']['user']
result['repositories']['nodes'] = self._get_all_repos(result['repositories'])
for repo in result['repositories']['nodes']:
if repo['repositoryTopics']['totalCount']:
repo['repositoryTopics']['nodes'] = self._get_all_topics(repo['name'], repo['repositoryTopics'])
return result
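Hypothetical usage; it needs GH_API_TOKEN set in the environment and network access:

client = GithubClient('octocat')
data = client.get_data()
print(data['repositories']['totalCount'])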
| 35.170732
| 100
| 0.487286
| 350
| 4,326
| 5.837143
| 0.211429
| 0.0744
| 0.082232
| 0.096916
| 0.52325
| 0.441997
| 0.441997
| 0.408223
| 0.33676
| 0.33676
| 0
| 0.006998
| 0.405455
| 4,326
| 122
| 101
| 35.459016
| 0.787325
| 0
| 0
| 0.4
| 0
| 0
| 0.596625
| 0.035599
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072727
| false
| 0
| 0.018182
| 0
| 0.172727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
193eb2395e6afc892c407dab660196002686ac81
| 15,517
|
py
|
Python
|
Naluno/model.py
|
dstarrago/Naluno
|
de2a498b65ac7e10599f797e41c77d0ceae56c3e
|
[
"MIT"
] | null | null | null |
Naluno/model.py
|
dstarrago/Naluno
|
de2a498b65ac7e10599f797e41c77d0ceae56c3e
|
[
"MIT"
] | null | null | null |
Naluno/model.py
|
dstarrago/Naluno
|
de2a498b65ac7e10599f797e41c77d0ceae56c3e
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function, unicode_literals
from config import *
__all__ = ['Map', 'Vertex', 'Edge', 'State']
class State:
FREE = 0
CLOSED = 1
MANDATORY = 2
OPTIONAL = 3
class Square:
def __init__(self):
self._has_card = False
@property
def has_card(self):
return self._has_card
@has_card.setter
def has_card(self, value):
self._has_card = value
def copy_to(self, square):
square.has_card = self._has_card
class Edge:
def __init__(self):
self._port = [State.FREE, State.FREE, State.FREE]
self._num_cards = 0
self._contact_number = 0
@property
def num_cards(self):
return self._num_cards
@num_cards.setter
def num_cards(self, value):
self._num_cards = value
@property
def port(self):
return self._port
@property
def contact_number(self):
return self._contact_number
@contact_number.setter
def contact_number(self, value):
self._contact_number = value
def update(self, edge):
self._contact_number = 0
for i in range(3):
if self.port[i] != State.FREE:
if self.port[i] != State.CLOSED and edge.port[i] != State.CLOSED:
self._contact_number += 1
self.port[i] = edge.port[i]
self.num_cards += 1
def match(self, edge):
self._contact_number = 0
if self.port[1] == State.FREE:
return True
for i in range(3):
if self.port[i] == State.MANDATORY:
if edge.port[i] == State.CLOSED:
return False
elif self.port[i] == State.CLOSED:
if edge.port[i] == State.MANDATORY:
return False
if self.port[i] != State.CLOSED and edge.port[i] != State.CLOSED:
self._contact_number += 1
return True
@property
def dock_count(self):
dock_count = 0
for i in range(3):
if self.port[i] == State.MANDATORY or self.port[i] == State.OPTIONAL:
dock_count += 1
return dock_count
@property
def mandatory_dock_count(self):
dock_count = 0
for i in range(3):
if self.port[i] == State.MANDATORY:
dock_count += 1
return dock_count
def copy_to(self, edge):
edge.num_cards = self._num_cards
edge.contact_number = self._contact_number
for i in range(3):
edge.port[i] = self._port[i]
class Vertex:
def __init__(self):
self._state = State.FREE
self._contact_number = 0
self._num_cards = 0
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
@property
def contact_number(self):
return self._contact_number
@contact_number.setter
def contact_number(self, value):
self._contact_number = value
@property
def num_cards(self):
return self._num_cards
@num_cards.setter
def num_cards(self, value):
self._num_cards = value
def update(self, vertex):
self._contact_number = 0
if self._state == State.CLOSED:
self._state = vertex.state
elif self._state == State.MANDATORY:
if vertex.state != State.CLOSED:
self._state = State.OPTIONAL
self._contact_number = 1
elif self._state == State.OPTIONAL:
if vertex.state != State.CLOSED:
self._contact_number = 1
elif self._state == State.FREE:
self._state = vertex.state
self._num_cards += 1
def match(self, vertex):
self._contact_number = 0
if self._state == State.FREE:
return True
if self._state == State.MANDATORY:
if vertex.state == State.CLOSED and self._num_cards == 3:
return False
elif self._state == State.CLOSED:
if vertex.state == State.MANDATORY and self._num_cards == 3:
return False
if self._state != State.CLOSED and vertex.state != State.CLOSED:
self._contact_number = 1
return True
def copy_to(self, vertex):
vertex.state = self._state
vertex.num_cards = self._num_cards
vertex.contact_number = self._contact_number
@property
def dock_count(self):
if self._state == State.MANDATORY or self._state == State.OPTIONAL:
return 1
else:
return 0
@property
def mandatory_dock_count(self):
if self._state == State.MANDATORY:
return 1
else:
return 0
class Move:
def __init__(self, card, row, col):
self._card = card
self._row = row
self._col = col
def get_clone(self):
return Move(self._card, self._row, self._col)
@property
def card(self):
return self._card
@property
def row(self):
return self._row
@property
def col(self):
return self._col
class Map:
def __init__(self, matrix=None):
self._most_right_move = None
self._most_left_move = None
self._top_move = None
self._bottom_move = None
self._contact_count = 0
self._move_history = []
self.vsize = VER_MAP_SIZE * 3
self.hsize = HOR_MAP_SIZE * 3
self._matrix = []
self.init_matrix(matrix)
@property
def most_right_move(self):
return self._most_right_move
@most_right_move.setter
def most_right_move(self, move):
self._most_right_move = move
@property
def most_left_move(self):
return self._most_left_move
@most_left_move.setter
def most_left_move(self, move):
self._most_left_move = move
@property
def top_move(self):
return self._top_move
@top_move.setter
def top_move(self, move):
self._top_move = move
@property
def bottom_move(self):
return self._bottom_move
@bottom_move.setter
def bottom_move(self, move):
self._bottom_move = move
@property
def contact_count(self):
return self._contact_count
@contact_count.setter
def contact_count(self, value):
self._contact_count = value
@property
def move_history(self):
return self._move_history
def init_matrix(self, matrix):
if matrix is None:
for i in range(self.vsize):
row = []
for j in range(self.hsize):
row.append(None)
self._matrix.append(row)
tile_row = False
tile_col = False
for i in range(self.vsize):
for j in range(self.hsize):
if tile_row:
if tile_col:
self._matrix[i][j] = Square()
else:
self._matrix[i][j] = Edge()
else:
if tile_col:
self._matrix[i][j] = Edge()
else:
self._matrix[i][j] = Vertex()
tile_col = not tile_col
tile_row = not tile_row
tile_col = False
else:
for i in range(self.vsize):
self._matrix.append(list(matrix[i]))  # copy each row of the source matrix (works for lists and arrays)
def clone(self):
m = Map(self._matrix)
m.contact_count = self._contact_count
m.move_history.extend(self._move_history[:])
m.most_left_move = self._most_left_move
m.most_right_move = self._most_right_move
m.top_move = self._top_move
m.bottom_move = self._bottom_move
return m
@property
def center_col(self):
return HOR_MAP_SIZE // 2
@property
def center_row(self):
return VER_MAP_SIZE // 2
def square_at(self, col, row):
return self._matrix[row * 2 + 1][col * 2 + 1]
def top_square(self, col, row):
return self.square_at(col, row - 1)  # not using GL coordinates: row 0 is the top row
def bottom_square(self, col, row):
return self.square_at(col, row + 1)  # not using GL coordinates: rows grow downward
def left_square(self, col, row):
return self.square_at(col - 1, row)
def right_square(self, col, row):
return self.square_at(col + 1, row)
def top_edge(self, col, row):
return self._matrix[row * 2][col * 2 + 1]
def bottom_edge(self, col, row):
return self._matrix[row * 2 + 2][col * 2 + 1]
def left_edge(self, col, row):
return self._matrix[row * 2 + 1][col * 2]
def right_edge(self, col, row):
return self._matrix[row * 2 + 1][col * 2 + 2]
def top_left_vertex(self, col, row):
return self._matrix[row * 2][col * 2]
def bottom_left_vertex(self, col, row):
return self._matrix[row * 2 + 2][col * 2]
def top_right_vertex(self, col, row):
return self._matrix[row * 2][col * 2 + 2]
def bottom_right_vertex(self, col, row):
return self._matrix[row * 2 + 2][col * 2 + 2]
@property
def most_left_card(self):
return self.most_left_move.card
@property
def most_right_card(self):
return self.most_right_move.card
@property
def top_card(self):
return self.top_move.card
@property
def bottom_card(self):
return self.bottom_move.card
def dock_count(self, col, row):
result = self.top_edge(col, row).dock_count + \
self.bottom_edge(col, row).dock_count + \
self.left_edge(col, row).dock_count + \
self.right_edge(col, row).dock_count + \
self.top_left_vertex(col, row).dock_count + \
self.top_right_vertex(col, row).dock_count + \
self.bottom_left_vertex(col, row).dock_count + \
self.bottom_right_vertex(col, row).dock_count
return result
def mandatory_dock_count(self, col, row):
result = self.top_edge(col, row).mandatory_dock_count + \
self.bottom_edge(col, row).mandatory_dock_count + \
self.left_edge(col, row).mandatory_dock_count + \
self.right_edge(col, row).mandatory_dock_count + \
self.top_left_vertex(col, row).mandatory_dock_count + \
self.top_right_vertex(col, row).mandatory_dock_count + \
self.bottom_left_vertex(col, row).mandatory_dock_count + \
self.bottom_right_vertex(col, row).mandatory_dock_count
return result
def have_adjacent_card(self, col, row):
up = self.top_square(col, row)
down = self.bottom_square(col, row)
left = self.left_square(col, row)
right = self.right_square(col, row)
return up.has_card or down.has_card or left.has_card or right.has_card
def move_card(self, move):
self.put_card(move.col, move.row, move.card)
def put_card(self, col, row, card):
self.square_at(col, row).has_card = True
self.top_left_vertex(col, row).update(card.vertex(card.TOP_LEFT_VERTEX))
self.top_right_vertex(col, row).update(card.vertex(card.TOP_RIGHT_VERTEX))
self.bottom_left_vertex(col, row).update(card.vertex(card.BOTTOM_LEFT_VERTEX))
self.bottom_right_vertex(col, row).update(card.vertex(card.BOTTOM_RIGHT_VERTEX))
self.top_edge(col, row).update(card.vertex(card.TOP_EDGE))
self.bottom_edge(col, row).update(card.vertex(card.BOTTOM_EDGE))
self.left_edge(col, row).update(card.vertex(card.LEFT_EDGE))
self.right_edge(col, row).update(card.vertex(card.RIGHT_EDGE))
self._contact_count = \
self.top_edge(col, row).contact_number + \
self.bottom_edge(col, row).contact_number + \
self.left_edge(col, row).contact_number + \
self.right_edge(col, row).contact_number + \
self.top_left_vertex(col, row).contact_number + \
self.top_right_vertex(col, row).contact_number + \
self.bottom_left_vertex(col, row).contact_number + \
self.bottom_right_vertex(col, row).contact_number
move = Move(card, row, col)  # Move.__init__ takes (card, row, col)
self._move_history.append(move)
self.update_extreme_cards(move)
return self._contact_count
def match(self, col, row, card):
match = \
self.top_left_vertex(col, row).match(card.vertex(card.TOP_LEFT_VERTEX)) and \
self.top_right_vertex(col, row).match(card.vertex(card.TOP_RIGHT_VERTEX)) and \
self.bottom_left_vertex(col, row).match(card.vertex(card.BOTTOM_LEFT_VERTEX)) and \
self.bottom_right_vertex(col, row).match(card.vertex(card.BOTTOM_RIGHT_VERTEX)) and \
self.top_edge(col, row).match(card.vertex(card.TOP_EDGE)) and \
self.bottom_edge(col, row).match(card.vertex(card.BOTTOM_EDGE)) and \
self.left_edge(col, row).match(card.vertex(card.LEFT_EDGE)) and \
self.right_edge(col, row).match(card.vertex(card.RIGHT_EDGE))
if not match:
self._contact_count = 0
return False
self._contact_count = \
self.top_edge(col, row).contact_number + \
self.bottom_edge(col, row).contact_number + \
self.left_edge(col, row).contact_number + \
self.right_edge(col, row).contact_number + \
self.top_left_vertex(col, row).contact_number + \
self.top_right_vertex(col, row).contact_number + \
self.bottom_left_vertex(col, row).contact_number + \
self.bottom_right_vertex(col, row).contact_number
return self._contact_count > 0
def try_move(self, move):
return not self.square_at(move.col, move.row).has_card and \
self.have_adjacent_card(move.col, move.row) and \
self.match(move.col, move.row, move.card)
def play_card(self, col, row, card):
if not self.square_at(col, row).has_card and \
self.have_adjacent_card(col, row) and \
self.match(col, row, card):
self.put_card(col, row, card)
card.played = True
return True
def update_extreme_cards(self, move):
if len(self._move_history) == 1:
self._most_right_move = move
self._most_left_move = move
self._top_move = move
self._bottom_move = move
else:
if move.col > self._most_right_move.col:
self._most_right_move = move
if move.col < self._most_left_move.col:
self._most_left_move = move
if move.row < self._top_move.row:  # not using GL coordinates: smaller row is higher
self._top_move = move
if move.row > self._bottom_move.row:  # not using GL coordinates: larger row is lower
self._bottom_move = move
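The vertex/edge/square lattice built by init_matrix can be sanity-checked directly, assuming config provides HOR_MAP_SIZE and VER_MAP_SIZE as the import above implies:

m = Map()
assert isinstance(m.square_at(0, 0), Square)        # odd/odd lattice entries are squares
assert isinstance(m.top_edge(0, 0), Edge)           # mixed-parity entries are edges
assert isinstance(m.top_left_vertex(0, 0), Vertex)  # even/even entries are vertices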
| 32.875
| 98
| 0.569827
| 1,980
| 15,517
| 4.198485
| 0.049495
| 0.057019
| 0.02887
| 0.036569
| 0.654276
| 0.549621
| 0.463491
| 0.371587
| 0.261037
| 0.248767
| 0
| 0.007443
| 0.333312
| 15,517
| 471
| 99
| 32.944798
| 0.796133
| 0.00638
| 0
| 0.400517
| 0
| 0
| 0.001205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.191214
| false
| 0
| 0.005168
| 0.095607
| 0.374677
| 0.002584
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
194088485187df0c1ce817432f67940ecca472cb
| 1,035
|
py
|
Python
|
combo/search/score.py
|
yanpei18345156216/COMBO_Python3
|
666a116dfece71e6236291e89ea2ab4d6db0ead9
|
[
"MIT"
] | 139
|
2016-02-18T02:31:04.000Z
|
2022-02-18T10:38:06.000Z
|
combo/search/score.py
|
yanpei18345156216/COMBO_Python3
|
666a116dfece71e6236291e89ea2ab4d6db0ead9
|
[
"MIT"
] | 8
|
2016-04-18T08:10:44.000Z
|
2020-12-30T08:49:33.000Z
|
combo/search/score.py
|
yanpei18345156216/COMBO_Python3
|
666a116dfece71e6236291e89ea2ab4d6db0ead9
|
[
"MIT"
] | 50
|
2016-05-21T01:17:23.000Z
|
2022-02-18T01:27:41.000Z
|
import numpy as np
import scipy.stats
def EI(predictor, training, test, fmax=None):
fmean = predictor.get_post_fmean(training, test)
fcov = predictor.get_post_fcov(training, test)
fstd = np.sqrt(fcov)
if fmax is None:
fmax = np.max(predictor.get_post_fmean(training, training))
temp1 = (fmean - fmax)
temp2 = temp1 / fstd
score = temp1 * scipy.stats.norm.cdf(temp2) \
+ fstd * scipy.stats.norm.pdf(temp2)
return score
def PI(predictor, training, test, fmax=None):
fmean = predictor.get_post_fmean(training, test)
fcov = predictor.get_post_fcov(training, test)
fstd = np.sqrt(fcov)
if fmax is None:
fmax = np.max(predictor.get_post_fmean(training, training))
temp = (fmean - fmax)/fstd
score = scipy.stats.norm.cdf(temp)
return score
def TS(predictor, training, test, alpha=1):
score = predictor.get_post_samples(training, test, alpha=alpha)
try:
score.shape[1]  # probe for a second axis; 1-D sample arrays raise IndexError here
score[0, :]
except (AttributeError, IndexError):
pass
return score
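A worked check of EI with a stub predictor; StubPredictor is illustrative, not part of combo:

import numpy as np

class StubPredictor:
    def get_post_fmean(self, training, test):
        return np.zeros(len(test))
    def get_post_fcov(self, training, test):
        return np.ones(len(test))

scores = EI(StubPredictor(), training=np.arange(3), test=np.arange(5), fmax=0.0)
# With fmean=0, fstd=1, fmax=0 this reduces to norm.pdf(0) ~ 0.3989 at every test point.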
| 24.069767
| 67
| 0.656039
| 142
| 1,035
| 4.683099
| 0.28169
| 0.144361
| 0.168421
| 0.126316
| 0.526316
| 0.526316
| 0.526316
| 0.526316
| 0.526316
| 0.526316
| 0
| 0.011321
| 0.231884
| 1,035
| 42
| 68
| 24.642857
| 0.825157
| 0
| 0
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.033333
| 0.066667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19414c412df3d7fe628fab1103ed1b978f8a57d8
| 1,528
|
py
|
Python
|
dags/calculating_google_ads_network.py
|
BjMrq/Python-AirflowReportPipeline
|
261812488d661580cb0f41808d94249cc8e0951b
|
[
"MIT"
] | 2
|
2019-06-28T20:08:56.000Z
|
2021-03-30T15:24:10.000Z
|
dags/calculating_google_ads_network.py
|
BjMrq/Python-AirflowReportPipeline
|
261812488d661580cb0f41808d94249cc8e0951b
|
[
"MIT"
] | null | null | null |
dags/calculating_google_ads_network.py
|
BjMrq/Python-AirflowReportPipeline
|
261812488d661580cb0f41808d94249cc8e0951b
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
LOCAL_DIR = '/tmp/'
def main(**kwargs):
# Retrieve the campus name from XCom
ti = kwargs["ti"]
source = ti.xcom_pull(
task_ids="report_init_task")
campus_name = source["campus"]
# Read data file to create a data frame
df = pd.read_csv(LOCAL_DIR + campus_name + '_google_ads_data_cleaned.csv')
# Make sure Cost is the right data type
df.Cost = df.Cost.astype(int)
# Format networks
df['AdNetworkType1'] = df['AdNetworkType1'].str.replace(
"Display Network", "| Display").str.replace(
"Search Network", "| Search").str.replace(
"YouTube Search", "| YouTube").str.replace(
"YouTube Videos", "| YouTube")
# Create a pivot table of cost per network type per school
pivot = pd.pivot_table(
df, values='Cost', index=['school'], columns=['AdNetworkType1'],
aggfunc=np.sum, fill_value=0, margins=True).reset_index()
# Collect the unique network names to loop through
networks = df["AdNetworkType1"].unique()
# Format
for i in networks:
pivot[i] = '| ' + pivot[i].astype(str) + "$"
pivot['All'] = '| ' + pivot['All'].astype(str) + "$"
pivot = pivot[pivot.school != 'All']
# Drop columns containing no data
pivot = pivot.loc[:, (pivot != '| 0$').any(axis=0)]
# Save in new file
pivot.to_csv(LOCAL_DIR + campus_name + '_google_spent_per_network.csv',
header=True, index=False, index_label=False)
if __name__ == '__main__':
main()
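The pivot step in isolation, on made-up rows with the same column names as above:

import pandas as pd
import numpy as np

df = pd.DataFrame({'school': ['A', 'A', 'B'],
                   'AdNetworkType1': ['| Search', '| Display', '| Search'],
                   'Cost': [10, 5, 7]})
pivot = pd.pivot_table(df, values='Cost', index=['school'],
                       columns=['AdNetworkType1'], aggfunc=np.sum,
                       fill_value=0, margins=True).reset_index()
print(pivot)  # one row per school plus an 'All' margin row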
| 27.781818
| 78
| 0.621073
| 198
| 1,528
| 4.631313
| 0.479798
| 0.043621
| 0.023991
| 0.037077
| 0.058888
| 0.058888
| 0
| 0
| 0
| 0
| 0
| 0.005978
| 0.233639
| 1,528
| 54
| 79
| 28.296296
| 0.777114
| 0.176047
| 0
| 0
| 0
| 0
| 0.216974
| 0.045637
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.071429
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1941b9b85d89dbfd9a868d046110eb8fc8e84d5a
| 1,401
|
py
|
Python
|
src/auditor/org_checker.py
|
agrc/agol-validator
|
b17f3fee55bf0b1f7d2ed21ae86b1556072da4d8
|
[
"MIT"
] | null | null | null |
src/auditor/org_checker.py
|
agrc/agol-validator
|
b17f3fee55bf0b1f7d2ed21ae86b1556072da4d8
|
[
"MIT"
] | 21
|
2020-01-29T22:03:54.000Z
|
2020-07-29T17:55:44.000Z
|
src/auditor/org_checker.py
|
agrc/agol-validator
|
b17f3fee55bf0b1f7d2ed21ae86b1556072da4d8
|
[
"MIT"
] | null | null | null |
"""
Holds an OrgChecker object that runs checks at the organization level (instead of at the item level)
"""
class OrgChecker:
"""
An OrgChecker runs checks at the org level, as opposed to the item level. For example, checking whether there are
any items with the same title.
To use, instantiate and then call run_checks(), which will run all checks.
"""
def __init__(self, item_list):
self.item_list = item_list
def run_checks(self):
"""
Run all checks in the OrgChecker. Any new checks should be added to this method.
"""
results_dict = {}
results_dict['check_for_duplicate_titles'] = self.check_for_duplicate_titles()
return results_dict
def check_for_duplicate_titles(self):
"""
Report any items in self.item_list that have duplicate titles.
Returns: Dictionary of item ids for each duplicate title: {duplicate_title: [itemid, itemid, ...]}
"""
seen_titles = {}
duplicates = {}
for item in self.item_list:
if item.title in seen_titles:
seen_titles[item.title].append(item.itemid)
else:
seen_titles[item.title] = [item.itemid]
for title in seen_titles:
if len(seen_titles[title]) > 1:
duplicates[title] = seen_titles[title]
return duplicates
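A usage sketch with stand-in items; a real item_list would come from the ArcGIS Online API:

from types import SimpleNamespace

items = [SimpleNamespace(title='Roads', itemid='a1'),
         SimpleNamespace(title='Roads', itemid='b2'),
         SimpleNamespace(title='Parcels', itemid='c3')]
checker = OrgChecker(items)
print(checker.run_checks())
# {'check_for_duplicate_titles': {'Roads': ['a1', 'b2']}}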
| 29.808511
| 117
| 0.628837
| 180
| 1,401
| 4.722222
| 0.388889
| 0.082353
| 0.056471
| 0.081176
| 0.063529
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001007
| 0.291221
| 1,401
| 46
| 118
| 30.456522
| 0.854985
| 0.403283
| 0
| 0
| 0
| 0
| 0.034621
| 0.034621
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1941f729283db4adc38960fd6ecd423a78269f4b
| 994
|
py
|
Python
|
create_post.py
|
schlop/blog
|
74fe7d5ce4e1c00942cb033710720098ac493844
|
[
"MIT"
] | null | null | null |
create_post.py
|
schlop/blog
|
74fe7d5ce4e1c00942cb033710720098ac493844
|
[
"MIT"
] | null | null | null |
create_post.py
|
schlop/blog
|
74fe7d5ce4e1c00942cb033710720098ac493844
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from datetime import datetime
import sys
def create_blog_post(title=""):
file_date = datetime.now().strftime("%Y-%m-%d")
file_name = file_date + "---" + title.replace(" ", "-") + ".md"
print(file_name)
try:
file = open("content/posts/" + file_name, "x")
except FileExistsError:
print("Post already exists. Delete old post first to create a new one")
sys.exit(1)
content_date = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
content_slug = title.lower().replace(" ", "-")
content = f"""---
title: {title}
date: \"{content_date}\"
template: \"post\"
draft: false
slug: {content_slug}
category: \"\"
description: \"\"
socialImage: \"\"
---""".replace(" ", "")
file.write(content)
file.close()
print("Post sucesfully created!")
if __name__ == '__main__':
if len(sys.argv):
create_blog_post(sys.argv[1])
else:
create_blog_post()
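The filename and slug transforms in isolation:

title = "Hello World"
print(title.replace(" ", "-"))          # Hello-World, used in the .md filename
print(title.lower().replace(" ", "-"))  # hello-world, used as the post slug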
| 26.157895
| 80
| 0.573441
| 120
| 994
| 4.558333
| 0.541667
| 0.054845
| 0.076782
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005249
| 0.2334
| 994
| 37
| 81
| 26.864865
| 0.712598
| 0.017103
| 0
| 0
| 0
| 0
| 0.347336
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.064516
| 0
| 0.096774
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
194306ac920374768433626240e00df6f0b039ac
| 1,436
|
py
|
Python
|
src/python/procyon/py3.py
|
orbea/procyon
|
469d94427d3b6e7cc2ab93606bdf968717a49150
|
[
"Apache-2.0"
] | null | null | null |
src/python/procyon/py3.py
|
orbea/procyon
|
469d94427d3b6e7cc2ab93606bdf968717a49150
|
[
"Apache-2.0"
] | null | null | null |
src/python/procyon/py3.py
|
orbea/procyon
|
469d94427d3b6e7cc2ab93606bdf968717a49150
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 The Procyon Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines names that are no longer available in python3.
name py2 meaning py3 meaning
---- ----------- -----------
unicode unicode str
long long int
xrange xrange range
iteritems dict.iteritems dict.items
iterkeys dict.iterkeys dict.keys
itervalues dict.itervalues dict.values
"""
try:
unicode = unicode
repr = (lambda r: lambda x: r(x).decode("utf-8"))(repr)
except NameError:
unicode = str
repr = repr
try:
long = long
except NameError:
long = int
try:
xrange = xrange
except NameError:
xrange = range
iteritems = lambda d: getattr(d, "iteritems", d.items)()
iterkeys = lambda d: getattr(d, "iterkeys", d.keys)()
itervalues = lambda d: getattr(d, "itervalues", d.values)()
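A quick check (under Python 3) that the shims resolve as documented; this snippet is illustrative, not part of procyon:

assert unicode is str and long is int and xrange is range
assert list(iteritems({'a': 1})) == [('a', 1)]
assert list(iterkeys({'a': 1})) == ['a']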
| 29.306122
| 74
| 0.664345
| 194
| 1,436
| 4.917526
| 0.530928
| 0.062893
| 0.044025
| 0.04717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.229805
| 1,436
| 48
| 75
| 29.916667
| 0.84991
| 0.673398
| 0
| 0.352941
| 0
| 0
| 0.071588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1943cab210caf1760efe3b398e0efc3f17bdc7ab
| 693
|
py
|
Python
|
interlens/criterions/criterion.py
|
cctien/bimultialign
|
d0dad62651c25545fb7539639cb72fc8ea2570aa
|
[
"MIT"
] | null | null | null |
interlens/criterions/criterion.py
|
cctien/bimultialign
|
d0dad62651c25545fb7539639cb72fc8ea2570aa
|
[
"MIT"
] | null | null | null |
interlens/criterions/criterion.py
|
cctien/bimultialign
|
d0dad62651c25545fb7539639cb72fc8ea2570aa
|
[
"MIT"
] | null | null | null |
from allennlp.common import Registrable
import torch
class Criterion(torch.nn.Module, Registrable):
"""
A `Criterion` is a `Module` that ...
"""
def __init__(self,
reduction: str = 'mean',
verbose: bool = False,) -> None:
super().__init__()
self.reduction = reduction
if reduction == 'mean':
self._average = torch.mean
elif reduction == 'sum':
self._average = torch.sum
else:
raise NotImplementedError
self._verbose = verbose
def _forward_verbose(self) -> None:
raise NotImplementedError
# Losses = Dict[str, Dict[str, Union[float, Loss]]]
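A sketch of a concrete subclass; the names below are illustrative, not from the repo:

import torch

class MseCriterion(Criterion):
    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # self._average is torch.mean or torch.sum, chosen by `reduction`
        return self._average((pred - target) ** 2)

crit = MseCriterion(reduction='mean')
loss = crit(torch.zeros(3), torch.ones(3))  # tensor(1.)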
| 23.896552
| 51
| 0.572872
| 69
| 693
| 5.565217
| 0.536232
| 0.041667
| 0.088542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.31746
| 693
| 28
| 52
| 24.75
| 0.811839
| 0.125541
| 0
| 0.117647
| 0
| 0
| 0.018644
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1945a3089e1f6313c2cc75593bf5a6b3e3eaea61
| 4,767
|
py
|
Python
|
rain/models/posemb_transformer.py
|
qq1418381215/caat
|
1422707bef7a2aeca272fa085f410bff07ced760
|
[
"MIT"
] | 14
|
2021-09-15T02:49:18.000Z
|
2022-03-15T06:00:54.000Z
|
rain/models/posemb_transformer.py
|
qq1418381215/caat
|
1422707bef7a2aeca272fa085f410bff07ced760
|
[
"MIT"
] | 11
|
2021-09-17T03:17:07.000Z
|
2022-02-08T03:12:41.000Z
|
rain/models/posemb_transformer.py
|
qq1418381215/caat
|
1422707bef7a2aeca272fa085f410bff07ced760
|
[
"MIT"
] | 2
|
2021-11-06T19:22:29.000Z
|
2022-03-24T11:56:11.000Z
|
import torch
import os
from torch import Tensor
import torch.nn as nn
from fairseq import options, utils, checkpoint_utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
transformer,
FairseqLanguageModel,
register_model,
register_model_architecture,
FairseqEncoder, FairseqIncrementalDecoder,
BaseFairseqModel, FairseqEncoderDecoderModel
)
from rain.layers.rand_pos import PositionalEmbedding
from .speech_transformer import SpeechTransformerModelConfig
@register_model("randpos_transformer", dataclass=SpeechTransformerModelConfig)
class RandposTransformer(transformer.TransformerModel):
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
if getattr(args,"max_text_positions", None) is None:
args.max_text_positions= 1024
args.max_source_positions = args.max_text_positions
args.max_target_positions = args.max_text_positions
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
model = transformer.TransformerEncoder(args, src_dict, embed_tokens)
embed_dim= embed_tokens.embedding_dim
if model.embed_positions is not None and args.rand_pos_encoder >0:
model.embed_positions= PositionalEmbedding(
model.max_source_positions,
embed_dim,
model.padding_idx,
rand_max = args.rand_pos_encoder,
learned=args.decoder_learned_pos,
)
return model
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
model = transformer.TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
if model.embed_positions is not None and args.rand_pos_decoder >0:
model.embed_positions= PositionalEmbedding(
model.max_target_positions,
model.embed_dim,
model.padding_idx,
rand_max = args.rand_pos_decoder,
learned=args.decoder_learned_pos,
)
return model
@register_model_architecture("randpos_transformer", "randpos_transformer2")
def randpos_transformer(args):
args.rand_pos_encoder= getattr(args, "rand_pos_encoder", 30)
args.rand_pos_decoder= getattr(args, "rand_pos_decoder", 30)
transformer.base_architecture(args)
@register_model_architecture("randpos_transformer", "randpos_transformer_small")
def randpos_transformer_small(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
args.dropout = getattr(args, "dropout", 0.1)
randpos_transformer(args)
| 42.5625
| 102
| 0.680092
| 540
| 4,767
| 5.688889
| 0.209259
| 0.041667
| 0.041667
| 0.030924
| 0.374349
| 0.244466
| 0.178385
| 0.122396
| 0.122396
| 0.122396
| 0
| 0.008071
| 0.246276
| 4,767
| 111
| 103
| 42.945946
| 0.846925
| 0.016782
| 0
| 0.171717
| 0
| 0.010101
| 0.10831
| 0.033753
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050505
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1946122a44cafe21fe0a3f27b222402b8e3d88b9
| 6,345
|
py
|
Python
|
ensembl_map/symbol.py
|
mattdoug604/ensembl_map
|
5edb8a48943df4b53effe3cd7ddf4d461fdd4bae
|
[
"MIT"
] | null | null | null |
ensembl_map/symbol.py
|
mattdoug604/ensembl_map
|
5edb8a48943df4b53effe3cd7ddf4d461fdd4bae
|
[
"MIT"
] | 1
|
2020-03-24T18:20:15.000Z
|
2020-03-25T22:56:06.000Z
|
ensembl_map/symbol.py
|
mattdoug604/ensembl_map
|
5edb8a48943df4b53effe3cd7ddf4d461fdd4bae
|
[
"MIT"
] | null | null | null |
from .ensembl import Ensembl
from .util import is_ensembl_id
##########
## Exon ##
##########
def get_exons(feature, feature_type):
exons = []
for exon_id in get_exon_ids(feature, feature_type):
exons.append(_query(exon_id, "exon", Ensembl().data.exon_by_id))
return exons
def get_exon_ids(feature, feature_type):
if is_ensembl_id(feature):
exon_ids = _get_exon_ids_by_id(feature, feature_type)
else:
exon_ids = _get_exon_ids_by_name(feature, feature_type)
if exon_ids and not isinstance(exon_ids, list):
exon_ids = [exon_ids]
return sorted(exon_ids)
def _get_exon_ids_by_id(feature, feature_type):
if feature_type == "cds" or feature_type == "transcript":
return _query(feature, feature_type, Ensembl().data.exon_ids_of_transcript_id)
elif feature_type == "exon":
return feature
elif feature_type == "gene":
return _query(feature, feature_type, Ensembl().data.exon_ids_of_gene_id)
elif feature_type == "protein":
exon_ids = []
for transcript_id in get_transcript_ids(feature, "protein"):
exon_ids.extend(_get_exon_ids_by_id(transcript_id, "transcript"))
return exon_ids
else:
raise TypeError(f"Cannot get exon IDs from (ID={feature}, type={feature_type})")
def _get_exon_ids_by_name(feature, feature_type):
if feature_type == "cds" or feature_type == "transcript":
return _query(feature, "transcript", Ensembl().data.exon_ids_of_transcript_name)
elif feature_type == "gene":
return _query(feature, feature_type, Ensembl().data.exon_ids_of_gene_name)
else:
raise TypeError(f"Cannot get exon IDs from (name={feature}, type={feature_type})")
##########
## Gene ##
##########
def get_genes(feature, feature_type):
genes = []
for gene_id in get_gene_ids(feature, feature_type):
genes.append(_query(gene_id, "gene", Ensembl().data.gene_by_id))
return genes
def get_gene_ids(feature, feature_type):
if is_ensembl_id(feature):
gene_ids = _get_gene_ids_by_id(feature, feature_type)
else:
gene_ids = _get_gene_ids_by_name(feature, feature_type)
if gene_ids and not isinstance(gene_ids, list):
gene_ids = [gene_ids]
return sorted(gene_ids)
def _get_gene_ids_by_id(feature, feature_type):
if feature_type == "cds" or feature_type == "transcript":
gene_name = _query(feature, feature_type, Ensembl().data.gene_name_of_transcript_id)
return _gene_name_to_id(gene_name)
elif feature_type == "exon":
gene_name = _query(feature, feature_type, Ensembl().data.gene_name_of_exon_id)
return _gene_name_to_id(gene_name)
elif feature_type == "gene":
return feature
elif feature_type == "protein":
return _query(feature, feature_type, Ensembl().data.gene_id_of_protein_id)
else:
raise TypeError(f"Cannot get gene IDs from (ID={feature}, type={feature_type})")
def _get_gene_ids_by_name(feature, feature_type):
if feature_type == "cds" or feature_type == "transcript":
gene_name = _query(feature, "transcript", Ensembl().data.gene_name_of_transcript_name)
return _gene_name_to_id(gene_name)
elif feature_type == "gene":
return _query(feature, feature_type, Ensembl().data.gene_ids_of_gene_name)
else:
raise TypeError(f"Cannot get gene IDs from (name={feature}, type={feature_type})")
def _gene_name_to_id(gene_name):
return Ensembl().data.gene_ids_of_gene_name(gene_name)
#############
## Protein ##
#############
def get_protein_ids(feature, feature_type):
protein_ids = []
for transcript in get_transcripts(feature, feature_type):
if transcript.protein_id:
protein_ids.append(transcript.protein_id)
return sorted(protein_ids)
#################
## Transcripts ##
#################
def get_transcripts(feature, feature_type):
transcripts = []
for transcript_id in get_transcript_ids(feature, feature_type):
transcripts.append(_query(transcript_id, "transcript", Ensembl().data.transcript_by_id))
return transcripts
def get_transcript_ids(feature, feature_type):
if is_ensembl_id(feature):
transcript_ids = _get_transcript_ids_by_id(feature, feature_type)
else:
transcript_ids = _get_transcript_ids_by_name(feature, feature_type)
if transcript_ids and not isinstance(transcript_ids, list):
transcript_ids = [transcript_ids]
return sorted(transcript_ids)
def _get_transcript_ids_with_exon(feature):
# NOTE: with `pyensembl==1.8.5` calling `transcript_ids_of_exon_ids` does not
# match anything. As a workaround, we can map the exon to its gene then return
# all transcripts of that gene that contain the exon.
transcript_ids = []
exon = _query(feature, "exon", Ensembl().data.exon_by_id)
for transcript in get_transcripts(exon.gene_id, "gene"):
if feature in [i.exon_id for i in transcript.exons]:
transcript_ids.append(transcript.transcript_id)
return transcript_ids
def _get_transcript_ids_by_id(feature, feature_type):
if feature_type == "cds" or feature_type == "transcript":
return feature
elif feature_type == "exon":
return _get_transcript_ids_with_exon(feature)
elif feature_type == "gene":
return _query(feature, feature_type, Ensembl().data.transcript_ids_of_gene_id)
elif feature_type == "protein":
return _query(feature, feature_type, Ensembl().data.transcript_id_of_protein_id)
else:
raise TypeError(f"Cannot get transcript IDs from (ID={feature}, type={feature_type})")
def _get_transcript_ids_by_name(feature, feature_type):
if feature_type == "cds" or feature_type == "transcript":
return _query(feature, "transcript", Ensembl().data.transcript_ids_of_transcript_name)
elif feature_type == "gene":
return _query(feature, feature_type, Ensembl().data.transcript_ids_of_gene_name)
else:
raise TypeError(f"Cannot get transcript IDs from (name={feature}, type={feature_type})")
#####################
## Query functions ##
#####################
def _query(feature, feature_type, func):
try:
return func(feature)
except ValueError:
raise ValueError(f"No match for {feature_type} '{feature}'")
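Hypothetical calls; they require a pyensembl release to be downloaded and indexed by Ensembl():

exon_ids = get_exon_ids('ENSG00000157764', 'gene')  # BRAF, by Ensembl gene ID
transcripts = get_transcripts('KRAS', 'gene')       # by gene symbol
protein_ids = get_protein_ids('KRAS', 'gene')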
| 35.446927
| 96
| 0.695035
| 859
| 6,345
| 4.759022
| 0.083818
| 0.191047
| 0.149706
| 0.063601
| 0.710861
| 0.652887
| 0.565802
| 0.536937
| 0.475049
| 0.364726
| 0
| 0.00058
| 0.18424
| 6,345
| 178
| 97
| 35.646067
| 0.789219
| 0.040032
| 0
| 0.297521
| 0
| 0
| 0.105503
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132231
| false
| 0
| 0.016529
| 0.008264
| 0.380165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
194675cce0a60e3494b3def09e1010cda20f0f00
| 1,113
|
py
|
Python
|
MiGRIDS/InputHandler/readAllTimeSeries.py
|
mmuellerstoffels/GBSTools
|
aebd8aa6667a2284aaa16424f9b9d22ca3a2a375
|
[
"MIT"
] | 8
|
2019-02-18T14:18:55.000Z
|
2022-03-04T12:34:24.000Z
|
MiGRIDS/InputHandler/readAllTimeSeries.py
|
mmuellerstoffels/GBSTools
|
aebd8aa6667a2284aaa16424f9b9d22ca3a2a375
|
[
"MIT"
] | 3
|
2018-09-01T00:30:19.000Z
|
2018-09-01T01:09:50.000Z
|
MiGRIDS/InputHandler/readAllTimeSeries.py
|
acep-uaf/GBSTools
|
aebd8aa6667a2284aaa16424f9b9d22ca3a2a375
|
[
"MIT"
] | 3
|
2019-06-10T19:49:22.000Z
|
2021-05-08T08:42:57.000Z
|
from MiGRIDS.InputHandler.readCsv import readCsv
def readAllTimeSeries(inputDict):
'''
Cycles through a list of files in the AVEC format and imports them into a single dataframe.
:param inputDict:
:return: pandas.DataFrame with data from all input files.
'''
df = None
for i in range(len(inputDict['fileNames'])):
print(inputDict['fileNames'][i])  # for each data file
inputDict['fileName'] = inputDict['fileNames'][i]
if i == 0: # read data file into a new dataframe if first iteration
df = readCsv(inputDict)
else: # otherwise append
df2 = readCsv(inputDict) # the new file
# get intersection of columns,
df2Col = df2.columns
dfCol = df.columns
dfNewCol = [val for val in dfCol if val in df2Col]
# resize dataframes to only contain columns contained in both dataframes
df = df[dfNewCol]
df2 = df2[dfNewCol]
df = df.append(df2) # append
df = df.sort_values('DATE')
return df
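The column-intersection step in isolation on toy frames; DataFrame.append matches the call above but was removed in pandas 2.x:

import pandas as pd

df = pd.DataFrame({'DATE': [1, 2], 'P1': [5, 6], 'P2': [7, 8]})
df2 = pd.DataFrame({'DATE': [3], 'P1': [9]})
shared = [c for c in df.columns if c in df2.columns]  # ['DATE', 'P1']
merged = df[shared].append(df2[shared]).sort_values('DATE')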
| 33.727273
| 95
| 0.591195
| 132
| 1,113
| 4.977273
| 0.522727
| 0.082192
| 0.057839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010724
| 0.329739
| 1,113
| 32
| 96
| 34.78125
| 0.869973
| 0.340521
| 0
| 0
| 0
| 0
| 0.055477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.166667
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19476d50e68179e3181c58bfc67d757bccf5c292
| 6,191
|
py
|
Python
|
aatrn.py
|
kmkurn/uxtspwsd
|
ea4da18cec023d0dc487ee061861e6715edc2e85
|
[
"MIT"
] | null | null | null |
aatrn.py
|
kmkurn/uxtspwsd
|
ea4da18cec023d0dc487ee061861e6715edc2e85
|
[
"MIT"
] | null | null | null |
aatrn.py
|
kmkurn/uxtspwsd
|
ea4da18cec023d0dc487ee061861e6715edc2e85
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 Kemal Kurniawan
from typing import Optional
import math
from einops import rearrange
from torch import BoolTensor, Tensor
from crf import DepTreeCRF, LinearCRF
def compute_aatrn_loss(
scores: Tensor,
aa_mask: BoolTensor,
mask: Optional[BoolTensor] = None,
projective: bool = False,
multiroot: bool = True,
) -> Tensor:
assert aa_mask.shape == scores.shape
masked_scores = scores.masked_fill(~aa_mask, -1e9)
crf = DepTreeCRF(masked_scores, mask, projective, multiroot)
crf_z = DepTreeCRF(scores, mask, projective, multiroot)
return -crf.log_partitions().sum() + crf_z.log_partitions().sum()
def compute_ambiguous_arcs_mask(
scores: Tensor,
threshold: float = 0.95,
projective: bool = False,
multiroot: bool = True,
is_log_marginals: bool = False,
) -> BoolTensor:
"""If is_log_marginals then scores are assumed to be the log marginals."""
assert scores.dim() == 4
assert 0 <= threshold <= 1
if is_log_marginals:
return _compute_ambiguous_arcs_mask_from_log_marginals(
scores, threshold, projective, multiroot
)
return _compute_ambiguous_arcs_mask(scores, threshold, projective, multiroot)
def compute_ambiguous_tag_pairs_mask(
scores: Tensor, threshold: float = 0.95, is_log_marginals: bool = False
) -> BoolTensor:
if is_log_marginals:
return _compute_ambiguous_tag_pairs_mask_from_log_marginals(scores, threshold)
return _compute_ambiguous_tag_pairs_mask(scores, threshold)
def _compute_ambiguous_arcs_mask(
scores, threshold, projective, multiroot, include_max_tree=True
):
_, slen, _, n_types = scores.shape
crf = DepTreeCRF(scores, projective=projective, multiroot=multiroot)
marginals = crf.marginals()
# select high-prob arcs until their cumulative probability exceeds threshold
marginals = rearrange(marginals, "bsz hlen dlen ntypes -> bsz dlen (hlen ntypes)")
marginals, orig_indices = marginals.sort(dim=2, descending=True)
arc_mask = marginals.cumsum(dim=2) < threshold
# mark the arc that makes the cum sum exceeds threshold
last_idx = arc_mask.long().sum(dim=2, keepdim=True).clamp(max=slen * n_types - 1)
arc_mask = arc_mask.scatter(2, last_idx, True)
# restore the arc_mask order and shape
_, restore_indices = orig_indices.sort(dim=2)
arc_mask = arc_mask.gather(2, restore_indices)
if include_max_tree:
# ensure maximum scoring tree is selected
# each shape: (bsz, slen)
best_heads, best_types = crf.argmax()
best_idx = best_heads * n_types + best_types
arc_mask = arc_mask.scatter(2, best_idx.unsqueeze(2), True)
arc_mask = rearrange(arc_mask, "bsz dlen (hlen ntypes) -> bsz hlen dlen ntypes", hlen=slen)
return arc_mask
def _compute_ambiguous_arcs_mask_from_log_marginals(
log_marginals, threshold, projective, multiroot
):
_, slen, _, n_types = log_marginals.shape
# select high-prob arcs until their cumulative probability exceeds threshold
log_marginals = rearrange(log_marginals, "bsz hlen dlen ntypes -> bsz dlen (hlen ntypes)")
log_marginals, orig_indices = log_marginals.sort(dim=2, descending=True)
arc_mask = _logcumsumexp(log_marginals, dim=2) < math.log(threshold)
# mark the arc that makes the cum sum exceeds threshold
last_idx = arc_mask.long().sum(dim=2, keepdim=True).clamp(max=slen * n_types - 1)
arc_mask = arc_mask.scatter(2, last_idx, True)
# restore the arc_mask order and shape
_, restore_indices = orig_indices.sort(dim=2)
arc_mask = arc_mask.gather(2, restore_indices)
arc_mask = rearrange(arc_mask, "bsz dlen (hlen ntypes) -> bsz hlen dlen ntypes", hlen=slen)
return arc_mask
def _compute_ambiguous_tag_pairs_mask(
scores: Tensor, threshold: float = 0.95, include_max_tags: bool = True
) -> BoolTensor:
bsz, slen, n_next_tags, n_tags = scores.shape
crf = LinearCRF(scores)
margs = crf.marginals()
# select high prob tag pairs until their cumulative probability exceeds threshold
margs = rearrange(margs, "bsz slen nntags ntags -> bsz slen (nntags ntags)")
margs, orig_indices = margs.sort(dim=2, descending=True)
tp_mask = margs.cumsum(dim=2) < threshold
# select the tag pairs that make the cum sum exceeds threshold
last_idx = tp_mask.long().sum(dim=2, keepdim=True).clamp(max=n_next_tags * n_tags - 1)
tp_mask = tp_mask.scatter(2, last_idx, True)
# restore the order and shape
_, restore_indices = orig_indices.sort(dim=2)
tp_mask = tp_mask.gather(2, restore_indices)
if include_max_tags:
best_tags = crf.argmax()
assert best_tags.shape == (bsz, slen + 1)
best_idx = best_tags[:, 1:] * n_tags + best_tags[:, :-1]
assert best_idx.shape == (bsz, slen)
tp_mask = tp_mask.scatter(2, best_idx.unsqueeze(2), True)
tp_mask = rearrange(
tp_mask, "bsz slen (nntags ntags) -> bsz slen nntags ntags", nntags=n_next_tags
)
return tp_mask # type: ignore
def _compute_ambiguous_tag_pairs_mask_from_log_marginals(
log_marginals: Tensor, threshold: float = 0.95
) -> BoolTensor:
_, _, n_next_tags, n_tags = log_marginals.shape
# select high prob tag pairs until their cumulative probability exceeds threshold
log_margs = rearrange(log_marginals, "bsz slen nntags ntags -> bsz slen (nntags ntags)")
log_margs, orig_indices = log_margs.sort(dim=2, descending=True)
tp_mask = _logcumsumexp(log_margs, dim=2) < math.log(threshold)
# select the tag pairs that make the cum sum exceeds threshold
last_idx = tp_mask.long().sum(dim=2, keepdim=True).clamp(max=n_next_tags * n_tags - 1)
tp_mask = tp_mask.scatter(2, last_idx, True)
# restore the order and shape
_, restore_indices = orig_indices.sort(dim=2)
tp_mask = tp_mask.gather(2, restore_indices)
tp_mask = rearrange(
tp_mask, "bsz slen (nntags ntags) -> bsz slen nntags ntags", nntags=n_next_tags
)
return tp_mask # type: ignore
def _logcumsumexp(x: Tensor, dim: int = -1) -> Tensor:
    # subtract the per-slice maximum before exponentiating for numerical stability;
    # named x_max to avoid shadowing the builtin max
    x_max = x.max(dim, keepdim=True)[0]
    return (x - x_max).exp().cumsum(dim).log() + x_max
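The _logcumsumexp helper above is the standard max-subtraction trick for a numerically stable cumulative log-sum-exp. A toy sanity check (made-up values, not from the original file) is sketched below; recent PyTorch releases also ship a built-in torch.logcumsumexp.

import math
import torch

# cumulative probabilities 0.5, 0.8, 0.95, 1.0 in log space
logp = torch.tensor([[0.5, 0.3, 0.15, 0.05]]).log()
m = logp.max(dim=-1, keepdim=True)[0]
stable = (logp - m).exp().cumsum(dim=-1).log() + m
naive = logp.exp().cumsum(dim=-1).log()
assert torch.allclose(stable, naive)
assert math.isclose(stable[0, -1].exp().item(), 1.0, rel_tol=1e-5)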
| 37.295181
| 95
| 0.709255
| 876
| 6,191
| 4.769406
| 0.139269
| 0.03686
| 0.015318
| 0.034466
| 0.689086
| 0.664433
| 0.606032
| 0.574677
| 0.453088
| 0.432504
| 0
| 0.011621
| 0.19383
| 6,191
| 165
| 96
| 37.521212
| 0.825486
| 0.139557
| 0
| 0.363636
| 0
| 0
| 0.070877
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.072727
| false
| 0
| 0.045455
| 0
| 0.209091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
194979bac6f323e9a28bd3fab05ed2877e60ddea
| 605
|
py
|
Python
|
CE_to_AE_enemy_converter.py
|
Plouni/mari0_se_ce_to_ae_level_converter
|
9aa0d0ebffac4df1b5d541ff003bd9abeb187a0a
|
[
"MIT"
] | 1
|
2022-02-03T23:07:20.000Z
|
2022-02-03T23:07:20.000Z
|
CE_to_AE_enemy_converter.py
|
Plouni/mari0_se_ce_to_ae_level_converter
|
9aa0d0ebffac4df1b5d541ff003bd9abeb187a0a
|
[
"MIT"
] | null | null | null |
CE_to_AE_enemy_converter.py
|
Plouni/mari0_se_ce_to_ae_level_converter
|
9aa0d0ebffac4df1b5d541ff003bd9abeb187a0a
|
[
"MIT"
] | null | null | null |
import os

cwd = os.getcwd()
list_enemy = [file for file in os.listdir(cwd) if file.endswith('.json')]
for enemy in list_enemy:
    try:
        with open(os.path.join(cwd, enemy), 'r') as f:
            enemy_txt = f.read()
        # rename CE-style lowercase keys to their AE camelCase equivalents
        enemy_txt = enemy_txt.replace('offsetx', 'offsetX').replace('offsety', 'offsetY').replace('quadcenterx', 'quadcenterX').replace('quadcentery', 'quadcenterY').replace('quadcount', 'quadCount')
        with open(os.path.join(cwd, enemy), 'w') as f:
            f.write(enemy_txt)
    except OSError as e:
        # report the file name (enemy_txt may be unbound if the read itself failed)
        print("Error for enemy:", enemy, e)
| 26.304348
| 194
| 0.591736
| 77
| 605
| 4.558442
| 0.428571
| 0.11396
| 0.062678
| 0.091168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002188
| 0.244628
| 605
| 22
| 195
| 27.5
| 0.765864
| 0
| 0
| 0
| 0
| 0
| 0.196694
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
195261959efe1d29efc067b8292d053eeea3aa60
| 1,639
|
py
|
Python
|
Chapter05/non-model-view_code.py
|
trappn/Mastering-GUI-Programming-with-Python
|
14392c06dd3b9cf655420d09853bce6bfe8fe16d
|
[
"MIT"
] | 138
|
2018-12-06T15:48:07.000Z
|
2022-03-28T12:23:12.000Z
|
Chapter05/non-model-view_code.py
|
thema27/Mastering-GUI-Programming-with-Python
|
66f33ff6c07b7e22a396a982a5502bd93c20d785
|
[
"MIT"
] | 16
|
2019-11-21T08:17:42.000Z
|
2020-08-19T06:56:48.000Z
|
Chapter05/non-model-view_code.py
|
thema27/Mastering-GUI-Programming-with-Python
|
66f33ff6c07b7e22a396a982a5502bd93c20d785
|
[
"MIT"
] | 116
|
2018-12-08T18:13:02.000Z
|
2022-03-22T14:30:57.000Z
|
import sys
from os import path
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtGui as qtg
from PyQt5 import QtCore as qtc
class MainWindow(qtw.QMainWindow):
def __init__(self):
"""MainWindow constructor.
This widget will be our main window.
We'll define all the UI components in here.
"""
super().__init__()
# Main UI code goes here
form = qtw.QWidget()
self.setCentralWidget(form)
form.setLayout(qtw.QVBoxLayout())
self.filename = qtw.QLineEdit()
self.filecontent = qtw.QTextEdit()
self.savebutton = qtw.QPushButton(
'Save',
clicked=self.save
)
form.layout().addWidget(self.filename)
form.layout().addWidget(self.filecontent)
form.layout().addWidget(self.savebutton)
# End main UI code
self.show()
def save(self):
filename = self.filename.text()
error = ''
if not filename:
error = 'Filename empty'
elif path.exists(filename):
error = f'Will not overwrite {filename}'
else:
try:
with open(filename, 'w') as fh:
fh.write(self.filecontent.toPlainText())
except Exception as e:
error = f'Cannot write file: {e}'
if error:
qtw.QMessageBox.critical(None, 'Error', error)
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
# we must keep a reference to the MainWindow;
# if it goes out of scope, it will be garbage-collected and destroyed.
mw = MainWindow()
sys.exit(app.exec())
| 27.779661
| 60
| 0.583282
| 191
| 1,639
| 4.921466
| 0.513089
| 0.051064
| 0.047872
| 0.073404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002686
| 0.318487
| 1,639
| 58
| 61
| 28.258621
| 0.838854
| 0.147651
| 0
| 0
| 0
| 0
| 0.060895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.125
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1953a970695d2673fad8c3f0a83d3f344f3fbfa4
| 5,647
|
py
|
Python
|
sandbox/kl_div/kl.py
|
samuelfneumann/RLControl
|
71430b1de2e4262483908932eb44579c2ec8216d
|
[
"Apache-2.0"
] | 9
|
2018-07-30T20:12:47.000Z
|
2021-02-05T17:02:04.000Z
|
sandbox/kl_div/kl.py
|
samuelfneumann/RLControl
|
71430b1de2e4262483908932eb44579c2ec8216d
|
[
"Apache-2.0"
] | 14
|
2020-01-28T22:38:58.000Z
|
2022-02-10T00:11:21.000Z
|
sandbox/kl_div/kl.py
|
samuelfneumann/RLControl
|
71430b1de2e4262483908932eb44579c2ec8216d
|
[
"Apache-2.0"
] | 3
|
2018-08-08T14:52:53.000Z
|
2021-01-23T18:00:05.000Z
|
import numpy as np
import scipy as sp
import scipy.stats
import matplotlib.pyplot as plt
class GaussianMixture1D:
def __init__(self, mixture_probs, means, stds):
self.num_mixtures = len(mixture_probs)
self.mixture_probs = mixture_probs
self.means = means
self.stds = stds
def sample(self, num_samples=1):
mixture_ids = np.random.choice(self.num_mixtures, size=num_samples, p=self.mixture_probs)
result = np.zeros([num_samples])
for sample_idx in range(num_samples):
result[sample_idx] = np.random.normal(
loc=self.means[mixture_ids[sample_idx]],
scale=self.stds[mixture_ids[sample_idx]]
)
return result
def logpdf(self, samples):
mixture_logpdfs = np.zeros([len(samples), self.num_mixtures])
for mixture_idx in range(self.num_mixtures):
mixture_logpdfs[:, mixture_idx] = scipy.stats.norm.logpdf(
samples,
loc=self.means[mixture_idx],
scale=self.stds[mixture_idx]
)
return sp.special.logsumexp(mixture_logpdfs + np.log(self.mixture_probs), axis=1)
def pdf(self, samples):
return np.exp(self.logpdf(samples))
def approx_kl(gmm_1, gmm_2, xs):
ys = gmm_1.pdf(xs) * (gmm_1.logpdf(xs) - gmm_2.logpdf(xs))
return np.trapz(ys, xs)
def minimize_pq(p, xs, q_means, q_stds):
q_mean_best = None
q_std_best = None
kl_best = np.inf
for q_mean in q_means:
for q_std in q_stds:
q = GaussianMixture1D(np.array([1]), np.array([q_mean]), np.array([q_std]))
kl = approx_kl(p, q, xs)
if kl < kl_best:
kl_best = kl
q_mean_best = q_mean
q_std_best = q_std
q_best = GaussianMixture1D(np.array([1]), np.array([q_mean_best]), np.array([q_std_best]))
return q_best, kl_best
def minimize_qp(p, xs, q_means, q_stds):
q_mean_best = None
q_std_best = None
kl_best = np.inf
for q_mean in q_means:
for q_std in q_stds:
q = GaussianMixture1D(np.array([1]), np.array([q_mean]), np.array([q_std]))
kl = approx_kl(q, p, xs)
if kl < kl_best:
kl_best = kl
q_mean_best = q_mean
q_std_best = q_std
q_best = GaussianMixture1D(np.array([1]), np.array([q_mean_best]), np.array([q_std_best]))
return q_best, kl_best
def main():
p_second_means_min = 0
p_second_means_max = 2
num_p_second_means = 5
p_second_mean_list = np.linspace(p_second_means_min, p_second_means_max, num_p_second_means)
print('second mean: {}'.format(p_second_mean_list))
p = [None] * num_p_second_means
q_best_forward = [None] * num_p_second_means
kl_best_forward = [None] * num_p_second_means
q_best_reverse = [None] * num_p_second_means
kl_best_reverse = [None] * num_p_second_means
for p_second_mean_idx, p_second_mean in enumerate(p_second_mean_list):
p_mixture_probs = np.array([0.5, 0.5])
p_means = np.array([0, p_second_mean])
p_stds = np.array([0.2, 0.2])
p[p_second_mean_idx] = GaussianMixture1D(p_mixture_probs, p_means, p_stds)
q_means_min = np.min(p_means) - 1
q_means_max = np.max(p_means) + 1
num_q_means = 100
q_means = np.linspace(q_means_min, q_means_max, num_q_means)
q_stds_min = 0.37
q_stds_max = 5
num_q_stds = 100
q_stds = np.linspace(q_stds_min, q_stds_max, num_q_stds)
trapz_xs_min = np.min(np.append(p_means, q_means_min)) - 3 * np.max(np.append(p_stds, q_stds_max))
# the upper bound should use q_means_max (not q_means_min) so the grid covers every candidate mean
trapz_xs_max = np.max(np.append(p_means, q_means_max)) + 3 * np.max(np.append(p_stds, q_stds_max))
num_trapz_points = 1000
trapz_xs = np.linspace(trapz_xs_min, trapz_xs_max, num_trapz_points)
q_best_forward[p_second_mean_idx], kl_best_forward[p_second_mean_idx] = minimize_pq(
p[p_second_mean_idx], trapz_xs, q_means, q_stds
)
q_best_reverse[p_second_mean_idx], kl_best_reverse[p_second_mean_idx] = minimize_qp(
p[p_second_mean_idx], trapz_xs, q_means, q_stds
)
# plotting
fig, axs = plt.subplots(nrows=1, ncols=num_p_second_means, sharex=True, sharey=True)
# fig.set_size_inches(8, 1.5)
for p_second_mean_idx, p_second_mean in enumerate(p_second_mean_list):
xs_min = -1
xs_max = 4
num_plot_points = 1000
xs = np.linspace(xs_min, xs_max, num_plot_points)
axs[p_second_mean_idx].plot(xs, p[p_second_mean_idx].pdf(xs), label='$p$', color='black')
axs[p_second_mean_idx].plot(xs, q_best_forward[p_second_mean_idx].pdf(xs), label=r'$\mathrm{argmin}_q \,\mathrm{KL}(p || q)$', color='black', linestyle='dashed')
axs[p_second_mean_idx].plot(xs, q_best_reverse[p_second_mean_idx].pdf(xs), label=r'$\mathrm{argmin}_q \,\mathrm{KL}(q || p)$', color='black', linestyle='dotted')
axs[p_second_mean_idx].spines['right'].set_visible(False)
axs[p_second_mean_idx].spines['top'].set_visible(False)
# axs[p_second_mean_idx].set_yticks([])
# axs[p_second_mean_idx].set_xticks([])
axs[p_second_mean_idx].set_title('mean: [0, {}]'.format(p_second_mean))
axs[2].legend(ncol=3, loc='upper center', bbox_to_anchor=(0.5, 0), fontsize='small')
filenames = ['reverse_forward_kl.pdf', 'reverse_forward_kl.png']
for filename in filenames:
fig.savefig(filename, bbox_inches='tight', dpi=200)
print('Saved to {}'.format(filename))
plt.show()
if __name__ == '__main__':
main()
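For reference, approx_kl above is a trapezoid-rule approximation of the forward KL divergence on the grid xs:

\mathrm{KL}(p \,\|\, q) \;=\; \int p(x)\,\bigl(\log p(x) - \log q(x)\bigr)\,dx \;\approx\; \mathrm{trapz}\bigl(p(x_i)\,[\log p(x_i) - \log q(x_i)],\; x_i\bigr)

This is why minimize_pq (forward KL) produces the familiar mass-covering fit and minimize_qp (reverse KL) the mode-seeking one, which is exactly the contrast the dashed and dotted curves in the figure illustrate.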
| 38.155405
| 168
| 0.64583
| 899
| 5,647
| 3.684093
| 0.150167
| 0.084541
| 0.092995
| 0.084541
| 0.452899
| 0.422705
| 0.384662
| 0.306763
| 0.28744
| 0.270531
| 0
| 0.015242
| 0.233221
| 5,647
| 147
| 169
| 38.414966
| 0.749654
| 0.019834
| 0
| 0.243478
| 0
| 0
| 0.042134
| 0.007957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069565
| false
| 0
| 0.034783
| 0.008696
| 0.165217
| 0.017391
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19556d177fed4def9f6818303c33e5aa562c38b8
| 904
|
py
|
Python
|
SortingAlgorithm/shell_sort.py
|
hpf0532/algorithms_demo
|
4f02444ee634295e5cbf8e5624d4e5b65931897d
|
[
"MIT"
] | null | null | null |
SortingAlgorithm/shell_sort.py
|
hpf0532/algorithms_demo
|
4f02444ee634295e5cbf8e5624d4e5b65931897d
|
[
"MIT"
] | null | null | null |
SortingAlgorithm/shell_sort.py
|
hpf0532/algorithms_demo
|
4f02444ee634295e5cbf8e5624d4e5b65931897d
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# author: hpf
# create time: 2020/7/16 21:33
# file: shell_sort.py
# IDE: PyCharm

# Shell sort is a variant of insertion sort, also known as diminishing-increment sort;
# it is a more efficient refinement of straight insertion sort, and it is not stable.
# Named after D. L. Shell, who proposed it in 1959, the method groups records by a fixed
# increment of their indices and runs straight insertion sort within each group; as the
# increment shrinks each group holds more and more keys, and once the increment reaches 1
# the whole list forms a single group and the algorithm terminates.

def shell_sort(alist):
    n = len(alist)
    # initial gap
    gap = n // 2
    while gap > 0:
        # insertion sort within each gap-separated group
        for j in range(gap, n):
            i = j
            # shift alist[j] back along its group while it is out of order;
            # the i >= gap guard keeps alist[i - gap] from wrapping to a negative index
            while i >= gap:
                if alist[i] < alist[i - gap]:
                    alist[i - gap], alist[i] = alist[i], alist[i - gap]
                    i -= gap
                else:
                    break
        # shrink the gap
        gap //= 2

if __name__ == '__main__':
    li = [34, 2, 13, 76, 54, 22, 90, 46, 13]
    print(li)
    shell_sort(li)
    print(li)

# Time complexity
# Best case: depends on the gap sequence
# Worst case: O(n^2)
# Stability: unstable
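For the nine-element demo list above, the gap sequence is 4, 2, 1 (n // 2, halved each pass). A quick check of the expected result (toy assertion exercising the function above):

li = [34, 2, 13, 76, 54, 22, 90, 46, 13]
shell_sort(li)
assert li == [2, 13, 13, 22, 34, 46, 54, 76, 90]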
| 21.52381
| 67
| 0.535398
| 112
| 904
| 4.223214
| 0.625
| 0.07611
| 0.069767
| 0.07611
| 0.095137
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064784
| 0.334071
| 904
| 42
| 68
| 21.52381
| 0.72093
| 0.390487
| 0
| 0.111111
| 0
| 0
| 0.014953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.055556
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1956211e1137a8ec3efab729e58887fd70b0e317
| 7,307
|
py
|
Python
|
data-structures/double-list.py
|
costincaraivan/cs-refresher
|
008fdb2af661310c65f656f017ec34e5df004424
|
[
"MIT"
] | 1
|
2018-06-12T12:00:33.000Z
|
2018-06-12T12:00:33.000Z
|
data-structures/double-list.py
|
costincaraivan/cs-refresher
|
008fdb2af661310c65f656f017ec34e5df004424
|
[
"MIT"
] | null | null | null |
data-structures/double-list.py
|
costincaraivan/cs-refresher
|
008fdb2af661310c65f656f017ec34e5df004424
|
[
"MIT"
] | null | null | null |
# Completely silly exercises, in real life use:
# Python lists: https://docs.python.org/3/tutorial/datastructures.html
import unittest
import logging
logging.basicConfig(level=logging.INFO)
# - DoublyLinkedListNode class.
class DoublyLinkedListNode:
value = None
previousNode = None
nextNode = None
def __init__(self, value, previousNode, nextNode):
self.value = value
self.previousNode = previousNode
self.nextNode = nextNode
def __str__(self):
return str(self.value)
def __eq__(self, other):
return isinstance(other, self.__class__) \
and self.value == other.value
#-##
# - DoublyLinkedList class.
# Search method not included, has its own category.
class DoublyLinkedList:
# - Create. O(1).
def __init__(self):
self.head = None
#-##
# - Delete. O(n) (sort of: garbage collection).
def delete(self):
self.head = None
#-##
# - Insert at start. O(1).
def insert_start(self, element):
tempNode = DoublyLinkedListNode(element, None, self.head)
if self.head is not None:
# keep back-links consistent: the old head must point to the new node
self.head.previousNode = tempNode
self.head = tempNode
#-##
# - Set at start. O(1).
def set_start(self, element):
self.head.value = element
#-##
# - Insert at arbitrary position. O(n).
def insert_position(self, position, element):
if(self.head is None):
self.insert_start(element)
return
tempNode = DoublyLinkedListNode(element, None, None)
current = self.head
count = 0
while((current.nextNode is not None) and (count < position)):
count += 1
current = current.nextNode
tempNode.nextNode = current.nextNode
# the new node is linked in after `current`, so its back-link is `current` itself
tempNode.previousNode = current
if current.nextNode is not None:
current.nextNode.previousNode = tempNode
current.nextNode = tempNode
#-##
# - Set at arbitrary position. O(n).
def set_position(self, position, element):
if(self.head is None):
return
current = self.head
count = 0
while((current.nextNode is not None) and (count < position)):
count += 1
current = current.nextNode
current.value = element
#-##
# - Insert at end. O(n).
def insert_end(self, element):
if(self.head is None):
self.insert_start(element)
return
tempNode = DoublyLinkedListNode(element, None, None)
current = self.head
count = 0
while(current.nextNode is not None):
count += 1
current = current.nextNode
tempNode.previousNode = current
current.nextNode = tempNode
#-##
# - Set at end. O(n).
def set_end(self, element):
if(self.head is None):
return
current = self.head
count = 0
while(current.nextNode is not None):
count += 1
current = current.nextNode
current.value = element
#-##
# - Join. O(n).
def join(self, other):
if(self.head is None):
self.insert_start(other)
return
current = self.head
count = 0
while(current.nextNode is not None):
count += 1
current = current.nextNode
other.head.previousNode = current
current.nextNode = other.head
#-##
# - Utility methods.
def __str__(self):
if(self.head is None):
return ""
listString = str(self.head)
current = self.head.nextNode
while(current is not None):
listString += ", {}".format(str(current))
current = current.nextNode
return listString
def __eq__(self, other):
if(not isinstance(other, self.__class__)):
return False
currentSelf = self.head
currentOther = other.head
while(currentSelf is not None):
if(currentOther is not None):
# Different nodes.
if(currentSelf.value != currentOther.value):
return False
currentSelf = currentSelf.nextNode
currentOther = currentOther.nextNode
# We ran out of nodes in the other list.
elif(currentOther is None):
return False
# We ran out of nodes in the self list.
if(currentOther is not None):
return False
# Full comparison, everything the same.
return True
#-##
#-##
# - TestDoublyLinkedList class.
class TestDoublyLinkedList(unittest.TestCase):
sut = None
def setUp(self):
self.sut = DoublyLinkedList()
# Since we're inserting from the start, the values are reversed.
# So the actual list is [ 1, 2, 3 ].
for i in range(3, 0, -1):
self.sut.insert_start(i)
def test_create(self):
self.assertTrue(hasattr(self, "sut"))
def test_delete(self):
sut = DoublyLinkedList()
sut.head = True
sut.delete()
self.assertEqual(sut.head, None)
def test_insert_start(self):
# Make an expected list of [ 0, 1, 2, 3 ].
expectedList = DoublyLinkedList()
for i in range(3, -1, -1):
expectedList.insert_start(i)
self.sut.insert_start(0)
self.assertEqual(self.sut, expectedList)
def test_set_start(self):
# Make an expected list of [ 0, 2, 3 ].
expectedList = DoublyLinkedList()
for i in range(3, 1, -1):
expectedList.insert_start(i)
expectedList.insert_start(0)
self.sut.set_start(0)
self.assertEqual(self.sut, expectedList)
def test_insert_position(self):
expectedList = DoublyLinkedList()
expectedList.insert_start(3)
expectedList.insert_start(6)
expectedList.insert_start(2)
expectedList.insert_start(1)
self.sut.insert_position(1, 6)
self.assertEqual(self.sut, expectedList)
def test_set_position(self):
expectedList = DoublyLinkedList()
expectedList.insert_start(6)
expectedList.insert_start(2)
expectedList.insert_start(1)
self.sut.set_position(2, 6)
self.assertEqual(self.sut, expectedList)
def test_insert_end(self):
expectedList = DoublyLinkedList()
expectedList.insert_start(6)
expectedList.insert_start(3)
expectedList.insert_start(2)
expectedList.insert_start(1)
self.sut.insert_end(6)
self.assertEqual(self.sut, expectedList)
def test_set_end(self):
expectedList = DoublyLinkedList()
expectedList.insert_start(6)
expectedList.insert_start(2)
expectedList.insert_start(1)
self.sut.set_end(6)
self.assertEqual(self.sut, expectedList)
def test_join(self):
expectedList = DoublyLinkedList()
expectedList.insert_start(5)
expectedList.insert_start(4)
expectedList.insert_start(3)
expectedList.insert_start(2)
expectedList.insert_start(1)
otherList = DoublyLinkedList()
otherList.insert_start(5)
otherList.insert_start(4)
self.sut.join(otherList)
self.assertEqual(self.sut, expectedList)
#-##
if __name__ == "__main__":
unittest.main(verbosity=2)
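As the opening comment of the file says, these classes are exercises; in practice collections.deque already gives O(1) insertion at either end. A quick equivalent of the test fixture above (a sketch using only the standard library):

from collections import deque

d = deque()
# same as the setUp loop: insert from the start, building [1, 2, 3]
for i in range(3, 0, -1):
    d.appendleft(i)
d.appendleft(0)  # the test_insert_start scenario
assert list(d) == [0, 1, 2, 3]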
| 26.765568
| 72
| 0.596141
| 801
| 7,307
| 5.319601
| 0.154806
| 0.080028
| 0.118751
| 0.036142
| 0.547524
| 0.47172
| 0.437691
| 0.411406
| 0.37057
| 0.306736
| 0
| 0.012621
| 0.306008
| 7,307
| 272
| 73
| 26.863971
| 0.827647
| 0.11359
| 0
| 0.505682
| 0
| 0
| 0.002339
| 0
| 0
| 0
| 0
| 0
| 0.051136
| 1
| 0.136364
| false
| 0
| 0.011364
| 0.011364
| 0.267045
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
195cbcd5dccdd7c73a9db51970b9798eb35a32b9
| 466
|
py
|
Python
|
Python Programs/guess the number.py
|
sayanpoddar123/RTU-DigitalLibrary
|
658500ce3ee089d622cea0f6b49dfb8b485d0be6
|
[
"MIT"
] | null | null | null |
Python Programs/guess the number.py
|
sayanpoddar123/RTU-DigitalLibrary
|
658500ce3ee089d622cea0f6b49dfb8b485d0be6
|
[
"MIT"
] | null | null | null |
Python Programs/guess the number.py
|
sayanpoddar123/RTU-DigitalLibrary
|
658500ce3ee089d622cea0f6b49dfb8b485d0be6
|
[
"MIT"
] | null | null | null |
# Guess-the-number program
n = 18
a = 0
y = 1
print("Number of guesses is limited to only 4 times")
while a <= 3:
    z = int(input("Enter your choice="))
    if z > n:
        print("Too high, please guess a lower number")
        a += 1
    elif z < n:
        print("Too low, please guess a higher number")
        a += 1
    else:
        print("You win")
        print("You took", y, "guesses to finish the game")
        break
    print(4 - y, "guesses left")
    y += 1
if a > 3:
    print("Game over")
| 16.642857
| 64
| 0.555794
| 76
| 466
| 3.407895
| 0.539474
| 0.015444
| 0.11583
| 0.100386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034375
| 0.313305
| 466
| 27
| 65
| 17.259259
| 0.775
| 0.027897
| 0
| 0.1
| 0
| 0
| 0.412027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.35
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
195cc099346d6a0faa355accfa24ab213925cda9
| 8,019
|
py
|
Python
|
src/soundsystem.py
|
WinterLicht/Chaos-Projectile
|
3fffb788b241b7baa4247c1e630d83a7210ddc2e
|
[
"CC-BY-4.0"
] | 59
|
2015-03-25T21:29:06.000Z
|
2022-01-17T22:48:05.000Z
|
src/soundsystem.py
|
WinterLicht/Chaos-Projectile
|
3fffb788b241b7baa4247c1e630d83a7210ddc2e
|
[
"CC-BY-4.0"
] | 11
|
2015-07-07T07:10:42.000Z
|
2021-11-21T12:47:42.000Z
|
src/soundsystem.py
|
WinterLicht/Chaos-Projectile
|
3fffb788b241b7baa4247c1e630d83a7210ddc2e
|
[
"CC-BY-4.0"
] | 19
|
2015-07-13T06:44:44.000Z
|
2022-02-05T03:09:27.000Z
|
"""
.. module:: soundsystem
:Platform: Unix, Windows
:Synopsis: Sound system
"""
import os
import pygame
import events
import ai
class SoundSystem(object):
"""Render system.
:Attributes:
- *evManager*: event manager
- *world*: game world
- *screen*: game screen
"""
def __init__(self, event_manager, world):
"""
:param event_manager: event Manager
:type event_manager: events.EventManager
:param world: game world
:type world: gameWorld.GameWorld
"""
self.world = world
self.event_manager = event_manager
self.event_manager.register_listener(self)
#Load all sounds
filename = self.get_sound_file('BG_loop1.ogg')
self.bg_no_enemy = pygame.mixer.Sound(filename)
self.bg_no_enemy.play(-1)
filename = self.get_sound_file('BG_loop2.ogg')
self.bg_enemy_near = pygame.mixer.Sound(filename)
filename = self.get_sound_file('BG_loop3.ogg')
self.bg_boss = pygame.mixer.Sound(filename)
self.bg_enemy_near_running = False
self.bg_boss_running = False
#Player Sounds
filename = self.get_sound_file('Shot1.ogg')
self.shot_1_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Shot2.ogg')
self.shot_2_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Shot 3.ogg')
self.shot_3_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Hit Female 1.ogg')
self.hit_female_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Jump.ogg')
self.jump_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Landing.ogg')
self.landing_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Aim Short.ogg')
self.aim_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Footsteps_loop.ogg')
self.footsteps_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Collect Item.ogg')
self.collect_item_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Portal.ogg')
self.portal_enter_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Appear and Fly 1.ogg')
self.fly_appear_1_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Appear and Fly 2.ogg')
self.fly_appear_2_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Appear and Fly 3.ogg')
self.fly_appear_3_sound = pygame.mixer.Sound(filename)
filename = self.get_sound_file('Disappear.ogg')
self.fly_disappear_sound = pygame.mixer.Sound(filename)
#Helper
self.helper_player_jump = True
self.helper_player_walk = False
self.player_footsteps_playing = False
#Green Curse sounds
filename = self.get_sound_file('Die.ogg')
self.player_dies_sound = pygame.mixer.Sound(filename)
def notify(self, event):
"""Notify, when event occurs and stop CPUSpinner when it's quit event.
:param event: occurred event
:type event: events.Event
"""
fade_out_time = 1200
if isinstance(event, events.TickEvent):
pass
elif isinstance(event, events.EnemyNear):
if isinstance(self.world.ai[event.entity_ID], ai.AI_Boss_2):
if not self.bg_boss_running:
self.bg_no_enemy.fadeout(fade_out_time)
self.bg_boss.play(-1)
self.bg_boss_running = True
if self.bg_enemy_near_running:
self.bg_enemy_near.fadeout(fade_out_time)
self.bg_enemy_near_running = False
else:
if not self.bg_enemy_near_running:
self.bg_no_enemy.fadeout(fade_out_time)
self.bg_enemy_near.play(-1)
self.bg_enemy_near_running = True
if self.bg_boss_running:
self.bg_boss.fadeout(fade_out_time)
self.bg_boss_running = False
elif isinstance(event, events.NoEnemysNear):
if self.bg_enemy_near_running:
self.bg_enemy_near.fadeout(fade_out_time)
self.bg_no_enemy.play(-1)
self.bg_enemy_near_running = False
if self.bg_boss_running:
self.bg_boss.fadeout(fade_out_time)
self.bg_no_enemy.play(-1)
self.bg_boss_running = False
elif isinstance(event, events.EntityAttacks):
entity_ID = event.entity_ID
if entity_ID == self.world.player:
self.footsteps_sound.stop()
random_nr = ai.random_(3)
if random_nr == 0:
self.shot_1_sound.play()
elif random_nr == 1:
self.shot_2_sound.play()
else:
self.shot_3_sound.play()
elif entity_ID in self.world.ai:
ai_ = self.world.ai[entity_ID]
if isinstance(ai_, ai.Level1_curse):
random_nr = ai.random_(3)
if random_nr == 0:
self.fly_appear_1_sound.play()
elif random_nr == 1:
self.fly_appear_2_sound.play()
else:
self.fly_appear_3_sound.play()
elif isinstance(event, events.EntityStunned):
entity_ID = event.entity_ID
if entity_ID == self.world.player:
self.footsteps_sound.stop()
self.hit_female_sound.play()
elif isinstance(event, events.EntityJump):
entity_ID = event.entity_ID
if entity_ID == self.world.player and self.helper_player_jump:
self.footsteps_sound.stop()
self.player_footsteps_playing = False
self.jump_sound.play()
self.helper_player_jump = False
elif isinstance(event, events.EntityGrounded):
entity_ID = event.entity_ID
if entity_ID == self.world.player and not self.helper_player_jump:
self.landing_sound.play()
self.helper_player_jump = True
elif isinstance(event, events.PlayerAims):
self.aim_sound.play()
elif isinstance(event, (events.EntityMovesRight, events.EntityMovesLeft)):
player_vel_x = self.world.velocity[self.world.player].x
if player_vel_x == 0:
self.footsteps_sound.stop()
self.player_footsteps_playing = False
else:
if not self.player_footsteps_playing and self.helper_player_jump:
self.footsteps_sound.play(-1)
self.player_footsteps_playing = True
elif isinstance(event, (events.EntityStopMovingRight, events.EntityStopMovingLeft)):
entity_ID = event.entity_ID
if entity_ID == self.world.player:
self.footsteps_sound.stop()
self.player_footsteps_playing = False
elif isinstance(event, events.PortalEntered):
self.portal_enter_sound.play()
elif isinstance(event, events.CollectedItem):
self.collect_item_sound.play()
elif isinstance(event, events.EntityDies):
entity_ID = event.entity_ID
if entity_ID == self.world.player:
self.player_dies_sound.play()
def get_sound_file(self, filename):
"""Simple helper function to merge the file name and the directory name.
:param filename: file name of TMX file
:type filename: string
"""
return os.path.join('data', os.path.join('sounds', filename) )
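The background-music handling in notify above boils down to a crossfade pattern: fade the current loop out while starting the next one at full volume. A stripped-down sketch of that pattern (the .ogg file names here are hypothetical):

import pygame

pygame.mixer.init()
calm = pygame.mixer.Sound('calm_loop.ogg')      # hypothetical asset
combat = pygame.mixer.Sound('combat_loop.ogg')  # hypothetical asset

calm.play(-1)       # loop the calm track indefinitely
# ...later, when an enemy comes near:
calm.fadeout(1200)  # fade the old loop out over 1.2 s
combat.play(-1)     # start the new loop immediately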
| 43.819672
| 111
| 0.60419
| 959
| 8,019
| 4.790407
| 0.148071
| 0.036569
| 0.04963
| 0.078363
| 0.623422
| 0.535263
| 0.428603
| 0.415107
| 0.386156
| 0.328907
| 0
| 0.007389
| 0.308018
| 8,019
| 183
| 112
| 43.819672
| 0.820508
| 0.07981
| 0
| 0.344828
| 0
| 0
| 0.034091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02069
| false
| 0.006897
| 0.027586
| 0
| 0.062069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
195d27bed09f6f47effd9b2ab9128a9b8b6d2db2
| 2,227
|
py
|
Python
|
hammer/django_bulk/bulk_create_test.py
|
awolfly9/hammer
|
03add3037461154fd764bb3340e68393e16f015f
|
[
"MIT"
] | null | null | null |
hammer/django_bulk/bulk_create_test.py
|
awolfly9/hammer
|
03add3037461154fd764bb3340e68393e16f015f
|
[
"MIT"
] | null | null | null |
hammer/django_bulk/bulk_create_test.py
|
awolfly9/hammer
|
03add3037461154fd764bb3340e68393e16f015f
|
[
"MIT"
] | null | null | null |
# -*- coding=utf-8 -*-
import django
import os
import sys
import datetime
import random
import time
os.environ['DJANGO_SETTINGS_MODULE'] = 'web.settings'
django.setup()
from web.other.models import BilibiliPlay
from .helper import bulk_create
def test_once_get():
start = time.time()
for i in range(0, 1000):
info = {
'insert_date': datetime.datetime.today(),
'season_id': i,
'pub_time': datetime.datetime.today(),
}
BilibiliPlay.objects.get_or_create(season_id=i, insert_date=datetime.datetime.today(), defaults=info)
print('test_once_get use time:%s' % (time.time() - start))
def test_once_update():
start = time.time()
for i in range(1000, 2000):
info = {
'insert_date': datetime.datetime.today(),
'season_id': i,
'pub_time': datetime.datetime.today(),
}
BilibiliPlay.objects.update_or_create(season_id=i, insert_date=datetime.datetime.today(), defaults=info)
print('test_once_update use time:%s' % (time.time() - start))
def test_default_bulk():
start = time.time()
objs = []
for i in range(2000, 3000):
info = {
'insert_date': datetime.datetime.today(),
'season_id': i,
'pub_time': datetime.datetime.today(),
}
objs.append(BilibiliPlay(**info))
BilibiliPlay.objects.bulk_create(objs)
print('test_default use time:%s' % (time.time() - start))
def test_custom_bulk():
start = time.time()
objs = []
for i in range(3000, 4000):
info = {
'insert_date': datetime.datetime.today(),
'season_id': i,
'pub_time': datetime.datetime.today(),
}
objs.append(BilibiliPlay(**info))
bulk_create(objs)
print('test_custom use time:%s' % (time.time() - start))
if __name__ == '__main__':
BilibiliPlay.objects.all().delete()
# test_once_get()
# test_once_update()
# test_default_bulk()
test_custom_bulk()
'''
'UPDATE `other_bilibili_play` SET `name` = (CASE `id` WHEN %s THEN %s WHEN %s THEN %s WHEN %s THEN %s WHEN %s THEN %s WHEN %s THEN %s ELSE `name` END) WHERE `id` in (%s, %s, %s, %s, %s)'
'''
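When extending the benchmarks above, one knob worth isolating is Django's batch_size argument to bulk_create, which bounds how many rows go into each INSERT (a sketch reusing the BilibiliPlay model from this file):

objs = [
    BilibiliPlay(
        insert_date=datetime.datetime.today(),
        season_id=i,
        pub_time=datetime.datetime.today(),
    )
    for i in range(4000, 5000)
]
# cap each INSERT statement at 500 rows
BilibiliPlay.objects.bulk_create(objs, batch_size=500)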
| 26.511905
| 186
| 0.606646
| 287
| 2,227
| 4.508711
| 0.236934
| 0.123648
| 0.162287
| 0.120556
| 0.646832
| 0.611283
| 0.595054
| 0.55796
| 0.493045
| 0.443586
| 0
| 0.017857
| 0.245622
| 2,227
| 83
| 187
| 26.831325
| 0.752381
| 0.033678
| 0
| 0.421053
| 0
| 0
| 0.130123
| 0.01127
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.140351
| 0
| 0.210526
| 0.070175
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
195f3150d0257121a8dd90bf3f90e35c01b0fa1c
| 1,921
|
py
|
Python
|
misago/threads/tests/test_thread_poll_api.py
|
HenryChenV/iJiangNan
|
68f156d264014939f0302222e16e3125119dd3e3
|
[
"MIT"
] | 1
|
2017-07-25T03:04:36.000Z
|
2017-07-25T03:04:36.000Z
|
misago/threads/tests/test_thread_poll_api.py
|
HenryChenV/iJiangNan
|
68f156d264014939f0302222e16e3125119dd3e3
|
[
"MIT"
] | null | null | null |
misago/threads/tests/test_thread_poll_api.py
|
HenryChenV/iJiangNan
|
68f156d264014939f0302222e16e3125119dd3e3
|
[
"MIT"
] | null | null | null |
import json
from django.urls import reverse
from misago.acl.testutils import override_acl
from misago.categories.models import Category
from misago.threads import testutils
from misago.users.testutils import AuthenticatedUserTestCase
class ThreadPollApiTestCase(AuthenticatedUserTestCase):
def setUp(self):
super(ThreadPollApiTestCase, self).setUp()
self.category = Category.objects.get(slug='first-category')
self.thread = testutils.post_thread(self.category, poster=self.user)
self.override_acl()
self.api_link = reverse(
'misago:api:thread-poll-list', kwargs={
'thread_pk': self.thread.pk,
}
)
def post(self, url, data=None):
return self.client.post(url, json.dumps(data or {}), content_type='application/json')
def put(self, url, data=None):
return self.client.put(url, json.dumps(data or {}), content_type='application/json')
def override_acl(self, user=None, category=None):
new_acl = self.user.acl_cache
new_acl['categories'][self.category.pk].update({
'can_see': 1,
'can_browse': 1,
'can_close_threads': 0,
})
new_acl.update({
'can_start_polls': 1,
'can_edit_polls': 1,
'can_delete_polls': 1,
'poll_edit_time': 0,
'can_always_see_poll_voters': 0,
})
if user:
new_acl.update(user)
if category:
new_acl['categories'][self.category.pk].update(category)
override_acl(self.user, new_acl)
def mock_poll(self):
self.poll = self.thread.poll = testutils.post_poll(self.thread, self.user)
self.api_link = reverse(
'misago:api:thread-poll-detail',
kwargs={
'thread_pk': self.thread.pk,
'pk': self.poll.pk,
}
)
| 30.015625
| 93
| 0.605934
| 228
| 1,921
| 4.947368
| 0.289474
| 0.031915
| 0.039894
| 0.031915
| 0.31383
| 0.31383
| 0.267731
| 0.148936
| 0.083333
| 0.083333
| 0
| 0.005764
| 0.27746
| 1,921
| 63
| 94
| 30.492063
| 0.806916
| 0
| 0
| 0.122449
| 0
| 0
| 0.135867
| 0.042686
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.122449
| 0.040816
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
196317379bcca4ea114372256f94af6d980d0618
| 9,717
|
py
|
Python
|
demisto_sdk/commands/run_test_playbook/test_playbook_runner.py
|
SergeBakharev/demisto-sdk
|
17d00942a1bd33039a8aba9ddffecfd81008d275
|
[
"MIT"
] | null | null | null |
demisto_sdk/commands/run_test_playbook/test_playbook_runner.py
|
SergeBakharev/demisto-sdk
|
17d00942a1bd33039a8aba9ddffecfd81008d275
|
[
"MIT"
] | null | null | null |
demisto_sdk/commands/run_test_playbook/test_playbook_runner.py
|
SergeBakharev/demisto-sdk
|
17d00942a1bd33039a8aba9ddffecfd81008d275
|
[
"MIT"
] | null | null | null |
import os
import re
import time
import demisto_client
from demisto_client.demisto_api.rest import ApiException
from demisto_sdk.commands.common.tools import LOG_COLORS, get_yaml, print_color
from demisto_sdk.commands.upload.uploader import Uploader
SUCCESS_RETURN_CODE = 0
ERROR_RETURN_CODE = 1
ENTRY_TYPE_ERROR = 4
class TestPlaybookRunner:
"""TestPlaybookRunner is a class that's designed to run a test playbook in a given XSOAR instance.
Attributes:
test_playbook_path (str): the input of the test playbook to run
all (bool): whether to run every test playbook in the repo.
should_wait (bool): whether to wait until the test playbook run is completed or not.
timeout (int): timeout for the command. The test playbook will continue to run in your xsoar instance.
demisto_client (DefaultApi): Demisto-SDK client object.
base_link_to_workplan (str): the base link to see the full test playbook run in your xsoar instance.
"""
def __init__(self, test_playbook_path: str = '', all: bool = False, wait: bool = True, timeout: int = 90,
insecure: bool = False):
self.test_playbook_path = test_playbook_path
self.all_test_playbooks = all
self.should_wait = wait
self.timeout = timeout
# we set to None so demisto_client will use env var DEMISTO_VERIFY_SSL
self.verify = (not insecure) if insecure else None
self.demisto_client = demisto_client.configure(verify_ssl=self.verify)
self.base_link_to_workplan = self.get_base_link_to_workplan()
def manage_and_run_test_playbooks(self):
"""
Manages all run-test-playbook command flows
:return: The exit code of the flow
"""
status_code = SUCCESS_RETURN_CODE
test_playbooks: list = []
if not self.validate_tpb_path():
status_code = ERROR_RETURN_CODE
test_playbooks.extend(self.collect_all_tpb_files_paths())
for tpb in test_playbooks:
self.upload_tpb(tpb_file=tpb)
test_playbook_id = self.get_test_playbook_id(tpb)
status_code = self.run_test_playbook_by_id(test_playbook_id)
return status_code
def collect_all_tpb_files_paths(self):
test_playbooks: list = []
# Run all repo test playbooks
if self.all_test_playbooks:
test_playbooks.extend(self.get_all_test_playbooks())
# Run all pack test playbooks
elif os.path.isdir(self.test_playbook_path):
test_playbooks.extend(self.get_test_playbooks_from_folder(self.test_playbook_path))
# Run specific test playbook
elif os.path.isfile(self.test_playbook_path):
test_playbooks.append(self.test_playbook_path)
return test_playbooks
def validate_tpb_path(self) -> bool:
"""
Verifies that the input path configuration given by the user is correct
:return: The verification result
"""
is_path_valid = True
if not self.all_test_playbooks:
if not self.test_playbook_path:
print_color("Error: Missing option '-tpb' / '--test-playbook-path'.", LOG_COLORS.RED)
is_path_valid = False
elif not os.path.exists(self.test_playbook_path):
print_color(f'Error: Given input path: {self.test_playbook_path} does not exist', LOG_COLORS.RED)
is_path_valid = False
return is_path_valid
def get_test_playbook_id(self, file_path):
"""
Get test playbook ID from test playbook file name
"""
test_playbook_data = get_yaml(file_path=file_path)
return test_playbook_data.get('id')
def get_test_playbooks_from_folder(self, folder_path):
"""
Get all pack test playbooks
"""
full_path = f'{folder_path}/TestPlaybooks'
list_test_playbooks_files = os.listdir(full_path)
list_test_playbooks_files = [f'{full_path}/{tpb}' for tpb in list_test_playbooks_files]
return list_test_playbooks_files
def get_all_test_playbooks(self):
"""
Get all the repo test playbooks
"""
tpb_list: list = []
packs_list = os.listdir('Packs')
for pack in packs_list:
if os.path.isdir(f'Packs/{pack}'):
tpb_list.extend(self.get_test_playbooks_from_folder(f'Packs/{pack}'))
return tpb_list
def run_test_playbook_by_id(self, test_playbook_id):
"""Run a test playbook in your xsoar instance.
Returns:
int. 0 in success, 1 in a failure.
"""
status_code: int = SUCCESS_RETURN_CODE
# create an incident with the given playbook
try:
incident_id = self.create_incident_with_test_playbook(
incident_name=f'inc_{test_playbook_id}', test_playbook_id=test_playbook_id)
except ApiException as a:
print_color(str(a), LOG_COLORS.RED)
# incident_id is undefined when creation fails, so return early instead of building the link
return ERROR_RETURN_CODE
work_plan_link = self.base_link_to_workplan + str(incident_id)
if self.should_wait:
status_code = self.run_and_check_tpb_status(test_playbook_id, work_plan_link, incident_id)
else:
print_color(f'To see results please go to : {work_plan_link}', LOG_COLORS.NATIVE)
return status_code
def run_and_check_tpb_status(self, test_playbook_id, work_plan_link, incident_id):
status_code = SUCCESS_RETURN_CODE
print_color(f'Waiting for the test playbook to finish running...\n'
f'To see the test playbook run in real-time please go to : {work_plan_link}', LOG_COLORS.GREEN)
elapsed_time = 0
start_time = time.time()
while elapsed_time < self.timeout:
test_playbook_result = self.get_test_playbook_results_dict(incident_id)
if test_playbook_result['state'] == "inprogress":
time.sleep(10)
elapsed_time = int(time.time() - start_time)
else: # the test playbook has finished running
break
# Ended the loop because of timeout
if elapsed_time >= self.timeout:
print_color(f'The command had timed out while the playbook is in progress.\n'
f'To keep tracking the test playbook please go to : {work_plan_link}', LOG_COLORS.RED)
else:
if test_playbook_result['state'] == "failed":
self.print_tpb_error_details(test_playbook_result, test_playbook_id)
print_color("The test playbook finished running with status: FAILED", LOG_COLORS.RED)
status_code = ERROR_RETURN_CODE
else:
print_color("The test playbook has completed its run successfully", LOG_COLORS.GREEN)
return status_code
def create_incident_with_test_playbook(self, incident_name, test_playbook_id):
# type: (str, str) -> int
"""Create an incident in your xsoar instance with the given incident_name and the given test_playbook_id
Args:
incident_name (str): The name of the incident
test_playbook_id (str): The id of the playbook
Raises:
ApiException: if the client has failed to create an incident
Returns:
int. The new incident's ID.
"""
create_incident_request = demisto_client.demisto_api.CreateIncidentRequest()
create_incident_request.create_investigation = True
create_incident_request.playbook_id = test_playbook_id
create_incident_request.name = incident_name
try:
response = self.demisto_client.create_incident(create_incident_request=create_incident_request)
except ApiException as e:
print_color(f'Failed to create incident with playbook id : "{test_playbook_id}", '
'possible reasons are:\n'
'1. This playbook name does not exist \n'
'2. Schema problems in the playbook \n'
'3. Unauthorized api key', LOG_COLORS.RED)
raise e
print_color(f'The test playbook: {self.test_playbook_path} was triggered successfully.', LOG_COLORS.GREEN)
return response.id
def get_test_playbook_results_dict(self, inc_id):
test_playbook_results = self.demisto_client.generic_request(method='GET', path=f'/inv-playbook/{inc_id}')
# NOTE: the response body is a Python-literal string; ast.literal_eval would be a safer parser than eval
return eval(test_playbook_results[0])
def print_tpb_error_details(self, tpb_res, tpb_id):
entries = tpb_res.get('entries')
if entries:
print_color(f'Test Playbook {tpb_id} has failed:', LOG_COLORS.RED)
for entry in entries:
if entry['type'] == ENTRY_TYPE_ERROR and entry['parentContent']:
print_color(f'- Task ID: {entry["taskId"]}', LOG_COLORS.RED)
# Checks for passwords and replaces them with "******"
parent_content = re.sub(r' (P|p)assword="[^";]*"', ' password=******', entry['parentContent'])
print_color(f' Command: {parent_content}', LOG_COLORS.RED)
print_color(f' Body:\n{entry["contents"]}', LOG_COLORS.RED)
def get_base_link_to_workplan(self):
"""Create a base link to the workplan in the specified xsoar instance
Returns:
str: The link to the workplan
"""
base_url = os.environ.get('DEMISTO_BASE_URL')
return f'{base_url}/#/WorkPlan/'
def upload_tpb(self, tpb_file):
uploader = Uploader(input=tpb_file, insecure=self.verify) # type: ignore
uploader.upload()
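The password scrubbing in print_tpb_error_details relies on a small regex; its behavior on a toy command string can be checked in isolation:

import re

entry = 'send-mail server="smtp" password="hunter2"; retries=3'
scrubbed = re.sub(r' (P|p)assword="[^";]*"', ' password=******', entry)
print(scrubbed)  # send-mail server="smtp" password=******; retries=3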
| 41.348936
| 115
| 0.653185
| 1,281
| 9,717
| 4.675254
| 0.18345
| 0.122224
| 0.03974
| 0.033395
| 0.251461
| 0.112373
| 0.061279
| 0.039907
| 0
| 0
| 0
| 0.001973
| 0.269631
| 9,717
| 234
| 116
| 41.525641
| 0.841905
| 0.184522
| 0
| 0.130435
| 0
| 0
| 0.152725
| 0.030729
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101449
| false
| 0.007246
| 0.050725
| 0
| 0.23913
| 0.123188
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19646d9adaadbd2e2fc9af7b3104bea3fb1c2bae
| 1,030
|
py
|
Python
|
docs/examples/template_query.py
|
Fourcast/flycs_sdk
|
4bf206c26f59726d0ce0caa51bd3a893a34fed2a
|
[
"MIT"
] | 7
|
2020-12-15T13:25:43.000Z
|
2021-08-31T14:35:06.000Z
|
docs/examples/template_query.py
|
Fourcast/flycs_sdk
|
4bf206c26f59726d0ce0caa51bd3a893a34fed2a
|
[
"MIT"
] | 2
|
2020-11-12T12:46:28.000Z
|
2021-12-21T07:26:28.000Z
|
docs/examples/template_query.py
|
Fourcast/flycs_sdk
|
4bf206c26f59726d0ce0caa51bd3a893a34fed2a
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timezone
from flycs_sdk.entities import Entity
from flycs_sdk.pipelines import Pipeline, PipelineKind
from flycs_sdk.transformations import Transformation
# Define your transformation SQL query using a jinja-style template for the table name, and list the tables this transformation should be applied to
query = Transformation(
name="my_query",
query="SELECT * FROM {table_name}",
version="1.0.0",
tables=["tables1", "tables2"],
)
# Then define your entity and pipeline as usual
stage_config = {
"raw": {"my_query": "1.0.0"},
"staging": {"my_query": "1.0.0"},
}
entity1 = Entity("entity1", "1.0.0", stage_config)
entity1.transformations = {
"raw": {"my_query": query},
"staging": {"my_query": query},
}
p1 = Pipeline(
name="my_pipeline",
version="1.0.0",
schedule="* 12 * * *",
entities=[entity1],
kind=PipelineKind.VANILLA,
start_time=datetime.now(tz=timezone.utc),
)
# expose the pipeline to the module as usual
pipelines = [p1]
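Conceptually, the templated transformation above expands into one concrete query per entry in tables; how flycs_sdk renders the template is up to the library, so the plain-Python substitution below is only an illustration:

query_template = "SELECT * FROM {table_name}"
tables = ["tables1", "tables2"]
rendered = [query_template.format(table_name=t) for t in tables]
print(rendered)  # ['SELECT * FROM tables1', 'SELECT * FROM tables2']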
| 27.105263
| 154
| 0.694175
| 139
| 1,030
| 5.05036
| 0.446043
| 0.049858
| 0.021368
| 0.02849
| 0.02849
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029343
| 0.172816
| 1,030
| 37
| 155
| 27.837838
| 0.794601
| 0.233981
| 0
| 0.071429
| 0
| 0
| 0.194904
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1965469fd09240b19440049eb433930a5143a25d
| 13,252
|
py
|
Python
|
try/run_eval.py
|
CleverShovel/AIJ2020-digital-peter
|
baf07200e607cd39398fc0db1ba699c7af5cea77
|
[
"MIT"
] | null | null | null |
try/run_eval.py
|
CleverShovel/AIJ2020-digital-peter
|
baf07200e607cd39398fc0db1ba699c7af5cea77
|
[
"MIT"
] | null | null | null |
try/run_eval.py
|
CleverShovel/AIJ2020-digital-peter
|
baf07200e607cd39398fc0db1ba699c7af5cea77
|
[
"MIT"
] | null | null | null |
import torch.nn.functional as F
import torch.nn as nn
import torch
import torchvision.transforms.functional as VF
from PIL import Image
import numpy as np
import os
from os.path import join
from collections import Counter
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import math
from ctcdecode import CTCBeamDecoder
import multiprocessing
n_cpus = multiprocessing.cpu_count()
# letters = [' ', ')', '+', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '[', ']', 'i', 'k', 'l', '|', '×', 'ǂ',
# 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х',
# 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я', 'і', 'ѣ', '–', '…', '⊕', '⊗']
letters = list(' ()+/0123456789[]abdefghiklmnoprstu|×ǂабвгдежзийклмнопрстуфхцчшщъыьэюяѣ–⊕⊗')
std, mean = (0.3847, 0.3815, 0.3763), (0.6519, 0.6352, 0.5940)
def process_image(img):
img = VF.resize(img, 128)
img = VF.pad(img, (0, 0, max(1024 - img.size[0], 0), max(128 - img.size[1], 0)))
img = VF.resize(img, (128, 1024))
img = VF.to_tensor(img)
img = VF.normalize(img, mean, std)
return img
# CNN-BLSTM
class CNNBLSTM(nn.Module):
def __init__(self, conv_drop=0.2, lstm_drop=0.5):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
self.norm1 = nn.BatchNorm2d(16)
self.lelu1 = nn.LeakyReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.norm2 = nn.BatchNorm2d(32)
self.lelu2 = nn.LeakyReLU()
self.pool2 = nn.MaxPool2d(2)
self.dropout3 = nn.Dropout2d(conv_drop)
self.conv3 = nn.Conv2d(32, 48, kernel_size=3, padding=1)
self.norm3 = nn.BatchNorm2d(48)
self.lelu3 = nn.LeakyReLU()
self.pool3 = nn.MaxPool2d(2)
self.dropout4 = nn.Dropout2d(conv_drop)
self.conv4 = nn.Conv2d(48, 64, kernel_size=3, padding=1)
self.norm4 = nn.BatchNorm2d(64)
self.lelu4 = nn.LeakyReLU()
self.dropout5 = nn.Dropout2d(conv_drop)
self.conv5 = nn.Conv2d(64, 80, kernel_size=3, padding=1)
self.norm5 = nn.BatchNorm2d(80)
self.lelu5 = nn.LeakyReLU()
self.flatten1 = nn.Flatten(1, 2)
self.dropout6 = nn.Dropout(lstm_drop)
self.lstm6 = nn.LSTM(80*16, hidden_size=256, num_layers=3, dropout=lstm_drop, bidirectional=True, batch_first=True)
self.dropout7 = nn.Dropout(lstm_drop)
self.linear = nn.Linear(2*256, len(letters) + 1)
def forward(self, x):
x = self.pool1(self.lelu1(self.norm1(self.conv1(x))))
x = self.pool2(self.lelu2(self.norm2(self.conv2(x))))
x = self.pool3(self.lelu3(self.norm3(self.conv3(self.dropout3(x)))))
x = self.lelu4(self.norm4(self.conv4(self.dropout4(x))))
x = self.lelu5(self.norm5(self.conv5(self.dropout5(x))))
x = self.flatten1(x)
x = x.transpose(1, 2)
x, _ = self.lstm6(x)
x = self.dropout7(x)
x = self.linear(x)
return x
def init_weights(m):
# FullGatedConv2d only exists in the commented-out HTRFlor code below; referencing it here would raise NameError
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight)
# # https://github.com/mf1024/Batch-Renormalization-PyTorch/blob/master/batch_renormalization.py
# # Batch Renormalization for convolutional neural nets (2D) implementation based
# # on https://arxiv.org/abs/1702.03275
# class BatchNormalization2D(nn.Module):
# def __init__(self, num_features, eps=1e-05, momentum = 0.1):
# super().__init__()
# self.eps = eps
# self.momentum = torch.tensor( (momentum), requires_grad = False)
# self.gamma = nn.Parameter(torch.ones((1, num_features, 1, 1), requires_grad=True))
# self.beta = nn.Parameter(torch.zeros((1, num_features, 1, 1), requires_grad=True))
# self.running_avg_mean = torch.ones((1, num_features, 1, 1), requires_grad=False)
# self.running_avg_std = torch.zeros((1, num_features, 1, 1), requires_grad=False)
# def forward(self, x):
# device = self.gamma.device
# batch_ch_mean = torch.mean(x, dim=(0,2,3), keepdim=True).to(device)
# batch_ch_std = torch.clamp(torch.std(x, dim=(0,2,3), keepdim=True), self.eps, 1e10).to(device)
# self.running_avg_std = self.running_avg_std.to(device)
# self.running_avg_mean = self.running_avg_mean.to(device)
# self.momentum = self.momentum.to(device)
# if self.training:
# x = (x - batch_ch_mean) / batch_ch_std
# x = x * self.gamma + self.beta
# else:
# x = (x - self.running_avg_mean) / self.running_avg_std
# x = self.gamma * x + self.beta
# self.running_avg_mean = self.running_avg_mean + self.momentum * (batch_ch_mean.data.to(device) - self.running_avg_mean)
# self.running_avg_std = self.running_avg_std + self.momentum * (batch_ch_std.data.to(device) - self.running_avg_std)
# return x
# class BatchRenormalization2D(nn.Module):
# def __init__(self, num_features, eps=1e-05, momentum=0.01, r_d_max_inc_step = 0.0001):
# super().__init__()
# self.eps = eps
# self.momentum = torch.tensor( (momentum), requires_grad = False)
# self.gamma = nn.Parameter(torch.ones((1, num_features, 1, 1)), requires_grad=True)
# self.beta = nn.Parameter(torch.zeros((1, num_features, 1, 1)), requires_grad=True)
# self.running_avg_mean = torch.ones((1, num_features, 1, 1), requires_grad=False)
# self.running_avg_std = torch.zeros((1, num_features, 1, 1), requires_grad=False)
# self.max_r_max = 3.0
# self.max_d_max = 5.0
# self.r_max_inc_step = r_d_max_inc_step
# self.d_max_inc_step = r_d_max_inc_step
# self.r_max = torch.tensor( (1.0), requires_grad = False)
# self.d_max = torch.tensor( (0.0), requires_grad = False)
# def forward(self, x):
# device = self.gamma.device
# batch_ch_mean = torch.mean(x, dim=(0,2,3), keepdim=True).to(device)
# batch_ch_std = torch.clamp(torch.std(x, dim=(0,2,3), keepdim=True), self.eps, 1e10).to(device)
# self.running_avg_std = self.running_avg_std.to(device)
# self.running_avg_mean = self.running_avg_mean.to(device)
# self.momentum = self.momentum.to(device)
# self.r_max = self.r_max.to(device)
# self.d_max = self.d_max.to(device)
# if self.training:
# r = torch.clamp(batch_ch_std / self.running_avg_std, 1.0 / self.r_max, self.r_max).to(device).data.to(device)
# d = torch.clamp((batch_ch_mean - self.running_avg_mean) / self.running_avg_std, -self.d_max, self.d_max).to(device).data.to(device)
# x = ((x - batch_ch_mean) * r )/ batch_ch_std + d
# x = self.gamma * x + self.beta
# if self.r_max < self.max_r_max:
# self.r_max += self.r_max_inc_step * x.shape[0]
# if self.d_max < self.max_d_max:
# self.d_max += self.d_max_inc_step * x.shape[0]
# else:
# x = (x - self.running_avg_mean) / self.running_avg_std
# x = self.gamma * x + self.beta
# self.running_avg_mean = self.running_avg_mean + self.momentum * (batch_ch_mean.data.to(device) - self.running_avg_mean)
# self.running_avg_std = self.running_avg_std + self.momentum * (batch_ch_std.data.to(device) - self.running_avg_std)
# return x
# class FullGatedConv2d(nn.Conv2d):
# def __init__(self, in_channels, **kwargs):
# super().__init__(in_channels, in_channels * 2, **kwargs)
# self.channels = in_channels
# self.sigm = nn.Sigmoid()
# def forward(self, x):
# x = super().forward(x)
# gated_x = self.sigm(x[:, self.channels:, :, :])
# return x[:, :self.channels, :, :] * gated_x
# class HTRFlorConvBlock(nn.Module):
# def __init__(self, in_channels, out_channels, kernel_size):
# super().__init__()
# self.conv = nn.Conv2d(in_channels, out_channels, kernel_size)
# self.prelu = nn.PReLU()
# self.br = nn.BatchNorm2d(out_channels)
# def forward(self, x):
# x = self.br(self.prelu(self.conv(x)))
# return x
# class HTRFlor(nn.Module):
# def __init__(self, htr_dropout=0.2, gru_dropout=0.5):
# super().__init__()
# self.conv_block1 = HTRFlorConvBlock(3, 16, kernel_size=(3, 3))
# self.gconv1 = FullGatedConv2d(16, kernel_size=(3, 3), padding=1)
# self.pool1 = nn.MaxPool2d((2, 2))
# self.conv_block2 = HTRFlorConvBlock(16, 32, kernel_size=(3, 3))
# self.gconv2 = FullGatedConv2d(32, kernel_size=(3, 3), padding=1)
# self.pool2 = nn.MaxPool2d((2, 2))
# self.conv_block3 = HTRFlorConvBlock(32, 40, kernel_size=(2, 4))
# self.gconv3 = FullGatedConv2d(40, kernel_size=(3, 3), padding=1)
# self.drop3 = nn.Dropout2d(htr_dropout)
# self.conv_block4 = HTRFlorConvBlock(40, 48, kernel_size=(3, 3))
# self.gconv4 = FullGatedConv2d(48, kernel_size=(3, 3), padding=1)
# self.drop4 = nn.Dropout2d(htr_dropout)
# self.conv_block5 = HTRFlorConvBlock(48, 56, kernel_size=(2, 4))
# self.gconv5 = FullGatedConv2d(56, kernel_size=(3, 3), padding=1)
# self.drop5 = nn.Dropout2d(htr_dropout)
# self.conv_block6 = HTRFlorConvBlock(56, 64, kernel_size=(3, 3))
# # self.pool = nn.MaxPool2d((1, 2))
# self.flatten = nn.Flatten(1, 2)
# self.drop7 = nn.Dropout(gru_dropout)
# self.lstm7 = nn.LSTM(64*24, 128, num_layers=3, dropout=gru_dropout, bidirectional=True, batch_first=True)
# # self.lstm7 = nn.LSTM(64*24, 128, bidirectional=True, batch_first=True)
# # self.linear7 = nn.Linear(2*128, 256)
# # self.drop8 = nn.Dropout(gru_dropout)
# # self.lstm8 = nn.LSTM(256, 128, bidirectional=True, batch_first=True)
# self.linear8 = nn.Linear(2*128, len(letters) + 1)
# def forward(self, x):
# x = self.conv_block1(x)
# x = self.pool1(self.gconv1(x))
# x = self.conv_block2(x)
# x = self.pool2(self.gconv2(x))
# x = self.conv_block3(x)
# x = self.drop3(self.gconv3(x))
# x = self.conv_block4(x)
# x = self.drop4(self.gconv4(x))
# x = self.conv_block5(x)
# x = self.drop5(self.gconv5(x))
# x = self.flatten(self.conv_block6(x))
# x = x.transpose(1, 2)
# x, _ = self.lstm7(self.drop7(x))
# # x = self.linear7(x)
# # x, _ = self.lstm8(self.drop8(x))
# x = self.linear8(x)
# return x
# def init_weights(m):
# if type(m) == nn.Conv2d or type(m) == FullGatedConv2d:
# nn.init.kaiming_uniform_(m.weight)
# # nn.init.kaiming_uniform_(m.weight)
def create_model():
model = CNNBLSTM()
# model.apply(init_weights)
return model
model_path = 'language_model/train.binary'
decoder = CTCBeamDecoder([*letters, '~'],
model_path=None,
alpha=0.01,
blank_id=len(letters),
beam_width=100,
num_processes=n_cpus)
# decoder = CTCBeamDecoder([*letters, '~'],
# model_path=model_path,
# alpha=0.1,
# blank_id=len(letters),
# beam_width=100,
# num_processes=n_cpus)
def get_prediction(act_model, test_images):
act_model.eval()
with torch.no_grad():
output = F.softmax(act_model(test_images), dim=-1)
beam_results, _, _, out_lens = decoder.decode(output)
prediction = []
for i in range(len(beam_results)):
pred = "".join(letters[n] for n in beam_results[i][0][:out_lens[i][0]])
prediction.append(pred)
return prediction
def write_prediction(names_test, prediction, output_dir):
os.makedirs(output_dir, exist_ok=True)
for _, (name, line) in enumerate(zip(names_test, prediction)):
with open(os.path.join(output_dir, name.replace('.jpg', '.txt')), 'w') as file:
file.write(line)
def load_test_images(test_image_dir):
test_images = []
names_test = []
for name in os.listdir(test_image_dir):
img = Image.open(test_image_dir + '/' + name)
img = process_image(img).unsqueeze(0)
test_images.append(img)
names_test.append(name)
test_images = torch.cat(test_images, dim=0)
return names_test, test_images
def main():
test_image_dir = '/data'
filepath = 'checkpoint/model.pth'
pred_path = '/output'
print('Creating model...', end=' ')
act_model = create_model()
print('Success')
print(f'Loading weights from {filepath}...', end=' ')
act_model.load_state_dict(torch.load(filepath))
print('Success')
print(f'Loading test images from {test_image_dir}...', end=' ')
names_test, test_images = load_test_images(test_image_dir)
print('Success')
print('Running inference...')
prediction = get_prediction(act_model, test_images)
print('Writing predictions...')
write_prediction(names_test, prediction, pred_path)
if __name__ == '__main__':
main()
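get_prediction above delegates to ctcdecode's beam search; a dependency-free baseline is greedy (best-path) CTC decoding, which takes the argmax per frame, collapses repeats, and drops blanks. A sketch using the same letters/blank-last convention as the model above:

import torch

def greedy_ctc_decode(logits, letters):
    # logits: (time, n_classes), with the CTC blank as the last class
    blank = len(letters)
    ids = logits.argmax(dim=-1).tolist()
    out, prev = [], blank
    for i in ids:
        if i != prev and i != blank:  # collapse repeats, skip blanks
            out.append(letters[i])
        prev = i
    return "".join(out)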
| 34.066838
| 145
| 0.60821
| 1,873
| 13,252
| 4.111052
| 0.180993
| 0.024026
| 0.056364
| 0.035325
| 0.522078
| 0.410779
| 0.327662
| 0.296883
| 0.28
| 0.25961
| 0
| 0.046904
| 0.235813
| 13,252
| 389
| 146
| 34.066838
| 0.712353
| 0.555916
| 0
| 0.025
| 0
| 0
| 0.054653
| 0.017461
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.1
| 0
| 0.225
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|