hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1a48273fcffbd81bf82e527381a62c49f570d938 | 3,794 | py | Python | 2021/d25/d25.py | btharper/aoc | c4e265515da4b61b9e6704652a1d1175ddfd3f92 | [
"Apache-2.0"
] | null | null | null | 2021/d25/d25.py | btharper/aoc | c4e265515da4b61b9e6704652a1d1175ddfd3f92 | [
"Apache-2.0"
] | null | null | null | 2021/d25/d25.py | btharper/aoc | c4e265515da4b61b9e6704652a1d1175ddfd3f92 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict, Counter, deque
from functools import cache
from itertools import product, pairwise
from multiprocessing import Pool
import math
import re
non_digits = re.compile('[^0-9]+')
def sign(a, b, step=1):
return int(math.copysign(step, b-a))
def autorange(a,b, step=1):
if a == b:return (a,)
s = sign(a, b, step)
return range(a, b+s, s)
def get_ints(line, strip_line=False):
if strip_line:
line = line.strip()
return [*map(int, non_digits.split(line))]
grid_char = {'.': '.', (0,1): 'v', (1,0):'>'}
def d25(inp, sample=False):
p1, p2 = None, None
grid = {}
max_x, max_y = 0, 0
for y, line in enumerate(inp.split()):
max_y = max(y+1, max_y)
for x, char in enumerate(line):
max_x = max(x+1, max_x)
if char == '>':
grid[x,y] = (1,0)
elif char == 'v':
grid[x,y] = (0,1)
turn = 0
moved = True
n_grid = {}
while moved:
# if turn in (0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 55, 56, 57, 58):
# print(f"After {turn} steps:")
# for y in range(max_y):
# for x in range(max_x):
# print(grid_char[grid.get((x,y), '.')], end='')
# print()
turn += 1
moved = False
for (x,y), (dx, dy) in grid.items():
if dy:
n_grid[x,y] = grid[x,y]
continue
nt = nx, ny = (x+dx)%max_x, (y+dy)%max_y
if grid.get(nt, None) is None:
n_grid[nt] = dx,dy
moved = True
else:
n_grid[x,y] = dx,dy
grid = n_grid
n_grid = {}
for (x,y), (dx, dy) in grid.items():
if dx:
n_grid[x,y] = grid[x,y]
continue
nt = nx, ny = (x+dx)%max_x, (y+dy)%max_y
if grid.get(nt, None) is None:
n_grid[nt] = dx,dy
moved = True
else:
n_grid[x,y] = dx,dy
grid = n_grid
n_grid = {}
p1 = turn
return p1, p2
def validate_test(case_id, inp=None, want_p1=None, want_p2=None):
do_p1, do_p2 = False, False
#print(f"validate_test({case_id}, {inp}, {want_p1}, {want_p2})")
got_p1, got_p2 = d25(inp, sample=True)
if want_p1 is not None:
assert want_p1 == got_p1, f"{case_id=} p1:\n\t{want_p1=}\n\t{got_p1=}"
do_p1 = True
if want_p2 is not None:
assert want_p2 == got_p2, f"{case_id=} p2:\n\t{want_p2=}\n\t{got_p2=}"
do_p2 = True
return True, do_p1, do_p2
def main():
with open('../inputs/d25.txt') as f:
inp = f.read().strip()
return d25(inp)
if __name__ == '__main__':
cases = [
#(id, inp, p1, p2),
(0, """v...>>.vv>
.vv>>.vv..
>>.>v>...v
>>v>>.>.v.
v>v.vv.v..
>.>>..v...
.vv..>.>v.
v.v..>>v.v
....v..v.>""", 58, None),
]
"""
# Non multiprocessing version
for case in cases:
validate_test(*case)
p1, p2 = main()
print(f"p1 = {p1}\np2 = {p2}")
"""
with Pool(processes=min(8, len(cases) + 1)) as pool:
main_res = pool.apply_async(main)
test_res = [pool.apply_async(validate_test, case) for case in cases]
test_pass, do_p1, do_p2 = True, False, False
for test in test_res:
tp, dp1, dp2 = test.get(30)
test_pass &= tp
do_p1 |= dp1
do_p2 |= dp2
if test_pass:
p1, p2 = main_res.get(60)
assert do_p1 or do_p2, "Didn't run any tets"
assert p1 is None or do_p1 == True, "Got P1 value without 'do_p1' set"
assert p2 is None or do_p2 == True, "Got P2 value without 'do_p2' set"
print(f"p1 = {p1}\np2 = {p2}")
| 29.410853 | 82 | 0.495783 | 596 | 3,794 | 3.010067 | 0.218121 | 0.014493 | 0.01505 | 0.015608 | 0.221293 | 0.17447 | 0.157191 | 0.148272 | 0.148272 | 0.123746 | 0 | 0.048077 | 0.342119 | 3,794 | 128 | 83 | 29.640625 | 0.670673 | 0.091987 | 0 | 0.23301 | 0 | 0 | 0.097955 | 0.018309 | 0 | 0 | 0 | 0 | 0.048544 | 1 | 0.058252 | false | 0.029126 | 0.058252 | 0.009709 | 0.174757 | 0.009709 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a4c9682da4b81a0f168d8e9419bc7e161393b09 | 26,025 | py | Python | build/releases/release-0.573py3/ob/ltr.py | farinacci/lispers.net | e1ed6e0f0a242b13ad629afb0fc1c7072b19b30c | [
"Apache-2.0"
] | 26 | 2019-02-01T19:12:21.000Z | 2022-03-25T04:40:38.000Z | build/releases/release-0.572py3/ob/ltr.py | farinacci/lispers.net | e1ed6e0f0a242b13ad629afb0fc1c7072b19b30c | [
"Apache-2.0"
] | 3 | 2019-10-29T17:49:19.000Z | 2022-03-20T21:21:31.000Z | build/releases/release-0.569/ob/ltr.py | farinacci/lispers.net | e1ed6e0f0a242b13ad629afb0fc1c7072b19b30c | [
"Apache-2.0"
] | 4 | 2019-02-02T16:50:48.000Z | 2020-10-29T03:10:58.000Z | #!/usr/bin/python
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# ltr.py - LISP EID Traceroute Client - Trace the encap/decap paths
#
# Usage: python ltr.py [-s <source-eid>] <destination-EID | DNS-name>
#
# -s: Optional source EID.
# <destination-EID>: required parameter [<iid>] in front is optional
#
# This application is run on an xTR. Typically a ITR or RTR, where the
# encapsulator adds to the ltr message with the RLOC the ITR is encapsulating
# to. Then the decapsulator will decapsulate and swap the source and
# destination addresses to return the packet to the source-EID (running the
# client program). If the ETR is not the EID, then the packet will be re-
# encapsulated in which more data is added to the ltr message.
#
# ltr messages run in UDP on port 2434 (4342 backwards) and are returned
# to the client program.
#
# The LISP-Trace message takes the following path:
#
# (1) ltr sends LISP-TRACE packet from its EID to the EID of the ETR on
# port 2434. It builds a type=9 packet with a nonce and an empty JSON field.
#
# (2) ITR will look up destination EID as part of forwarding logic and add
# RLOC information to LISP-Trace message. The message is encapsulated to
# the ETR.
#
# (3) The ETR (or RTR) will decap packet. It will add information to the LISP-
# packet. If it is the destination EID, it will send the LISP-Trace packet
# using itself as the source and the original source as the destination.
#
# (4) The local ITR will encapsulate the packet and add RLOC information to
# the LISP-Trace packet. It encapsulates the return packet to the ETR.
#
# (5) The ETR decapsulates the packet and sends it to the ltr client so the
# accumulated JSON data can be displayed for the user.
#
# This functionality works on a chain of encapsulating tunnels to give the
# user what RLOCs are used and the arrival time of the packet. It allows an
# ltr client to not only determine path and latency of the network, but if
# the encapsulation paths are symmetric or asymmetric.
#
# If there an error along the path, the node detecting the error will return
# the LISP-Trace packet to the RLOC of the originating ITR.
#
# The JSON format of an LISP-Trace packet is an array of dictionary arrays.
# The array will typically have 2 elements, one from ltr source to destination
# EID and one for the return path. Each dictionary array is keyed with "seid",
# "deid", and "paths". The array "paths" is the node data that is appended
# at each encapsulation hop. Note example below:
#
# [
# { "se" : "[<iid>]<orig-eid>", "de" : "[<iid>]<dest-eid>", "paths" : a
# [
# { "n" : "ITR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "dts" : "<ts>", "hn" : "<hn>" },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "ETR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>" }, ...
# ] },
#
# { "se" : "[<iid>]<dest-eid>", "de" : "[<iid>]<orig-eid>", "paths" :
# [
# { "n" : "ITR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "dts" : "<ts>", "hn" : "<hn>" },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "ETR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>" }, ...
# ] }
# ]
#
# Environment variable LISP_LTR_PORT is used to determine if the connection to
# the LISP API is done with a particular port. And if the port has a minus
# sign in front of it, it will use http rather https to connect to the
# lispers.net API. Environment variables LISP_LTR_USER and LISP_LTR_PW are
# used when lispers.net API is running with a password on username root.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from future import standard_library
standard_library . install_aliases ( )
from builtins import hex
import sys
import struct
import random
import socket
import json
import time
import os
import binascii
from subprocess import getoutput
if 64 - 64: i11iIiiIii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
II1iII1i = "https"
oO0oIIII = 8080
if 59 - 59: i1IIi * i1IIi % OOooOOo + II111iiii
II = os . getenv ( "LISP_LTR_PORT" )
if ( II != None ) :
if ( II [ 0 ] == "-" ) :
II1iII1i = "http"
II = II [ 1 : : ]
if 100 - 100: i1IIi . I1Ii111 / IiII * OoooooooOO + I11i * oO0o
if ( II . isdigit ( ) == False ) :
print ( "Invalid value for env variable LISP_LTR_PORT" )
exit ( 1 )
if 99 - 99: iII111i . OOooOOo / iIii1I11I1II1 * iIii1I11I1II1
oO0oIIII = int ( II )
if 11 - 11: oO0o / i1IIi % II111iiii - OoOoOO00
OOo = os . getenv ( "LISP_LTR_USER" )
Ii1IIii11 = os . getenv ( "LISP_LTR_PW" )
if ( OOo == None ) : OOo = "root"
if ( Ii1IIii11 == None ) : Ii1IIii11 = ""
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
OOo000 = 2434
if 82 - 82: I11i . I1Ii111 / IiII % II111iiii % iIii1I11I1II1 % IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
if 58 - 58: i11iIiiIii % I1Ii111
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
def oO ( rloc , port ) :
OO0OOooOoO0Oo = socket . htonl ( 0x90000000 + port )
iiIIiIiIi = struct . pack ( "I" , OO0OOooOoO0Oo )
if 38 - 38: Ii1I / Oo0Ooo
OooO0 = rloc . split ( "." )
II11iiii1Ii = int ( OooO0 [ 0 ] ) << 24
II11iiii1Ii += int ( OooO0 [ 1 ] ) << 16
II11iiii1Ii += int ( OooO0 [ 2 ] ) << 8
II11iiii1Ii += int ( OooO0 [ 3 ] )
iiIIiIiIi += struct . pack ( "I" , socket . htonl ( II11iiii1Ii ) )
if 70 - 70: oO0o / iIii1I11I1II1 % ooOoO0o % i11iIiiIii . I1IiiI
O0o0Oo = random . randint ( 0 , ( 2 ** 64 ) - 1 )
iiIIiIiIi += struct . pack ( "Q" , O0o0Oo )
return ( O0o0Oo , iiIIiIiIi )
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
def i1iiI11I ( nonce , packet ) :
if ( len ( packet ) < 12 ) : return ( False )
if 29 - 29: OoooooooOO
iI = "II"
I1i1I1II = struct . calcsize ( iI )
OO0OOooOoO0Oo , i1 = struct . unpack ( iI , packet [ : I1i1I1II ] )
packet = packet [ I1i1I1II : : ]
if ( socket . ntohl ( OO0OOooOoO0Oo ) != 0x90000000 ) :
print ( "Invalid LISP-Trace message" )
return ( { } )
if 48 - 48: O0 + O0 - I1ii11iIi11i . ooOoO0o / iIii1I11I1II1
if 77 - 77: i1IIi % OoOoOO00 - IiII + ooOoO0o
iI = "Q"
I1i1I1II = struct . calcsize ( iI )
I11iiIiii = struct . unpack ( iI , packet [ : I1i1I1II ] ) [ 0 ]
packet = packet [ I1i1I1II : : ]
if 1 - 1: II111iiii - I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
if 83 - 83: OoooooooOO
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if ( I11iiIiii != nonce ) :
print ( "Invalid nonce, sent {}, received {}" . format ( nonce , I11iiIiii ) )
return ( { } )
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if ( len ( packet ) == 0 ) :
print ( "No JSON data in payload" )
return ( { } )
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
try :
Ii11iII1 = json . loads ( packet )
except :
print ( "Invalid JSON data: '{}'" . format ( packet ) )
return ( { } )
if 51 - 51: II111iiii * OoO0O00 % o0oOOo0O0Ooo * II111iiii % I1ii11iIi11i / ooOoO0o
return ( Ii11iII1 )
if 49 - 49: o0oOOo0O0Ooo
if 35 - 35: OoOoOO00 - OoooooooOO / I1ii11iIi11i % i1IIi
if 78 - 78: I11i
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
def i1I11i1iI ( jd ) :
for I1ii1Ii1 in jd :
iii11 = I1ii1Ii1 [ "se" ] if ( jd . index ( I1ii1Ii1 ) == 0 ) else oOOOOo0 ( I1ii1Ii1 [ "se" ] )
iiII1i1 = oOOOOo0 ( I1ii1Ii1 [ "de" ] ) if ( jd . index ( I1ii1Ii1 ) == 0 ) else I1ii1Ii1 [ "de" ]
if 66 - 66: OOooOOo - I11i
print ( "Path from {} to {}:" . format ( iii11 , iiII1i1 ) )
for I1i1III in I1ii1Ii1 [ "paths" ] :
if ( "ets" in I1i1III ) :
OO0O0OoOO0 = I1i1III [ "ets" ]
iiiI1I11i1 = "encap"
if 49 - 49: I1IiiI % ooOoO0o . ooOoO0o . I11i * ooOoO0o
if ( "dts" in I1i1III ) :
OO0O0OoOO0 = I1i1III [ "dts" ]
iiiI1I11i1 = "decap"
if 97 - 97: Ii1I + o0oOOo0O0Ooo . OOooOOo + I1ii11iIi11i % iII111i
oo0O = I1i1III [ "hn" ]
o0 = I1i1III [ "dr" ]
if ( o0 . find ( "?" ) != - 1 ) : o0 = oo0oOo ( o0 )
if 89 - 89: OoOoOO00
print ( " {} {}: {} -> {}, ts {}, node {}" . format ( I1i1III [ "n" ] , iiiI1I11i1 , I1i1III [ "sr" ] , o0 , OO0O0OoOO0 , OO0oOoOO0oOO0 ( oo0O ) ) )
if 86 - 86: OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
if ( "rtts" in I1i1III and "hops" in I1i1III and "lats" in I1i1III ) :
ii1ii1ii = json . dumps ( I1i1III [ "rtts" ] )
ii1ii1ii = ii1ii1ii . replace ( "-1" , "?" )
oooooOoo0ooo = json . dumps ( I1i1III [ "hops" ] )
oooooOoo0ooo = oooooOoo0ooo . replace ( "u" , "" )
oooooOoo0ooo = oooooOoo0ooo . replace ( "'" , "" )
oooooOoo0ooo = oooooOoo0ooo . replace ( '"' , "" )
I1I1IiI1 = json . dumps ( I1i1III [ "lats" ] )
I1I1IiI1 = I1I1IiI1 . replace ( "u" , "" )
I1I1IiI1 = I1I1IiI1 . replace ( "'" , "" )
I1I1IiI1 = I1I1IiI1 . replace ( '"' , "" )
print ( " " , end = ' ' )
print ( "recent-rtts {}, recent-hops {}" . format ( ii1ii1ii , oooooOoo0ooo ) )
print ( " recent-latencies {}" . format ( I1I1IiI1 ) )
if 5 - 5: o0oOOo0O0Ooo * ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
print ( "" )
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
def o0oO ( eid ) :
IIiIi1iI = True
if 35 - 35: Ii1I % O0 - O0
if 16 - 16: II111iiii % OoOoOO00 - II111iiii + Ii1I
if 12 - 12: OOooOOo / OOooOOo + i11iIiiIii
if 40 - 40: I1IiiI . iIii1I11I1II1 / I1IiiI / i11iIiiIii
if 75 - 75: I11i + o0oOOo0O0Ooo
O0i1II1Iiii1I11 = eid . find ( "]" )
if ( O0i1II1Iiii1I11 == - 1 ) :
IIII = "0"
else :
IIiIi1iI = False
IIII = eid [ 1 : O0i1II1Iiii1I11 ]
eid = eid [ O0i1II1Iiii1I11 + 1 : : ]
if 32 - 32: OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if ( eid . find ( ":" ) == - 1 ) :
try : eid = socket . gethostbyname ( eid )
except : pass
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
return ( IIII , eid , IIiIi1iI )
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
def i1I1iI1iIi111i ( eid , eid_prefix , ml ) :
iiIi1IIi1I = 2 ** ml - 1
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
O0ooO0Oo00o = eid . split ( "." )
if ( len ( O0ooO0Oo00o ) == 1 ) : O0ooO0Oo00o = eid . split ( ":" )
if ( len ( O0ooO0Oo00o ) == 1 ) : return ( False )
if 77 - 77: iIii1I11I1II1 * OoO0O00
if ( len ( O0ooO0Oo00o ) == 4 ) :
iiIi1IIi1I = iiIi1IIi1I << ( 32 - ml )
eid = int ( O0ooO0Oo00o [ 0 ] ) << 24 | int ( O0ooO0Oo00o [ 1 ] ) << 16 | int ( O0ooO0Oo00o [ 2 ] ) << 8 | int ( O0ooO0Oo00o [ 3 ] )
O0ooO0Oo00o = eid & iiIi1IIi1I
eid = "{}.{}.{}.{}" . format ( ( O0ooO0Oo00o >> 24 ) & 0xff , ( O0ooO0Oo00o >> 16 ) & 0xff ,
( O0ooO0Oo00o >> 8 ) & 0xff , O0ooO0Oo00o & 0xff )
else :
iiIi1IIi1I = iiIi1IIi1I << ( 128 - ml )
eid = socket . inet_pton ( socket . AF_INET6 , eid )
eid = int ( binascii . hexlify ( eid ) , 16 )
O0ooO0Oo00o = eid & iiIi1IIi1I
eid = binascii . unhexlify ( hex ( O0ooO0Oo00o ) [ 2 : - 1 ] )
eid = socket . inet_ntop ( socket . AF_INET6 , eid )
if 95 - 95: I1IiiI + i11iIiiIii
return ( eid == eid_prefix )
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
def OoO0o ( match_iid , match_eid , user , pw , http , port , v4v6 ) :
oO0o0Ooooo = ( "curl --silent --insecure -u {}:{} {}://localhost:{}/lisp/" + "api/data/database-mapping" ) . format ( user , pw , http , port )
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
oO0 = getoutput ( oO0o0Ooooo )
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
try :
oOI1Ii1I1 = json . loads ( oO0 )
except :
return ( None , None , None , None )
if 28 - 28: O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * Ii1I - i11iIiiIii
if 7 - 7: Oo0Ooo + oO0o - I1Ii111 % Ii1I + I1ii11iIi11i
for ooo0OOOoo in oOI1Ii1I1 :
if ( ( "eid-prefix" in ooo0OOOoo ) == False ) : continue
I1Ii1 = ooo0OOOoo [ "eid-prefix" ]
if 46 - 46: O0 + iII111i % I1IiiI / o0oOOo0O0Ooo . IiII * I11i
if 93 - 93: o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if ( I1Ii1 . count ( "'" ) == 2 ) : continue
if ( I1Ii1 . count ( "." ) != 3 and I1Ii1 . find ( ":" ) == - 1 ) : continue
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
I1Ii1 , O0oO = I1Ii1 . split ( "/" )
IIII , I1Ii1 , IIiIi1iI = o0oO ( I1Ii1 )
if ( v4v6 and I1Ii1 . find ( "." ) == - 1 ) : continue
if ( v4v6 == False and I1Ii1 . find ( ":" ) == - 1 ) : continue
if 73 - 73: I1ii11iIi11i * i11iIiiIii % oO0o . I1ii11iIi11i
i1 = ooo0OOOoo [ "rlocs" ] [ 0 ] [ "rloc" ]
OOOOo0 = "translated-rloc" in ooo0OOOoo [ "rlocs" ] [ 0 ]
if 49 - 49: II111iiii % O0 . OoOoOO00 + oO0o / I1IiiI
if ( match_iid == None ) : return ( IIII , I1Ii1 , i1 , OOOOo0 )
if 72 - 72: ooOoO0o * Oo0Ooo . I1IiiI - II111iiii + i1IIi
iIi1ii = i1I1iI1iIi111i ( match_eid , I1Ii1 , int ( O0oO ) )
if ( match_iid == IIII and iIi1ii ) :
return ( None , None , i1 , OOOOo0 )
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
return ( None , None , None , None )
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
def oO00O000oO0 ( user , pw , http , port ) :
oO0o0Ooooo = ( "curl --silent --insecure -u {}:{} {}://localhost:{}/lisp/" + "api/data/map-cache" ) . format ( user , pw , http , port )
if 79 - 79: I11i - OoooooooOO - oO0o - iIii1I11I1II1 * OOooOOo
oO0 = getoutput ( oO0o0Ooooo )
if 4 - 4: i11iIiiIii . OoooooooOO / OoO0O00 % I1Ii111 % I11i * O0
try :
oOI1Ii1I1 = json . loads ( oO0 )
except :
return ( [ ] )
if 14 - 14: OOooOOo / o0oOOo0O0Ooo
if 32 - 32: I1IiiI * Oo0Ooo
O0OooOo0o = [ ]
for ooo0OOOoo in oOI1Ii1I1 :
if ( "group-prefix" in ooo0OOOoo ) : continue
if ( ( "eid-prefix" in ooo0OOOoo ) == False ) : continue
if ( ooo0OOOoo [ "eid-prefix" ] != "0.0.0.0/0" ) : continue
if 29 - 29: I1IiiI % I1IiiI
for i1 in ooo0OOOoo [ "rloc-set" ] :
if ( ( "rloc-name" in i1 ) == False ) : continue
if ( i1 [ "rloc-name" ] != "RTR" ) : continue
if ( ( "address" in i1 ) == False ) : continue
O0OooOo0o . append ( i1 [ "address" ] )
if 94 - 94: iIii1I11I1II1 / Oo0Ooo % iII111i * iII111i * II111iiii
if 29 - 29: OoO0O00 + OoOoOO00 / o0oOOo0O0Ooo / OOooOOo * iIii1I11I1II1
return ( O0OooOo0o )
if 62 - 62: OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
def oOOOOo0 ( string ) :
return ( "\033[1m" + string + "\033[0m" )
if 52 - 52: OOooOOo
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
def OO0oOoOO0oOO0 ( string ) :
return ( "\033[94m" + oOOOOo0 ( string ) + "\033[0m" )
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
def oo0oOo ( string ) :
return ( "\033[91m" + oOOOOo0 ( string ) + "\033[0m" )
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
def OO0000o ( deid , v4v6 ) :
if ( v4v6 ) :
i1I1i1 = int ( deid . split ( "." ) [ 0 ] )
if ( i1I1i1 < 224 or i1I1i1 >= 240 ) : return
else :
if ( deid [ 0 : 2 ] . lower ( ) != "ff" ) : return
if 81 - 81: ooOoO0o - iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * I11i
print ( "Multicast EID not supported" )
exit ( 1 )
if 20 - 20: oO0o % IiII
if 19 - 19: I1ii11iIi11i % IiII + ooOoO0o / I1Ii111 . ooOoO0o
if 12 - 12: i1IIi + i1IIi - I1ii11iIi11i * Oo0Ooo % Oo0Ooo - II111iiii
if 52 - 52: ooOoO0o . iII111i + I1Ii111
if 38 - 38: i1IIi - II111iiii . I1Ii111
if 58 - 58: I1IiiI . iII111i + OoOoOO00
if 66 - 66: iII111i / oO0o * OoooooooOO + OoooooooOO % I11i
if ( "-s" in sys . argv ) :
IIii1111 = len ( sys . argv ) != 4
else :
IIii1111 = len ( sys . argv ) != 2
if 42 - 42: I11i / o0oOOo0O0Ooo . oO0o + oO0o % OoOoOO00 + i11iIiiIii
if ( IIii1111 ) :
print ( "Usage: python ltr.py [-s <source-eid>] <destination-EID | DNS-name>" )
exit ( 1 )
if 56 - 56: o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
iII1i1 , O0oOOoooOO0O , IIiIi1iI = o0oO ( sys . argv [ - 1 ] )
if ( iII1i1 == None ) :
print ( "<destinaton-eid> parse error" )
exit ( 1 )
if 86 - 86: o0oOOo0O0Ooo
i1Iii11Ii1i1 = O0oOOoooOO0O . find ( ":" ) == - 1
if 59 - 59: Oo0Ooo % OoooooooOO . iII111i / IiII + I1IiiI
if 76 - 76: ooOoO0o
if 73 - 73: O0 * iII111i + Ii1I + ooOoO0o
if 40 - 40: II111iiii . OoOoOO00 * I1Ii111 + OOooOOo + OOooOOo
if 9 - 9: I11i % OoooooooOO . oO0o % I11i
OO0000o ( O0oOOoooOO0O , i1Iii11Ii1i1 )
if 32 - 32: i11iIiiIii
if 31 - 31: iIii1I11I1II1 / OoO0O00 / I1ii11iIi11i
if 41 - 41: Oo0Ooo
if 10 - 10: Oo0Ooo / Oo0Ooo / I1Ii111 . I1Ii111
if ( "-s" in sys . argv ) :
O0i1II1Iiii1I11 = sys . argv . index ( "-s" ) + 1
OOoo , iIIiiiI , IIiIi1iI = o0oO ( sys . argv [ O0i1II1Iiii1I11 ] )
if ( OOoo == None ) :
print ( "-s <source-eid> parse error" )
exit ( 1 )
if 60 - 60: I1IiiI . I1Ii111
if ( IIiIi1iI ) : OOoo = None
IiI111ii1ii , O0OOo , i1 , OOOOo0 = OoO0o ( OOoo , iIIiiiI , OOo , Ii1IIii11 , II1iII1i , oO0oIIII , i1Iii11Ii1i1 )
if ( i1 == None ) :
print ( "[{}]{} not a local EID, maybe lispers.net API pw/port wrong" . format ( OOoo , iIIiiiI ) )
if 38 - 38: iIii1I11I1II1 + I1ii11iIi11i - OOooOOo - ooOoO0o - OoOoOO00
exit ( 1 )
if 71 - 71: OOooOOo / Ii1I % OoO0O00
else :
OOoo , iIIiiiI , i1 , OOOOo0 = OoO0o ( None , None , OOo , Ii1IIii11 , II1iII1i , oO0oIIII , i1Iii11Ii1i1 )
if ( OOoo == None ) :
print ( "Could not find local EID, maybe lispers.net API pw/port wrong?" )
exit ( 1 )
if 50 - 50: OOooOOo / Ii1I % ooOoO0o . OoOoOO00
if 41 - 41: OOooOOo * Ii1I - IiII + o0oOOo0O0Ooo
if 64 - 64: Ii1I
if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
iII1i1 = OOoo if iII1i1 == "0" else iII1i1
if ( iII1i1 != OOoo ) :
print ( "Instance-IDs must be the same for source and destination EIDs" )
exit ( 1 )
if 95 - 95: I1IiiI
if 46 - 46: OoOoOO00 + OoO0O00
if 70 - 70: iII111i / iIii1I11I1II1
if 85 - 85: OoooooooOO % i1IIi * OoooooooOO / I1ii11iIi11i
if 96 - 96: OoooooooOO + oO0o
iiII1i11i = socket . socket ( socket . AF_INET6 , socket . SOCK_DGRAM )
iiII1i11i . bind ( ( "0::0" , 0 ) )
iiII1i11i . settimeout ( 3 )
II = iiII1i11i . getsockname ( ) [ 1 ]
if 11 - 11: I1IiiI / II111iiii + o0oOOo0O0Ooo * I1ii11iIi11i - I1ii11iIi11i - I1IiiI
if 85 - 85: I11i % oO0o / iIii1I11I1II1 . iIii1I11I1II1
if 31 - 31: o0oOOo0O0Ooo % OoO0O00
if 14 - 14: oO0o / oO0o % ooOoO0o
O0o0Oo , iiIIiIiIi = oO ( i1 , II )
if 56 - 56: I1IiiI . O0 + Oo0Ooo
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if ( OOOOo0 ) :
O0OooOo0o = oO00O000oO0 ( OOo , Ii1IIii11 , II1iII1i , oO0oIIII )
for IIii11I1i1I in O0OooOo0o :
print ( "Send NAT-traversal LISP-Trace to RTR {} ..." . format ( IIii11I1i1I ) )
iiII1i11i . sendto ( iiIIiIiIi , ( "::ffff:" + IIii11I1i1I , OOo000 ) )
if 99 - 99: iII111i
if 76 - 76: OoO0O00 * I1IiiI
if 82 - 82: Ii1I * iII111i / I1ii11iIi11i
print ( "Send round-trip LISP-Trace between EIDs [{}]{} and [{}]{} ..." . format ( OOoo , iIIiiiI , iII1i1 , O0oOOoooOO0O ) )
if 36 - 36: OoooooooOO - i1IIi . O0 / II111iiii + o0oOOo0O0Ooo
if 33 - 33: II111iiii / ooOoO0o * O0 % Ii1I * I1Ii111
O0o = O0oOOoooOO0O if ( O0oOOoooOO0O . find ( ":" ) != - 1 ) else "::ffff:" + O0oOOoooOO0O
OO0O0OoOO0 = time . time ( )
if 72 - 72: OOooOOo % I1ii11iIi11i + OoO0O00 / oO0o + IiII
if 10 - 10: I1Ii111 / ooOoO0o + i11iIiiIii / Ii1I
if 74 - 74: OOooOOo + O0 + i1IIi - i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
try :
iiII1i11i . sendto ( iiIIiIiIi , ( O0o , OOo000 ) )
except socket . error as O0ooO0Oo00o :
print ( "sock.sendto() failed: {}" . format ( O0ooO0Oo00o ) )
exit ( 1 )
if 5 - 5: Ii1I
if 46 - 46: IiII
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
try :
iiIIiIiIi , ii1 = iiII1i11i . recvfrom ( 9000 )
ii1 = ii1 [ 0 ] . replace ( "::ffff:" , "" )
except socket . timeout :
exit ( 1 )
except socket . error as O0ooO0Oo00o :
print ( "sock.recvfrom() failed, error: {}" . format ( O0ooO0Oo00o ) )
exit ( 1 )
if 1 - 1: ooOoO0o % iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % I1IiiI
if 89 - 89: Ii1I
ooOoOO0OoO00o = round ( time . time ( ) - OO0O0OoOO0 , 3 )
if 11 - 11: Oo0Ooo - I1IiiI * II111iiii . I1ii11iIi11i . oO0o
print ( "Received reply from {}, rtt {} secs" . format ( ii1 , ooOoOO0OoO00o ) )
print ( "" )
Ii11iII1 = i1iiI11I ( O0o0Oo , iiIIiIiIi )
if ( Ii11iII1 == { } ) : exit ( 1 )
if 61 - 61: iII111i % I1IiiI - o0oOOo0O0Ooo - II111iiii % O0
if 90 - 90: iIii1I11I1II1 + I1ii11iIi11i + ooOoO0o - I1Ii111 * IiII . I1ii11iIi11i
if 37 - 37: ooOoO0o % i11iIiiIii % II111iiii . O0 . Ii1I
if 51 - 51: OoO0O00 - O0 % oO0o - II111iiii
i1I11i1iI ( Ii11iII1 )
if 31 - 31: iII111i / Oo0Ooo - iII111i - OOooOOo
iiII1i11i . close ( )
exit ( 0 )
if 7 - 7: iII111i % O0 . OoOoOO00 + I1IiiI - I11i
if 75 - 75: I11i
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
| 44.035533 | 152 | 0.621134 | 3,415 | 26,025 | 4.720937 | 0.157833 | 0.003163 | 0.004776 | 0.006947 | 0.092979 | 0.073998 | 0.053964 | 0.035852 | 0.035852 | 0.025183 | 0 | 0.171525 | 0.238117 | 26,025 | 590 | 153 | 44.110169 | 0.641567 | 0.181979 | 0 | 0.106776 | 0 | 0.002053 | 0.062872 | 0.003351 | 0 | 0 | 0.001699 | 0 | 0 | 1 | 0.022587 | false | 0.002053 | 0.024641 | 0 | 0.080082 | 0.051335 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a52dc51601a35116aee6d656ad33ca1c1841e16 | 7,660 | py | Python | tests/integration/test_autocommit.py | michaelcraige/neo4j-python-driver | 27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a | [
"Apache-2.0"
] | 1 | 2021-05-18T14:11:39.000Z | 2021-05-18T14:11:39.000Z | tests/integration/test_autocommit.py | michaelcraige/neo4j-python-driver | 27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a | [
"Apache-2.0"
] | null | null | null | tests/integration/test_autocommit.py | michaelcraige/neo4j-python-driver | 27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import SkipTest
from pytest import raises
from neo4j.work.simple import Statement, SessionError
from neo4j.exceptions import CypherError, ClientError, TransientError
from neo4j.graph import Node, Relationship
def test_can_run_simple_statement(session):
result = session.run("RETURN 1 AS n")
for record in result:
assert record[0] == 1
assert record["n"] == 1
with raises(KeyError):
_ = record["x"]
assert record["n"] == 1
with raises(KeyError):
_ = record["x"]
with raises(TypeError):
_ = record[object()]
assert repr(record)
assert len(record) == 1
def test_can_run_simple_statement_with_params(session):
count = 0
for record in session.run("RETURN $x AS n",
{"x": {"abc": ["d", "e", "f"]}}):
assert record[0] == {"abc": ["d", "e", "f"]}
assert record["n"] == {"abc": ["d", "e", "f"]}
assert repr(record)
assert len(record) == 1
count += 1
assert count == 1
def test_autocommit_transactions_use_bookmarks(neo4j_driver):
bookmarks = []
# Generate an initial bookmark
with neo4j_driver.session() as session:
session.run("CREATE ()").consume()
bookmark = session.last_bookmark()
assert bookmark is not None
bookmarks.append(bookmark)
# Propagate into another session
with neo4j_driver.session(bookmarks=bookmarks) as session:
assert list(session.next_bookmarks()) == bookmarks
session.run("CREATE ()").consume()
bookmark = session.last_bookmark()
assert bookmark is not None
assert bookmark not in bookmarks
def test_fails_on_bad_syntax(session):
with raises(CypherError):
session.run("X").consume()
def test_fails_on_missing_parameter(session):
with raises(CypherError):
session.run("RETURN {x}").consume()
def test_can_run_statement_that_returns_multiple_records(session):
count = 0
for record in session.run("unwind(range(1, 10)) AS z RETURN z"):
assert 1 <= record[0] <= 10
count += 1
assert count == 10
def test_can_use_with_to_auto_close_session(session):
record_list = list(session.run("RETURN 1"))
assert len(record_list) == 1
for record in record_list:
assert record[0] == 1
def test_can_return_node(neo4j_driver):
with neo4j_driver.session() as session:
record_list = list(session.run("CREATE (a:Person {name:'Alice'}) "
"RETURN a"))
assert len(record_list) == 1
for record in record_list:
alice = record[0]
assert isinstance(alice, Node)
assert alice.labels == {"Person"}
assert dict(alice) == {"name": "Alice"}
def test_can_return_relationship(neo4j_driver):
with neo4j_driver.session() as session:
record_list = list(session.run("CREATE ()-[r:KNOWS {since:1999}]->() "
"RETURN r"))
assert len(record_list) == 1
for record in record_list:
rel = record[0]
assert isinstance(rel, Relationship)
assert rel.type == "KNOWS"
assert dict(rel) == {"since": 1999}
# TODO: re-enable after server bug is fixed
# def test_can_return_path(session):
# with self.driver.session() as session:
# record_list = list(session.run("MERGE p=({name:'Alice'})-[:KNOWS]->"
# "({name:'Bob'}) RETURN p"))
# assert len(record_list) == 1
# for record in record_list:
# path = record[0]
# assert isinstance(path, Path)
# assert path.start_node["name"] == "Alice"
# assert path.end_node["name"] == "Bob"
# assert path.relationships[0].type == "KNOWS"
# assert len(path.nodes) == 2
# assert len(path.relationships) == 1
def test_can_handle_cypher_error(session):
with raises(CypherError):
session.run("X").consume()
def test_keys_are_available_before_and_after_stream(session):
result = session.run("UNWIND range(1, 10) AS n RETURN n")
assert list(result.keys()) == ["n"]
list(result)
assert list(result.keys()) == ["n"]
def test_keys_with_an_error(session):
with raises(CypherError):
result = session.run("X")
list(result.keys())
def test_should_not_allow_empty_statements(session):
with raises(ValueError):
_ = session.run("")
def test_statement_object(session):
value = session.run(Statement("RETURN $x"), x=1).single().value()
assert value == 1
def test_autocommit_transactions_should_support_metadata(session):
metadata_in = {"foo": "bar"}
try:
statement = Statement("CALL dbms.getTXMetaData", metadata=metadata_in)
metadata_out = session.run(statement).single().value()
except ClientError as e:
if e.code == "Neo.ClientError.Procedure.ProcedureNotFound":
raise SkipTest("Cannot assert correct metadata as Neo4j edition "
"does not support procedure dbms.getTXMetaData")
else:
raise
else:
assert metadata_in == metadata_out
def test_autocommit_transactions_should_support_timeout(neo4j_driver):
with neo4j_driver.session() as s1:
s1.run("CREATE (a:Node)").consume()
with neo4j_driver.session() as s2:
tx1 = s1.begin_transaction()
tx1.run("MATCH (a:Node) SET a.property = 1").consume()
with raises(TransientError):
s2.run(Statement("MATCH (a:Node) SET a.property = 2",
timeout=0.25)).consume()
def test_regex_in_parameter(session):
matches = session.run("UNWIND ['A', 'B', 'C', 'A B', 'B C', 'A B C', "
"'A BC', 'AB C'] AS t WITH t "
"WHERE t =~ $re RETURN t", re=r'.*\bB\b.*').value()
assert matches == ["B", "A B", "B C", "A B C"]
def test_regex_inline(session):
matches = session.run(r"UNWIND ['A', 'B', 'C', 'A B', 'B C', 'A B C', "
r"'A BC', 'AB C'] AS t WITH t "
r"WHERE t =~ '.*\\bB\\b.*' RETURN t").value()
assert matches == ["B", "A B", "B C", "A B C"]
def test_automatic_reset_after_failure(session):
try:
session.run("X").consume()
except CypherError:
result = session.run("RETURN 1")
record = next(iter(result))
assert record[0] == 1
else:
assert False, "A Cypher error should have occurred"
def test_session_error(bolt_driver):
session = bolt_driver.session()
session.close()
with raises(SessionError):
session.run("RETURN 1")
def test_large_values(bolt_driver):
for i in range(1, 7):
with bolt_driver.session() as session:
session.run("RETURN '{}'".format("A" * 2 ** 20))
| 33.160173 | 78 | 0.607441 | 978 | 7,660 | 4.623722 | 0.247444 | 0.050862 | 0.017691 | 0.029191 | 0.353383 | 0.303184 | 0.225343 | 0.194162 | 0.172932 | 0.145511 | 0 | 0.017749 | 0.264491 | 7,660 | 230 | 79 | 33.304348 | 0.784878 | 0.179896 | 0 | 0.326531 | 0 | 0.013605 | 0.132202 | 0.006882 | 0 | 0 | 0 | 0.004348 | 0.238095 | 1 | 0.142857 | false | 0 | 0.034014 | 0 | 0.176871 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a5e520e42f56529133381c93cca81b7e91fe302 | 4,060 | py | Python | tests/db/test_client.py | wxnacy/lfsdb | ff200e682ebafb9c806b8d5935c535d77b439981 | [
"MIT"
] | null | null | null | tests/db/test_client.py | wxnacy/lfsdb | ff200e682ebafb9c806b8d5935c535d77b439981 | [
"MIT"
] | null | null | null | tests/db/test_client.py | wxnacy/lfsdb | ff200e682ebafb9c806b8d5935c535d77b439981 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy@gmail.com
"""
随机方法
"""
import pytest
import os
from wpy.path import read_dict
from wpy.randoms import (
random_str
)
from lfsdb import FileStorage
from lfsdb.db import FileStorageError
from lfsdb.db.errors import FSQueryError
from lfsdb.db.cache import CacheTable
from lfsdb.db.client import FileTable
from lfsdb.sockets.db import SocketTable
root = '/tmp'
root = None
db_name = 'wpy_db'
table = 'wpy_table'
fs = FileStorage(root)
file_table = fs.get_db(db_name).get_table(table)
socket_table = SocketTable(db_name, table)
cache_table = CacheTable(db_name, table)
tables = [file_table, socket_table, cache_table]
table_root = os.path.join(fs.root, db_name, table)
def _origin_data(data):
for k in ('_id', '_update_time', "_create_time"):
data.pop(k, None)
return data
def _handle_table_test(func):
for table in tables:
table.drop()
func(table)
table.drop()
def test_insert():
_handle_table_test(_test_insert)
def _test_insert(db):
name = random_str(6)
doc = {
"name": name
}
# 查看插入的数据是否存入到文件中
_id = db.insert(doc)
if isinstance(db, FileTable):
path = os.path.join(table_root, _id)
data = read_dict(path)
data = _origin_data(data)
assert doc == data
data = db.find_by_id(_id)
data = _origin_data(data)
assert doc == data
doc['_id'] = _id
with pytest.raises(FileStorageError) as excinfo:
db.insert(doc)
assert str(excinfo) == '{}._id {} is exists'.format(table, _id)
db.drop()
assert not os.path.exists(table_root)
def test_find():
_handle_table_test(_test_find)
def _test_find(db):
name = random_str(6)
doc = { "name": name}
db.drop()
db.insert(doc)
db.insert(doc)
doc['age'] = 12
db.insert(doc)
# 条件为空
docs = db.find()
assert len(docs) == 3
docs = db.find({ "name": name })
assert len(docs) == 3
docs = db.find({ "name": name, "age": 12 })
assert len(docs) == 1
doc = db.find_one({"age": 12}, {})
assert len(doc.keys()) == 5
doc = db.find_one({"age": 12}, {"name": 1})
assert len(doc.keys()) == 2
with pytest.raises(FSQueryError) as exe_info:
doc = db.find_one({"age": 12}, {"name": 1, "age": 0})
assert str(exe_info) == ('Projection cannot have a mix of inclusion'
' and exclusion.')
doc = db.find_one({"age": 12}, {"name": 1, "_id": 0})
assert len(doc.keys()) == 2
db.drop()
def test_update():
_handle_table_test(_test_update)
def _test_update(db):
# TODO 缓存
name = random_str(6)
doc = { "name": name}
db.insert(doc)
_id = db.insert(doc)
insert_utime = db.find_by_id(_id).get("_update_time")
db.insert(doc)
count = db.update(doc, {"name": "wxnacy"})
assert count == 3
db.update({"_id": _id}, {"name": "wxn"})
data = db.find_by_id(_id)
update_utime = data.get("_update_time")
# 检查修改时间是否改变
assert insert_utime < update_utime
data = db.find_by_id(_id)
data = _origin_data(data)
assert { "name": "wxn" } == data
db.drop()
def test_delete():
_handle_table_test(_test_delete)
def _test_delete(db):
db.drop()
name = random_str(6)
doc = { "name": name}
db.insert(doc)
_id = db.insert(doc)
db.insert(doc)
assert db.delete({ "_id": _id }) == 1
docs = db.find()
assert len(docs) == 2
count = db.delete(doc)
assert count == 2
db.drop()
def test_sort():
_handle_table_test(_test_sort)
def _test_sort(db):
db.drop()
arr = [{"age": 5, "id": 2}, {"age": 5, "id": 5}, {"age": 3, "id": 4}]
for a in arr:
db.insert(a)
items = db.find(sorter = [('age', 1), ('id', -1)])
for item in items:
item.pop('_id', None)
item.pop('_create_time', None)
item.pop('_update_time', None)
assert items == [{"age": 3, "id": 4},{"age": 5, "id": 5}, {"age": 5, "id": 2}]
db.drop()
socket_table.close()
| 22.065217 | 82 | 0.599754 | 590 | 4,060 | 3.913559 | 0.19661 | 0.033781 | 0.052404 | 0.041143 | 0.257254 | 0.216977 | 0.174101 | 0.157644 | 0.104807 | 0.07709 | 0 | 0.014921 | 0.24064 | 4,060 | 183 | 83 | 22.185792 | 0.734025 | 0.02734 | 0 | 0.325581 | 0 | 0 | 0.076239 | 0 | 0 | 0 | 0 | 0.005464 | 0.139535 | 1 | 0.093023 | false | 0 | 0.077519 | 0 | 0.178295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a5fe65ea13d858d4fe548293ae1aefcbe8c803b | 865 | py | Python | days/dec6/dec6.py | denysvitali/aoc-2020 | b0b3460c5043e4ce78a6ef00a40cc817953f6c43 | [
"MIT"
] | null | null | null | days/dec6/dec6.py | denysvitali/aoc-2020 | b0b3460c5043e4ce78a6ef00a40cc817953f6c43 | [
"MIT"
] | null | null | null | days/dec6/dec6.py | denysvitali/aoc-2020 | b0b3460c5043e4ce78a6ef00a40cc817953f6c43 | [
"MIT"
] | null | null | null | import re
example = "".join(open("example.txt").readlines())
puzzle = "".join(open("puzzle.txt").readlines())
problem_input = puzzle
def parse_group(input: str):
votes = input.split("\n")
votes_map = dict()
for vote in votes:
for el in vote:
if el not in votes_map:
votes_map[el] = 0
votes_map[el] += 1
return votes_map, len(votes)
def part_a():
groups = problem_input.split("\n\n")
sum = 0
for g in groups:
parsed, votes = parse_group(g)
sum += len(parsed)
return sum
def part_b():
groups = problem_input.split("\n\n")
sum = 0
for g in groups:
parsed, votes = parse_group(g)
for i in parsed:
if parsed[i] == votes:
sum += 1
return sum
print("part_a: %d" % part_a())
print("part_b: %d" % part_b())
| 20.116279 | 50 | 0.552601 | 125 | 865 | 3.688 | 0.312 | 0.086768 | 0.071584 | 0.099783 | 0.273319 | 0.273319 | 0.273319 | 0.273319 | 0.273319 | 0.273319 | 0 | 0.008361 | 0.308671 | 865 | 42 | 51 | 20.595238 | 0.762542 | 0 | 0 | 0.322581 | 0 | 0 | 0.05896 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.032258 | 0 | 0.225806 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a60a5f260cfd69458005fdf8f18f4a31de9980c | 1,154 | py | Python | integration_tests/web/test_issue_378.py | KharchenkoDmitriy/python-slack-sdk | 5340ee337a2364e84c38d696c107f19c341dd6eb | [
"MIT"
] | null | null | null | integration_tests/web/test_issue_378.py | KharchenkoDmitriy/python-slack-sdk | 5340ee337a2364e84c38d696c107f19c341dd6eb | [
"MIT"
] | null | null | null | integration_tests/web/test_issue_378.py | KharchenkoDmitriy/python-slack-sdk | 5340ee337a2364e84c38d696c107f19c341dd6eb | [
"MIT"
] | null | null | null | import asyncio
import logging
import os
import unittest
from integration_tests.env_variable_names import SLACK_SDK_TEST_USER_TOKEN
from integration_tests.helpers import async_test
from slack import WebClient
class TestWebClient(unittest.TestCase):
"""Runs integration tests with real Slack API
https://github.com/slackapi/python-slack-sdk/issues/378
"""
def setUp(self):
self.logger = logging.getLogger(__name__)
self.user_token = os.environ[SLACK_SDK_TEST_USER_TOKEN]
self.sync_client: WebClient = WebClient(token=self.user_token, run_async=False, loop=asyncio.new_event_loop())
self.async_client: WebClient = WebClient(token=self.user_token, run_async=True)
def tearDown(self):
pass
def test_issue_378(self):
client = self.sync_client
response = client.users_setPhoto(image="tests/data/slack_logo_new.png")
self.assertIsNotNone(response)
@async_test
async def test_issue_378_async(self):
client = self.async_client
response = await client.users_setPhoto(image="tests/data/slack_logo_new.png")
self.assertIsNotNone(response)
| 32.055556 | 118 | 0.740035 | 153 | 1,154 | 5.320261 | 0.392157 | 0.055283 | 0.047912 | 0.039312 | 0.358722 | 0.307125 | 0.307125 | 0.307125 | 0.307125 | 0.184275 | 0 | 0.009454 | 0.175043 | 1,154 | 35 | 119 | 32.971429 | 0.845588 | 0.085789 | 0 | 0.083333 | 0 | 0 | 0.055769 | 0.055769 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.125 | false | 0.041667 | 0.291667 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a6106088407de4e7e1d70c09e88f77cac779b4b | 7,199 | py | Python | tune/iterative/asha.py | fugue-project/tune | bf2288ddcb29c8345d996a9b22c0910da9002da1 | [
"Apache-2.0"
] | 14 | 2021-03-03T20:02:09.000Z | 2021-11-10T20:32:22.000Z | tune/iterative/asha.py | fugue-project/tune | bf2288ddcb29c8345d996a9b22c0910da9002da1 | [
"Apache-2.0"
] | 26 | 2021-04-30T19:56:06.000Z | 2022-01-18T04:40:00.000Z | tune/iterative/asha.py | fugue-project/tune | bf2288ddcb29c8345d996a9b22c0910da9002da1 | [
"Apache-2.0"
] | 2 | 2021-04-30T03:12:21.000Z | 2022-02-05T12:13:37.000Z | from threading import RLock
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple
from triad import to_uuid
from tune.concepts.flow import (
Monitor,
Trial,
TrialDecision,
TrialJudge,
TrialReport,
TrialReportHeap,
)
class RungHeap:
def __init__(self, n: int):
self._lock = RLock()
self._n = n
self._heap = TrialReportHeap(min_heap=False)
self._bests: List[float] = []
def __len__(self) -> int:
with self._lock:
return len(self._heap)
@property
def capacity(self) -> int:
return self._n
@property
def best(self) -> float:
with self._lock:
return self._bests[-1] if len(self._bests) > 0 else float("nan")
@property
def bests(self) -> List[float]:
with self._lock:
return self._bests
@property
def full(self) -> bool:
with self._lock:
return self.capacity <= len(self)
def __contains__(self, tid: str) -> bool:
with self._lock:
return tid in self._heap
def values(self) -> Iterable[TrialReport]:
return self._heap.values()
def push(self, report: TrialReport) -> bool:
with self._lock:
if len(self) == 0:
best = report.sort_metric
else:
best = min(self.best, report.sort_metric)
self._heap.push(report)
self._bests.append(best)
return (
len(self._heap) <= self._n
or self._heap.pop().trial_id != report.trial_id
)
class _PerTrial:
def __init__(self, parent: "_PerPartition") -> None:
self._history: List[TrialReport] = []
self._parent = parent
self._active = True
def can_promote(self, report: TrialReport) -> Tuple[bool, str]:
reasons: List[str] = []
if self._active:
can_accept = self._parent.can_accept(report.trial)
early_stop = self._parent._parent._trial_early_stop(
report, self._history, self._parent._rungs
)
self._active = can_accept and not early_stop
if not can_accept:
reasons.append("can't accept new")
if early_stop:
reasons.append("trial early stop")
if self._active:
self._history.append(report)
can_push = self._parent._rungs[report.rung].push(report)
if not can_push:
# data = sorted(
# (x for x in self._parent._rungs[report.rung].values()),
# key=lambda x: x["sort_metric"],
# )
# reasons.append("not best: " + json.dumps(data))
reasons.append("not best")
return can_push, ", ".join(reasons)
return False, ", ".join(reasons)
def judge(self, report: TrialReport) -> TrialDecision:
if report.rung >= len(self._parent._parent.schedule) - 1:
self._history.append(report)
self._parent._rungs[report.rung].push(report)
return TrialDecision(
report, budget=0, should_checkpoint=True, reason="last"
)
promote, reason = self.can_promote(report)
if not promote:
return TrialDecision(
report, budget=0, should_checkpoint=True, reason=reason
)
next_budget = self._parent.get_budget(report.trial, report.rung + 1)
return TrialDecision(
report,
budget=next_budget,
should_checkpoint=next_budget <= 0
or self._parent._parent.always_checkpoint,
reason="" if next_budget > 0 else "budget==0",
)
class _PerPartition:
def __init__(self, parent: "ASHAJudge", keys: List[Any]):
self._keys = keys
self._data: Dict[str, _PerTrial] = {}
self._lock = RLock()
self._parent = parent
self._rungs: List[RungHeap] = [RungHeap(x[1]) for x in self._parent.schedule]
self._active = True
self._accepted_ids: Set[str] = set()
def can_accept(self, trial: Trial) -> bool:
with self._lock:
if self._active:
self._active = not self._parent._study_early_stop(
self._keys, self._rungs
)
if self._active:
self._accepted_ids.add(trial.trial_id)
return True
# if not active, can only accept existing trials
return trial.trial_id in self._accepted_ids
def get_budget(self, trial: Trial, rung: int) -> float:
if rung >= len(self._parent.schedule) or not self.can_accept(trial):
return 0.0 # pragma: no cover
return self._parent.schedule[rung][0]
def judge(self, report: TrialReport) -> TrialDecision:
return self._get_judge(report.trial).judge(report)
def _get_judge(self, trial: Trial) -> _PerTrial:
key = trial.trial_id
with self._lock:
if key not in self._data:
self._data[key] = _PerTrial(self)
return self._data[key]
class ASHAJudge(TrialJudge):
def __init__(
self,
schedule: List[Tuple[float, int]],
always_checkpoint: bool = False,
study_early_stop: Optional[Callable[[List[Any], List[RungHeap]], bool]] = None,
trial_early_stop: Optional[
Callable[[TrialReport, List[TrialReport], List[RungHeap]], bool]
] = None,
monitor: Optional[Monitor] = None,
):
super().__init__(monitor=monitor)
self._lock = RLock()
self._data: Dict[str, _PerPartition] = {}
self._schedule = schedule
self._always_checkpoint = always_checkpoint
self._study_early_stop = study_early_stop or _default_study_early_stop
self._trial_early_stop = trial_early_stop or _default_trial_early_stop
@property
def schedule(self) -> List[Tuple[float, int]]:
return self._schedule
@property
def always_checkpoint(self) -> bool:
return self._always_checkpoint
def can_accept(self, trial: Trial) -> bool:
return self._get_judge(trial).can_accept(trial)
def get_budget(self, trial: Trial, rung: int) -> float:
budget = self._get_judge(trial).get_budget(trial, rung)
self.monitor.on_get_budget(trial, rung, budget)
return budget
def judge(self, report: TrialReport) -> TrialDecision:
self.monitor.on_report(report)
decision = self._get_judge(report.trial).judge(report)
self.monitor.on_judge(decision)
return decision
def _get_judge(self, trial: Trial) -> _PerPartition:
key = to_uuid(trial.keys)
with self._lock:
if key not in self._data:
self._data[key] = _PerPartition(self, trial.keys)
return self._data[key]
def _default_study_early_stop(keys: List[Any], rungs: List["RungHeap"]) -> bool:
return all(r.full for r in rungs)
def _default_trial_early_stop(
report: TrialReport, reports: List[TrialReport], rungs: List["RungHeap"]
) -> bool:
return False
| 33.640187 | 87 | 0.592999 | 838 | 7,199 | 4.836516 | 0.144391 | 0.041944 | 0.026647 | 0.022206 | 0.220331 | 0.175672 | 0.132248 | 0.067604 | 0.067604 | 0.020232 | 0 | 0.002786 | 0.301986 | 7,199 | 213 | 88 | 33.798122 | 0.803781 | 0.031115 | 0 | 0.241379 | 0 | 0 | 0.014066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.149425 | false | 0 | 0.022989 | 0.045977 | 0.350575 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
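A minimal standalone sketch of the successive-halving bookkeeping that `schedule: List[Tuple[float, int]]` drives in the file above: each (budget, capacity) pair is one rung, and a report is promoted only while its metric stays among the best `capacity` values pushed to that rung. The heap below is an illustrative stand-in, not the repo's RungHeap, and the schedule values are invented.

import heapq

schedule = [(1.0, 8), (2.0, 4), (4.0, 2)]  # assumed (budget, capacity) pairs
rungs = [[] for _ in schedule]             # one max-heap of negated metrics per rung

def push(rung: int, metric: float) -> bool:
    """Return True if `metric` (lower is better) stays in the rung's top-k."""
    heap = rungs[rung]
    heapq.heappush(heap, -metric)          # negate so heap[0] holds the worst kept value
    if len(heap) > schedule[rung][1]:
        dropped = heapq.heappop(heap)      # evict the current worst entry
        return dropped != -metric          # promoted unless we were the one evicted
    return True

print(push(0, 0.30))  # True: rung 0 still has room
print(push(0, 0.90))  # True until the rung holds more than 8 entries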
1a68ae4c6daaddebc3deeeeee768f2c7fe6d1bdb | 1,936 | py | Python | cuppa/methods/markdown_to_html.py | pwj58/cuppa | 6c598a124c5aa52b459637ca1865cda2e2d300bd | [
"BSL-1.0"
] | 25 | 2015-09-24T07:04:45.000Z | 2022-02-19T03:31:03.000Z | cuppa/methods/markdown_to_html.py | pwj58/cuppa | 6c598a124c5aa52b459637ca1865cda2e2d300bd | [
"BSL-1.0"
] | 46 | 2015-05-20T12:48:12.000Z | 2022-01-10T10:38:55.000Z | cuppa/methods/markdown_to_html.py | pwj58/cuppa | 6c598a124c5aa52b459637ca1865cda2e2d300bd | [
"BSL-1.0"
] | 13 | 2015-07-12T09:55:03.000Z | 2021-07-02T15:32:12.000Z |
# Copyright Jamie Allsop 2015-2015
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#-------------------------------------------------------------------------------
# MarkdownToHtmlMethod
#-------------------------------------------------------------------------------
import os.path
import itertools
import grip
import cuppa.progress
class GripRunner(object):
def __call__( self, target, source, env ):
        for s, t in zip( source, target ):
in_file = str(s)
out_file = str(t)
try:
grip.export( path=in_file, render_wide=True, out_filename=out_file )
except Exception as error:
print( "cuppa: error: grip.export( path={}, render_wide=True, out_filename={}) failed with error [{}]".format( in_file, out_file, error ))
return None
class GripEmitter(object):
def __init__( self, output_dir ):
self._output_dir = output_dir
def __call__( self, target, source, env ):
target = []
for s in source:
path = os.path.join( self._output_dir, os.path.split( str(s) )[1] )
t = os.path.splitext(path)[0] + ".html"
target.append(t)
return target, source
class MarkdownToHtmlMethod(object):
def __call__( self, env, source, final_dir=None ):
        if final_dir is None:
final_dir = env['abs_final_dir']
env.AppendUnique( BUILDERS = {
'Grip' : env.Builder(
action = GripRunner(),
emitter = GripEmitter(final_dir) )
} )
html = env.Grip( [], source )
cuppa.progress.NotifyProgress.add( env, html )
return html
@classmethod
def add_to_env( cls, cuppa_env ):
cuppa_env.add_method( "MarkdownToHtml", cls() )
| 29.333333 | 154 | 0.547521 | 216 | 1,936 | 4.689815 | 0.402778 | 0.039487 | 0.032577 | 0.023692 | 0.100691 | 0.051333 | 0 | 0 | 0 | 0 | 0 | 0.011339 | 0.271178 | 1,936 | 65 | 155 | 29.784615 | 0.706591 | 0.192149 | 0 | 0.051282 | 0 | 0 | 0.083495 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128205 | false | 0 | 0.102564 | 0 | 0.384615 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
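The emitter above does pure path arithmetic; a framework-free sketch of the same mapping (directory swapped for final_dir, extension swapped for .html) with invented file names:

import os.path

def html_targets(sources, final_dir):
    # mirrors GripEmitter.__call__: keep the basename, retarget directory and extension
    return [os.path.join(final_dir, os.path.splitext(os.path.split(s)[1])[0] + ".html")
            for s in sources]

print(html_targets(["docs/readme.md", "notes/todo.md"], "_build"))
# on POSIX: ['_build/readme.html', '_build/todo.html']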
1a68c8384b519235047d4e9148d15734f7c8bc80 | 3,718 | py | Python | WIN_EventLog/OS_gathering/files/extracting.py | exastro-playbook-collection/OS-Windows2019 | 79c4f13e75ae1d4380d30f503f04ffb4dcf52ecf | [
"Apache-2.0"
] | null | null | null | WIN_EventLog/OS_gathering/files/extracting.py | exastro-playbook-collection/OS-Windows2019 | 79c4f13e75ae1d4380d30f503f04ffb4dcf52ecf | [
"Apache-2.0"
] | null | null | null | WIN_EventLog/OS_gathering/files/extracting.py | exastro-playbook-collection/OS-Windows2019 | 79c4f13e75ae1d4380d30f503f04ffb4dcf52ecf | [
"Apache-2.0"
] | 1 | 2021-09-29T05:39:41.000Z | 2021-09-29T05:39:41.000Z | import re
import json
import sys
import os
args = sys.argv
if (len(args) < 2):
sys.exit(1)
path = args[1]
if(path[-1:] == "/"):
path = path[:-1]
result_filedata_list = []
registry_info = {}
target_filepath_list = []
target_filepath_list.append('/1/stdout.txt')
target_filepath_list.append('/3/stdout.txt')
for target_filepath in target_filepath_list:
filepath = path + '/command' + target_filepath
if os.path.isfile(filepath) and os.path.getsize(filepath) > 0:
with open(filepath) as file_object:
reader = json.load(file_object)
if isinstance(reader, list):
rows = reader
else:
rows = []
rows.append(reader)
for row in rows:
child_name = ''
for path_key, path_value in row.items():
filedata_table = {}
for param_key, param_value in path_value.items():
if param_key == 'logFileName':
filedata_table['LogPath'] = param_value
elif param_key == 'retention':
if param_value.lower() == 'true':
filedata_table['Retention'] = True
else:
filedata_table['Retention'] = False
elif param_key == 'autoBackup':
if param_value.lower() == 'true':
filedata_table['AutoBackup'] = True
else:
filedata_table['AutoBackup'] = False
else:
filedata_table[param_key] = param_value
if len(filedata_table) > 0:
registry_info[path_key] = filedata_table
target_filepath_list = []
target_filepath_list.append('/0/stdout.txt')
target_filepath_list.append('/2/stdout.txt')
for target_filepath in target_filepath_list:
filepath = path + '/command' + target_filepath
if os.path.isfile(filepath) and os.path.getsize(filepath) > 0:
with open(filepath) as file_object:
reader = json.load(file_object)
if isinstance(reader, list):
rows = reader
else:
rows = []
rows.append(reader)
for row in rows:
filedata_table = {}
for param_key, param_value in row.items():
if param_key == 'OverflowAction':
if param_value == -1:
filedata_table[param_key] = 'DoNotOverwrite'
elif param_value == 0:
filedata_table[param_key] = 'OverwriteAsNeeded'
elif param_value == 1:
filedata_table[param_key] = 'OverwriteOlder'
elif param_key == 'LogName':
filedata_table['Log'] = param_value
elif param_key == 'MaximumSizeInBytes':
filedata_table['MaximumKilobytes'] = int(param_value / 1024)
else:
filedata_table[param_key] = param_value
if param_key == 'Log' or param_key == 'LogName':
if param_value in registry_info:
filedata_table.update(registry_info[param_value])
if len(filedata_table) > 0:
result_filedata_list.append(filedata_table)
result = {}
target_parameter_root_key = 'VAR_WIN_EventLog'
result[target_parameter_root_key] = result_filedata_list
print(json.dumps(result))
| 39.553191 | 84 | 0.516945 | 368 | 3,718 | 4.964674 | 0.206522 | 0.135194 | 0.078818 | 0.057471 | 0.58347 | 0.528736 | 0.492611 | 0.349206 | 0.269294 | 0.269294 | 0 | 0.008861 | 0.392953 | 3,718 | 93 | 85 | 39.978495 | 0.80062 | 0 | 0 | 0.428571 | 0 | 0 | 0.075599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
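A self-contained check of the OverflowAction and size conversions above, fed a hypothetical one-row stdout payload (the field values are made up):

import json

row = json.loads('{"LogName": "Application", "OverflowAction": 0, '
                 '"MaximumSizeInBytes": 20971520}')
mapping = {-1: "DoNotOverwrite", 0: "OverwriteAsNeeded", 1: "OverwriteOlder"}
table = {
    "Log": row["LogName"],
    "OverflowAction": mapping[row["OverflowAction"]],
    "MaximumKilobytes": row["MaximumSizeInBytes"] // 1024,
}
print(table)  # {'Log': 'Application', 'OverflowAction': 'OverwriteAsNeeded', 'MaximumKilobytes': 20480}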
1a6a4b81633f72d324dbf4acc99d2ef2ff1a4ab9 | 5,603 | py | Python | src/datamanager/dataset.py | iN1k1/deep-pyramidal-representations-peron-re-identification | 18eacd3b7bde2c4767ba290b655cb0f5c72ed8fe | [
"MIT"
] | 13 | 2019-08-09T08:33:27.000Z | 2020-12-21T08:51:33.000Z | src/datamanager/dataset.py | iN1k1/deep-pyramidal-representations-peron-re-identification | 18eacd3b7bde2c4767ba290b655cb0f5c72ed8fe | [
"MIT"
] | 5 | 2021-03-19T02:17:23.000Z | 2022-03-11T23:53:44.000Z | src/datamanager/dataset.py | iN1k1/deep-pyramidal-representations-peron-re-identification | 18eacd3b7bde2c4767ba290b655cb0f5c72ed8fe | [
"MIT"
] | 4 | 2019-11-06T08:02:21.000Z | 2021-01-13T20:34:23.000Z | import pickle
import os
import numpy as np
from .utils import make_dataset_images, find_classes
from operator import itemgetter
import copy
class Dataset(object):
def __init__(self, name, root_folder, im_size=None, in_memory=False):
super(Dataset, self).__init__()
self.name = name
self.images = []
self.targets = []
self.root = root_folder
self.length = 0
self.im_size = im_size
self.classes = []
self.class_to_idx = []
self.idx_to_class = []
self.in_memory = in_memory
self.data_path = os.path.join(root_folder, self.name + '.dat')
self._compute_data_path()
def _compute_data_path(self):
if self.im_size is not None:
self.data_path = os.path.join(self.root,'{}_sz{}_mem{}.dat'.format(self.name, "%s_%s" % self.im_size, self.in_memory))
else:
self.data_path = os.path.join(self.root,'{}_mem{}.dat'.format(self.name, self.in_memory))
def load(self, path=None):
if path is None:
path = self.data_path
if os.path.exists(path):
data = []
with open(path, 'rb') as f:
for _ in range(pickle.load(f)):
data.append(pickle.load(f))
self.images, self.targets, self.classes, self.class_to_idx = data
else:
self.classes, self.class_to_idx = find_classes(self.root)
dset = make_dataset_images(self.root, self.class_to_idx)
self.images = [dset[ii][0] for ii in range(0, len(dset))]
self.targets = [dset[ii][1] for ii in range(0, len(dset))]
self.compute_idx_to_class()
self.length = len(self.targets)
def save(self, path=None):
if path is None:
path = self.data_path
data = [self.images, self.targets, self.classes, self.class_to_idx]
with open(path, 'wb') as fp:
pickle.dump(len(data), fp)
for value in data:
pickle.dump(value, fp)
def clone(self, clear_data=False):
clone = copy.deepcopy(self)
if clear_data:
clone.images = []
            clone.targets = []
clone.length = 0
return clone
def compute_idx_to_class(self):
self.idx_to_class = {v: k for v, k in zip(list(self.class_to_idx.values()), list(self.class_to_idx.keys()))}
return self.idx_to_class
def extract_subset(self, idx, dset=None):
if dset is None:
dset = Dataset(self.name, self.root, self.im_size)
dset.classes = copy.copy(self.classes)
dset.class_to_idx = copy.copy(self.class_to_idx)
dset.idx_to_class = copy.copy(self.idx_to_class)
if len(idx) > 0:
dset.images = itemgetter(*idx)(self.images)
dset.targets = itemgetter(*idx)(self.targets)
if isinstance(dset.targets, int):
dset.targets = [dset.targets]
dset.images = [dset.images]
dset.length = len(dset.targets)
return dset
def append_subset(self, dset, indexes=None, create_new_dset=False):
if indexes is None:
indexes = range(len(dset))
        # Get the new dataset that needs to be added
dset_to_add = dset.extract_subset(indexes)
# Orig dset
dset_orig = self
if create_new_dset:
dset_orig = self.clone(clear_data=True)
# Extend data containers
dset_orig.images.extend(dset_to_add.images)
dset_orig.targets.extend(dset_to_add.targets)
dset_orig.length = len(self.targets)
return dset_orig
def diff_subset(self, dset, indexes=None, create_new_dset=False):
# TODO
pass
# if indexes is None:
# indexes = range(len(dset))
#
# # Get new dataset with data that need to be removed
# dset_to_removed = dset.extract_subset(indexes)
#
# # Orig dset
# dset_orig = self
# if create_new_dset:
# dset_orig = self.clone(clear_data=True)
#
# # Extend data containers
# dset_orig.images.extend(dset_to_add.images)
# dset_orig.targets.extend(dset_to_add.targets)
# dset_orig.length = len(self.targets)
#
# return dset_orig
def get_max_N_per_class(self, N, indexes=None, targets=None, seed=17):
# Get targets from dset and indexes from its length
if targets is None:
targets = self.targets
if indexes is None:
indexes = range(0, self.length)
# Constrain random generation
np.random.seed(seed)
# Extract indexes and corresponding classes
np_targets = np.array(targets)
unique_targets = np.unique(np_targets)
valid_idx = []
for t in unique_targets:
pos = np.where(np_targets==t)[0]
if len(pos) > N:
pos = np.random.choice(pos, N, replace=False)
valid_idx.extend(pos.tolist())
return itemgetter(*valid_idx)(indexes), itemgetter(*valid_idx)(targets)
def get_item_from_index(self, index):
return self.images[index], self.targets[index]
def split(self, ratios, save_load=True, **kwargs):
pass
def __getitem__(self, index):
return self.get_item_from_index(index)
def __len__(self):
return self.length
def __add__(self, dset):
return self.append_subset(dset, create_new_dset=True)
def __sub__(self, other):
return self.diff_subset(other, create_new_dset=True)
| 33.35119 | 130 | 0.598786 | 752 | 5,603 | 4.24734 | 0.168883 | 0.030056 | 0.028178 | 0.035066 | 0.36819 | 0.316531 | 0.285535 | 0.285535 | 0.251722 | 0.224796 | 0 | 0.002786 | 0.295377 | 5,603 | 167 | 131 | 33.550898 | 0.806231 | 0.107799 | 0 | 0.087719 | 0 | 0 | 0.008454 | 0 | 0 | 0 | 0 | 0.005988 | 0 | 1 | 0.140351 | false | 0.017544 | 0.052632 | 0.04386 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
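A standalone sketch of the per-class capping in get_max_N_per_class, on invented toy labels; it mirrors the np.where / np.random.choice steps used above:

import numpy as np

targets = np.array([0, 0, 0, 1, 1, 2])
N = 2
np.random.seed(17)  # same style of seeding as the method
keep = []
for t in np.unique(targets):
    pos = np.where(targets == t)[0]
    if len(pos) > N:
        pos = np.random.choice(pos, N, replace=False)  # cap this class at N samples
    keep.extend(pos.tolist())
print(sorted(keep))  # at most two indices survive per class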
1a6b3b67a196647e34aaaebda16a46305f5a9bec | 1,565 | py | Python | ada_loss/chainer_impl/ada_loss_transforms.py | kumasento/gradient-scaling | 0ca435433b9953e33656173c4d60ebd61c5c5e87 | [
"MIT"
] | 7 | 2020-08-12T12:04:28.000Z | 2021-11-22T15:56:08.000Z | ada_loss/chainer_impl/ada_loss_transforms.py | kumasento/gradient-scaling | 0ca435433b9953e33656173c4d60ebd61c5c5e87 | [
"MIT"
] | 1 | 2021-10-07T08:37:39.000Z | 2021-10-08T02:41:39.000Z | ada_loss/chainer_impl/ada_loss_transforms.py | kumasento/gradient-scaling | 0ca435433b9953e33656173c4d60ebd61c5c5e87 | [
"MIT"
] | null | null | null | """ Implement the transformations we need to use
to convert a link to an adaptive loss scaled link. """
# NOTE: this file is deprecated
import chainer
import chainer.links as L
import chainer.initializers as I
# pylint: disable=unused-wildcard-import
from ada_loss.chainer_impl.links import *
__all__ = [
"AdaLossTransformLinear",
"AdaLossTransformConvolution2D",
]
class AdaLossTransform(object):
""" The base class """
def __call__(self, link, cfg):
""" Entry """
raise NotImplementedError("This call function should be implemented properly")
class AdaLossTransformLinear(AdaLossTransform):
""" """
cls = L.Linear
def __call__(self, link, cfg, initialW=I.HeNormal()):
assert isinstance(link, self.cls)
link_ = AdaLossLinear(
link.in_size,
out_size=link.out_size,
nobias=link.b is None,
ada_loss_cfg=cfg,
)
link_.copyparams(link)
return link_
class AdaLossTransformConvolution2D(AdaLossTransform):
""" """
cls = L.Convolution2D
def __call__(self, link, cfg, initialW=I.HeNormal()):
assert isinstance(link, self.cls)
link_ = AdaLossConvolution2D(
link.in_channels,
link.out_channels,
ksize=link.ksize,
stride=link.stride,
pad=link.pad,
dilate=link.dilate,
groups=link.groups,
nobias=link.b is None,
ada_loss_cfg=cfg,
)
link_.copyparams(link)
return link_
| 24.84127 | 86 | 0.623642 | 169 | 1,565 | 5.579882 | 0.443787 | 0.041357 | 0.034995 | 0.04772 | 0.282078 | 0.26299 | 0.26299 | 0.26299 | 0.26299 | 0.26299 | 0 | 0.003559 | 0.281789 | 1,565 | 62 | 87 | 25.241935 | 0.835409 | 0.120767 | 0 | 0.3 | 0 | 0 | 0.074738 | 0.038117 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.075 | false | 0 | 0.1 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
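A framework-free sketch of the dispatch pattern these transforms use: each transform names the link class it handles via `cls`, and a driver applies the first match. The class names below are illustrative stand-ins, not Chainer types.

class Linear: ...
class Convolution2D: ...

class SwapLinear:
    cls = Linear
    def __call__(self, link, cfg):
        return ("AdaLossLinear", cfg)   # placeholder for building the wrapped link

class SwapConvolution2D:
    cls = Convolution2D
    def __call__(self, link, cfg):
        return ("AdaLossConvolution2D", cfg)

def transform(link, cfg, transforms=(SwapLinear(), SwapConvolution2D())):
    for t in transforms:
        if isinstance(link, t.cls):
            return t(link, cfg)
    return link  # leave unsupported link types untouched

print(transform(Convolution2D(), {"loss_scale": 16}))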
1a6d9e1b1f6a95c204998e268f6dedde3030bfcb | 4,451 | py | Python | atlas/foundations_rest_api/src/acceptance/v2beta/test_artifact_loading.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 296 | 2020-03-16T19:55:00.000Z | 2022-01-10T19:46:05.000Z | atlas/foundations_rest_api/src/acceptance/v2beta/test_artifact_loading.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 57 | 2020-03-17T11:15:57.000Z | 2021-07-10T14:42:27.000Z | atlas/foundations_rest_api/src/acceptance/v2beta/test_artifact_loading.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 38 | 2020-03-17T21:06:05.000Z | 2022-02-08T03:19:34.000Z |
from foundations_spec import *
from acceptance.api_acceptance_test_case_base import APIAcceptanceTestCaseBase
from acceptance.v2beta.jobs_tests_helper_mixin_v2 import JobsTestsHelperMixinV2
class TestArtifactLoading(JobsTestsHelperMixinV2, APIAcceptanceTestCaseBase):
url = '/api/v2beta/projects/{_project_name}/job_listing'
sorting_columns = []
filtering_columns = []
@classmethod
def setUpClass(klass):
from copy import deepcopy
import shutil
import foundations_contrib.global_state as global_state
from foundations_internal.foundations_job import FoundationsJob
shutil.rmtree('/tmp/foundations_acceptance', ignore_errors=True)
JobsTestsHelperMixinV2.setUpClass()
klass._set_project_name(JobsTestsHelperMixinV2._str_random_uuid())
klass._some_artifacts = JobsTestsHelperMixinV2._str_random_uuid()
klass._no_artifacts = JobsTestsHelperMixinV2._str_random_uuid()
klass._one_artifact = JobsTestsHelperMixinV2._str_random_uuid()
random_uuid = JobsTestsHelperMixinV2._str_random_uuid()
klass._make_running_job(klass._one_artifact, JobsTestsHelperMixinV2._str_random_uuid(), start_timestamp=99999999)
klass._make_completed_job(klass._no_artifacts, random_uuid, start_timestamp=100000000, end_timestamp=100086400)
klass._make_completed_job(klass._some_artifacts, random_uuid, start_timestamp=100000001, end_timestamp=100086400)
klass._old_config = deepcopy(global_state.config_manager.config())
klass._old_context = global_state.foundations_job
global_state.config_manager.reset()
global_state.foundations_job = FoundationsJob()
klass._save_artifacts()
@classmethod
def tearDownClass(klass):
import foundations_contrib.global_state as global_state
global_state.config_manager.reset()
global_state.config_manager.config().update(klass._old_config)
global_state.foundations_job = klass._old_context
@classmethod
def _set_job_id(klass, job_id):
import foundations_contrib.global_state as global_state
job = global_state.foundations_job
job.job_id = job_id
@classmethod
def _artifact_fixture_path(klass, artifact_name):
import os.path as path
return path.join('acceptance/v2beta/fixtures', artifact_name)
@classmethod
def _save_artifacts(klass):
import foundations
klass._set_job_id(klass._one_artifact)
foundations.save_artifact(filepath=klass._artifact_fixture_path('image_file.png'))
klass._set_job_id(klass._some_artifacts)
foundations.save_artifact(filepath=klass._artifact_fixture_path('no_extension'))
foundations.save_artifact(filepath=klass._artifact_fixture_path('other_file.other'))
foundations.save_artifact(filepath=klass._artifact_fixture_path('audio_file.mp3'), key='audio_artifact')
def test_get_route(self):
data = super().test_get_route()
jobs = data['jobs']
some_artifacts_payload = [
{
'filename': 'audio_file.mp3',
'uri': f'https://archive.dessa.com/archive/{self._some_artifacts}/user_artifacts/audio_file.mp3',
'artifact_type': 'audio',
'archive_key': 'audio_artifact'
},
{
'filename': 'no_extension',
'uri': f'https://archive.dessa.com/archive/{self._some_artifacts}/user_artifacts/no_extension',
'artifact_type': 'unknown',
'archive_key': 'no_extension'
},
{
'filename': 'other_file.other',
'uri': f'https://archive.dessa.com/archive/{self._some_artifacts}/user_artifacts/other_file.other',
'artifact_type': 'unknown',
'archive_key': 'other_file.other'
}
]
self.assertEqual(some_artifacts_payload, jobs[0]['artifacts'])
self.assertEqual([], jobs[1]['artifacts'])
one_artifact_payload = [
{
'filename': 'image_file.png',
'uri': f'https://archive.dessa.com/archive/{self._one_artifact}/user_artifacts/image_file.png',
'artifact_type': 'image',
'archive_key': 'image_file.png'
}
]
self.assertEqual(one_artifact_payload, jobs[2]['artifacts']) | 38.704348 | 121 | 0.682768 | 473 | 4,451 | 6.021142 | 0.230444 | 0.054073 | 0.065309 | 0.073736 | 0.420646 | 0.300913 | 0.268258 | 0.204354 | 0.064256 | 0.064256 | 0 | 0.018182 | 0.221523 | 4,451 | 115 | 122 | 38.704348 | 0.803752 | 0 | 0 | 0.139535 | 0 | 0 | 0.184228 | 0.022692 | 0 | 0 | 0 | 0 | 0.034884 | 1 | 0.069767 | false | 0 | 0.127907 | 0 | 0.255814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a6deb9d77ae7f9dd3a6a89e5e68c8dd4e802884 | 16,234 | py | Python | model/state_encoder_model.py | lil-lab/cerealbar_generation | 41153537c0bd8aed97f2ea841165477a8c480d58 | [
"MIT"
] | null | null | null | model/state_encoder_model.py | lil-lab/cerealbar_generation | 41153537c0bd8aed97f2ea841165477a8c480d58 | [
"MIT"
] | null | null | null | model/state_encoder_model.py | lil-lab/cerealbar_generation | 41153537c0bd8aed97f2ea841165477a8c480d58 | [
"MIT"
] | null | null | null | import os, sys, copy
import pickle
import math
import time
import numpy as np
from typing import Dict, Any, List, Set, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pad_sequence
import torch.nn.utils.rnn as rnn_utils
from agent.environment.position import Position
from agent.environment import card as agent_cards
from . import util
from .map_transformations import pose as pose_lib
from .modules import state_embedder as embedder_lib
from .utilities import initialization
from .helpers import state_representation
from .utilities import hex_util
from .utilities.hex_conv_util import HexConv
def getPositionalEncoding(d_model=768, max_len=1024):
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
return pe
def generate_attention_mask_from_mask_indicies_and_instruction_tensors(feature_attention_mask, instruction_tensors) -> torch.tensor:
attention_mask = torch.cat([feature_attention_mask, torch.ones(instruction_tensors.shape).to(util.DEVICE).bool()], 1)
return attention_mask
class CNNLSTMStateEncodingModel(nn.Module):
def __init__(self, config):
super(CNNLSTMStateEncodingModel, self).__init__()
self._d_input = 19
self._d_embed = config["d_embed"]
self._d_model = config["d_model"]
self._embeddings_type = config["embeddings_type"]
self._breakpoint_type = config["breakpoint_type"]
if self._breakpoint_type == "":
pass
elif self._breakpoint_type == "onehot":
self._d_input += 1
else:
raise ValueError("not supported breakpoint type")
self._conv = []
# embedding layer
self._n_depth = config["n_depth"]
if self._embeddings_type == "learned":
if "state_embedder_pretrained_model" in config:
pretrained_model = config["state_embedder_pretrained_model"]
else:
pretrained_model = ""
self._embedder = embedder_lib.StateEmbedder(
self._d_embed, pretrained_model)
self._d_input = self._embedder.embedding_size()
else:
if self._embeddings_type == "onehot":
self._embedder = embedder_lib.StateOnehotEmbedder()
self._d_input = self._embedder.embedding_size()
elif self._embeddings_type == "none":
self._embedder = None
if self._n_depth != 0:
conv_module = nn.ModuleList([])
conv_layer = nn.Conv2d(self._d_input, self._d_model, (1, 1))
conv_module.append(conv_layer)
conv_module.append(nn.LeakyReLU())
if torch.cuda.is_available():
conv_module = conv_module.to(util.DEVICE)
self._conv.append(conv_module)
# convolutional Layer
self._rcpf_size = config["rcpf_size"]
self._cnn_use_norm = config["cnn_use_norm"]
self._cnn_hex = config["cnn_hex"]
self._cnn_actv_func = config["cnn_actv_func"]
padding_size = int((self._rcpf_size-1)/2)
for d in range(self._n_depth-1):
conv_module = nn.ModuleList([])
if d == 0 and self._embeddings_type == "learned":
conv_in_channels = self._d_input
else:
conv_in_channels = self._d_model
if self._cnn_use_norm:
norm = nn.InstanceNorm2d(conv_in_channels)
conv_module.append(norm)
conv_out_channels: int = self._d_model
if self._cnn_hex:
conv_layer = HexConv(conv_in_channels, conv_out_channels,
self._rcpf_size, stride=1, padding=padding_size)
else:
conv_layer = nn.Conv2d(conv_in_channels, conv_out_channels,
(self._rcpf_size, self._rcpf_size), padding=(padding_size, padding_size))
conv_module.append(conv_layer)
if self._cnn_actv_func == "leaky_relu":
conv_module.append(nn.LeakyReLU())
elif self._cnn_actv_func == "tanh":
conv_module.append(nn.Tanh())
if torch.cuda.is_available():
conv_module = conv_module.to(util.DEVICE)
self._conv.append(conv_module)
if len(self._conv) == 0:
self._d_model = self._d_input
self._conv = nn.ModuleList(self._conv)
self._conv_output_channel = conv_out_channels
# feature translation and rotation layers
self._feature_map_size = config["feature_map_size"] if "feature_map_size" in config else 3
self._feature_filter_size = config["feature_filter_size"] if "feature_filter_size" in config else self._feature_map_size
self._rotate_feature_map = config["rotate_feature_map"] if "rotate_feature_map" in config else True
self._feature_cnn_n_depth = config["feature_cnn_n_depth"] if "feature_cnn_n_depth" in config else 0
self._feature_merge_type = config["feature_merge_type"] if "feature_merge_type" in config else "sum"
self._feature_output_dimension = config["feature_output_dimension"] if "feature_output_dimension" in config else 512
self._feature_cnn_actv_func = config["feature_cnn_actv_func"] if "feature_cnn_actv_func" in config else 0
self._feature_cnn_use_norm = config["feature_cnn_use_norm"] if "feature_cnn_use_norm" in config else True
self._feature_conv = []
try:
assert(self._feature_output_dimension * (self._feature_map_size)**2 //
(self._feature_map_size)**2 == self._feature_output_dimension)
except:
raise ValueError(
"Feature output dimension is not divisible by the nubmer of hexes to be clopped.")
for d in range(self._feature_cnn_n_depth):
conv_module = nn.ModuleList([])
if self._feature_cnn_use_norm:
                norm = nn.InstanceNorm2d(512)  # NOTE: channel count fixed at 512, not adaptive to the input
conv_module.append(norm)
if self._feature_merge_type == "cat":
traj_output_channel = self._feature_output_dimension // (self._feature_map_size)**2
padding = (self._feature_filter_size-1)//2
if self._cnn_hex:
conv_layer = HexConv(self._conv_output_channel, traj_output_channel,
self._feature_filter_size, stride=1, padding=padding)
else:
conv_layer = nn.Conv2d(self._conv_output_channel, traj_output_channel, (
self._feature_filter_size, self._feature_filter_size), padding=(padding, padding))
self._conv_output_channel = traj_output_channel
elif self._feature_merge_type == "sum":
traj_output_channel = self._conv_output_channel
if self._cnn_hex:
conv_layer = HexConv(self._conv_output_channel, traj_output_channel,
self._feature_map_size, stride=1, padding=0)
else:
conv_layer = nn.Conv2d(self._conv_output_channel, traj_output_channel,
(self._feature_map_size, self._feature_map_size), padding=(0, 0))
conv_module.append(conv_layer)
if self._cnn_actv_func == "tanh":
conv_module.append(nn.Tanh())
self._feature_conv.append(conv_module)
self._feature_conv = nn.ModuleList(self._feature_conv)
if self._feature_merge_type == "cat":
self._conv_output_channel = self._feature_output_dimension
self._d_model = self._feature_output_dimension
elif self._feature_merge_type == "sum":
self._d_model = traj_output_channel
self._rotator = hex_util.Hex_Rotator()
# LSTM Layer
# 0. Pose + breakpoint embedder
# 1. Preprocessing linear layer (optional)
# 2. LSTM layer
# 2.1 Optional skip connection
self._lstm_input_merge_type = config["lstm_input_merge_type"]
self._lstm_output_merge_type = config["lstm_output_merge_type"]
self._lstm_skip = config["lstm_skip"]
if self._lstm_input_merge_type == "cat":
self._traj_break_embedder = embedder_lib.TrajBreakEmbedder(config["lstm_pb_dim"])
lstm_input_dim = self._d_model + config["lstm_pb_dim"]
lstm_output_dim = config["lstm_d_model"]
elif self._lstm_input_merge_type == "add":
self._traj_break_embedder = embedder_lib.TrajBreakEmbedder(self._d_model)
lstm_input_dim = self._d_model
lstm_output_dim = config["lstm_d_model"]
self._lstm = nn.LSTM(
input_size=lstm_input_dim,
hidden_size=lstm_output_dim,
num_layers=config["lstm_num_layers"],
bidirectional=config["lstm_bidirectional"],
dropout=config["lstm_dropout"],
batch_first=True,
)
if config["lstm_bidirectional"]:
lstm_output_dim = lstm_output_dim * 2
else:
lstm_output_dim = config["lstm_d_model"]
if self._lstm_skip:
if self._lstm_output_merge_type == "spatial-cat":
self._d_model = lstm_output_dim + self._d_model // (self._feature_map_size)**2
else:
try:
assert(self._lstm_output_merge_type != "spatial-cat")
except:
raise ValueError(
"Spaitial conceteneation option is only supported for LSTM with a skip coonection.")
self._d_model = lstm_output_dim
if torch.cuda.is_available():
self._lstm.to(util.DEVICE)
def forward(self, x, traj=None, bkpoint=None):
        input = x.transpose(1, 3)  # [B, W, H, C] ==> [B, C, H, W]
        input = input.transpose(2, 3)  # [B, C, H, W] ==> [B, C, W, H]
# input processing
input[:, 15, :, :] = torch.clamp(input[:, 15, :, :], 0, 1)
input = input.detach()
input = input.contiguous()
# embeddings layer
if self._embedder is not None:
input = self._embedder(input)
# hex CNN 1
conv_outputs: List[torch.Tensor] = list()
for i, layer in enumerate(self._conv):
conv_in = input if i == 0 else conv_outputs[-1]
x = conv_in
for l in layer:
x = l(x)
            # residual connection (if k != 1)
if (i != 0 and i != self._n_depth):
x = x + conv_outputs[-1]
conv_outputs.append(x)
if len(self._conv) == 0:
final_feature = input
else:
final_feature = conv_outputs[-1]
# cropping features
if self._feature_map_size != 1:
center = (self._feature_map_size-1) // 2
# Syntax: https://discuss.pytorch.org/t/is-there-a-way-to-pad-a-tensor-instead-of-variable/10448/2
final_feature = F.pad(final_feature, (center, center, center, center))
features = []
spatial_features = []
pb_features = []
batch_idx_list = [[i for _ in range(len(t))] for i, t in enumerate(traj)]
final_feature_mask_indicies = [len(t) for t in traj]
batch_idx = []
for l in batch_idx_list:
batch_idx += l
batch_idx = torch.tensor(batch_idx).to(util.DEVICE)
coords = torch.cat(traj,0)
h_mask = coords[:, 0]
w_mask = coords[:, 1]
pose = coords[:, 2]
h_mask = h_mask.detach()
w_mask = w_mask.detach()
if self._feature_map_size == 1:
            feature = final_feature[batch_idx, :, h_mask, w_mask]  # advanced indexing gathers per-step features as [T, C]
else:
rows = [h_mask + (slack-center) for slack in range(self._feature_map_size)]
rows = torch.stack(rows, 0).unsqueeze(1)
rows = rows.repeat(1, self._feature_map_size, 1)
rows = rows + center # need to add center bc of padding
rows = rows.detach()
cols = [w_mask + (slack-center) for slack in range(self._feature_map_size)]
cols = torch.stack(cols, 0).unsqueeze(0)
cols = cols.repeat(self._feature_map_size, 1, 1)
cols = cols + center # need to add center bc of padding
cols = cols.detach()
batch_idx = batch_idx.unsqueeze(0).unsqueeze(0)
batch_idx = batch_idx.repeat(self._feature_map_size, self._feature_map_size, 1)
feature = final_feature[batch_idx, :, rows, cols]
feature = feature.permute(2, 3, 0, 1) # TxDxHxW
# rotate features
if self._rotate_feature_map:
mask_l = len(h_mask)
# converting to offset coordinates
pose_position = torch.tensor([[center+center//2, center]
for _ in range(mask_l)]).to(util.DEVICE)
pose_rot = (pose-1) * math.radians(60)
pose_obj = pose_lib.Pose(pose_position, pose_rot)
new_feature = self._rotator.translate_and_rotate(feature, pose_obj)
feature = new_feature
# hex CNN 2
feature = feature.contiguous()
x = feature
for i, layer in enumerate(self._feature_conv):
for l in layer:
x = l(x)
        spatial_feature = x.view(x.shape[0], x.shape[1], x.shape[2]*x.shape[3])  # [L, D, H*W]
feature = torch.cat([spatial_feature[:, :, i]
                             for i in range(spatial_feature.shape[2])], 1)  # [L, D*(H*W)]
# attach pose features
bk_onehot = torch.zeros(pose.shape).long().to(util.DEVICE)
pose_bk_raw_features = torch.stack([pose, bk_onehot], 0)
pb_feature = self._traj_break_embedder(pose_bk_raw_features)
if self._lstm_input_merge_type == "cat":
feature = torch.cat([feature, pb_feature], 1)
elif self._lstm_input_merge_type == "add":
feature += pb_feature
spatial_features = torch.split(spatial_feature, final_feature_mask_indicies)
features = torch.split(feature, final_feature_mask_indicies)
# LSTM layer
# reference: https://discuss.pytorch.org/t/how-can-i-compute-seq2seq-loss-using-mask/861
lstm_input = pad_sequence(features, 1, padding_value=0)
unpacked = lstm_input.permute(1, 0, 2)
packed = rnn_utils.pack_padded_sequence(unpacked, final_feature_mask_indicies, enforce_sorted=False)
outputs, _ = self._lstm(packed, None)
unpacked, unpacked_len = rnn_utils.pad_packed_sequence(outputs)
final_feature = unpacked.permute(1, 0, 2)
final_feature = final_feature.contiguous()
if self._lstm_skip:
spatial_features = pad_sequence(spatial_features, 1, padding_value=0)
final_feature = final_feature.unsqueeze(-1)
final_feature = final_feature.repeat(1, 1, 1, spatial_features.shape[-1])
final_feature = torch.cat([final_feature, spatial_features], 2)
final_feature = final_feature.permute(0, 1, 3, 2)
final_feature = final_feature.contiguous().view(
(final_feature.shape[0], final_feature.shape[1]*final_feature.shape[2], final_feature.shape[3]))
final_feature = final_feature.contiguous()
# generate attention mask for feature
feature_attention_mask = torch.ones(final_feature.shape[:2]).to(util.DEVICE)
batch_size = final_feature.shape[0]
neighbor_size = spatial_features.shape[-1]
for i in range(batch_size):
feature_attention_mask[i, neighbor_size*final_feature_mask_indicies[i]:] = 0
feature_attention_mask = feature_attention_mask.bool()
return final_feature, feature_attention_mask
def get_dimension(self):
return self._d_model
| 44.721763 | 132 | 0.620549 | 2,037 | 16,234 | 4.598429 | 0.136475 | 0.051671 | 0.029892 | 0.03459 | 0.33223 | 0.236468 | 0.180634 | 0.115192 | 0.096402 | 0.07804 | 0 | 0.013766 | 0.284034 | 16,234 | 362 | 133 | 44.845304 | 0.792136 | 0.049526 | 0 | 0.21843 | 0 | 0 | 0.059947 | 0.012665 | 0 | 0 | 0 | 0 | 0.006826 | 1 | 0.017065 | false | 0.003413 | 0.068259 | 0.003413 | 0.102389 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
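getPositionalEncoding above is self-contained; a quick standalone check of its output shape and first row, restating the body so the snippet runs on its own (the d_model / max_len values are arbitrary):

import math
import torch

d_model, max_len = 8, 4
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)  # even columns: sine terms
pe[:, 1::2] = torch.cos(position * div_term)  # odd columns: cosine terms
print(pe.shape)  # torch.Size([4, 8])
print(pe[0])     # position 0: alternating 0 (sin) and 1 (cos)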
1a6e8546353af8e6a5051dfc50d1bb801c40b2af | 2,247 | py | Python | tensorlayer/layers/noise.py | Howdy-Personally/tensorlayer-master | bb92e4e187419d5e7ded8331d5c7cbf5615ee744 | [
"Apache-2.0"
] | 4,484 | 2017-12-27T03:28:35.000Z | 2021-12-02T14:42:58.000Z | tensorlayer/layers/noise.py | Howdy-Personally/tensorlayer-master | bb92e4e187419d5e7ded8331d5c7cbf5615ee744 | [
"Apache-2.0"
] | 549 | 2017-12-28T07:19:52.000Z | 2021-11-05T02:34:20.000Z | tensorlayer/layers/noise.py | Howdy-Personally/tensorlayer-master | bb92e4e187419d5e7ded8331d5c7cbf5615ee744 | [
"Apache-2.0"
] | 1,076 | 2017-12-27T12:25:46.000Z | 2021-11-24T09:12:36.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorlayer as tl
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
from tensorlayer.layers.core import Layer
__all__ = [
'GaussianNoise',
]
class GaussianNoise(Layer):
"""
    The :class:`GaussianNoise` class is a noise layer that adds Gaussian-distributed
    noise to the input activation.
Parameters
------------
mean : float
The mean. Default is 0.0.
stddev : float
The standard deviation. Default is 1.0.
is_always : boolean
        If True, add noise in both train and eval modes. If False, skip this layer in eval mode.
seed : int or None
The seed for random noise.
name : str
A unique layer name.
Examples
--------
With TensorLayer
>>> net = tl.layers.Input([64, 200], name='input')
>>> net = tl.layers.Dense(n_units=100, act=tf.nn.relu, name='dense')(net)
>>> gaussianlayer = tl.layers.GaussianNoise(name='gaussian')(net)
>>> print(gaussianlayer)
>>> output shape : (64, 100)
"""
def __init__(
self,
mean=0.0,
stddev=1.0,
is_always=True,
seed=None,
name=None, # 'gaussian_noise',
):
super().__init__(name)
self.mean = mean
self.stddev = stddev
self.seed = seed
self.is_always = is_always
self.build()
self._built = True
logging.info("GaussianNoise %s: mean: %f stddev: %f" % (self.name, self.mean, self.stddev))
def __repr__(self):
s = '{classname}(mean={mean}, stddev={stddev}'
if self.name is not None:
s += ', name=\'{name}\''
s += ')'
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs=None):
pass
def forward(self, inputs):
if (self.is_train or self.is_always) is False:
return inputs
else:
# noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape())
noise = tf.random.normal(shape=inputs.get_shape(), mean=self.mean, stddev=self.stddev, seed=self.seed)
outputs = inputs + noise
return outputs
| 27.072289 | 114 | 0.595461 | 283 | 2,247 | 4.583039 | 0.374558 | 0.03084 | 0.012336 | 0.01542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01601 | 0.277259 | 2,247 | 82 | 115 | 27.402439 | 0.782635 | 0.377837 | 0 | 0 | 0 | 0 | 0.076507 | 0.018547 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.025 | 0.125 | 0 | 0.325 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
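Stripped of the layer plumbing, the forward pass above reduces to one line of TensorFlow; a standalone sketch with arbitrary shape and stddev:

import tensorflow as tf

x = tf.ones([2, 3])
noisy = x + tf.random.normal(shape=x.shape, mean=0.0, stddev=1.0, seed=42)
print(noisy.shape)  # (2, 3)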
1a700cc75c7db30432b3bee2d9bb6579a2ada503 | 2,195 | py | Python | examples/example_plots.py | lupify/pynverse | b943b49a0a398a00d4e712f492721f3dfa77ed52 | [
"MIT"
] | 46 | 2016-10-30T19:59:36.000Z | 2022-03-01T10:59:11.000Z | examples/example_plots.py | Phibedy/pynverse | afa62d1f8f59110cced17471e57d7a1b6ab4f1df | [
"MIT"
] | 3 | 2018-04-04T10:50:57.000Z | 2021-12-03T16:55:57.000Z | examples/example_plots.py | Phibedy/pynverse | afa62d1f8f59110cced17471e57d7a1b6ab4f1df | [
"MIT"
] | 9 | 2016-12-09T00:32:26.000Z | 2022-01-17T12:24:29.000Z | from pynverse import inversefunc, piecewise
import numpy as np
import matplotlib.pyplot as plt
import scipy
cube = lambda x: x**3
invcube = inversefunc(cube)
invcube_a = lambda x: scipy.special.cbrt(x)
square = lambda x: x**2
invsquare = inversefunc(square, domain=0)
invsquare_a = lambda x: x**(1/2.)
log = lambda x: np.log10(x)
invlog = inversefunc(log, domain=0, open_domain=True)
invlog_a = lambda x: 10**x
cos = lambda x: np.cos(x)
invcos = inversefunc(cos, domain=[0, np.pi])
invcos_a = lambda x: np.arccos(x)
tan = lambda x: np.tan(x)
invtan = inversefunc(tan,
domain=[-np.pi/2,np.pi/2],
open_domain=True)
invtan_a = lambda x: np.arctan2(x, 1)
pw = lambda x: piecewise(x, [x<1, (x>=1)*(x<3), x>=3], [lambda x: x, lambda x: x**2, lambda x: x+6])
invpw = inversefunc(pw)
invpw_a = lambda x: piecewise(x, [x<1, (x>=1)*(x<9), x>=9], [lambda x: x, lambda x: x**0.5, lambda x: x-6])
N=50
def plot(title,ax1,x1,y1,ax2,x2,y21,y22):
ax1.plot(x1,y1,'-')
ax2.plot(x2,y22,'-',color='b')
ax2.plot(x2,y21,'--',color='r')
ax1.set_ylabel(title)
fig,axes=plt.subplots(6,2,figsize=(5,15))
x1=np.linspace(0,4,100)
x2=np.linspace(0,16,100)
plot('square',axes[0][0],x1,square(x1) ,axes[0][1],x2,invsquare_a(x2),invsquare(x2))
axes[0][1].legend(['Numerically','Analytical\nsolution'],fontsize=10,loc=4)
axes[0][1].set_title('Inverse functions')
axes[0][0].set_title('Direct functions')
x1=np.linspace(-2,2,100)
x2=np.linspace(-8,8,100)
plot('cube',axes[1][0],x1,cube(x1) ,axes[1][1],x2,invcube_a(x2),invcube(x2))
x1=np.linspace(0.00001,10,100)
x2=np.linspace(-5,1,100)
plot('log10',axes[2][0],x1,log(x1) ,axes[2][1],x2,invlog_a(x2),invlog(x2))
x1=np.linspace(0, np.pi,100)
x2=np.linspace(-1,1,100)
plot('cos',axes[3][0],x1,cos(x1) ,axes[3][1],x2,invcos_a(x2),invcos(x2))
x1=np.linspace(-np.pi/2+0.1, np.pi/2-0.1,100)
x2=np.linspace(-10,10,100)
plot('tan',axes[4][0],x1,tan(x1) ,axes[4][1],x2,invtan_a(x2),invtan(x2))
x1=np.linspace(0,4,100)
x2=np.linspace(0,10,100)
plot('piecewise',axes[5][0],x1,pw(x1) ,axes[5][1],x2,invpw_a(x2),invpw(x2))
plt.show() | 30.486111 | 102 | 0.62779 | 421 | 2,195 | 3.232779 | 0.194774 | 0.092579 | 0.052902 | 0.066128 | 0.133725 | 0.099927 | 0.076414 | 0.076414 | 0.076414 | 0.044085 | 0 | 0.109984 | 0.146697 | 2,195 | 72 | 103 | 30.486111 | 0.616658 | 0 | 0 | 0.037736 | 0 | 0 | 0.047059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.075472 | 0 | 0.09434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a70a409960397bfd1f99c4022d1a0d3e8addf9d | 174 | py | Python | modulo 3/aulas/5.5 - Funcao fatorial.py | GabrielBrotas/Python | 9441b6b86ff3cb7fa5921b508c484075adac08b3 | [
"MIT"
] | null | null | null | modulo 3/aulas/5.5 - Funcao fatorial.py | GabrielBrotas/Python | 9441b6b86ff3cb7fa5921b508c484075adac08b3 | [
"MIT"
] | null | null | null | modulo 3/aulas/5.5 - Funcao fatorial.py | GabrielBrotas/Python | 9441b6b86ff3cb7fa5921b508c484075adac08b3 | [
"MIT"
] | null | null | null | def fatorial(num=1):
f = 1
for c in range(num, 1, -1):
f *= c
return f
n = int(input('Enter a number: '))
print(f'The factorial of {n}: {fatorial(n)}')
| 15.818182 | 42 | 0.528736 | 31 | 174 | 2.967742 | 0.612903 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 0.287356 | 174 | 10 | 43 | 17.4 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0.289017 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
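The loop computes the same values as the standard library's factorial; a quick cross-check, restating the function so the snippet runs on its own:

import math

def fatorial(num=1):
    f = 1
    for c in range(num, 1, -1):
        f *= c
    return f

print(all(fatorial(n) == math.factorial(n) for n in range(10)))  # True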
1a714779778dd4a1ce1758b9319d7fbb68580ce1 | 6,017 | py | Python | Kijiji-Scraper.py | YounesZ/Kijiji-Scraper | 23d19a8551e18d7478e814d0e71669ae7087f248 | [
"MIT"
] | null | null | null | Kijiji-Scraper.py | YounesZ/Kijiji-Scraper | 23d19a8551e18d7478e814d0e71669ae7087f248 | [
"MIT"
] | null | null | null | Kijiji-Scraper.py | YounesZ/Kijiji-Scraper | 23d19a8551e18d7478e814d0e71669ae7087f248 | [
"MIT"
] | null | null | null | #!C:\Python34\scrapper\Scripts
# Place url, linking to ad list, with desired search filters here.
url_to_scrape = "http://www.kijiji.ca/b-canot-kayak-paddle-board/quebec/kayak/k0c329l9001"
# Set the delay in (s) that the programs waits before scraping again.
scrape_delay = 600 # 600 = 10 mins
# Set filename to store ads in.
filename = 'ads.txt'
import requests
from bs4 import BeautifulSoup
import datetime
import time
def ParseAd(html): # Parses ad html trees and sorts relevant data into a dictionary
ad_info = {}
try:
ad_info["Title"] = html.find_all('a', {"class": "title"})[0].text.strip()
except:
log('[Error] Unable to parse Title data.')
try:
ad_info["Url"] = 'http://www.kijiji.ca' + html.get("data-vip-url")
except:
log('[Error] Unable to parse URL data.')
try:
ad_info["Description"] = html.find_all('div', {"class": "description"})[0].text.strip()
except:
log('[Error] Unable to parse Description data.')
try:
tempsoup = html.find_all('div', {"class": "location"})[0].text.strip()
if tempsoup.find('-') > 0:
tempsoup = tempsoup[:tempsoup.find('-') - 2]
ad_info["Location"] = tempsoup
except:
log('[Error] Unable to parse Location data.')
try:
ad_info["Date"] = html.find_all('span', {"class": "date-posted"})[0].text.strip()
except:
log('[Error] Unable to parse Date data.')
try:
ad_info["Price"] = html.find_all('div', {"class": "price"})[0].text.strip()
except:
log('[Error] Unable to parse Price data.')
return ad_info
def WriteAds(ad_dict, filename): # Writes ads to given file
try:
file = open(filename, 'a')
for ad_id in ad_dict:
file.write(ad_id)
file.write(str(ad_dict[ad_id]) + "\n")
log('[Okay] Ad ' + ad_id + ' written to database.')
file.close()
except:
log('[Error] Unable to write ad(s) to database.')
def ReadAds(filename): # Reads given file and creates a dict of ads in file
import ast
import os.path
if not os.path.isfile(filename): # If the file doesn't exist, it makes it.
file = open(filename, 'w')
file.close()
ad_dict = {}
file = open(filename, 'r')
for line in file:
if line.strip() != '':
index = line.find('{')
ad_id = line[:index]
dictionary = line[index:]
dictionary = ast.literal_eval(dictionary)
ad_dict[ad_id] = dictionary
file.close()
return ad_dict
def log(text): # writes log data to log.txt with datetime.
date_time = datetime.datetime.now()
myfile = open('log.txt', 'a')
date_time = str(date_time) + '\n'
text += '\n\n'
myfile.write(date_time)
myfile.write(text)
myfile.close()
def MailAd(ad_dict): # Sends an email with a link and info of new ads
import smtplib
from email.mime.text import MIMEText
sender = 'email@example.com'
passwd = 'Password'
receiver = 'email@example.com'
count = len(ad_dict)
if count > 1:
        subject = str(count) + ' new ads found!'
if count == 1:
        subject = 'One new ad found'
body = ''
try:
for ad_id in ad_dict:
body += ad_dict[ad_id]['Title'] + ' - ' + ad_dict[ad_id]['Price'] + ' - ' + ad_dict[ad_id]['Location']
body += ' - ' + ad_dict[ad_id]['Date'] + '\n'
body += ad_dict[ad_id]['Url'] + '\n\n'
except:
log('[Error] Unable to create body for email message')
body += 'This is an automated message.\nPlease do not reply to this message.'
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = receiver
try:
server = smtplib.SMTP('smtp.live.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
except:
log('[Error] Unable to connect to email server.')
try:
server.login(sender, passwd)
except:
log('[Error] Unable to login to email server.')
try:
server.sendmail(msg['From'], msg['To'], msg.as_string())
server.quit()
log('[Okay] Email message successfully delivered.')
except:
log('[Error] Unable to send message.')
def main(old_ad_dict): # Main function, brings it all together.
try:
page = requests.get(url_to_scrape)
log("[Okay] Retrieved HTML data from: " + url_to_scrape)
except:
log("[Error] Unable to load html data from: " + url_to_scrape)
soup = BeautifulSoup(page.content, "html.parser")
page = None
kijiji_ads = soup.find_all("div", {"class": "regular-ad"}) # Finds all ad trees in page html.
ad_dict = {}
checklist = ['boréal', 'kayak de mer', 'baffin', 'epsilon', 'scorpio']
excludelist = ['wanted', 'recherché']
    for ad in kijiji_ads:  # Creates a dictionary of all ads keyed by ad id.
title = ad.find_all('a', {"class": "title"})[0].text.strip()
ad_id = ad.find_all('div', {'class': "watch"})[0].get('data-adid')
if not [False for match in excludelist if match in title.lower()]:
if [True for match in checklist if match in title.lower()]:
if ad_id not in old_ad_dict:
log('[Okay] New ad found! Ad id: ' + ad_id)
ad_dict[ad_id] = ParseAd(ad)
    if ad_dict != {}:  # If dict not empty, write ads to text file and send email.
WriteAds(ad_dict, filename)
MailAd(ad_dict)
try:
old_ad_dict = ReadAds(filename)
log("[Okay] Database succesfully reloaded.")
except:
log("[Error] Unable to reload database.")
time.sleep(scrape_delay)
main(old_ad_dict)
if __name__ == "__main__":
old_ad_dict = ReadAds(filename)
log("[Okay] Ad database succesfully loaded.")
myfile = open('log.txt', 'w') # Create/Empty log file
myfile.close()
main(old_ad_dict)
| 31.668421 | 114 | 0.592156 | 820 | 6,017 | 4.236585 | 0.268293 | 0.043178 | 0.052389 | 0.074842 | 0.208693 | 0.123201 | 0.07369 | 0.055843 | 0.042602 | 0 | 0 | 0.007478 | 0.266578 | 6,017 | 189 | 115 | 31.835979 | 0.779742 | 0.112016 | 0 | 0.275168 | 0 | 0.006711 | 0.247043 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040268 | false | 0.013423 | 0.053691 | 0 | 0.107383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
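The title filtering in main() uses a list-comprehension truthiness trick; a standalone restatement with any() that keeps the same semantics (keep a title only if no exclude keyword matches and at least one checklist keyword does):

checklist = ['boréal', 'kayak de mer', 'baffin', 'epsilon', 'scorpio']
excludelist = ['wanted', 'recherché']

def keep(title):
    t = title.lower()
    return (not any(m in t for m in excludelist)
            and any(m in t for m in checklist))

print(keep('Kayak de mer Boréal Baffin'))  # True
print(keep('WANTED: kayak epsilon'))       # False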
1a74249a03f2cae7503b71c858c78b242c87a346 | 424 | py | Python | tests/test_rrpproxy_check_contact.py | ByteInternet/rrpproxy | 9f644c8ed31f963f4eadc1dafea35e59006f89fc | [
"MIT"
] | 3 | 2020-10-20T12:12:36.000Z | 2021-12-11T19:10:20.000Z | tests/test_rrpproxy_check_contact.py | ByteInternet/rrpproxy | 9f644c8ed31f963f4eadc1dafea35e59006f89fc | [
"MIT"
] | null | null | null | tests/test_rrpproxy_check_contact.py | ByteInternet/rrpproxy | 9f644c8ed31f963f4eadc1dafea35e59006f89fc | [
"MIT"
] | null | null | null | from unittest.mock import patch
from tests.test_rrpproxy_base import TestRRPProxyBase
class TestRRPProxyCheckContact(TestRRPProxyBase):
@patch('rrpproxy.RRPProxy.call')
def test_calls_call_correctly(self, call_mock):
response = self.proxy.check_contact('CONTACT-A')
call_mock.assert_called_once_with('CheckContact', contact='CONTACT-A')
self.assertEqual(response, call_mock.return_value)
| 32.615385 | 78 | 0.773585 | 51 | 424 | 6.176471 | 0.588235 | 0.07619 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134434 | 424 | 12 | 79 | 35.333333 | 0.858311 | 0 | 0 | 0 | 0 | 0 | 0.122642 | 0.051887 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
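The test above leans on patch() swapping RRPProxy.call for a MagicMock; a self-contained illustration of that pattern with a stand-in class (the Proxy class below is invented, not the rrpproxy API):

from unittest.mock import patch

class Proxy:
    def call(self, command, **kwargs):
        raise RuntimeError("would hit the network")
    def check_contact(self, handle):
        return self.call('CheckContact', contact=handle)

with patch.object(Proxy, 'call') as call_mock:
    result = Proxy().check_contact('CONTACT-A')
call_mock.assert_called_once_with('CheckContact', contact='CONTACT-A')
print(result is call_mock.return_value)  # True: the mock's return value flows through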
1a75d4ccd2e54ea0708b468c31e7fe2bd3fd2f92 | 310 | py | Python | docker-wrapper/parse_docker_args.py | Duke-GCB/docker-wrapper | 004ca5cd067a177ec96ac40702b2f8cb9d57e440 | [
"MIT"
] | null | null | null | docker-wrapper/parse_docker_args.py | Duke-GCB/docker-wrapper | 004ca5cd067a177ec96ac40702b2f8cb9d57e440 | [
"MIT"
] | null | null | null | docker-wrapper/parse_docker_args.py | Duke-GCB/docker-wrapper | 004ca5cd067a177ec96ac40702b2f8cb9d57e440 | [
"MIT"
] | null | null | null | import os
def parse_mount(volume_spec):
# Docker volumes may be "/src:dest:ro" or simply "/src"
components = volume_spec.split(':')
perm = 'w' # assume write perm if not specified
src_path = components[0]
# check if ro specified
if components[-1] == 'ro':
perm = 'r'
return (src_path, perm)
| 25.833333 | 57 | 0.664516 | 47 | 310 | 4.276596 | 0.659574 | 0.099502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008097 | 0.203226 | 310 | 11 | 58 | 28.181818 | 0.805668 | 0.354839 | 0 | 0 | 0 | 0 | 0.02551 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
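A quick check of the split logic in parse_mount, inlined so it runs on its own (the volume specs are invented):

for spec in ('/src:/dest:ro', '/data', '/a:/b'):
    parts = spec.split(':')
    perm = 'r' if parts[-1] == 'ro' else 'w'   # only an explicit ':ro' downgrades to read
    print(spec, '->', (parts[0], perm))
# /src:/dest:ro -> ('/src', 'r'); /data -> ('/data', 'w'); /a:/b -> ('/a', 'w')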
1a76a52e6e7d5bdeb177ef8e685d702c16d3b2ac | 20,415 | py | Python | find_dups.py | colinrcooper/filedups | 7a2271c84df85f45c9f67ab18976bebe347bc256 | [
"MIT"
] | null | null | null | find_dups.py | colinrcooper/filedups | 7a2271c84df85f45c9f67ab18976bebe347bc256 | [
"MIT"
] | null | null | null | find_dups.py | colinrcooper/filedups | 7a2271c84df85f45c9f67ab18976bebe347bc256 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os, sys
if sys.version_info[0] < 3:
print('Your Python version is too old! Please use Python 3.0 or higher.')
sys.exit(1)
import hashlib
import fnmatch
import configparser
import argparse
import platform
os.chdir(os.path.dirname(os.path.abspath(__file__)))
warnings = []
BULLET = '* o '
DEFAULT_FILTERMODE = 'NONE'
DEFAULT_FILTERFILE = ''
DEFAULT_SUBDIRS = 'TRUE'
DEFAULT_MAXFILESIZE = 0
DEFAULT_INCLUDEEMPTYFILES = 'FALSE'
DEFAULT_BLOCKSIZE = 65536
DEFAULT_HASHALGORITHM = 1
DEFAULT_CSV_FOLDER = os.getcwd()
DEFAULT_CSV = ''
MIN_BLOCKSIZE = 65536
try:
maxcol = os.get_terminal_size().columns - 2
# piped output to file or other process
except OSError:
maxcol = sys.maxsize - 2
def findDup(parentFolder, filters, scanOptions):
    # This does a quick scan to identify files of exactly the same size without having to read every file's contents.
    # This shorter 'preliminary list' is then passed for file hashing, which is much slower. In this way, only likely candidates for duplicates are read in full.
sizeDups = {}
hashDups = {}
filterMode = scanOptions['FilterMode']
numFiles = 0
for dirName, subdirs, fileList in os.walk(parentFolder):
newDirName = True
for fileName in fileList:
numFiles = numFiles + 1
if ((scanOptions['SubDirs'].upper()=='FALSE') and (dirName == parentFolder)) or (scanOptions['SubDirs'].upper()!='FALSE'):
# Get the path to the file
filterFound = False
# Calculate size
path = os.path.join(dirName, fileName)
for filter_fn in filters:
if fnmatch.fnmatch(path, filter_fn):
filterFound=True
if (not filterFound and filterMode.upper() == 'EXCLUDE') or (filterFound and filterMode.upper() == 'INCLUDE') or (filterMode.upper()=='NONE'):
if newDirName:
print(' ' * maxcol, end='\r')
print('Scanning ' + shortenName(dirName, maxcol - 9), end='\r')
newDirName = False
try:
fileSize = int(os.path.getsize(path))
except:
fileSize = -1
else:
fileSize = -1
# Add or append the file path
if (fileSize != -1):
if ((fileSize == 0 and scanOptions['MaxFileSize'] == 0 and scanOptions['IncludeEmptyFiles'].upper() == 'TRUE')
or (fileSize == 0 and scanOptions['MaxFileSize'] > 0 and scanOptions['IncludeEmptyFiles'].upper() == 'TRUE')
or (fileSize > 0 and scanOptions['MaxFileSize'] == 0)
or (fileSize > 0 and scanOptions['MaxFileSize'] > 0 and scanOptions['MaxFileSize'] >= fileSize)):
if fileSize in sizeDups:
sizeDups[fileSize].append(path)
else:
sizeDups[fileSize] = [path]
print(' ' * maxcol, end='\r')
print (str(numFiles) + ' file(s) in',parentFolder, 'scanned.')
print ('Now checking potential duplicates...')
hashDups = findDupsInDict(sizeDups, scanOptions['HashAlgorithm'], scanOptions['Blocksize'])
return hashDups
def findDupsInDict(fileDict, hashAlgorithmVal, blocksize):
dups = {}
hashAlgorithms = {}
hashAlgorithms = getHashAlgorithms(hashAlgorithmVal)
results = list(filter(lambda x: len(x) > 1, fileDict.values()))
if len(results) > 0:
currResult = 0
percentComplete = 0
numResults = len(results)
for result in results:
currResult = currResult + 1
for subresult in result:
fileHash = hashfile(subresult, blocksize, hashAlgorithms)
if fileHash in dups and fileHash != 0:
dups[fileHash].append(subresult)
elif not(fileHash in dups) and fileHash != 0:
dups[fileHash] = [subresult]
print(' ' * maxcol, end='\r')
print ('Checking potential duplicate set', currResult, 'of', numResults, end='\r')
percentComplete = int(round(currResult / numResults,0))
print('')
return dups
# Joins two dictionaries
def joinDicts(dict1, dict2):
for key in dict2.keys():
if key in dict1:
dict1[key] = dict1[key] + dict2[key]
else:
dict1[key] = dict2[key]
def getHashAlgorithms(algorithm_val):
hashAlgorithms = {}
valSHA512 = 32
valSHA384 = 16
valSHA256 = 8
valSHA224 = 4
valSHA1 = 2
valMD5 = 1
if not str(algorithm_val).isnumeric():
algorithm_val = valMD5
hashAlgorithms['useSHA512'] = False
hashAlgorithms['useSHA384'] = False
hashAlgorithms['useSHA256'] = False
hashAlgorithms['useSHA224'] = False
hashAlgorithms['useSHA1'] = False
hashAlgorithms['useMD5'] = False
if (algorithm_val <= 0) or (algorithm_val >= 64):
algorithm_val = 1
if algorithm_val >= valSHA512:
hashAlgorithms['useSHA512'] = True
algorithm_val = algorithm_val - valSHA512
if algorithm_val >= valSHA384:
hashAlgorithms['useSHA384'] = True
algorithm_val = algorithm_val - valSHA384
if algorithm_val >= valSHA256:
hashAlgorithms['useSHA256'] = True
algorithm_val = algorithm_val - valSHA256
if algorithm_val >= valSHA224:
hashAlgorithms['useSHA224'] = True
algorithm_val = algorithm_val - valSHA224
if algorithm_val >= valSHA1:
hashAlgorithms['useSHA1'] = True
algorithm_val = algorithm_val - valSHA1
if algorithm_val >= valMD5:
hashAlgorithms['useMD5'] = True
return hashAlgorithms
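# Example (illustrative): getHashAlgorithms(33) decodes 33 = 32 + 1, so
# 'useSHA512' and 'useMD5' come back True while the other four flags stay False.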
def hashfile(path, blocksize, hashAlgorithms):
compositeHash = ''
if int(blocksize) <= MIN_BLOCKSIZE:
blocksize = DEFAULT_BLOCKSIZE
try:
afile = open(path, 'rb')
if hashAlgorithms['useMD5']: hasherMD5 = hashlib.md5()
if hashAlgorithms['useSHA1']: hasherSHA1 = hashlib.sha1()
if hashAlgorithms['useSHA224']: hasherSHA224 = hashlib.sha224()
if hashAlgorithms['useSHA256']: hasherSHA256 = hashlib.sha256()
if hashAlgorithms['useSHA384']: hasherSHA384 = hashlib.sha384()
if hashAlgorithms['useSHA512']: hasherSHA512 = hashlib.sha512()
buf = afile.read(blocksize)
while len(buf) > 0:
if hashAlgorithms['useMD5']: hasherMD5.update(buf)
if hashAlgorithms['useSHA1']: hasherSHA1.update(buf)
if hashAlgorithms['useSHA224']: hasherSHA224.update(buf)
if hashAlgorithms['useSHA256']: hasherSHA256.update(buf)
if hashAlgorithms['useSHA384']: hasherSHA384.update(buf)
if hashAlgorithms['useSHA512']: hasherSHA512.update(buf)
buf = afile.read(blocksize)
afile.close()
if hashAlgorithms['useMD5']: compositeHash = compositeHash + hasherMD5.hexdigest()
if hashAlgorithms['useSHA1']: compositeHash = compositeHash + hasherSHA1.hexdigest()
if hashAlgorithms['useSHA224']: compositeHash = compositeHash + hasherSHA224.hexdigest()
if hashAlgorithms['useSHA256']: compositeHash = compositeHash + hasherSHA256.hexdigest()
if hashAlgorithms['useSHA384']: compositeHash = compositeHash + hasherSHA384.hexdigest()
if hashAlgorithms['useSHA512']: compositeHash = compositeHash + hasherSHA512.hexdigest()
return compositeHash
except:
warnings.append('WARNING: Could not calculate the hash of ' + path)
return 0
def printResults(dict1, csvOutput):
if (not os.path.exists(os.path.dirname(csvOutput)) and csvOutput != ''):
if os.path.dirname(csvOutput) == '':
newCsvOutput = os.path.join(DEFAULT_CSV_FOLDER, csvOutput)
else:
newCsvOutput = csvOutput.replace(os.path.dirname(csvOutput), DEFAULT_CSV_FOLDER)
warnings.append('WARNING: The folder name "' + os.path.dirname(csvOutput)
+ '" for the CSV output file does not exist. '
+ 'Results will be saved in ' + newCsvOutput + ' instead.')
csvOutput = newCsvOutput
results = list(filter(lambda x: len(x) > 1, dict1.values()))
print('')
print('************************************************************')
if len(results) > 0:
if csvOutput !='': f = open(csvOutput, 'w+')
print('* RESULTS: DUPLICATES FOUND:')
if csvOutput !='': f.write('DUPLICATES FOUND:\nFile Name,File Size (bytes)')
print('* ---------------------------------------------------------')
for result in results:
if csvOutput !='': f.write('\n')
for subresult in result:
print('* \t' + subresult)
if csvOutput !='': f.write(subresult + ',' + str(os.path.getsize(subresult)) + '\n')
print('* ---------------------------------------------------------\n*')
if csvOutput !='': f.close()
else:
print('* RESULTS: NO DUPLICATE FILES FOUND')
print('************************************************************')
def loadDefaultScanOptions():
#These values will be used if they are not set through config file or command line parameters
scanOptions = {}
scanOptions['FilterMode'] = DEFAULT_FILTERMODE
scanOptions['FilterFile'] = DEFAULT_FILTERFILE
scanOptions['SubDirs'] = DEFAULT_SUBDIRS
scanOptions['MaxFileSize'] = DEFAULT_MAXFILESIZE
scanOptions['IncludeEmptyFiles'] = DEFAULT_INCLUDEEMPTYFILES
scanOptions['Blocksize'] = DEFAULT_BLOCKSIZE
scanOptions['HashAlgorithm'] = DEFAULT_HASHALGORITHM
scanOptions['CSVOutput'] = DEFAULT_CSV
return scanOptions
def loadConfigFileScanOptions(configFile):
#These values will override the defaults if they are set
scanOptions = {}
scanOptions = loadDefaultScanOptions()
if os.path.exists(configFile):
config = configparser.ConfigParser()
with open(configFile) as cf:
config.read_file(cf)
if config.has_option('General', 'FilterMode') and config.get('General', 'FilterMode').upper() in ('NONE', 'INCLUDE', 'EXCLUDE'):
scanOptions['FilterMode'] = config.get('General', 'FilterMode').upper()
if (scanOptions['FilterMode'].upper() != 'NONE') and (os.path.exists(config.get('General', 'FilterFile'))):
scanOptions['FilterFile'] = config.get('General', 'FilterFile')
if config.has_option('Scan Options', 'SubDirs') and (config.get('Scan Options', 'SubDirs').upper() == 'TRUE' or config.get('Scan Options', 'SubDirs').upper() == 'FALSE'):
scanOptions['SubDirs'] = config.get('Scan Options', 'SubDirs').upper()
if config.has_option('Scan Options', 'MaxFileSize') and (config.get('Scan Options', 'MaxFileSize').isnumeric()):
scanOptions['MaxFileSize'] = int(config.get('Scan Options', 'MaxFileSize'))
if config.has_option('Scan Options', 'IncludeEmptyFiles') and (config.get('Scan Options', 'IncludeEmptyFiles').upper() == 'TRUE' or config.get('Scan Options', 'IncludeEmptyFiles').upper() == 'FALSE'):
scanOptions['IncludeEmptyFiles'] = config.get('Scan Options', 'IncludeEmptyFiles').upper()
if config.has_option('Advanced', 'Blocksize') and (config.get('Advanced', 'Blocksize').isnumeric()):
scanOptions['Blocksize'] = abs(int(config.get('Advanced', 'Blocksize')))
if scanOptions['Blocksize'] <= MIN_BLOCKSIZE: scanOptions['Blocksize'] = MIN_BLOCKSIZE
if config.has_option('Advanced', 'HashAlgorithm') and (config.get('Advanced', 'HashAlgorithm').isnumeric()):
scanOptions['HashAlgorithm'] = int(config.get('Advanced', 'HashAlgorithm'))
if config.has_option('Scan Options', 'CSVOutput'):
scanOptions['CSVOutput'] = str(config.get('Scan Options', 'CSVOutput'))
return scanOptions
def loadFilters(filterFile):
if os.path.exists(filterFile):
with open(filterFile) as f:
filters = f.read().splitlines()
else:
filters = []
return filters
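# Example (hypothetical file contents): a filter file listing one pattern per
# line, such as '*.jpg' and '*.png', loads as ['*.jpg', '*.png']; a missing
# file yields an empty filter list.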
def printHashAlgorithms(hashAlgorithms):
print('* USING ALGORITHMS:')
print('* -----------------')
if hashAlgorithms['useMD5']: print(BULLET + 'MD5')
if hashAlgorithms['useSHA1']: print(BULLET + 'SHA1')
if hashAlgorithms['useSHA224']: print(BULLET + 'SHA224')
if hashAlgorithms['useSHA256']: print(BULLET + 'SHA256')
if hashAlgorithms['useSHA384']: print(BULLET + 'SHA384')
if hashAlgorithms['useSHA512']: print(BULLET + 'SHA512')
def loadCommandLineScanOptions(args, scanOptions):
if args['filterMode'] != None and (args['filterMode'].upper()=='INCLUDE' or args['filterMode'].upper()=='EXCLUDE' or args['filterMode'].upper()=='NONE'):
scanOptions['FilterMode'] = args['filterMode'].upper()
if args['filterFile'] != None:
if os.path.exists(args['filterFile']):
scanOptions['FilterFile'] = args['filterFile']
if args['subDirs'] != None and (args['subDirs'].upper()=='TRUE' or args['subDirs'].upper()=='FALSE'):
scanOptions['SubDirs'] = args['subDirs'].upper()
if args['maxFileSize'] != None:
scanOptions['MaxFileSize'] = int(abs(args['maxFileSize']))
if (args['includeEmptyFiles'] != None) and ((args['includeEmptyFiles'].upper()=='TRUE') or args['includeEmptyFiles'].upper()=='FALSE'):
scanOptions['IncludeEmptyFiles'] = args['includeEmptyFiles'].upper()
if args['blocksize'] != None and abs(args['blocksize']) >= MIN_BLOCKSIZE:
scanOptions['Blocksize'] = int(abs(args['blocksize']))
if args['hashAlgorithm'] != None:
scanOptions['HashAlgorithm'] = int(args['hashAlgorithm'])
if args['csvOutput'] != None:
scanOptions['CSVOutput'] = args['csvOutput']
return scanOptions
def shortenName(stringToShorten, lengthToShorten):
if stringToShorten == None: return ''
if lengthToShorten == None: return stringToShorten
if lengthToShorten < 5: lengthToShorten = 5
if len(stringToShorten) <= lengthToShorten:
shortenedString = stringToShorten
else:
splitSize = int(round((lengthToShorten-3) / 2,0))
shortenedString = stringToShorten[:splitSize] + '...' + stringToShorten[-splitSize:]
return shortenedString
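# Worked example (hypothetical input): a 16-character name shortened to 9
# keeps round((9 - 3) / 2) = 3 characters from each end:
#   shortenName('abcdefghijklmnop', 9)  # -> 'abc...nop'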
def padSpaces(stringToPad, lengthToPad):
stringToPad = str(stringToPad)
while len(stringToPad) < lengthToPad:
stringToPad = stringToPad + ' '
return stringToPad
def printSettings(folders, scanOptions, filters):
print('')
print('************************************************************')
printHashAlgorithms(getHashAlgorithms(scanOptions['HashAlgorithm']))
print('* \n* FOLDER(S) TO SCAN:')
print('* ------------------')
for x in folders: print(BULLET + str(x))
print('* \n* SCAN OPTIONS USED:')
print('* ------------------')
for x in scanOptions: print(BULLET + padSpaces(str(x),20) + ': ' + str(scanOptions[x]))
if len(filters) > 0:
print('* FILTERS: ')
print('* --------')
for x in filters: print(BULLET + str(x))
print('*\n************************************************************')
print('')
def printWarnings(warnings):
if len(warnings) > 0:
print('')
print('************************************************************')
print('* WARNINGS:')
for x in range(len(warnings)): print(BULLET + ' ' + warnings[x])
print('************************************************************')
print('')
def getConfigurations(cmdArgs):
#First load the default scan options
scanOptions = {}
scanOptions = loadDefaultScanOptions()
#Then over-write these default scan options with any values supplied in a configuration file
config = configparser.ConfigParser()
scanOptions['ConfigFile']=''
if cmdArgs['configFile'] != None: scanOptions['ConfigFile'] = cmdArgs['configFile']
configFile = scanOptions['ConfigFile']
if os.path.exists(configFile): scanOptions = loadConfigFileScanOptions(configFile)
#Finally over-write these scan options with any explicitly supplied in the command line itself
loadCommandLineScanOptions(cmdArgs, scanOptions)
return scanOptions
def getFilters(filterFile, cmdFilters):
#If filters were supplied on the command line, use those. Otherwise try to load them from the filter file
if filterFile != None and filterFile != '' and cmdFilters != None:
warnings.append('INFO: Supplied --filters command line parameter will take precedence over the --filterFile parameter or config file filter settings')
if cmdFilters != None and cmdFilters != '':
filters = cmdFilters
elif filterFile != None and filterFile != '':
filters = loadFilters(filterFile)
else:
filters = []
return filters
def getDupsInFolders(folders):
#Iterate through each supplied folder name and start scanning for duplicates
for i in folders:
if i[-1] == ':' and platform.system() == 'Windows': i = i + '\\'
if os.path.exists(i):
# Find the duplicated files and append them to the dups
joinDicts(dups, findDup(i, filters, scanOptions))
else:
warnings.append('WARNING: ' + str(i) + ' is not a valid path, please verify')
return dups
if __name__ == '__main__':
dups = {}
#Read the command line parameters
parser = argparse.ArgumentParser(description='Search for duplicate files in one or more folders')
parser.add_argument('-cfg', '--configFile', help='Configuration File for script', required=False)
parser.add_argument('-fm', '--filterMode', help='Filter Mode', choices=['INCLUDE', 'EXCLUDE', 'NONE'], required=False)
parser.add_argument('-ff', '--filterFile', help='File containing list of filters to be applied if Filter Mode is not NONE', required=False)
parser.add_argument('-f', '--filters', nargs='+', help = 'List of filters', required=False)
parser.add_argument('-s', '--subDirs', help='Scan subdirectories of selected folders?', choices=['TRUE', 'FALSE'], required=False)
parser.add_argument('-ms', '--maxFileSize', type=int, help='Maximum size of files to be scanned', required=False)
parser.add_argument('-emp', '--includeEmptyFiles', help='Include files with no content in results?', choices=['TRUE', 'FALSE'], required=False)
parser.add_argument('-bs', '--blocksize', type=int, help='Blocksize for file reads', required=False)
parser.add_argument('-ha', '--hashAlgorithm', type=int, help='Algorithm(s) to be used for file hashing', required=False)
parser.add_argument('-csv', '--csvOutput', help='Path to output results in CSV format', required=False)
parser.add_argument('-dirs', '--directories', nargs='+', help = 'List of directories to scan', required=True)
args = vars(parser.parse_args())
#Construct the set of scan options from command line parameters (1st precedence), configuration file settings (2nd precedence), and default values (fallback)
scanOptions = getConfigurations(args)
#Get the filter list to be used, if any
filters = getFilters(scanOptions['FilterFile'], args['filters'])
#Get list of directories to be scanned (currently can only be a command line parameter)
folders = args['directories']
#Print the list of settings to the console
printSettings(folders, scanOptions, filters)
#Find all the duplicates
dups = getDupsInFolders(folders)
#Print the results to the console and any output file specified
printResults(dups, scanOptions['CSVOutput'])
#Print any warnings / errors to the console
printWarnings(warnings)
| 48.262411 | 235 | 0.611903 | 2,035 | 20,415 | 6.092875 | 0.174447 | 0.03097 | 0.015082 | 0.017743 | 0.151706 | 0.079684 | 0.042584 | 0.037584 | 0.019356 | 0.015243 | 0 | 0.017927 | 0.240411 | 20,415 | 422 | 236 | 48.376777 | 0.781647 | 0.072692 | 0 | 0.173789 | 0 | 0 | 0.200703 | 0.025967 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.019943 | 0 | 0.111111 | 0.150997 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a79ab06520a99ff01196eabd99831377f229eee | 13,208 | py | Python | devday/devday/tests/test_utils_devdata.py | stefanbethke/devday_website | c4820e03b9dbb22a63b84f9d338f3a165a6d0354 | [
"BSD-3-Clause"
] | null | null | null | devday/devday/tests/test_utils_devdata.py | stefanbethke/devday_website | c4820e03b9dbb22a63b84f9d338f3a165a6d0354 | [
"BSD-3-Clause"
] | null | null | null | devday/devday/tests/test_utils_devdata.py | stefanbethke/devday_website | c4820e03b9dbb22a63b84f9d338f3a165a6d0354 | [
"BSD-3-Clause"
] | null | null | null | from io import StringIO
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models import Page
from cms.models.pluginmodel import CMSPlugin
from cms.models.static_placeholder import StaticPlaceholder
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management.base import OutputWrapper
from django.db.models import Count
from django.test import TestCase
from devday.utils.devdata import DevData
from event.models import Event
from speaker.models import Speaker
from talk import COMMITTEE_GROUP
from talk.models import (Room, Talk, TalkFormat, TalkSlot, TimeSlot,
Track, Vote)
from twitterfeed.models import Tweet, TwitterProfileImage
User = get_user_model()
class DevDataTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.stdout = StringIO()
cls.devdata = DevData(stdout=OutputWrapper(cls.stdout))
def setUp(self):
self.stdout.seek(0)
self.stdout.truncate(0)
def test_create_objects_failure(self):
class FooManager:
def count(self):
return 0
class Foo:
objects = FooManager()
def create():
raise Exception('testing')
try:
self.devdata.create_objects('foo', Foo, 1, create)
except Exception:
pass
self.assertTrue('FAILED' in self.stdout.getvalue(),
'should have FAILED, but got: {}'
.format(self.stdout.getvalue()))
def subtest_create_admin_user(self):
self.devdata.create_admin_user()
u = User.objects.get(email=settings.ADMINUSER_EMAIL)
self.assertEquals(u.email, settings.ADMINUSER_EMAIL)
self.assertEquals(u.is_superuser, True)
self.assertEquals(u.is_staff, True)
def subtest_update_site(self):
self.devdata.update_site()
site = Site.objects.get(pk=1)
self.assertEquals(site.domain, 'devday.de')
self.assertEquals(site.name, 'Dev Data')
def get_page(self, title):
return Page.objects.get(
title_set__title=title, title_set__published=True,
publisher_is_draft=False)
def check_plugin(self, page, slot, plugin_type):
placeholder = page.placeholders.get(slot=slot)
plugins = placeholder.get_plugins()
self.assertEquals(len(plugins), 1,
'{} placeholder has exactly one plugin'
.format(slot))
self.assertEquals(plugins[0].plugin_type, plugin_type,
'{} placeholder is of type {}'
.format(slot, plugin_type))
def subtest_create_pages(self):
self.devdata.create_objects('pages', Page, 3,
self.devdata.create_pages)
index = self.get_page('Deutsche Homepage')
self.assertEquals(index.languages, 'de', 'Homepage is German')
self.assertEquals(index.template, 'devday_index.html',
'Homepage uses correct template')
self.assertTrue(index.is_home, 'Homepage is_home')
self.check_plugin(index, 'eventinfo', 'TextPlugin')
self.check_plugin(index, 'cfp_open', 'TextPlugin')
self.check_plugin(index, 'save_the_date', 'TextPlugin')
self.check_plugin(index, 'sign_up', 'TextPlugin')
sponsoring = self.get_page('Sponsoring')
self.assertEquals(sponsoring.languages, 'de', 'Sponsoring is German')
self.assertEquals(sponsoring.template, 'devday_no_cta.html',
'Sponsoring uses correct template')
impress = self.get_page('Impressum')
self.assertEquals(impress.languages, 'de', 'Impress is German')
self.assertEquals(impress.template, 'devday_no_cta.html',
'Impress uses correct template')
def subtest_update_static_placeholders(self):
self.devdata.update_static_placeholders()
name = 'create-talk-introtext'
lang = 'de'
sph = StaticPlaceholder.objects.get(name=name)
ph = sph.draft
np = CMSPlugin.objects.filter(placeholder=ph,
language=lang).count()
self.assertEquals(np, 1, 'Exactly one static placeholder create')
def subtest_create_talk_formats(self):
self.devdata.create_objects('talk formats', TalkFormat, 3,
self.devdata.create_talk_formats)
formats = TalkFormat.objects.all().order_by('name', 'duration')
self.assertEquals(len(formats), 4, 'There are four TalkFormats')
self.assertEquals(formats[0].name, 'Lightning Talk')
self.assertEquals(formats[0].duration, 10)
self.assertEquals(formats[1].name, 'Vortrag')
self.assertEquals(formats[1].duration, 30)
self.assertEquals(formats[2].name, 'Vortrag')
self.assertEquals(formats[2].duration, 60)
self.assertEquals(formats[3].name, 'Workshop')
self.assertEquals(formats[3].duration, 180)
def subtest_update_events(self):
self.devdata.update_events()
events = list(Event.objects.order_by('start_time'))
self.assertEquals(len(events), 3,
'there are three events')
stdformat = TalkFormat.objects.get(name='Vortrag', duration=60)
for e in events[:-1]:
self.assertEquals(e.registration_open, False,
'registration not open')
self.assertEquals(e.submission_open, False,
'submission not open')
tf = e.talkformat.filter(id=stdformat.id)
self.assertTrue(len(tf) == 1,
'standard format assigned')
e = events[-1]
self.assertEquals(e.registration_open, True,
'registration open')
self.assertEquals(e.submission_open, True,
'submission open')
tf = e.talkformat.all()
self.assertTrue(len(tf) == 4,
'all formats assigned')
def subtest_get_committee_members(self):
count = len(self.devdata.get_committee_members()
.strip().split('\n')) - 1 # one extra line
self.assertEquals(count, 7, 'Seven users are committee members')
def subtest_create_users_and_attendees(self):
self.devdata.create_objects(
'users', User, 3, self.devdata.create_attendees,
self.devdata.get_committee_members)
users = len(User.objects.all())
self.assertTrue(520 <= users <= 522, 'About 520 users')
events = Event.objects.annotate(natt=Count('attendee'))
for e in events:
self.assertTrue(users * 0.70 <= e.natt <= users * 0.80,
'about {:d} attend event {}: actual {}'
.format(int(users * 0.8), e.title, e.natt))
self.subtest_get_committee_members()
def subtest_get_speakers(self):
count = len(self.devdata.get_speakers().strip().split(
'\n')) - 1 # one extra line
self.assertEquals(count, 10, 'At least 10 speakers')
def subtest_create_speakers(self):
self.devdata.create_objects(
'speakers', Speaker, 1, self.devdata.create_speakers,
self.devdata.get_speakers)
speakers = 150
number_of_speakers = Speaker.objects.count()
self.assertTrue(
speakers * 0.70 <= number_of_speakers <= speakers * 1.2,
'about {:d} speakers: actual {}'
.format(speakers, number_of_speakers))
self.subtest_get_speakers()
def subtest_create_talks(self):
self.devdata.create_objects(
'talks', Talk, 1, self.devdata.create_talks)
speakers = 50
# With a probability of 10% a speaker will submit 2 talks, and with a
# probability of 75% a speaker will submit 1 talk, so for each event we
# expect roughly 0.95 talks per speaker (0.10 * 2 + 0.75 * 1 = 0.95).
talks = speakers * 0.95
events = Event.objects.annotate(
ntalk=Count('talk'))
for e in events:
self.assertTrue(talks * 0.75 <= e.ntalk <= talks * 1.25,
'about {:d} talks at event {}: actual {}'
.format(int(talks), e.title, e.ntalk))
def subtest_create_votes(self):
event = Event.objects.current_event()
self.devdata.create_objects(
'votes', Vote, 1, self.devdata.vote_for_talk)
number_of_votes = Vote.objects.exclude(talk__event=event).count()
self.assertEquals(number_of_votes, 0, 'No votes for older events')
number_of_votes = Vote.objects.count()
number_of_talks = Talk.objects.filter(event=event).count()
potential_votes = number_of_talks * User.objects.filter(
groups__name=COMMITTEE_GROUP).count()
self.assertTrue(
potential_votes * 0.7 <= number_of_votes <= potential_votes,
'about {} votes for {} talks: actual {}'.format(
int(potential_votes * 0.8), number_of_talks, number_of_votes))
def subtest_create_tracks(self):
self.devdata.create_objects(
'tracks', Track, 1, self.devdata.create_tracks)
# FIXME implement data checks
ntracks = Track.objects.filter(
event=Event.objects.current_event()).count()
self.assertEquals(ntracks, 0, 'No tracks for current event')
ntracks = Track.objects.filter(
event=Event.objects.get(title='devdata.17')).count()
self.assertEquals(ntracks, 5, '5 tracks for devdata.17')
ntracks = Track.objects.filter(
event=Event.objects.get(title='devdata.18')).count()
self.assertEquals(ntracks, 6, '6 tracks for devdata.18')
def subtest_create_rooms(self):
self.devdata.create_objects('rooms', Room, 1,
self.devdata.create_rooms)
nrooms = Room.objects.filter(
event=Event.objects.get(title='devdata.17')).count()
self.assertEquals(nrooms, 4, 'we have 4 rooms for devdata.17')
nrooms = Room.objects.filter(
event=Event.objects.get(title='devdata.18')).count()
self.assertEquals(nrooms, 4, 'we have 4 rooms for devdata.18')
def subtest_create_time_slots(self):
self.devdata.create_objects('time slots', TimeSlot, 1,
self.devdata.create_time_slots)
events = Event.objects.exclude(
id=Event.objects.current_event_id()).annotate(
ntimeslot=Count('timeslot'))
for e in events:
self.assertEquals(e.ntimeslot, 13, 'we have 13 time slots for {}'
.format(e))
def subtest_create_talk_slots(self):
self.devdata.create_objects('talk slots', TalkSlot, 1,
self.devdata.create_talk_slots)
events = Event.objects.exclude(
id=Event.objects.current_event_id()).annotate(
ntalkslot=Count('talk__talkslot'))
for e in events:
self.assertEquals(e.ntalkslot, 14, 'we have 14 talk slots for {}'
.format(e))
def subtest_create_twitter_profiles(self):
self.devdata.create_objects('twitter profiles', TwitterProfileImage, 1,
self.devdata.create_twitter_profiles)
ntpp = TwitterProfileImage.objects.count()
self.assertEquals(ntpp, 1, 'we have 1 twitter profile picture')
def subtest_create_tweets(self):
self.devdata.create_objects(
'tweets', Tweet, 1, self.devdata.create_tweets)
number_of_tweets = Tweet.objects.count()
self.assertEquals(number_of_tweets, 7, 'we have 7 tweets')
def test_get_name_from_email(self):
self.assertEquals(
self.devdata.get_name_from_email('admin@devday.de'),
'admin@devday.de')
self.assertEquals(
self.devdata.get_name_from_email('first.last@devday.de'),
'First Last')
def test_create_devdata(self):
self.subtest_create_admin_user()
self.subtest_update_site()
self.subtest_create_pages()
self.subtest_update_static_placeholders()
self.subtest_create_talk_formats()
self.subtest_update_events()
self.subtest_create_users_and_attendees()
self.subtest_create_speakers()
self.subtest_create_talks()
self.subtest_create_votes()
self.subtest_create_tracks()
self.subtest_create_rooms()
self.subtest_create_time_slots()
self.subtest_create_talk_slots()
self.subtest_create_twitter_profiles()
self.subtest_create_tweets()
self.stdout.seek(0)
self.stdout.truncate(0)
self.devdata.create_devdata()
self.assertTrue('OK' in self.stdout.getvalue(),
'At least one OK in output')
self.assertTrue('FAILED' not in self.stdout.getvalue(),
'No FAILED in output')
| 43.022801 | 79 | 0.619397 | 1,514 | 13,208 | 5.243065 | 0.171731 | 0.084656 | 0.055682 | 0.039305 | 0.293777 | 0.177375 | 0.128244 | 0.094482 | 0.075082 | 0.075082 | 0 | 0.014637 | 0.275818 | 13,208 | 306 | 80 | 43.163399 | 0.815264 | 0.019534 | 0 | 0.116541 | 0 | 0 | 0.116743 | 0.001623 | 0 | 0 | 0 | 0.003268 | 0.199248 | 1 | 0.101504 | false | 0.003759 | 0.06391 | 0.007519 | 0.184211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a79eeac88453654638db9192535898ef31080d0 | 2,499 | py | Python | xmantissa/suspension.py | jonathanj/mantissa | 53e5502aba23ce99be78b27f923a276593033fe8 | [
"MIT"
] | 6 | 2016-02-17T15:04:53.000Z | 2021-08-20T09:44:10.000Z | xmantissa/suspension.py | jonathanj/mantissa | 53e5502aba23ce99be78b27f923a276593033fe8 | [
"MIT"
] | 62 | 2015-02-04T23:40:55.000Z | 2021-02-18T19:56:02.000Z | xmantissa/suspension.py | jonathanj/mantissa | 53e5502aba23ce99be78b27f923a276593033fe8 | [
"MIT"
] | 8 | 2015-11-15T17:26:42.000Z | 2020-12-02T06:36:52.000Z | from twisted.python.components import registerAdapter
from axiom.attributes import reference
from axiom.item import Item
from nevow.page import Element
from xmantissa.ixmantissa import INavigableElement, INavigableFragment
from xmantissa.webnav import Tab
from zope.interface import implements, Interface
class ISuspender(Interface):
"""
Marker interface for suspended powerup facades.
"""
class SuspendedNavigableElement(Item):
implements(INavigableElement, ISuspender)
powerupInterfaces = (INavigableElement, ISuspender)
originalNE = reference()
def getTabs(self):
origTabs = self.originalNE.getTabs()
def proxyTabs(tabs):
for tab in tabs:
yield Tab(tab.name, self.storeID, tab.priority,
proxyTabs(tab.children),
authoritative=tab.authoritative,
linkURL=tab.linkURL)
return proxyTabs(origTabs)
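# Note: proxyTabs above recursively rewrites every Tab (including children) to
# point at this suspended element's storeID, so tab navigation presumably
# resolves to the suspension facade rather than the original powerup.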
class SuspendedFragment(Element):
"""
Temporary account-suspended fragment.
"""
fragmentName = 'suspend'
live = False
implements(INavigableFragment)
def head(self):
pass
registerAdapter(SuspendedFragment, SuspendedNavigableElement, INavigableFragment)
def suspendJustTabProviders(installation):
"""
Replace INavigableElements with facades that indicate their suspension.
"""
if installation.suspended:
raise RuntimeError("Installation already suspended")
powerups = list(installation.allPowerups)
for p in powerups:
if INavigableElement.providedBy(p):
p.store.powerDown(p, INavigableElement)
sne = SuspendedNavigableElement(store=p.store, originalNE=p)
p.store.powerUp(sne, INavigableElement)
p.store.powerUp(sne, ISuspender)
installation.suspended = True
def unsuspendTabProviders(installation):
"""
Remove suspension facades and replace them with their originals.
"""
if not installation.suspended:
raise RuntimeError("Installation not suspended")
powerups = list(installation.allPowerups)
allSNEs = list(powerups[0].store.powerupsFor(ISuspender))
for p in powerups:
for sne in allSNEs:
if sne.originalNE is p:
p.store.powerDown(sne, INavigableElement)
p.store.powerDown(sne, ISuspender)
p.store.powerUp(p, INavigableElement)
sne.deleteFromStore()
installation.suspended = False
| 32.454545 | 81 | 0.683473 | 234 | 2,499 | 7.299145 | 0.384615 | 0.02459 | 0.012295 | 0.044496 | 0.11007 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000527 | 0.240496 | 2,499 | 76 | 82 | 32.881579 | 0.899368 | 0.088836 | 0 | 0.076923 | 0 | 0 | 0.02843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0.019231 | 0.134615 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a7b4acea6211e0f11b6ce0b1340e0ebd3c19df0 | 1,889 | py | Python | setup.py | brianredbeard/mischief | b58210eae304614ee6102fc4e29af4ce4d07de8f | [
"MIT"
] | null | null | null | setup.py | brianredbeard/mischief | b58210eae304614ee6102fc4e29af4ce4d07de8f | [
"MIT"
] | null | null | null | setup.py | brianredbeard/mischief | b58210eae304614ee6102fc4e29af4ce4d07de8f | [
"MIT"
] | null | null | null | ##############################################
# The MIT License (MIT)
# Copyright (c) 2019 Kevin Walchko
# see LICENSE for full details
##############################################
from __future__ import print_function
from setuptools import setup
from build_utils import BuildCommand
from build_utils import PublishCommand
from build_utils import BinaryDistribution
from build_utils import SetGitTag
from build_utils import get_pkg_version
VERSION = get_pkg_version('mischief/__init__.py')
PACKAGE_NAME = 'mischief'
BuildCommand.pkg = PACKAGE_NAME
BuildCommand.py2 = False
BuildCommand.test = False
PublishCommand.pkg = PACKAGE_NAME
PublishCommand.version = VERSION
SetGitTag.version = VERSION
setup(
author='Kevin Walchko',
author_email='walchko@users.noreply.github.com',
name=PACKAGE_NAME,
version=VERSION,
description='Another interface to haveibeenpwned.com',
long_description=open('readme.md').read(),
long_description_content_type='text/markdown',
url='http://github.com/AllGloryToTheHypnotoad/{}'.format(PACKAGE_NAME),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
license='MIT',
keywords=['hibp', 'pwn', 'pwned', 'haveibeenpwned'],
packages=[PACKAGE_NAME],
install_requires=[
'build_utils',
'requests',
'colorama'
],
cmdclass={
'publish': PublishCommand,
'make': BuildCommand,
'git': SetGitTag
},
scripts=[
'bin/pymischief.py',
]
)
| 30.467742 | 78 | 0.654314 | 187 | 1,889 | 6.44385 | 0.545455 | 0.049793 | 0.058091 | 0.082988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005239 | 0.191636 | 1,889 | 61 | 79 | 30.967213 | 0.78389 | 0.043939 | 0 | 0.039216 | 0 | 0 | 0.353801 | 0.018713 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137255 | 0 | 0.137255 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a7bf867c1b1bbf1814a6ce1e2bd6f07d0bcafef | 9,244 | py | Python | metadamage/main.py | ChristianMichelsen/metadamage | f2cd8b24ae93f50c8e64202b484c5cba973ae167 | [
"MIT"
] | 3 | 2021-01-18T12:12:01.000Z | 2021-01-18T15:10:43.000Z | metadamage/main.py | ChristianMichelsen/metadamage | f2cd8b24ae93f50c8e64202b484c5cba973ae167 | [
"MIT"
] | 7 | 2021-03-03T08:35:56.000Z | 2021-11-04T08:34:54.000Z | metadamage/main.py | ChristianMichelsen/metadamage | f2cd8b24ae93f50c8e64202b484c5cba973ae167 | [
"MIT"
] | 1 | 2021-03-25T11:34:49.000Z | 2021-03-25T11:34:49.000Z | # Scientific Library
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Standard Library
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from importlib import reload
import logging
import os
from pathlib import Path
# Third Party
import numpyro
# First Party
from metadamage import cli_utils, counts, fits, plot, utils
from metadamage.progressbar import console, progress
numpyro.enable_x64()
logger = logging.getLogger(__name__)
#%%
def main(filenames, cfg):
utils.initial_print(filenames, cfg)
N_files = len(filenames)
bad_files = 0
with progress:
task_id_overall = progress.add_task(
f"Overall progress",
total=N_files,
progress_type="overall",
)
for filename in filenames:
if not utils.file_is_valid(filename):
bad_files += 1
continue
cfg.add_filename(filename)
progress.add_task(
"task_name",
progress_type="shortname",
name=cfg.shortname,
)
df_counts = counts.load_counts(cfg)
# print(len(pd.unique(df_counts.tax_id)))
# continue
# group = utils.get_specific_tax_id(df_counts, tax_id=-1) # get very first group
if not utils.is_df_counts_accepted(df_counts, cfg):
continue
df_fit_results = fits.get_fits(df_counts, cfg)
progress.refresh()
progress.advance(task_id_overall)
logger.debug("End of loop\n")
# if all files were bad, raise error
if bad_files == N_files:
raise Exception("All files were bad!")
if utils.is_ipython():
print("Doing iPython plot")
filenames = [
# "./data/input/ugly/KapK_small.UglyPrint.txt"
]
reload(utils)
cfg = utils.Config(
out_dir=Path("./data/out/"),
max_fits=10,
max_cores=-1,
min_alignments=10,
min_k_sum=10,
min_N_at_each_pos=1,
substitution_bases_forward=cli_utils.SubstitutionBases.CT.value,
substitution_bases_reverse=cli_utils.SubstitutionBases.GA.value,
bayesian=True,
forced=False,
version="0.0.0",
dask_port=8787,
)
path = Path().cwd().parent
os.chdir(path)
filenames = sorted(Path("./data/input/").rglob("ugly/*.txt"))
cfg.add_filenames(filenames)
filename = filenames[0] # BPN19-AR
filename = filenames[1] # EC-Ext-14-
filename = filenames[2] # EC-Ext-A27
filename = filenames[3] # KapK
filename = filenames[4] # Lok-75
filename = filenames[5] # SJArg-1
# filename = "data/input/n_sigma_test.txt"
if False:
# if True:
main(filenames, cfg)
# from metadamage import io
# io.Parquet(
# "./data/out/fit_results/KapK-12-1-24-Ext-1-Lib-1-Index2.parquet"
# ).load_metadata()
tax_id = 1224
tax_id = 1236
tax_id = 135622
tax_id = 2742
tax_id = 28211
tax_id = 8006
tax_id = 4751
tax_id = 469
tax_id = 28211
tax_id = 356 # BNP
tax_id = 286
tax_id = 526227
tax_id = 71240
tax_id = 68336
tax_id = 6072
tax_id = 7711
tax_id = 3193
tax_id = 58024
tax_id = 7898
tax_id = 2742 # Lok-75
tax_id = 22973 # KapK
tax_id = 9606 # SJ
tax_id = 6656 # SJ
tax_id = 68895 # BPN
tax_id = 673929 # KapK
tax_id = 9979 # KapK
tax_id = -1
cfg.add_filename(filename)
df_counts = counts.load_counts(cfg)
group = utils.get_specific_tax_id(df_counts, tax_id=tax_id)
data = fits.group_to_numpyro_data(group, cfg)
# x = x
# First Party
from metadamage import dashboard
dashboard.utils.set_custom_theme()
# reload(dashboard)
fit_results = dashboard.fit_results.FitResults(
folder=Path("./data/out/"),
# verbose=True,
# very_verbose=False,
use_memoization=False,
)
# fit_results.set_marker_size(marker_transformation="log10", marker_size_max=8)
df = fit_results.df_fit_results
# Third Party
import plotly.express as px
def compute_range(df, x, range_x):
if range_x is None:
return df[x].min(), df[x].max()
elif isinstance(range_x, (list, tuple)):
if range_x[0] is None:
return df[x].min(), range_x[1]
elif range_x[1] is None:
return range_x[0], df[x].max()
else:
return range_x
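# Example (hypothetical column): a half-open range fills the missing bound from
# the data, e.g. compute_range(df, 'LR', (None, 120)) returns
# (df['LR'].min(), 120), and range_x=None returns the full data range.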
def plotly(x, y, x_title, y_title, range_x=None, range_y=None, savefig=True):
range_x = compute_range(df, x, range_x)
range_y = compute_range(df, y, range_y)
fig = px.scatter(
df,
x=x,
y=y,
size="size",
color="shortname",
hover_name="shortname",
# size_max=marker_size_max,
# opacity=1,
color_discrete_map=fit_results.d_cmap,
custom_data=fit_results.custom_data_columns,
range_x=range_x,
range_y=range_y,
render_mode="webgl",
symbol="shortname",
symbol_map=fit_results.d_symbols,
)
fig.update_traces(
hovertemplate=fit_results.hovertemplate,
marker_line_width=0,
marker_sizeref=2.0
* fit_results.max_of_size
/ (fit_results.marker_size_max ** 2),
)
fig.update_layout(
xaxis_title=x_title,
yaxis_title=y_title,
showlegend=False,
# legend_title="Files",
)
fig.for_each_trace(
lambda trace: dashboard.figures.set_opacity_for_trace(
trace,
method="sqrt",
scale=20 / df.shortname.nunique(),
opacity_min=0.3,
opacity_max=0.95,
)
)
if savefig:
fig_name = f"./data/out/plotlys/plotly__{x}__{y}.html"
utils.init_parent_folder(fig_name)
fig.write_html(fig_name)
return fig
savefig = False
# savefig = True
x = x  # NameError here deliberately halts top-to-bottom execution (appears to be a debugging stop)
df["LR"] = np.clip(df["LR"], a_min=-10, a_max=None)
plotly(
x="LR",
y="D_max",
x_title="LR frequentist",
y_title="D_max frequentist",
savefig=savefig,
)
plotly(
x="Bayesian_n_sigma",
y="Bayesian_D_max",
x_title="n_sigma Bayesian",
y_title="D_max Bayesian",
savefig=savefig,
)
plotly(
x="D_max",
y="Bayesian_D_max",
x_title="D_max",
y_title="D_max Bayesian",
range_x=(0, 0.8),
range_y=(0, 0.8),
savefig=savefig,
)
plotly(
x="q",
y="Bayesian_q",
x_title="q",
y_title="q Bayesian",
range_x=(0, 1.0),
range_y=(0, 1.0),
savefig=savefig,
)
plotly(
x="phi",
y="Bayesian_phi",
x_title="phi",
y_title="phi Bayesian",
range_x=(2, 7_000),
range_y=(2, 7_000),
savefig=savefig,
)
plotly(
x="LR",
y="Bayesian_n_sigma",
x_title="LR",
y_title="n_sigma Bayesian",
savefig=savefig,
)
plotly(
x="forward_D_max",
y="reverse_D_max",
x_title="Forward D_max",
y_title="Reverse D_max",
range_x=(0, 0.8),
range_y=(0, 0.8),
savefig=savefig,
)
plotly(
x="forward_LR",
y="reverse_LR",
x_title="Forward D_max",
y_title="Reverse D_max",
range_x=(-10, 120),
range_y=(-10, 120),
savefig=savefig,
)
plotly(
x="phi",
y="D_max_std",
x_title="phi",
y_title="D_max_std",
savefig=savefig,
)
df["phi_log10"] = np.log10(df["phi"])
plotly(
x="phi_log10",
y="D_max_std",
x_title="log10 phi",
y_title="D_max_std",
savefig=savefig,
)
df["D_max_significance"] = df["D_max"] / df["D_max_std"]
plotly(
x="phi_log10",
y="D_max_significance",
x_title="log10 phi",
y_title="D_max_significance",
savefig=savefig,
)
plotly(
x="phi",
y="D_max_significance",
x_title="phi",
y_title="D_max_significance",
savefig=savefig,
)
plotly(
x="D_max",
y="D_max_significance",
x_title="D_max",
y_title="D_max_significance",
savefig=savefig,
)
plotly(
x="LR",
y="rho_Ac",
x_title="LR",
y_title="rho_Ac",
savefig=savefig,
)
df["rho_Ac_abs"] = np.abs(df["rho_Ac"])
plotly(
x="LR",
y="rho_Ac_abs",
x_title="LR",
y_title="rho_Ac_abs",
savefig=savefig,
)
plotly(
x="LR",
y="asymmetry",
x_title="LR",
y_title="asymmetry",
savefig=savefig,
)
| 23.521628 | 93 | 0.545219 | 1,160 | 9,244 | 4.081034 | 0.235345 | 0.034854 | 0.050697 | 0.053232 | 0.246092 | 0.199409 | 0.134981 | 0.109421 | 0.090621 | 0.068019 | 0 | 0.038037 | 0.343033 | 9,244 | 392 | 94 | 23.581633 | 0.741479 | 0.082756 | 0 | 0.293919 | 0 | 0 | 0.103248 | 0.004742 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010135 | false | 0 | 0.050676 | 0 | 0.077703 | 0.006757 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a7e3e87f84ae18c2e3e70c21079daec01376fa1 | 2,433 | py | Python | compaction.py | robbassi/kvs | 3242a70f9f214f90b2fa540f1049e836b62932d2 | [
"MIT"
] | 1 | 2020-10-07T16:47:07.000Z | 2020-10-07T16:47:07.000Z | compaction.py | robbassi/kvs | 3242a70f9f214f90b2fa540f1049e836b62932d2 | [
"MIT"
] | 7 | 2020-10-07T17:42:13.000Z | 2020-12-15T00:43:41.000Z | compaction.py | robbassi/kvs | 3242a70f9f214f90b2fa540f1049e836b62932d2 | [
"MIT"
] | 1 | 2020-10-07T16:49:06.000Z | 2020-10-07T16:49:06.000Z | import sys
from common import TOMBSTONE, Value
from binio import kv_iter, kv_writer
from typing import List, Optional, Tuple
from os import scandir, stat
from sstable import SSTable
MIN_THRESHOLD = 4
MIN_SIZE = 50000000
class Bucket:
def __init__(self):
self.min = 0
self.max = 0
self.avg = 0
self.segments = []
def compute_bounds(self):
bucket_size = 0
for segment in self.segments:
bucket_size += segment.size
self.avg = round(bucket_size / len(self.segments))
self.min = self.avg - round(self.avg / 2)
self.max = self.avg + round(self.avg / 2)
def add(self, segment: SSTable):
self.segments.append(segment)
self.compute_bounds()
def fits(self, segment: SSTable) -> bool:
return segment.size >= self.min and segment.size <= self.max
def size(self) -> int:
return len(self.segments)
def oldest(self, n: int) -> List[SSTable]:
segments = sorted(self.segments, key=lambda segment: segment.index)
return segments[:n]
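# Worked example (hypothetical sizes): a bucket holding segments of 100 and
# 200 bytes has avg = round(300 / 2) = 150, min = 75 and max = 225, i.e.
# bounds of avg +/- 50%.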
def compute_buckets(segments: List[SSTable]) -> List[Bucket]:
small_bucket = Bucket()
buckets = [small_bucket]
for segment in segments:
if segment.size <= MIN_SIZE:
small_bucket.add(segment)
else:
segment_bucket = None
for bucket in buckets:
if bucket.fits(segment):
segment_bucket = bucket
break
if segment_bucket is None:
new_bucket = Bucket()
new_bucket.add(segment)
buckets.append(new_bucket)
else:
segment_bucket.add(segment)
buckets.sort(key=lambda b: b.avg)
return buckets
def compaction_pass(buckets: List[Bucket]) -> Tuple[List[SSTable], Optional[SSTable]]:
for bucket in buckets:
if bucket.size() >= MIN_THRESHOLD:
old_files = bucket.oldest(MIN_THRESHOLD)
new_file = SSTable.merge(old_files)
return (old_files, new_file)
return ([], None)
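# Usage sketch (names from this module): one size-tiered compaction step
# merges the MIN_THRESHOLD oldest segments of the first oversized bucket:
#   buckets = compute_buckets(segments)
#   old_files, new_file = compaction_pass(buckets)
#   if new_file is not None:
#       ...  # delete old_files and register new_file in their place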
def describe_buckets(buckets):
for i, b in enumerate(buckets):
compaction = "(needs compaction)" if b.size() >= MIN_THRESHOLD else ""
print(
f"Tier {i + 1} | min {b.min} avg {b.avg} max {b.max} {compaction}"
)
for f in b.segments:
print(f" {f.path} {f.size} bytes")
| 31.192308 | 86 | 0.594328 | 304 | 2,433 | 4.641447 | 0.259868 | 0.029766 | 0.031892 | 0.022679 | 0.065202 | 0.065202 | 0 | 0 | 0 | 0 | 0 | 0.00944 | 0.303329 | 2,433 | 77 | 87 | 31.597403 | 0.823009 | 0 | 0 | 0.060606 | 0 | 0.015152 | 0.043157 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0.015152 | 0.090909 | 0.030303 | 0.333333 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a7ee06ae12edb30e9d03447739bff0ec37db3b6 | 8,002 | py | Python | mrl/configs/make_continuous_agents.py | nicolascastanet/mrl | 0623c40c3e03c6a4d3e495426eae765d9f8aa751 | [
"MIT"
] | null | null | null | mrl/configs/make_continuous_agents.py | nicolascastanet/mrl | 0623c40c3e03c6a4d3e495426eae765d9f8aa751 | [
"MIT"
] | null | null | null | mrl/configs/make_continuous_agents.py | nicolascastanet/mrl | 0623c40c3e03c6a4d3e495426eae765d9f8aa751 | [
"MIT"
] | null | null | null | from mrl.import_all import *
from argparse import Namespace
import gym
import time
def make_ddpg_agent(base_config=default_ddpg_config,
args=Namespace(env='InvertedPendulum-v2',
tb='',
parent_folder='/tmp/mrl',
layers=(256, 256),
num_envs=None),
agent_name_attrs=['env', 'seed', 'tb'],
**kwargs):
if callable(base_config):
base_config = base_config()
config = base_config
if hasattr(args, 'num_envs') and args.num_envs is None:
import multiprocessing as mp
args.num_envs = max(mp.cpu_count() - 1, 1)
if not hasattr(args, 'prefix'):
args.prefix = 'ddpg'
if not args.tb:
args.tb = str(time.time())
merge_args_into_config(args, config)
config.agent_name = make_agent_name(config, agent_name_attrs, prefix=args.prefix)
base_modules = {
k: v
for k, v in dict(module_train=StandardTrain(),
module_eval=EpisodicEval(),
module_policy=ActorPolicy(),
module_logger=Logger(),
module_state_normalizer=Normalizer(MeanStdNormalizer()),
module_replay=OnlineHERBuffer(),
module_action_noise=ContinuousActionNoise(GaussianProcess,
std=ConstantSchedule(config.action_noise)),
module_algorithm=DDPG()).items() if not k in config
}
config.update(base_modules)
if type(args.env) is str:
env = lambda: gym.make(args.env)
eval_env = env
else:
env = args.env
eval_env = env
if hasattr(args, 'eval_env') and args.eval_env is not None:
if type(args.eval_env) is str:
eval_env = lambda: gym.make(args.eval_env)
else:
eval_env = args.eval_env
config.module_train_env = EnvModule(env, num_envs=config.num_envs, seed=config.seed)
config.module_eval_env = EnvModule(eval_env, num_envs=config.num_eval_envs, name='eval_env', seed=config.seed + 1138)
layer_norm = nn.LayerNorm if (hasattr(args, 'layer_norm') and args.layer_norm) else nn.Identity
e = config.module_eval_env
config.module_actor = PytorchModel(
'actor', lambda: Actor(FCBody(e.state_dim + e.goal_dim, args.layers, layer_norm, make_activ(config.activ)), e.action_dim, e.max_action))
config.module_critic = PytorchModel(
'critic', lambda: Critic(FCBody(e.state_dim + e.goal_dim + e.action_dim, args.layers, layer_norm, make_activ(config.activ)), 1))
if e.goal_env:
config.never_done = True # important for standard Gym goal environments, which are never done
return config
def make_td3_agent(base_config=spinning_up_td3_config,
args=Namespace(env='InvertedPendulum-v2',
tb='',
prefix='td3',
parent_folder='/tmp/mrl',
layers=(256, 256),
num_envs=None),
agent_name_attrs=['env', 'seed', 'tb'],
**kwargs):
config = make_ddpg_agent(base_config, args, agent_name_attrs, **kwargs)
del config.module_algorithm
config.module_algorithm = TD3()
layer_norm = nn.LayerNorm if (hasattr(args, 'layer_norm') and args.layer_norm) else nn.Identity
e = config.module_eval_env
config.module_critic2 = PytorchModel('critic2',
lambda: Critic(FCBody(e.state_dim + e.goal_dim + e.action_dim, args.layers, layer_norm, make_activ(config.activ), False), 1, False))
return config
def make_sac_agent(base_config=spinning_up_sac_config,
args=Namespace(env='InvertedPendulum-v2',
tb='',
prefix='sac',
parent_folder='/tmp/mrl',
layers=(256, 256),
num_envs=None),
agent_name_attrs=['env', 'seed', 'tb'],
**kwargs):
config = make_ddpg_agent(base_config, args, agent_name_attrs, **kwargs)
e = config.module_eval_env
layer_norm = nn.LayerNorm if (hasattr(args, 'layer_norm') and args.layer_norm) else nn.Identity
del config.module_actor
del config.module_action_noise
del config.module_policy
config.module_policy = StochasticActorPolicy()
del config.module_algorithm
config.module_algorithm = SAC()
config.module_actor = PytorchModel(
'actor', lambda: StochasticActor(FCBody(e.state_dim + e.goal_dim, args.layers, layer_norm, make_activ(config.activ)),
e.action_dim, e.max_action, log_std_bounds = (-20, 2)))
config.module_critic2 = PytorchModel('critic2',
lambda: Critic(FCBody(e.state_dim + e.goal_dim + e.action_dim, args.layers, layer_norm, make_activ(config.activ), False), 1, False))
return config
def make_Alice_and_Bob(config):
"""
Set Alice's and Bob's policies / replay buffers / algorithms etc. from the agent config.
TODO: implement a default agent config.
"""
# Alice
config.policy_A.required_agent_modules = [
'actor_A', 'action_noise', 'env', 'replay_buffer_A'
]
config.policy_A.module_name = 'policy_A'
config.policy_A.actor = config.actor_A
config.policy_A.replay_buffer = config.replay_A
config.Alice.required_agent_modules = ['actor_A','critic_A','replay_buffer_A', 'env']
config.Alice.actor = config.actor_A
config.Alice.critic = config.critic_A
config.Alice.replay_buffer = config.replay_A
# Bob
config.policy_B.required_agent_modules = [
'actor_B', 'action_noise', 'env', 'replay_buffer_B'
]
config.policy_B.module_name = 'policy_B'
config.policy_B.actor = config.actor_B
config.policy_B.replay_buffer = config.replay_B
config.Bob.required_agent_modules = ['actor_B','critic_B','replay_buffer_B', 'env']
config.Bob.actor = config.actor_B
config.Bob.critic = config.critic_B
config.Bob.replay_buffer = config.replay_B
config.evaluation.required_agent_modules = ['policy_B', 'eval_env']
config.evaluation.policy = config.policy_B
"""
# Alice
dict_a = {'actor': config.actor._copy({'name':'actor',
'model_fn':config.actor.model_fn}),
'critic': config.actor._copy({'name':'critic',
'model_fn':config.critic.model_fn}),
'replay_buffer': config.replay._copy()
}
config.actor_A = dict_a['actor']
config.critic_A = dict_a['critic']
config.replay_A = dict_a['replay_buffer']
config.policy_A = config.policy._copy(name = 'policy_A')
config.policy_A.actor = config.actor_A
config.policy_A.replay_buffer = config.replay_A
config.Alice = config.algorithm._copy(name = 'Alice')
config.Alice.actor = config.actor_A
config.Alice.critic = config.critic_A
config.Alice.replay_buffer = config.replay_A
# Bob
dict_b = {'actor': config.actor._copy({'name':'actor',
'model_fn':config.actor.model_fn}),
'critic': config.actor._copy({'name':'critic',
'model_fn':config.critic.model_fn}),
'replay_buffer': config.replay._copy()
}
config.actor_B = dict_b['actor']
config.critic_B = dict_b['critic']
config.replay_B = dict_b['replay_buffer']
config.policy_B = config.policy._copy(name = 'policy_B')
config.policy_B.actor = config.actor_B
config.policy_B.replay_buffer = config.replay_B
config.Bob = config.algorithm._copy(name = 'Bob')
config.Bob.actor = config.actor_B
config.Bob.critic = config.critic_B
config.Bob.replay_buffer = config.replay_B
# Other config
config.evaluation.policy = config.policy_B # Only Bob policy when test time"""
return config | 35.564444 | 142 | 0.622969 | 1,006 | 8,002 | 4.690855 | 0.137177 | 0.045772 | 0.045772 | 0.050858 | 0.619411 | 0.536978 | 0.506463 | 0.478491 | 0.458148 | 0.458148 | 0 | 0.006972 | 0.265059 | 8,002 | 225 | 143 | 35.564444 | 0.795443 | 0.022744 | 0 | 0.362903 | 0 | 0 | 0.062644 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.040323 | 0 | 0.104839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a80808d8e9e323c40e1bbd90918908a6dd8d9e0 | 4,036 | py | Python | gpuexperiments/old/sharedmemory.py | hughperkins/gpu-experiments | 3e5064e45682494be97190558807672b602f1c76 | [
"BSD-2-Clause"
] | 2 | 2016-07-05T05:52:18.000Z | 2018-04-14T07:35:36.000Z | gpuexperiments/old/sharedmemory.py | hughperkins/gpu-experiments | 3e5064e45682494be97190558807672b602f1c76 | [
"BSD-2-Clause"
] | null | null | null | gpuexperiments/old/sharedmemory.py | hughperkins/gpu-experiments | 3e5064e45682494be97190558807672b602f1c76 | [
"BSD-2-Clause"
] | null | null | null | # Note that this will erase your nvidia cache, ~/.nv/ComputeCache. This may or may not be an undesirable side-effect for you. For example, cutorch will take 1-2 minutes or so to start after this cache has been emptied.
from __future__ import print_function, division
import time
import string
import random
import numpy as np
import pyopencl as cl
import subprocess
import os
from os.path import join
from gpuexperiments.callkernel import call_cl_kernel
#import gpuexperiments.cpu_check
from gpuexperiments.timecheck import inittime, timecheck
gpu_idx = 0
platforms = cl.get_platforms()
i = 0
for platform in platforms:
gpu_devices = platform.get_devices(device_type=cl.device_type.GPU)
if gpu_idx < i + len(gpu_devices):
ctx = cl.Context(devices=[gpu_devices[gpu_idx-i]])
break
i += len(gpu_devices)
print('context', ctx)
q = cl.CommandQueue(ctx)
mf = cl.mem_flags
sources = {
'kernel_store_to_local': r"""
kernel void kernel_store_to_local(global int *data) {
local int F[32];
F[0] = 123;
}
"""
, 'kernel_init_local': r"""
kernel void kernel_init_local(global int *data) {
local int F[32];
for(int i = 0; i < 32; i++) {
F[i] = 0;
};
}
"""
, 'kernel_init_local_noloop': r"""
kernel void kernel_init_local_noloop(global int *data) {
local int F[32];
F[get_local_id(0)] = 0;
}
"""
, 'kernel_copy_local_to_global': r"""
kernel void kernel_copy_local_to_global(global int *data) {
local int F[32];
int tid = get_local_id(0);
data[tid] = F[tid];
}
"""
, 'kernel_copy_local_from_global': r"""
kernel void kernel_copy_local_from_global(global int *data) {
local int F[32];
int tid = get_local_id(0);
F[tid] = data[tid];
}
"""
, 'kernel_copy_local_to_global_gid': r"""
kernel void kernel_copy_local_to_global_gid(global int *data) {
local int F[32];
int tid = get_local_id(0);
int gid = get_global_id(0);
data[gid] = F[tid];
}
"""
, 'kernel_copy_local_from_global_gid': r"""
kernel void kernel_copy_local_from_global_gid(global int *data) {
local int F[32];
int tid = get_local_id(0);
int gid = get_global_id(0);
F[tid] = data[gid];
}
"""
}
optimized = set()
def clearComputeCache():
cache_dir = join(os.environ['HOME'], '.nv/ComputeCache')
for subdir in os.listdir(cache_dir):
if subdir == 'index':
continue
print('clean', subdir)
subprocess.call(['rm', '-Rf', join(cache_dir, subdir)])
# subprocess.call(['rm', '-Rf', join(os.environ['HOME'], '.nv/ComputeCache')])
def getPtx(kernelName):
with open('/tmp/gpucmd.sh', 'w') as f:
f.write(r"""#!/bin/bash
cat $(grep -r %s ~/.nv/ComputeCache | awk '{print $3}')
""" % kernelName)
filepath = subprocess.check_output(['/bin/bash', '/tmp/gpucmd.sh'])
filepath_utf8 = ''
for byte in filepath:
# print(byte)
if byte >= 10 and byte < 128:
if chr(byte) in string.printable:
filepath_utf8 += chr(byte)
# print('filepath', filepath)
#print(kernelName)
print(filepath_utf8.split('--opt-level')[0])
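# Note: the driver loop at the bottom of this file calls clearComputeCache()
# before each build, so the grep above should match exactly one freshly
# compiled kernel binary.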
def buildKernel(name, source):
options = '-cl-opt-disable'
if name in optimized:
print('ENABLING OPTIMIZATIONS')
options = ''
return cl.Program(ctx, source).build(options=options).__getattr__(name)
d = np.zeros((1024*1024 * 32 * 2,), dtype=np.float32)
d_cl = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=d)
def timeKernel(name, kernel):
# clearComputeCache()
grid = (1024*1024,1,1)
block = (32,1,1)
q.finish()
inittime()
call_cl_kernel(kernel, q, grid, block, d_cl)
q.finish()
return timecheck(name)
# print(getPtx('mykernel'))
times = {}
for name, source in sorted(sources.items()):
clearComputeCache()
kernel = buildKernel(name, source)
print('built kernel')
for it in range(3):
t = timeKernel(name, kernel)
times[name] = t
print(getPtx(name))
for name, time in sorted(times.items()):
print(name, time)
| 27.834483 | 219 | 0.652131 | 590 | 4,036 | 4.284746 | 0.291525 | 0.031646 | 0.047468 | 0.047073 | 0.294304 | 0.27057 | 0.200158 | 0.177215 | 0.090981 | 0.090981 | 0 | 0.022472 | 0.206145 | 4,036 | 144 | 220 | 28.027778 | 0.766542 | 0.106789 | 0 | 0.180328 | 0 | 0 | 0.379694 | 0.110153 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0 | 0.090164 | 0 | 0.139344 | 0.081967 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a80cfa10b9df2618d9406f7d94897169d17a59a | 4,827 | py | Python | activities/boss_db.py | jhuapl-boss/boss-tools | 2ace8ce2985ffa3c442ed85134d26c76fb5d984f | [
"Apache-2.0"
] | 1 | 2018-08-04T21:57:34.000Z | 2018-08-04T21:57:34.000Z | activities/boss_db.py | jhuapl-boss/boss-tools | 2ace8ce2985ffa3c442ed85134d26c76fb5d984f | [
"Apache-2.0"
] | 16 | 2018-05-21T16:28:10.000Z | 2021-03-17T20:15:25.000Z | activities/boss_db.py | jhuapl-boss/boss-tools | 2ace8ce2985ffa3c442ed85134d26c76fb5d984f | [
"Apache-2.0"
] | 3 | 2018-02-08T16:45:59.000Z | 2018-03-22T15:26:14.000Z | # Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pymysql.cursors
import bossutils
LOG = bossutils.logger.bossLogger()
def get_db_connection(host):
"""
Connects to vault to get database information and then makes a DB connection.
Note that the connection is opened with auto-commit turned ON.
Args:
host (str): Host name of database.
Returns:
(pymysql.Connection) connection to DB
"""
vault = bossutils.vault.Vault()
# ------ get values from Vault -----------
user = vault.read('secret/endpoint/django/db', 'user')
password = vault.read('secret/endpoint/django/db', 'password')
db_name = vault.read('secret/endpoint/django/db', 'name')
port = int(vault.read('secret/endpoint/django/db', 'port'))
# ---- debug locally -------
# host = "localhost"
# user = "testuser"
# password = ""
# db_name = "boss"
# port = 3306
return pymysql.connect(host=host,
user=user,
password=password,
db=db_name,
port=port,
# Don't turn off autocommit w/o visiting every user
# of this connection and ensuring that they use a
# transaction!
autocommit=True,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
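# Usage sketch (hypothetical host name): credentials come from Vault, so the
# caller only supplies the database host:
#   conn = get_db_connection('endpoint-db.internal.example.com')
#   with conn.cursor() as cursor:
#       cursor.execute('SELECT 1')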
def update_downsample_status_in_db(args):
"""
Update the downsample status in the MySQL database.
This supports a state of the resolution hierarchy step function.
This function is tested in
boss.git/django/bossspatialdb/test/test_update_downsample_status.py.
Tests live there because Django owns the DB.
Args:
args (dict):
db_host (str): MySQL host name.
channel_id (int): ID of channel for downsample.
status (str): String from DownsampleStatus class.
Returns:
(dict): Returns input args for passing to next SFN state.
"""
sql = """
UPDATE channel
SET downsample_status = %(status)s
WHERE id = %(chan_id)s
"""
db_host = args['db_host']
chan_id = args['channel_id']
status = args['status']
sql_args = dict(status=status, chan_id=str(chan_id))
try:
db_connection = get_db_connection(db_host)
with db_connection.cursor(pymysql.cursors.SSCursor) as cursor:
rows = cursor.execute(sql, sql_args)
if rows < 1:
LOG.error(
f'DB said no rows updated when trying to set downsample status to {status} for channel {chan_id}'
)
except Exception as ex:
LOG.exception(f'Failed to set downsample status to {status} for channel {chan_id}: {ex}')
return args
def set_downsample_arn_in_db(args):
"""
Set the arn of the running downsample step function in the MySQL database.
This supports a state of the resolution hierarchy step function.
This function is tested in
boss.git/django/bossspatialdb/test/test_set_downsample_arn.py.
Tests live there because Django owns the DB.
Args:
args (dict):
db_host (str): MySQL host name.
channel_id (int): ID of channel for downsample.
exe_sfn_arn (str): ARN of running downsample step function.
Returns:
(dict): Returns input args for passing to next SFN state.
"""
sql = """
UPDATE channel
SET downsample_arn = %(arn)s
WHERE id = %(chan_id)s
"""
db_host = args['db_host']
chan_id = args['channel_id']
arn = args['exe_sfn_arn']
sql_args = dict(arn=arn, chan_id=str(chan_id))
try:
db_connection = get_db_connection(db_host)
with db_connection.cursor(pymysql.cursors.SSCursor) as cursor:
rows = cursor.execute(sql, sql_args)
if rows < 1:
LOG.error(
f'DB said no rows updated when trying to set downsample arn for channel {chan_id}'
)
except Exception as ex:
LOG.exception(f'Failed to set downsample arn for channel {chan_id}: {ex}')
return args
| 33.061644 | 117 | 0.617982 | 619 | 4,827 | 4.720517 | 0.298869 | 0.024641 | 0.027379 | 0.031485 | 0.501027 | 0.501027 | 0.45859 | 0.450376 | 0.442847 | 0.442847 | 0 | 0.004695 | 0.293971 | 4,827 | 145 | 118 | 33.289655 | 0.8527 | 0.443132 | 0 | 0.474576 | 0 | 0 | 0.272293 | 0.039809 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0.033898 | 0.033898 | 0 | 0.135593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a81385c1aeda3274db449a3e0247ddc02d27f2f | 15,454 | py | Python | DataWrangler.py | AlexLamson/DataWrangler | 3d4b64c30d708a55f4423a486ec9559f087c5acd | [
"MIT"
] | 12 | 2018-08-15T15:12:52.000Z | 2021-11-21T16:04:52.000Z | DataWrangler.py | AlexLamson/DataWrangler | 3d4b64c30d708a55f4423a486ec9559f087c5acd | [
"MIT"
] | 19 | 2018-06-04T15:11:01.000Z | 2019-10-09T13:16:02.000Z | DataWrangler.py | AlexLamson/DataWrangler | 3d4b64c30d708a55f4423a486ec9559f087c5acd | [
"MIT"
] | 1 | 2019-04-09T16:23:30.000Z | 2019-04-09T16:23:30.000Z | import sublime
import sublime_plugin
from collections import defaultdict
from math import log10, floor, ceil
import threading
from subprocess import check_output
import re
import itertools
# pass in a variable name and an optional default value
# to get what that value is set to in settings
def settings(name, default=None):
return sublime.load_settings("DataWrangler.sublime-settings").get(name, default)
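# Example: read a user-configurable flag with a fallback (the key below is one
# actually read elsewhere in this plugin; any key present in
# DataWrangler.sublime-settings works):
#   ignore_case = settings("ignore_case_when_merging_lines", False)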
def detect_num_columns(self, sep=None):
if sep is None:
sep = detect_separations(self)
# read and split the first line
r = sublime.Region(0, self.view.size())
first_line_region = self.view.lines(r)[0]
first_line = self.view.substr(first_line_region)
return len(first_line.split(sep))
def detect_separations(self, first_line=None):
if first_line is None:
# read and split the first line
r = sublime.Region(0, self.view.size())
first_line_region = self.view.lines(r)[0]
first_line = self.view.substr(first_line_region)
# hacky solution that should be improved in the future
if '\t' in first_line:
return '\t'
elif ', ' in first_line:
return ', '
elif ',' in first_line:
return ','
    else:
        return ' '
def detect_col_widths(self, sep=None, num_columns=None, lines=None):
if sep is None:
sep = detect_separations(self)
if num_columns is None:
num_columns = detect_num_columns(self, sep)
if lines is None:
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = (self.view.substr(x) for x in line_regions)
lines = [x for x in lines if x != '']
column_widths = [0]*num_columns
for line in lines:
split_line = line.split(sep)
for i, cell_string in enumerate(split_line):
column_widths[i] = max(len(cell_string), column_widths[i])
return column_widths
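# Worked example of the three helpers above (values are illustrative): for the
# two tab-separated lines "aaaa\tbb" and "d\teeeeee",
# detect_separations(...) returns '\t', detect_num_columns(...) returns 2,
# and detect_col_widths(...) returns [4, 6] (the max cell width per column).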
'''
Description
-----------
Count the number of times each line occurs, then display that info in a new tab
'''
class LineFreqCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.status_message('Data Wrangler: Counting line frequencies')
# collect settings
ignore_case_when_merging_lines = settings("ignore_case_when_merging_lines", False)
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = (self.view.substr(x) for x in line_regions)
lines = [x for x in lines if x != '']
# count the unique lines
counts = defaultdict(int)
for line in lines:
if ignore_case_when_merging_lines:
line = line.lower()
counts[line] += 1
total_num_words = sum((counts[x] for x in counts))
# sort by word count then alphabetically
count_tuples = ((counts[word], word) for word in counts)
count_tuples = sorted(count_tuples, key=lambda x: (-x[0], x[1]))
# make all the line counts line up
max_count_characters = floor(log10( max((counts[x] for x in counts)) ))+1
# add more decimal places to the percentages as needed
smallest_percent = min((counts[x]/total_num_words for x in counts))
num_decimal_places = 0
if -(log10(smallest_percent)+2) > 0:
num_decimal_places = ceil(-(log10(smallest_percent)+2))
num_leading_chars_in_percentage = num_decimal_places+4
else:
num_leading_chars_in_percentage = 3 # special case when all percentages are whole numbers
# initialize array to hold output lines
out_strings = []
# add a title to the beginning
header_string = 'Frequencies of unique lines'
out_strings.append(header_string)
out_strings.append('='*len(header_string))
# format each lines percentage, count, and line string
percentage_format_specifier = '{: >' + str(num_leading_chars_in_percentage) + '.' + str(num_decimal_places) + '%}'
count_format_specifier = '{: >' + str(max_count_characters) + 'd}'
for (count, word) in count_tuples:
percentage = count/total_num_words
count_string = (percentage_format_specifier+' '+count_format_specifier+' {}').format(percentage, count, word)
out_strings.append(count_string)
# add in a grand total at the end
out_strings.append('='*len(header_string))
out_strings.append(str(total_num_words)+' Total')
output_string = '\n'.join(out_strings) + '\n'
# write frequencies to new tab
new_view = sublime.active_window().new_file()
new_view.insert(edit, 0, output_string)
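# Illustrative output of LineFreqCommand for the three input lines
# "a", "b", "a" (spacing follows the format specifiers computed above):
#   Frequencies of unique lines
#   ===========================
#   67% 2 a
#   33% 1 b
#   ===========================
#   3 Total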
'''
Description
-----------
Remove english stopwords from lines. Assumes that each line is a single word.
'''
class RemoveStopwordsCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.status_message('Data Wrangler: Removing stopwords')
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = (self.view.substr(x) for x in line_regions)
lines = [x for x in lines if x != '']
# load in list of stopwords
stopwords = settings("stopwords", "").split()
# filter out stopwords
lines = [x for x in lines if x.lower() not in stopwords]
output_string = '\n'.join(lines)
        # write the filtered lines to a new tab
new_view = sublime.active_window().new_file()
new_view.insert(edit, 0, output_string)
'''
Description
-----------
Remove lines that contain no meaningful content (spaces, tabs, commas).
'''
class RemoveSeparatorOnlyRowsCommand(sublime_plugin.TextCommand):
def run(self, edit):
        sublime.status_message('Data Wrangler: Removing separator-only rows')
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = [self.view.substr(x) for x in line_regions]
# filter out lines that only contain separators
is_useful_line = re.compile(r'[^\s\t,\.]')
lines = list(filter(is_useful_line.search, lines))
output_string = '\n'.join(lines)
        # write the filtered lines to a new tab
new_view = sublime.active_window().new_file()
new_view.insert(edit, 0, output_string)
'''
Description
-----------
List all pairs of lines.
'''
class AllPairsCommand(sublime_plugin.TextCommand):
def run(self, edit):
        sublime.status_message('Data Wrangler: Listing all line pairs')
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = [self.view.substr(x) for x in line_regions]
lines = [x for x in lines if x != '']
# generate all pairs of lines
lines = [x+' ~~~ '+y for (x, y) in itertools.combinations(lines, 2)]
output_string = '\n'.join(lines)
        # write the generated pairs to a new tab
new_view = sublime.active_window().new_file()
new_view.insert(edit, 0, output_string)
'''
Description
-----------
Before:
AAA
BBB
CCC
After:
AAA BBB
AAA CCC
'''
class FlattenListOfListsCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.status_message('Data Wrangler: Flattening list column')
# collect all the lines from the documents as a list of strings
self.view.run_command('select_all')
everything_region = self.view.sel()[0]
everything_string = self.view.substr(everything_region)
self.view.sel().clear()
lines = everything_string.split('\n')
# discard the last line if it is blank
if lines[-1] == '':
del lines[-1]
# flatten the list of lists
new_lines = list()
current_heading = ""
for i, line in enumerate(lines):
if not (line.startswith('\t') or line.startswith(' ')):
current_heading = line
else:
new_lines.append(current_heading + line)
new_everything_string = "\n".join(new_lines) + "\n"
# open new file
new_view = sublime.active_window().new_file()
# insert selected text into the new file
new_view.insert(edit, 0, new_everything_string)
# self.view.replace(edit, everything_region, new_everything_string)
# self.view.sel().clear()
# self.view.run_command("go_to_line", {'line':'0'})
'''
Description
-----------
Before:
AAA BBB
AAA CCC
After:
AAA
BBB
CCC
'''
class GroupListOfListsCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.status_message('Data Wrangler: Grouping list column')
# collect all the lines from the documents as a list of strings
self.view.run_command('select_all')
everything_region = self.view.sel()[0]
everything_string = self.view.substr(everything_region)
self.view.sel().clear()
lines = everything_string.split('\n')
# discard the last line if it is blank
if lines[-1] == '':
del lines[-1]
# flatten the list of lists
new_lines = list()
current_heading = ""
for i, line in enumerate(lines):
            heading, subheading = line.split('\t', 1)
if heading == current_heading:
new_lines.append('\t'+subheading)
else:
current_heading = heading
new_lines.append(current_heading)
new_everything_string = "\n".join(new_lines) + "\n"
# open new file
new_view = sublime.active_window().new_file()
# insert selected text into the new file
new_view.insert(edit, 0, new_everything_string)
# self.view.replace(edit, everything_region, new_everything_string)
# self.view.sel().clear()
# self.view.run_command("go_to_line", {'line':'0'})
'''
Description
-----------
Before:
aaaa bb ccc
dd eeeeee ff
After:
aaaa bb ccc
dd eeeeee ff
'''
class AlignColumnsCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.status_message('Data Wrangler: Aligning columns')
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = (self.view.substr(x) for x in line_regions)
lines = [x for x in lines if x != '']
# compute the max width of each column
sep = detect_separations(self)
num_columns = detect_num_columns(self, sep)
column_widths = detect_col_widths(self, sep, num_columns)
# re-format all the columns
format_string = " ".join(("{: >"+str(width)+"}" for width in column_widths))
out_strings = []
for line in lines:
cells = line.split(sep)
resized_line = format_string.format(*cells)
out_strings.append(resized_line)
        # join the output lines
        output_string = '\n'.join(out_strings) + '\n'
        # write the aligned columns to a new tab
        new_view = sublime.active_window().new_file()
        new_view.insert(edit, 0, output_string)
'''
Description
-----------
Delete every column that has a cursor in it
'''
class DeleteColumnsCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.status_message('Data Wrangler: Deleting columns')
# indices of data columns that will be removed
data_columns_to_delete = set()
# for each cursor, find which data column that cursor is in
for cursor_region in self.view.sel():
line_region = self.view.line(cursor_region.a)
tabs_region = sublime.Region(line_region.a, cursor_region.a)
num_tabs_before_cursor = self.view.substr(tabs_region).count('\t')
# the data column index is equal to the number of tabs before the cursor
data_column = num_tabs_before_cursor
data_columns_to_delete.add( data_column )
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = (self.view.substr(x) for x in line_regions)
lines = [x for x in lines if x != '']
# delete unwanted columns
sep = detect_separations(self)
out_strings = []
for line in lines:
columns = line.split(sep)
filtered_columns = [y for (x,y) in enumerate(columns) if x not in data_columns_to_delete]
filtered_line = sep.join(filtered_columns)
out_strings.append(filtered_line)
        # join the output lines
        output_string = '\n'.join(out_strings) + '\n'
        # write the remaining columns to a new tab
        new_view = sublime.active_window().new_file()
        new_view.insert(edit, 0, output_string)
'''
Description
-----------
Delete every column that doesn't have a cursor in it
'''
class IsolateColumnsCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.status_message('Data Wrangler: Isolating columns')
# indices of data columns that will be removed
data_columns_to_keep = set()
# for each cursor, find which data column that cursor is in
for cursor_region in self.view.sel():
line_region = self.view.line(cursor_region.a)
tabs_region = sublime.Region(line_region.a, cursor_region.a)
num_tabs_before_cursor = self.view.substr(tabs_region).count('\t')
# the data column index is equal to the number of tabs before the cursor
data_column = num_tabs_before_cursor
data_columns_to_keep.add( data_column )
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = (self.view.substr(x) for x in line_regions)
lines = [x for x in lines if x != '']
# delete unwanted columns
sep = detect_separations(self)
out_strings = []
for line in lines:
columns = line.split(sep)
filtered_columns = [y for (x,y) in enumerate(columns) if x in data_columns_to_keep]
filtered_line = sep.join(filtered_columns)
out_strings.append(filtered_line)
        # join the output lines
        output_string = '\n'.join(out_strings) + '\n'
        # write the kept columns to a new tab
        new_view = sublime.active_window().new_file()
        new_view.insert(edit, 0, output_string)
'''
Description
-----------
Separate each word onto its own line
'''
class WordSplitCommand(sublime_plugin.TextCommand):
def run(self, edit):
        sublime.status_message('Data Wrangler: Splitting words onto separate lines')
# collect the line strings
r = sublime.Region(0, self.view.size())
line_regions = self.view.lines(r)
lines = (self.view.substr(x) for x in line_regions)
lines = [x for x in lines if x != '']
        # collect each word as its own output line
        out_strings = []
        for line in lines:
            # split on runs of non-word characters so punctuation does not
            # stick to the words
            for word in re.split(r"\W+", line):
                if word != '':
                    out_strings.append(word)
        output_string = '\n'.join(out_strings) + '\n'
        # write the split words to a new tab
        new_view = sublime.active_window().new_file()
        new_view.insert(edit, 0, output_string)
| 32.81104 | 123 | 0.631616 | 2,026 | 15,454 | 4.650543 | 0.132774 | 0.045001 | 0.013373 | 0.014859 | 0.686691 | 0.650074 | 0.624177 | 0.617173 | 0.606135 | 0.597856 | 0 | 0.004473 | 0.262197 | 15,454 | 470 | 124 | 32.880851 | 0.821873 | 0.155623 | 0 | 0.625514 | 0 | 0 | 0.047375 | 0.004878 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057613 | false | 0 | 0.032922 | 0.004115 | 0.156379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a8180645e23b18d704d5f3aab84f4eda829d302 | 507 | py | Python | resources/rasanlu/test.py | pmelet/tcnlu | 4ad9267de4f2d3d269be5e7adb70686d4935865c | [
"Apache-2.0"
] | 1 | 2018-09-13T08:36:15.000Z | 2018-09-13T08:36:15.000Z | resources/rasanlu/test.py | pmelet/tcnlu | 4ad9267de4f2d3d269be5e7adb70686d4935865c | [
"Apache-2.0"
] | 6 | 2018-09-13T09:37:32.000Z | 2018-09-26T07:54:59.000Z | resources/rasanlu/test.py | pmelet/tcnlu | 4ad9267de4f2d3d269be5e7adb70686d4935865c | [
"Apache-2.0"
] | null | null | null | import sys, json, random
from rasa_nlu.model import Interpreter
from pprint import pprint
interpreter = Interpreter.load("models\\current\\nlu")
responses_file = sys.argv[1]
responses = json.load(open(responses_file))
while True:
question = input("> ")
response = interpreter.parse(question)
pprint(response)
intent = response.get("intent").get("name")
answers = responses.get(intent)
answer = answers[random.randrange(len(answers))]
    print()
    print(answer)
    print()
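# The responses file passed as sys.argv[1] is assumed to map intent names to
# lists of candidate answers, e.g. (hypothetical content):
# {
#     "greet": ["Hello!", "Hi there."],
#     "goodbye": ["Bye!"]
# }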
| 24.142857 | 54 | 0.70217 | 61 | 507 | 5.786885 | 0.52459 | 0.073654 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002375 | 0.169625 | 507 | 20 | 55 | 25.35 | 0.836105 | 0 | 0 | 0.125 | 0 | 0 | 0.063116 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0.3125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a89de4b46f00e88cfbed3e6547fad63b83b5448 | 31,473 | py | Python | sapextractor/algo/prod/obj_centr_log.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
] | null | null | null | sapextractor/algo/prod/obj_centr_log.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
] | null | null | null | sapextractor/algo/prod/obj_centr_log.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from dateutil import parser
from pm4pymdl.objects.mdl.exporter import exporter as mdl_exporter
from pm4pymdl.objects.ocel.exporter import exporter as ocel_exporter
from sapextractor.utils.dates import timestamp_column_from_dt_tm
from pandas.core.frame import DataFrame
from sapextractor.database_connection.interface import DatabaseConnection
def apply(con, keep_first=True, min_extr_date="2020-01-01 00:00:00", gjahr="2020", enable_changes=True,
enable_payments=True, allowed_act_doc_types=None, allowed_act_changes=None, mandt="800"):
print("WIP: production, 'apply' function")
print("######################################################")
print("Create Production Order")
print("######################################################")
afko_res = con.prepare_and_execute_query("AFKO", ["AUFNR","PLNBEZ", "GAMNG", "GMEIN"], additional_query_part=" WHERE MANDT = '"+mandt+"'");
# Remove deleted ones
aufk_res = con.prepare_and_execute_query("AUFK", ["AUFNR"], additional_query_part=" WHERE LOEKZ != 'X' AND MANDT = '"+mandt+"'");
afko_res = afko_res.merge(aufk_res,left_on="AUFNR",right_on="AUFNR",how="inner");
print(afko_res)
afko_res['event_PRODORD'] = "OR" + afko_res["AUFNR"] #afko_res.apply(lambda row: "OR"+row.AUFNR)
# removed: CDTCODE='CO01' AND
jcds_res = con.prepare_and_execute_query("JCDS", ["OBJNR", "CHGNR", "UDATE","UTIME","CDTCODE"], additional_query_part=" WHERE MANDT = '"+mandt+"'");
print(jcds_res)
dataframe = afko_res.merge(jcds_res,left_on="event_PRODORD",right_on="OBJNR",how="inner")
print(dataframe)
s026 = con.prepare_and_execute_query("S026", ["MATNR","MCOMB", "AUFNR"], additional_query_part=" WHERE MANDT = '"+mandt+"'").drop_duplicates();
# s026.rename(columns={'MATNR': 'event_REQMAT'},inplace=True)
print(s026)
dataframe['event_REQMAT'] = "";
timestamp_column_from_dt_tm.apply(dataframe, "UDATE", "UTIME", "event_timestamp")
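    # UDATE and UTIME are SAP's separate date and time fields; the helper above
    # is assumed to merge them into a single datetime column 'event_timestamp',
    # which everything below uses for filtering and sorting.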
min_extr_date = parser.parse(min_extr_date)
dataframe = dataframe[dataframe["event_timestamp"] >= min_extr_date]
dataframe = dataframe.sort_values("event_timestamp")
dataframe = dataframe.drop_duplicates(subset=["event_PRODORD"],keep="first")
# # Define relevant Production orders
# relevant_production_orders_series = pd.Series(data=["OR000000822321"]);
relevant_production_orders = dataframe["event_PRODORD"];
print("relevant prod orders:", relevant_production_orders);
# # Filter data
# dataframe = dataframe.merge(relevant_production_orders,left_on="event_PRODORD",right_on="event_PRODORD",how="inner")
# print("#### MERGED DATAFRAME: ")
# print(dataframe)
dataframe["DOCTYPE_RequiredMaterial"] = "";
dataframe["DOCTYPE_RequiredMaterial"] = dataframe["DOCTYPE_RequiredMaterial"].astype(object)
dataframe["event_REQMAT"] = "";
# dataframe["event_REQMAT"] = dataframe["event_REQMAT"].astype(object)
    relevant_materials_series = pd.Series(dtype=object)
for index, row in dataframe.iterrows():
filtered = s026[(("OR"+s026['AUFNR']) == row['event_PRODORD'])];
if not filtered.empty :
print("Conncted Material:");
if row['PLNBEZ'] == " ":
dataframe.loc[index,'PLNBEZ'] = filtered['MATNR'].tolist()[0]
relevant_materials_series = relevant_materials_series.append(filtered["MCOMB"]);
connected_mat_list = filtered["MCOMB"].tolist()
dataframe.at[index,'DOCTYPE_RequiredMaterial'] = connected_mat_list
# for i, mat in enumerate(connected_mat_list):
# dataframe.at[index,"event_REQMAT"+counter] = filtered["MCOMB"].toList(;
dataframe.at[index,'event_REQMAT'] = str(connected_mat_list);
relevant_materials = pd.DataFrame({'event_REQMAT': relevant_materials_series}).drop_duplicates(subset=["event_REQMAT"]);
print(dataframe)
dataframe.rename(columns={'PLNBEZ': 'event_MATNR'},inplace=True)
dataframe.rename(columns={'GAMNG': 'event_MNG'},inplace=True)
dataframe = dataframe.assign(DOCTYPE_ProdOrd = lambda x: x['event_PRODORD'])
dataframe = dataframe.assign(DOCTYPE_Material = lambda x: x['event_MATNR'])
# dataframe = dataframe.assign(DOCTYPE_RequiredMaterial = lambda x: str(x['event_REQMAT'].tolist()))
# dataframe = dataframe.reset_index()
# dataframe["event_id"] = dataframe.index.astype(str)
dataframe["event_activity"] = "Create Production Order"
dataframe = dataframe.drop(["AUFNR","OBJNR","CHGNR","UDATE","UTIME","CDTCODE","GMEIN"],axis=1)
print(dataframe)
# eban_cols = con.get_columns("EBAN")
# print(eban_cols)
# eban_test = con.prepare_and_execute_query("EBAN", eban_cols, additional_query_part=" WHERE BANFN = '0010044618' AND MANDT = '"+mandt+"'");
# # resb = con.execute_read_sql("SELECT * FROM RESB WHERE AUFNR = '000000822321' AND MANDT = '"+mandt+"'",["RSNUM","RSPOS", "MATNR", "BDMNG","MEINS", "AUFNR", "BAUGR","BANFN","BNFPO"]);
# print(eban_test);
# eban_test.to_csv ('exported_eban.csv', index = True, header=True)
print("######################################################")
print("Plan Material// Create Purchase Requsition")
print("######################################################")
# TODO
# print("THIS STEP IS IN PROGRESS")
# Deletion Indicator: LOEKZ = X
eban = con.prepare_and_execute_query("EBAN", ["BANFN","MATNR", "MENGE", "MEINS","BADAT"], additional_query_part=" WHERE LOEKZ != 'X' AND MANDT = '"+mandt+"'");
# eban.rename(columns={'BANFN': 'event_PURCHREQ'},inplace=True)
eban['event_PURCHREQ'] = "PR" + eban["BANFN"];
eban.rename(columns={'MATNR': 'event_REQMAT'},inplace=True)
# Filter Materials
eban = eban.merge(relevant_materials,left_on="event_REQMAT",right_on="event_REQMAT",how="inner")
# Save relevant Purchase Requisitions
    relevant_purchase_requisitions_series = pd.Series(dtype=object)
relevant_purchase_requisitions_series = relevant_purchase_requisitions_series.append(eban['event_PURCHREQ']);
relevant_purchase_requisitions = pd.DataFrame({'event_PURCHREQ': relevant_purchase_requisitions_series}).drop_duplicates(subset=["event_PURCHREQ"]);
print("Relevant Purchase Requisitions:")
print(relevant_purchase_requisitions)
eban.rename(columns={'MENGE': 'event_MNG'},inplace=True)
eban.rename(columns={'MEINS': 'event_EIN'},inplace=True)
eban["event_activity"] = "Create Purchase Requisition"
eban['DOCTYPE_PurchReq'] = eban['event_PURCHREQ']
eban['DOCTYPE_RequiredMaterial'] = eban['event_REQMAT']
eban['TIME'] = "235959"; # TODO: Made up timestamp
# timestamp_column_from_dt_tm.apply(eban, "BADAT", "TIME", "event_timestamp") # filter only based on day
# # TODO: TEMP REMOVED, in favor of having only release event (with TIME!) in log
# # eban = eban[eban["event_timestamp"] >= min_extr_date]
# eban['TIME'] = "000000"; # but set time to start of day after that
# timestamp_column_from_dt_tm.apply(eban, "BADAT", "TIME", "event_timestamp")
eban = eban.drop(["BADAT","TIME","BANFN"],axis=1)
print(eban)
print("######################################################")
print("Release Purchase Requisition")
print("######################################################")
# TODO: Include TCODE and CHNGIND: update?
cdpos_res = con.prepare_and_execute_query("CDPOS", ["OBJECTID","CHANGENR", "FNAME","VALUE_NEW"], additional_query_part=" WHERE (VALUE_NEW = 'X' OR VALUE_NEW='XX') AND CHNGIND='U' AND FNAME='FRGZU' AND MANDANT = '"+mandt+"'");
print(cdpos_res)
#Removed: TCODE = 'ME54' AND
cdhdr_res = con.prepare_and_execute_query("CDHDR", ["OBJECTID","CHANGENR", "UDATE", "UTIME"], additional_query_part=" WHERE MANDANT = '"+mandt+"'");
print(cdhdr_res)
release_purch_req_data = cdpos_res.merge(cdhdr_res,left_on=["OBJECTID","CHANGENR"],right_on=["OBJECTID","CHANGENR"],how="inner") #.drop_duplicates(subset=["OBJECTID","CHANGENR","UDATE","UTIME"])
print(release_purch_req_data)
timestamp_column_from_dt_tm.apply(release_purch_req_data, "UDATE", "UTIME", "event_timestamp")
release_purch_req_data = release_purch_req_data[release_purch_req_data["event_timestamp"] >= min_extr_date]
release_purch_req_data = release_purch_req_data.sort_values("event_timestamp")
# release_purch_req_data["event_id"] = release_purch_req_data.index.astype(str)
release_purch_req_data["event_activity"] = ""
release_purch_req_data["event_activity"] = release_purch_req_data.apply(lambda x: 'Release Purchase Requisition (1)' if x['VALUE_NEW'] == 'X' else ('Release Purchase Requisition (2)' if x['VALUE_NEW'] == 'XX' else 'Release PurReq: ERR_UNKNOWN_CHANGE'), axis=1)
release_purch_req_data.rename(columns={'PLNBEZ': 'event_REQMAT'},inplace=True)
release_purch_req_data['event_PURCHREQ'] = "PR" + release_purch_req_data["OBJECTID"]
release_purch_req_data.rename(columns={'VALUE_NEW': 'event_NEW-FRGZU'},inplace=True)
release_purch_req_data = release_purch_req_data.assign(DOCTYPE_PurchReq = lambda x: x['event_PURCHREQ'])
release_purch_req_data["event_REQMAT"] = ""
if not release_purch_req_data.empty:
release_purch_req_data["event_REQMAT"] = release_purch_req_data.apply(lambda x:eban.loc[eban['event_PURCHREQ'] == x['event_PURCHREQ']]['event_REQMAT'].values[0] if eban.loc[eban['event_PURCHREQ'] == x['event_PURCHREQ']]['event_REQMAT'].values.size > 0 else '', axis=1)
# Filter only relevant Purchase Requisitions
release_purch_req_data = release_purch_req_data.merge(relevant_purchase_requisitions,left_on="event_PURCHREQ",right_on="event_PURCHREQ",how="inner")
release_purch_req_data = release_purch_req_data.assign(DOCTYPE_RequiredMaterial = lambda x: x['event_REQMAT'])
# release_purch_req_data['DOCTYPE_RequiredMaterial'] = release_purch_req_data['event_REQMAT']
print(release_purch_req_data)
release_purch_req_data = release_purch_req_data.drop(["OBJECTID","CHANGENR","FNAME","UDATE","UTIME"],axis=1)
print(release_purch_req_data)
print("######################################################")
print("Convert PR to Purchase Order")
print("######################################################")
ekpo_res = con.prepare_and_execute_query("EKPO",["EBELN","MATNR","MENGE", "MEINS", "BANFN"],additional_query_part=" WHERE BANFN != ' ' AND MANDT = '"+mandt+"'");
print(ekpo_res)
# cdpos_res_2 = con.prepare_and_execute_query("CDPOS",["OBJECTID","CHANGENR"],additional_query_part=" WHERE OBJECTCLAS = 'EINKBELEG' AND CHNGIND = 'I' AND MANDANT = '"+mandt+"'");
# print(cdpos_res_2)
cdhdr_res_2 = con.prepare_and_execute_query("CDHDR", ["OBJECTID","CHANGENR", "UDATE", "UTIME"], additional_query_part=" WHERE OBJECTCLAS = 'EINKBELEG' AND CHANGE_IND = 'I' AND MANDANT = '"+mandt+"'");
print(cdhdr_res_2)
convert_to_purch_order = ekpo_res.merge(cdhdr_res_2,left_on=["EBELN"],right_on=["OBJECTID"],how="inner") #.drop_duplicates(subset=["OBJECTID","CHANGENR","UDATE","UTIME"])
print(convert_to_purch_order)
timestamp_column_from_dt_tm.apply(convert_to_purch_order, "UDATE", "UTIME", "event_timestamp")
convert_to_purch_order = convert_to_purch_order[convert_to_purch_order["event_timestamp"] >= min_extr_date]
convert_to_purch_order = convert_to_purch_order.sort_values("event_timestamp")
convert_to_purch_order.rename(columns={'PLNBEZ': 'event_REQMAT'},inplace=True)
convert_to_purch_order['event_PURCHORD'] = "PUOR" + convert_to_purch_order["EBELN"]
convert_to_purch_order['event_PURCHREQ'] = "PR" + convert_to_purch_order["BANFN"] # Can be empty
# convert_to_purch_order['event_PURCHREQ'] = convert_to_purch_order.apply(lambda x: "" if (x["BANFN"] == "" or x["BANFN"] == " ") else ("PR"+x["BANFN"]), axis=1)
convert_to_purch_order.rename(columns={'MATNR': 'event_REQMAT'},inplace=True)
if not convert_to_purch_order.empty:
convert_to_purch_order = convert_to_purch_order.merge(relevant_materials,left_on="event_REQMAT",right_on="event_REQMAT",how="inner")
print(convert_to_purch_order);
# Filter only relevant Purchase Requisitions
convert_to_purch_order = convert_to_purch_order.merge(relevant_purchase_requisitions,left_on="event_PURCHREQ",right_on="event_PURCHREQ",how="inner")
# Save relevant Purchase Orders
    relevant_purchase_orders_series = pd.Series(dtype=object)
relevant_purchase_orders_series = relevant_purchase_orders_series.append(convert_to_purch_order['event_PURCHORD']);
relevant_purchase_orders = pd.DataFrame({'event_PURCHORD': relevant_purchase_orders_series}).drop_duplicates(subset=["event_PURCHORD"]);
convert_to_purch_order.rename(columns={'MENGE': 'event_MNG'},inplace=True)
convert_to_purch_order.rename(columns={'MEINS': 'event_EIN'},inplace=True)
convert_to_purch_order = convert_to_purch_order.assign(DOCTYPE_PurchOrd = lambda x: x['event_PURCHORD'])
convert_to_purch_order['DOCTYPE_PurchReq'] = "";
convert_to_purch_order['DOCTYPE_PurchReq'] = convert_to_purch_order.apply(lambda x: None if x['event_PURCHREQ'] == '' else x['event_PURCHREQ'], axis=1)
convert_to_purch_order = convert_to_purch_order.assign(DOCTYPE_RequiredMaterial = lambda x: x['event_REQMAT'])
convert_to_purch_order["event_activity"] = "Convert to Purchase Order"
print(convert_to_purch_order)
convert_to_purch_order = convert_to_purch_order.drop(["OBJECTID","CHANGENR","UDATE","UTIME","EBELN","BANFN"],axis=1)
print(convert_to_purch_order)
print("######################################################")
print("Release Purchase Order")
print("######################################################") # AND FNAME='FRGZU' ? (VALUE_NEW = 'X') AND
release_purchase_order_cdpos = con.prepare_and_execute_query("CDPOS", ["OBJECTID","CHANGENR", "FNAME","VALUE_NEW"], additional_query_part=" WHERE CHNGIND='U' AND VALUE_NEW = 'X' AND FNAME='FRGZU' AND OBJECTCLAS = 'EINKBELEG' AND MANDANT = '"+mandt+"'");
print(release_purchase_order_cdpos)
release_purchase_order_cdhdr = con.prepare_and_execute_query("CDHDR", ["OBJECTID","CHANGENR", "UDATE", "UTIME"], additional_query_part=" WHERE OBJECTCLAS = 'EINKBELEG' AND MANDANT = '"+mandt+"'");
print(release_purchase_order_cdhdr)
release_purch_order = release_purchase_order_cdpos.merge(release_purchase_order_cdhdr,left_on=["OBJECTID","CHANGENR"],right_on=["OBJECTID","CHANGENR"],how="inner") #.drop_duplicates(subset=["OBJECTID","CHANGENR","UDATE","UTIME"])
print(release_purch_order)
timestamp_column_from_dt_tm.apply(release_purch_order, "UDATE", "UTIME", "event_timestamp")
release_purch_order = release_purch_order[release_purch_order["event_timestamp"] >= min_extr_date]
release_purch_order = release_purch_order.sort_values("event_timestamp")
# # release_purch_req_data["event_id"] = release_purch_req_data.index.astype(str)
# # release_purch_req_data["event_activity"] = "Release Purchase Requisition"
release_purch_order["event_activity"] = ""
if not release_purch_order.empty:
release_purch_order["event_activity"] = release_purch_order.apply(lambda x: 'Release Purchase Order (Normal)' if x['VALUE_NEW'] == 'X' else 'Release Purchase Order (Special)', axis=1)
release_purch_order['event_PURCHORD'] = "PUOR" + release_purch_order["OBJECTID"]
# Filter only relevant Purchase Requisitions
release_purch_order = release_purch_order.merge(relevant_purchase_orders,left_on="event_PURCHORD",right_on="event_PURCHORD",how="inner")
release_purch_order.rename(columns={'VALUE_NEW': 'event_NEW-FRGZU'},inplace=True)
release_purch_order = release_purch_order.assign(DOCTYPE_PurchOrd = lambda x: x['event_PURCHORD'])
print(release_purch_order)
release_purch_order = release_purch_order.drop(["OBJECTID","CHANGENR","FNAME","UDATE","UTIME"],axis=1)
print(release_purch_order)
print("######################################################")
print("Reject Purchase Order")
print("######################################################") # (VALUE_NEW = '08') AND
reject_purchase_order_cdpos = con.prepare_and_execute_query("CDPOS", ["OBJECTID","CHANGENR", "FNAME","VALUE_NEW"], additional_query_part=" WHERE (VALUE_NEW = 'B') AND CHNGIND='U' AND FNAME='FRGKE' AND OBJECTCLAS = 'EINKBELEG' AND MANDANT = '"+mandt+"'");
print(reject_purchase_order_cdpos)
reject_purchase_order_cdhdr = con.prepare_and_execute_query("CDHDR", ["OBJECTID","CHANGENR", "UDATE", "UTIME"], additional_query_part=" WHERE OBJECTCLAS = 'EINKBELEG' AND MANDANT = '"+mandt+"'");
print(reject_purchase_order_cdhdr)
reject_purch_order = reject_purchase_order_cdpos.merge(reject_purchase_order_cdhdr,left_on=["OBJECTID","CHANGENR"],right_on=["OBJECTID","CHANGENR"],how="inner") #.drop_duplicates(subset=["OBJECTID","CHANGENR","UDATE","UTIME"])
reject_purch_order = reject_purch_order.merge(ekpo_res.drop(["MENGE", "MEINS", "BANFN"],axis=1),left_on=["OBJECTID"],right_on=["EBELN"],how="inner")
reject_purch_order.rename(columns={'MATNR': 'event_REQMAT'},inplace=True)
if not reject_purch_order.empty:
reject_purch_order = reject_purch_order.merge(relevant_materials,left_on="event_REQMAT",right_on="event_REQMAT",how="inner")
print(reject_purch_order)
timestamp_column_from_dt_tm.apply(reject_purch_order, "UDATE", "UTIME", "event_timestamp")
reject_purch_order = reject_purch_order[reject_purch_order["event_timestamp"] >= min_extr_date]
reject_purch_order = reject_purch_order.sort_values("event_timestamp")
reject_purch_order["event_activity"] = ""
if not reject_purch_order.empty:
reject_purch_order["event_activity"] = reject_purch_order.apply(lambda x: 'Reject Purchase Order' if x['VALUE_NEW'] == 'B' else 'Reject PurOrd: ERR_UNKNOWN_CHANGE', axis=1)
reject_purch_order['event_PURCHORD'] = "PUOR" + reject_purch_order["OBJECTID"]
# Filter only relevant Purchase Requisitions
reject_purch_order = reject_purch_order.merge(relevant_purchase_orders,left_on="event_PURCHORD",right_on="event_PURCHORD",how="inner")
reject_purch_order.rename(columns={'VALUE_NEW': 'event_NEW-FRGZU'},inplace=True)
reject_purch_order = reject_purch_order.assign(DOCTYPE_PurchOrd = lambda x: x['event_PURCHORD'])
reject_purch_order = reject_purch_order.assign(DOCTYPE_RequiredMaterial = lambda x: x['event_REQMAT'])
print(reject_purch_order)
reject_purch_order = reject_purch_order.drop(["OBJECTID","CHANGENR","FNAME","UDATE","UTIME","EBELN"],axis=1)
print(reject_purch_order)
print("######################################################")
print("Reconsider Purchase Order")
print("######################################################") # (VALUE_NEW = '08') AND
reconsider_purchase_order_cdpos = con.prepare_and_execute_query("CDPOS", ["OBJECTID","CHANGENR", "FNAME","VALUE_NEW"], additional_query_part=" WHERE (VALUE_NEW = 'A') AND CHNGIND='U' AND FNAME='FRGKE' AND OBJECTCLAS = 'EINKBELEG' AND MANDANT = '"+mandt+"'");
print(reconsider_purchase_order_cdpos)
reconsider_purchase_order_cdhdr = con.prepare_and_execute_query("CDHDR", ["OBJECTID","CHANGENR", "UDATE", "UTIME"], additional_query_part=" WHERE OBJECTCLAS = 'EINKBELEG' AND MANDANT = '"+mandt+"'");
print(reconsider_purchase_order_cdhdr)
reconsider_purch_order = reconsider_purchase_order_cdpos.merge(reconsider_purchase_order_cdhdr,left_on=["OBJECTID","CHANGENR"],right_on=["OBJECTID","CHANGENR"],how="inner") #.drop_duplicates(subset=["OBJECTID","CHANGENR","UDATE","UTIME"])
print(reconsider_purch_order)
timestamp_column_from_dt_tm.apply(reconsider_purch_order, "UDATE", "UTIME", "event_timestamp")
reconsider_purch_order = reconsider_purch_order[reconsider_purch_order["event_timestamp"] >= min_extr_date]
reconsider_purch_order = reconsider_purch_order.sort_values("event_timestamp")
reconsider_purch_order["event_activity"] = ""
if not reconsider_purch_order.empty:
reconsider_purch_order["event_activity"] = reconsider_purch_order.apply(lambda x: 'Reconsider Purchase Order' if x['VALUE_NEW'] == 'A' else 'Reconsider PurOrd: ERR_UNKNOWN_CHANGE', axis=1)
reconsider_purch_order['event_PURCHORD'] = "PUOR" + reconsider_purch_order["OBJECTID"]
# Filter only relevant Purchase Requisitions
reconsider_purch_order = reconsider_purch_order.merge(relevant_purchase_orders,left_on="event_PURCHORD",right_on="event_PURCHORD",how="inner")
reconsider_purch_order.rename(columns={'VALUE_NEW': 'event_NEW-FRGKE'},inplace=True)
reconsider_purch_order = reconsider_purch_order.assign(DOCTYPE_PurchOrd = lambda x: x['event_PURCHORD'])
print(reconsider_purch_order)
reconsider_purch_order = reconsider_purch_order.drop(["OBJECTID","CHANGENR","FNAME","UDATE","UTIME"],axis=1)
print(reconsider_purch_order)
print("######################################################")
print("Goods Receipt for Purchase Order")
print("######################################################")
goods_receipt_for_purchase_order = con.prepare_and_execute_query("EKBE",["EBELN","CPUDT","CPUTM","BELNR","MATNR", "MENGE"], additional_query_part=" WHERE (NOT CPUDT = '00000000') AND MANDT = '"+mandt+"'")
print(goods_receipt_for_purchase_order)
timestamp_column_from_dt_tm.apply(goods_receipt_for_purchase_order, "CPUDT", "CPUTM", "event_timestamp")
goods_receipt_for_purchase_order = goods_receipt_for_purchase_order[goods_receipt_for_purchase_order["event_timestamp"] >= min_extr_date]
goods_receipt_for_purchase_order = goods_receipt_for_purchase_order.sort_values("event_timestamp")
if not goods_receipt_for_purchase_order.empty:
goods_receipt_for_purchase_order["event_activity"] = goods_receipt_for_purchase_order.apply(lambda x: 'Goods Receipt for Purchase Order', axis=1)
goods_receipt_for_purchase_order['event_PURCHORD'] = "PUOR" + goods_receipt_for_purchase_order["EBELN"]
goods_receipt_for_purchase_order.rename(columns={'MENGE': 'event_MENGE'},inplace=True)
# Filter only relevant Purchase Orders
goods_receipt_for_purchase_order = goods_receipt_for_purchase_order.merge(relevant_purchase_orders,left_on="event_PURCHORD",right_on="event_PURCHORD",how="inner")
goods_receipt_for_purchase_order = goods_receipt_for_purchase_order.assign(DOCTYPE_PurchOrd = lambda x: x['event_PURCHORD'])
goods_receipt_for_purchase_order['event_MATDOC'] = "MATDOC" + goods_receipt_for_purchase_order["BELNR"]
#?##goods_receipt_for_purchase_order = goods_receipt_for_purchase_order.assign(DOCTYPE_MatDoc = lambda x: x['event_MATDOC'])
goods_receipt_for_purchase_order.rename(columns={'MATNR': 'event_REQMAT'},inplace=True)
if not goods_receipt_for_purchase_order.empty:
goods_receipt_for_purchase_order = goods_receipt_for_purchase_order.merge(relevant_materials,left_on="event_REQMAT",right_on="event_REQMAT",how="inner")
goods_receipt_for_purchase_order = goods_receipt_for_purchase_order.assign(DOCTYPE_RequiredMaterial = lambda x: x['event_REQMAT'])
goods_receipt_for_purchase_order = goods_receipt_for_purchase_order.drop(["EBELN","CPUDT","CPUTM","BELNR"],axis=1)
print(goods_receipt_for_purchase_order)
print("######################################################")
print("Goods Issue for Production Order")
print("######################################################")
goods_issue_for_production_order : DataFrame = con.prepare_and_execute_query("MSEG",["MBLNR","MATNR","MENGE","MEINS","AUFNR","CPUDT_MKPF","CPUTM_MKPF","BWART"], additional_query_part=" WHERE AUFNR > '000000000000' AND MANDT = '"+mandt+"' AND (NOT CPUDT_MKPF = '00000000')")
print(goods_issue_for_production_order)
timestamp_column_from_dt_tm.apply(goods_issue_for_production_order, "CPUDT_MKPF", "CPUTM_MKPF", "event_timestamp")
goods_issue_for_production_order = goods_issue_for_production_order[goods_issue_for_production_order["event_timestamp"] >= min_extr_date]
goods_issue_for_production_order = goods_issue_for_production_order.sort_values("event_timestamp")
goods_issue_for_production_order['event_PRODORD'] = ""
if not goods_issue_for_production_order.empty:
goods_issue_for_production_order["event_activity"] = goods_issue_for_production_order.apply(lambda x: 'Goods Issue for Production Order', axis=1)
goods_issue_for_production_order['event_PRODORD'] = goods_issue_for_production_order.apply(lambda x: "" if (x["AUFNR"] == "") else ("OR"+x["AUFNR"]), axis=1)
# Filter
goods_issue_for_production_order = goods_issue_for_production_order.merge(relevant_production_orders,left_on="event_PRODORD",right_on="event_PRODORD",how="inner")
goods_issue_for_production_order.rename(columns={'MATNR': 'event_REQMAT'},inplace=True)
goods_issue_for_production_order.rename(columns={'BWART': 'event_BWART'},inplace=True)
if not goods_issue_for_production_order.empty:
goods_issue_for_production_order = goods_issue_for_production_order.merge(relevant_materials,left_on="event_REQMAT",right_on="event_REQMAT",how="inner")
gi_grouped = goods_issue_for_production_order.groupby(['AUFNR','CPUDT_MKPF','CPUTM_MKPF'])
goods_issue_for_production_order_processed = pd.DataFrame()
goods_issue_for_production_order_processed['MBLNR'] = ""
for name,group in gi_grouped:
toAdd = group.copy();
toAdd["DOCTYPE_RequiredMaterial"] = "";
toAdd['DOCTYPE_RequiredMaterial']=toAdd['DOCTYPE_RequiredMaterial'].astype('object')
if(group.shape[0] > 1):
related_req_mats = toAdd["event_REQMAT"].tolist()
toAdd:DataFrame = toAdd.drop_duplicates(subset=['AUFNR','CPUDT_MKPF','CPUTM_MKPF'])
# toAdd["DOCTYPE_RequiredMaterial"] = "";
# toAdd["DOCTYPE_RequiredMaterial"] = toAdd["DOCTYPE_RequiredMaterial"].astype(object)
for index, row in toAdd.iterrows():
toAdd.at[index,'DOCTYPE_RequiredMaterial'] = related_req_mats
toAdd['event_REQMAT'] = str(related_req_mats);
else:
toAdd['DOCTYPE_RequiredMaterial'] = group["event_REQMAT"]
# toAdd['event_REQMAT'] = group["event_REQMAT"]
goods_issue_for_production_order_processed = goods_issue_for_production_order_processed.append(toAdd)
goods_issue_for_production_order_processed['event_MATDOC'] = "MATDOC" + goods_issue_for_production_order_processed["MBLNR"]
goods_issue_for_production_order_processed.rename(columns={'MENGE': 'event_MNG'},inplace=True)
goods_issue_for_production_order_processed.rename(columns={'MEINS': 'event_EIN'},inplace=True)
if not goods_issue_for_production_order.empty:
goods_issue_for_production_order_processed = goods_issue_for_production_order_processed.assign(DOCTYPE_ProdOrd = lambda x: x['event_PRODORD'])
goods_issue_for_production_order_processed = goods_issue_for_production_order_processed.drop(["MBLNR","AUFNR","CPUDT_MKPF","CPUTM_MKPF"],axis=1)
else:
goods_issue_for_production_order_processed['DOCTYPE_ProdOrd'] = ''
#?##goods_issue_for_production_order_processed = goods_issue_for_production_order_processed.assign(DOCTYPE_MatDoc = lambda x: x['event_MATDOC'])
# goods_issue_for_production_order_processed = goods_issue_for_production_order_processed.assign(DOCTYPE_RequiredMaterial = lambda x: x['event_REQMAT'])
print(goods_issue_for_production_order_processed)
print("######################################################")
print("Production Order Confirmation")
print("######################################################")
production_order_confirmation = con.prepare_and_execute_query("AFRU",["AUFNR","LMNGA","XMNGA","GMEIN","ERSDA","ERZET"],additional_query_part=" WHERE MANDT = '"+mandt+"'")
print(production_order_confirmation)
production_order_confirmation_mat = con.prepare_and_execute_query("AUFM",["MATNR","MBLNR","AUFNR"],additional_query_part=" WHERE ELIKZ = 'X' AND MANDT = '"+mandt+"'")
print(production_order_confirmation_mat)
production_order_confirmation = production_order_confirmation.merge(production_order_confirmation_mat,left_on="AUFNR",right_on="AUFNR",how="inner").drop_duplicates(subset=["AUFNR"])
print(production_order_confirmation);
timestamp_column_from_dt_tm.apply(production_order_confirmation, "ERSDA", "ERZET", "event_timestamp")
production_order_confirmation = production_order_confirmation[production_order_confirmation["event_timestamp"] >= min_extr_date]
production_order_confirmation = production_order_confirmation.sort_values("event_timestamp")
production_order_confirmation['event_PRODORD'] = ""
if not production_order_confirmation.empty:
production_order_confirmation['event_PRODORD'] = production_order_confirmation.apply(lambda x: "" if (x["AUFNR"] == "") else ("OR"+x["AUFNR"]), axis=1)
print(production_order_confirmation)
# Filter
production_order_confirmation = production_order_confirmation.merge(relevant_production_orders,left_on="event_PRODORD",right_on="event_PRODORD",how="inner")
production_order_confirmation.rename(columns={'LMNGA': 'event_MNG'},inplace=True)
production_order_confirmation.rename(columns={'XMNGA': 'event_SCRAP_MNG'},inplace=True)
production_order_confirmation.rename(columns={'GMEIN': 'event_EIN'},inplace=True)
production_order_confirmation.rename(columns={'MATNR': 'event_MATNR'},inplace=True)
production_order_confirmation['event_MATDOC'] = "MATDOC" + production_order_confirmation["MBLNR"]
#?##production_order_confirmation = production_order_confirmation.assign(DOCTYPE_MatDoc = lambda x: x['event_MATDOC'])
production_order_confirmation = production_order_confirmation.assign(DOCTYPE_Material = lambda x: x['event_MATNR'])
if not production_order_confirmation.empty:
production_order_confirmation["event_activity"] = production_order_confirmation.apply(lambda x: 'Confirmation of Production Order', axis=1)
production_order_confirmation = production_order_confirmation.assign(DOCTYPE_ProdOrd = lambda x: x['event_PRODORD'])
production_order_confirmation = production_order_confirmation.drop(["AUFNR","ERSDA","ERZET","MBLNR"],axis=1);
print(production_order_confirmation);
union = pd.concat([dataframe,release_purch_req_data,convert_to_purch_order,release_purch_order,reject_purch_order,reconsider_purch_order,goods_receipt_for_purchase_order,goods_issue_for_production_order_processed,production_order_confirmation]).sort_values("event_timestamp").reset_index()
union["event_id"] = union.index.astype(str)
return union
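# Column conventions of the returned log (a recap of what `apply` builds):
# each row is one event; 'event_*' columns hold event attributes ('event_id',
# 'event_activity', 'event_timestamp', plus payload fields such as
# 'event_PRODORD' or 'event_PURCHORD'), while the 'DOCTYPE_*' columns relate
# the event to the object instances of each object type (production order,
# purchase requisition/order, material), which is the format the MDL/OCEL
# exporters in `cli` below consume.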
def cli(con):
print("\n\nProd Object-Centric Log Extractor\n\n")
min_extr_date = input("Insert the minimum extraction date (default: 2020-01-01 00:00:00): ")
if not min_extr_date:
min_extr_date = "2020-01-01 00:00:00"
gjahr = input("Insert the fiscal year (default: 2020):")
if not gjahr:
gjahr = "2020"
dataframe = apply(con, min_extr_date=min_extr_date, gjahr=gjahr)
path = input("Insert the path where the log should be saved (default: prod.xmlocel): ")
if not path:
path = "prod.xmlocel"
if path.endswith("mdl"):
mdl_exporter.apply(dataframe, path)
elif path.endswith("jsonocel") or path.endswith("xmlocel"):
ocel_exporter.apply(dataframe, path)
| 71.205882 | 293 | 0.722302 | 3,892 | 31,473 | 5.457091 | 0.079394 | 0.055558 | 0.026932 | 0.047648 | 0.712275 | 0.616366 | 0.49814 | 0.423655 | 0.345591 | 0.308395 | 0 | 0.006957 | 0.114002 | 31,473 | 441 | 294 | 71.367347 | 0.754707 | 0.127951 | 0 | 0.174051 | 0 | 0.012658 | 0.264639 | 0.048249 | 0 | 0 | 0 | 0.002268 | 0 | 1 | 0.006329 | false | 0 | 0.022152 | 0 | 0.031646 | 0.246835 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a8c55fdde1ff4ebdfeb884f8d8ccf37742a0471 | 2,676 | py | Python | src/tempgen/parsers/docx.py | k5md/Templated-Generator | f3cd2bc6c7de6f68fa1e5835471e8eaa9163d530 | [
"MIT"
] | null | null | null | src/tempgen/parsers/docx.py | k5md/Templated-Generator | f3cd2bc6c7de6f68fa1e5835471e8eaa9163d530 | [
"MIT"
] | null | null | null | src/tempgen/parsers/docx.py | k5md/Templated-Generator | f3cd2bc6c7de6f68fa1e5835471e8eaa9163d530 | [
"MIT"
] | null | null | null | import docx
from tempgen.parsers.parser import AbstractParser
class Parser(AbstractParser):
    def paragraph_replace_text(self, paragraph, search_str, replace_str):
        '''
        Replace every occurrence of `search_str` in `paragraph` with
        `replace_str`, editing the underlying runs in place so that character
        formatting is preserved even when a match spans several runs.
        Adapted from:
        https://github.com/python-openxml/python-docx/issues/30#issuecomment-881106471
        '''
        count = 0
        search_pos = 0
        while paragraph.text.find(search_str, search_pos) != -1:
            match = {'start': paragraph.text.find(search_str, search_pos), 'end': paragraph.text.find(search_str, search_pos) + len(search_str)}
            search_pos = match['end']
            # earlier replacements shifted the text, so offset this match
            # by the accumulated length difference
            padding = (len(replace_str) - (match['end'] - match['start'])) * count
            runs = iter(paragraph.runs)
            start, end = match['start'] + padding, match['end'] + padding
for run in runs:
run_len = len(run.text)
if start < run_len:
break
start, end = start - run_len, end - run_len
run_text = run.text
run_len = len(run_text)
run.text = '%s%s%s' % (run_text[:start], replace_str, run_text[end:])
end -= run_len
for run in runs:
if end <= 0:
break
run_text = run.text
run_len = len(run_text)
run.text = run_text[end:]
end -= run_len
count += 1
return paragraph
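    # Illustrative behaviour of paragraph_replace_text: for a paragraph whose
    # text "Dear {{name}}!" is split across runs as ["Dear {{na", "me}}!"],
    # calling it with search_str="{{name}}" and replace_str="Alice" rewrites
    # the runs in place so the visible text becomes "Dear Alice!" while each
    # run keeps its own character formatting.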
    def replace_in_paragraph(self, p, d):
        # apply every (search string -> replacement string) pair in `d` to paragraph `p`
        for replaced, replacement in d.items():
            self.paragraph_replace_text(p, replaced, replacement)
def collect_paragraphs(self, doc):
paragraphs = []
for p in doc.paragraphs:
paragraphs.append(p)
for table in doc.tables:
for col in table.columns:
for cell in col.cells:
for p in cell.paragraphs:
paragraphs.append(p)
return paragraphs
def parse(self, path, container, parse_entry, find_matches):
doc = docx.Document(path)
paragraphs = self.collect_paragraphs(doc)
for p in paragraphs:
matches = find_matches(p.text)
for match in matches:
payload = parse_entry(match, path)
container[payload['id']] = payload
def replace(self, source_path, target_path, compute_match, replacements, update_external = False):
doc = docx.Document(source_path)
paragraphs = self.collect_paragraphs(doc)
to_replace = {}
for p in paragraphs:
compute_match(p.text, to_replace, replacements, source_path, update_external)
self.replace_in_paragraph(p, to_replace)
doc.save(target_path) | 39.940299 | 125 | 0.563901 | 312 | 2,676 | 4.676282 | 0.24359 | 0.057574 | 0.047978 | 0.047978 | 0.201508 | 0.190541 | 0.05072 | 0.05072 | 0.05072 | 0.05072 | 0 | 0.009029 | 0.337818 | 2,676 | 67 | 126 | 39.940299 | 0.814334 | 0.029148 | 0 | 0.271186 | 0 | 0 | 0.013592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084746 | false | 0 | 0.033898 | 0 | 0.169492 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a90e4197c82e64e65616870e1cd90e24176a9dd | 466 | py | Python | tf/easytensorflow.py | mutazag/mdsi_deeplearn | 45776b3ec3ed952d59477c5f29e444c4de277f11 | [
"MIT"
] | null | null | null | tf/easytensorflow.py | mutazag/mdsi_deeplearn | 45776b3ec3ed952d59477c5f29e444c4de277f11 | [
"MIT"
] | null | null | null | tf/easytensorflow.py | mutazag/mdsi_deeplearn | 45776b3ec3ed952d59477c5f29e444c4de277f11 | [
"MIT"
] | null | null | null | # https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/1_Graph_and_Session.ipynb
#%%
import tensorflow as tf
# On TensorFlow 2.x, use `import tensorflow.compat.v1 as tf` and
# `tf.disable_eager_execution()`; the graph/session API below is TF 1.x.
#%%
a = 2
b = 3
c = tf.add(a, b, name='Add')
print(c)
#%%
# to run the graph, put it in a session and run
sess = tf.Session()
print(sess.run(c))
sess.close()
#%%
# a better way to deal with session
with tf.Session() as sess:
print(sess.run(c))
#%%
print("END")
#%%
| 13.314286 | 120 | 0.665236 | 77 | 466 | 3.935065 | 0.532468 | 0.092409 | 0.079208 | 0.085809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010309 | 0.167382 | 466 | 34 | 121 | 13.705882 | 0.770619 | 0.521459 | 0 | 0.181818 | 0 | 0 | 0.028708 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.363636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a92064954db18af7514af8bb12eb3b5c7406db8 | 639 | py | Python | msk/consumer.py | wingkwong/aws-playground | 5d395bb63e6f47bb4a536bab9e34ca0744e79c5d | [
"MIT"
] | 1 | 2021-09-10T05:21:39.000Z | 2021-09-10T05:21:39.000Z | msk/consumer.py | wingkwong/aws-playground | 5d395bb63e6f47bb4a536bab9e34ca0744e79c5d | [
"MIT"
] | null | null | null | msk/consumer.py | wingkwong/aws-playground | 5d395bb63e6f47bb4a536bab9e34ca0744e79c5d | [
"MIT"
] | null | null | null | from kafka import KafkaConsumer
from json import loads
# Define Amazon MSK Brokers
brokers=['<YOUR_MSK_BROKER_1>:9092', '<YOUR_MSK_BROKER_2>:9092']
# Define Kafka topic to be consumed from
kafka_topic='<YOUR_KAFKA_TOPIC>'
# A Kafka client that consumes records from a Kafka cluster
consumer = KafkaConsumer(
kafka_topic,
bootstrap_servers=brokers,
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id='my-group',
value_deserializer=lambda x: loads(x.decode('utf-8')))
for message in consumer:
message = message.value
print('{}'.format(message))
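# A matching producer sketch (hedged; assumes the same placeholder brokers and
# topic are filled in) that serializes values the way this consumer expects:
#
# from kafka import KafkaProducer
# from json import dumps
#
# producer = KafkaProducer(
#     bootstrap_servers=brokers,
#     value_serializer=lambda x: dumps(x).encode('utf-8'))
# producer.send(kafka_topic, value={'greeting': 'hello from MSK'})
# producer.flush()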
| 31.95 | 64 | 0.690141 | 83 | 639 | 5.108434 | 0.60241 | 0.09434 | 0.061321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021912 | 0.214398 | 639 | 19 | 65 | 33.631579 | 0.822709 | 0.192488 | 0 | 0 | 0 | 0 | 0.173828 | 0.09375 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a9611f6590b2309ddd6925053805e4c14110715 | 12,176 | py | Python | MNIST_VAE.py | federicobergamin/Variational-Autoencoders | 6cae335c82ec55aa5fa9f260e7daa2ad6ace453e | [
"MIT"
] | 1 | 2019-12-23T12:12:13.000Z | 2019-12-23T12:12:13.000Z | MNIST_VAE.py | federicobergamin/Variational-Autoencoders | 6cae335c82ec55aa5fa9f260e7daa2ad6ace453e | [
"MIT"
] | null | null | null | MNIST_VAE.py | federicobergamin/Variational-Autoencoders | 6cae335c82ec55aa5fa9f260e7daa2ad6ace453e | [
"MIT"
] | null | null | null | '''
We are going to learn a latent space and a generative model for the MNIST dataset.
'''
import numpy as np
import torch
import torch.utils
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torch.nn.functional as F
from torchvision import datasets, transforms, utils
from VAE_personal_implementation.VAE import VariationalAutoencoder
from sklearn.decomposition import PCA
from VAE_personal_implementation.utils.code_to_load_the_dataset import load_MNIST_dataset
import matplotlib.pyplot as plt
def show_images(images, title=None, path=None):
images = utils.make_grid(images)
show_image(images[0], title, path)
def show_image(img, title = "", path = None):
plt.imshow(img, cmap='gray')
plt.title(title)
if path is not None:
plt.savefig(path)
plt.show()
# def binary_cross_entropy(r, x):
# return -torch.sum(x * torch.log(r + 1e-8) + (1 - x) * torch.log(1 - r + 1e-8), dim=-1)
# Writer will output to ./runs/ directory by default
writer = SummaryWriter()
ORIGINAL_BINARIZED_MNIST = True
use_cuda = torch.cuda.is_available()
print('Do we get access to a CUDA? - ', use_cuda)
device = torch.device("cuda" if use_cuda else "cpu")
BATCH_SIZE = 64
HIDDEN_LAYERS = [256,128]
Z_DIM = 12
N_EPOCHS = 200
LEARNING_RATE = 3e-4  # previously tried values: 1e-3, 3e-4
WEIGHT_DECAY = -1  # values <= 0 disable the small L2 penalty below
N_SAMPLE = 64
SAVE_MODEL_EPOCH = N_EPOCHS - 5
PATH = 'saved_models/'
if ORIGINAL_BINARIZED_MNIST:
## we load the original dataset by Larochelle
train_loader, val_loader, test_loader = load_MNIST_dataset('Original_MNIST_binarized/', BATCH_SIZE, True, True,
True)
else:
# we have the binarized MNIST
## TRAIN SET
training_set = datasets.MNIST('../MNIST_dataset', train=True, download=True,
transform=transforms.ToTensor())
print('Number of examples in the training set:', len(training_set))
print('Size of the image:', training_set[0][0].shape)
## we plot an example only to check it
idx_ex = 1000
x, y = training_set[idx_ex] # x is now a torch.Tensor
plt.imshow(x.numpy()[0], cmap='gray')
plt.title('Example n {}, label: {}'.format(idx_ex, y))
plt.show()
### we only check if it is binarized
input_dim = x.numpy().size
print('Size of the image:', input_dim)
flatten_bernoulli = lambda x: transforms.ToTensor()(x).view(-1).bernoulli()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../MNIST_dataset', train=True, transform=flatten_bernoulli),
batch_size=BATCH_SIZE, shuffle=True)
## TEST SET
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../MNIST_dataset', train=False, transform=flatten_bernoulli),
batch_size=BATCH_SIZE, shuffle=True)
## another way to plot some images from the dataset
# dataiter = iter(train_loader)
# images, labels = dataiter.next() ## next return a complete batch --> BATCH_SIZE images
# print('prirdffervgevev', images.shape)
# show_images(images.view(BATCH_SIZE,1,28,28))
# input_dim = x.numpy().size
# print('Size of the image:', input_dim)
## now we have our train and test set
## we can create our model and try to train it
model = VariationalAutoencoder(28*28, HIDDEN_LAYERS, Z_DIM)
print('Model overview and recap\n')
print(model)
print('\n')
## optimization
if WEIGHT_DECAY > 0:
# we add small L2 reg as in the original paper
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, betas=(0.9, 0.999), weight_decay=WEIGHT_DECAY)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, betas=(0.9, 0.999))
## training loop
training_loss = []
approx_kl = []
anal_kl = []
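# For reference: with a diagonal-Gaussian posterior q(z|x) = N(mu, diag(sigma^2))
# and a standard-normal prior, the closed-form KL that `model.kl_analytical`
# is assumed to implement is
#   KL(q || p) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# A minimal, self-contained sketch of that formula:
def analytical_kl_diag_gaussian(mu, log_var):
    # one KL value per example, summed over the latent dimensions
    return -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim=-1)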
print('.....Starting trianing')
for epoch in range(N_EPOCHS):
tmp_elbo = 0
tmp_kl = 0
tmp_recon = 0
n_batch = 0
for i, data in enumerate(train_loader, 0):
n_batch += 1
# images, labels = data
# images = images.to(device)
if ORIGINAL_BINARIZED_MNIST:
images = data
else:
images, labels = data
images = images.to(device)
reconstruction = model(images)
# print(test_set_reconstruction)
# print('images shape', images.shape)
# print('recon shape', test_set_reconstruction.shape)
# likelihood = -binary_cross_entropy(test_set_reconstruction, images)
likelihood = - F.binary_cross_entropy(reconstruction, images, reduction='sum')
# print('likel hsape', likelihood.shape)
# print(model.kl_divergence.shape)
# print(model.kl_analytical.shape)
elbo = likelihood - torch.sum(model.kl_divergence)
# elbo = likelihood + torch.sum(model.kl_analytical)
# print('Sampled kl', model.kl_divergence.shape)
# print('Anal kl', model.kl_analytical.shape)
# print('---')
        # store plain floats (not graph-attached tensors) so plotting works
        # and the autograd graph is not kept alive
        approx_kl.append((torch.sum(model.kl_divergence) / len(images)).item())
        anal_kl.append((-torch.sum(model.kl_analytical) / len(images)).item())
L = - elbo / len(images)#BATCH_SIZE
L.backward()
optimizer.step()
optimizer.zero_grad()
# if L.item()/len(images) > 4:
# print('Epoch: {}, Batch: {}, images in the batch: {}, L.item: {}'.format(epoch, i, len(images), L.item()))
training_loss.append(- elbo/ len(images))
tmp_elbo += - L.item() * BATCH_SIZE
tmp_recon += likelihood
# tmp_kl += - torch.sum(model.kl_analytical)
tmp_kl += torch.sum(model.kl_divergence)
## at the end of each epoch we can try to store some images
##
with torch.no_grad():
for r, data in enumerate(test_loader, 0):
# images, labels = data
# images = images.to(device)
if ORIGINAL_BINARIZED_MNIST:
images = data
else:
images, labels = data
images = images.to(device)
reconstruction = model(images)
# print(test_set_reconstruction.shape)
recon_image_ = reconstruction.view(reconstruction.shape[0], 1, 28, 28)
images = images.view(images.shape[0], 1, 28, 28)
if r % 100 == 0:
# show_images(images, 'original')
# show_images(recon_image_, 'test_set_reconstruction')
grid1 = torchvision.utils.make_grid(images)
writer.add_image('orig images', grid1, 0)
grid2 = torchvision.utils.make_grid(recon_image_)
writer.add_image('recon images', grid2)
writer.close()
## maybe we just store the test_set_reconstruction
## maybe we just store the test_set_reconstruction
images = utils.make_grid(images)
recon_image_ = utils.make_grid(recon_image_)
plt.imshow(images[0], cmap='gray')
plt.title('Original from epoch {}'.format(epoch + 1))
plt.savefig('reconstruction_during_training/originals_epoch_{}_example_{}_12zdim'.format(epoch + 1, r))
plt.imshow(recon_image_[0], cmap='gray')
plt.title('Reconstruction from epoch {}'.format(epoch + 1))
plt.savefig('reconstruction_during_training/reconstruction_epoch_{}_example_{}_12zdim'.format(epoch + 1, r))
## we want also to sample something from the model during training
rendom_samples = model.sample(N_SAMPLE)
samples = rendom_samples.view(rendom_samples.shape[0], 1, 28, 28)
samples = utils.make_grid(samples)
plt.imshow(samples[0], cmap='gray')
plt.title('Samples from epoch {}'.format(epoch + 1))
plt.savefig('samples_during_training/samples_epoch_{}_12zdim'.format(epoch + 1))
print('Epoch: {}, Elbo: {}, recon_error: {}, KL: {}'.format(epoch+1, tmp_elbo/ len(train_loader.dataset), -tmp_recon/ len(train_loader.dataset), tmp_kl/ len(train_loader.dataset) ))
if epoch + 1 > SAVE_MODEL_EPOCH:
## we have to store the model
torch.save(model.state_dict(), PATH + 'VAE_zdim_{}_epoch_{}_elbo_{}_learnrate_{}'.format(Z_DIM, epoch+1, tmp_elbo/ len(train_loader.dataset), LEARNING_RATE))
print('....Training ended')
fig = plt.figure()
plt.plot(training_loss, label='Elbo mean per batch')
plt.legend()
plt.show()
plt.plot(approx_kl, label='Approximated KL (mean)')
plt.plot(anal_kl, label='Analitycal KL (mean)')
plt.legend()
plt.show()
model.eval()
with torch.no_grad():
for i, data in enumerate(test_loader, 0):
# images, labels = data
# images = images.to(device)
if ORIGINAL_BINARIZED_MNIST:
images = data
else:
images, labels = data
images = images.to(device)
reconstruction = model(images)
# print(test_set_reconstruction.shape)
recon_image_ = reconstruction.view(reconstruction.shape[0], 1, 28, 28)
images = images.view(images.shape[0], 1, 28, 28)
if i % 100 == 0:
show_images(images, 'original')
show_images(recon_image_, 'test_set_reconstruction')
images = utils.make_grid(images)
recon_image_ = utils.make_grid(recon_image_)
plt.imshow(images[0], cmap='gray')
plt.title('Original')
plt.savefig('reconstruction_during_training/originals_example_{}_12zdim'.format(i))
plt.imshow(recon_image_[0], cmap='gray')
plt.title('Reconstruction')
plt.savefig('reconstruction_during_training/reconstruction_example_{}_12zdim'.format(i))
## at this point I want to take the test set and compute the latent code
## for each example and then run PCA or TSNE and plot it
if not ORIGINAL_BINARIZED_MNIST:
latent_representation = []
all_labels = []
with torch.no_grad():
for i, data in enumerate(test_loader, 0):
images, labels = data
labels = labels.numpy()
images = images.to(device)
for k in range(len(images)):
latent_repr, _, _ = model.encoder(images[k])
latent_representation.append(latent_repr.numpy())
all_labels.append(labels[k])
# at this point the two sets contain what we want
# we can do PCA and plot the 2 components results
latent_representation = np.array(latent_representation)
print(latent_representation.shape)
pca = PCA(2)
pca.fit(latent_representation)
feat = pca.fit_transform(latent_representation)
features_pca = np.array(feat)
print(features_pca.shape)
colors = ['#0165fc', '#02ab2e', '#fdaa48', '#fffe7a', '#6a79f7', '#db4bda', '#0ffef9', '#bd6c48', '#fea993', '#1e9167']
COLORS = ["#0072BD",
"#D95319",
"#006450",
"#7E2F8E",
"#77AC30",
"#EDB120",
"#4DBEEE",
"#A2142F",
"#191970",
"#A0522D"]
# print(all_labels)
all_labels = np.array(all_labels)
fig = plt.figure()
for i in range(10):
idxs = np.where(all_labels == i)
# print(idxs)
plt.scatter(features_pca[idxs,0], features_pca[idxs,1], c = colors[i], label = i)
# plt.scatter(features_pca[:,0], features_pca[:,1], c = all_labels)
plt.title('PCA on the latent dimension')
plt.legend()
plt.savefig('PCA/PCA_latent_repr_layer_nlatent_{}'.format(Z_DIM))
plt.show()
## now we want also to try to sample from the decoder
## RANDOM SAMLING
# Z IS RANDOM N(0,1)
# mus = torch.zeros((BATCH_SIZE,Z_DIM))
# stds = torch.zeros((BATCH_SIZE, Z_DIM))
# eps = torch.randn((BATCH_SIZE, Z_DIsM))
# random_z = mus.addcmul(stds, eps)
with torch.no_grad():
for i in range(5):
# random_latent = torch.randn((N_SAMPLE, Z_DIM), dtype = torch.float).to(device)
images_from_random = model.sample(N_SAMPLE)
sampled_ima = images_from_random.view(images_from_random.shape[0], 1, 28, 28)
show_images(sampled_ima, 'Random sampled imagess', 'random_samples/Random_samples_ex_{}_12zdim'.format(i+1))
| 37.931464 | 185 | 0.634938 | 1,587 | 12,176 | 4.686831 | 0.200378 | 0.020973 | 0.02541 | 0.015058 | 0.364076 | 0.323609 | 0.273998 | 0.261495 | 0.244421 | 0.229901 | 0 | 0.024639 | 0.243348 | 12,176 | 320 | 186 | 38.05 | 0.782698 | 0.224622 | 0 | 0.267327 | 0 | 0 | 0.127088 | 0.050749 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009901 | false | 0 | 0.059406 | 0 | 0.069307 | 0.059406 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a97c3f3a52c4076a37c5647166b601d480b1378 | 5,202 | py | Python | examples/allennlp/allennlp_simple.py | kevtran23/optuna | 4f2fa9c60d9a216ff3cfbc0f3ca12cb32ff53434 | [
"MIT"
] | null | null | null | examples/allennlp/allennlp_simple.py | kevtran23/optuna | 4f2fa9c60d9a216ff3cfbc0f3ca12cb32ff53434 | [
"MIT"
] | null | null | null | examples/allennlp/allennlp_simple.py | kevtran23/optuna | 4f2fa9c60d9a216ff3cfbc0f3ca12cb32ff53434 | [
"MIT"
] | null | null | null | """
Optuna example that optimizes a classifier configuration for IMDB movie review dataset.
This script is based on the example of allentune (https://github.com/allenai/allentune).
In this example, we optimize the validation accuracy of sentiment classification using AllenNLP.
Since it is too time-consuming to use the entire dataset, we here use a small subset of it.
We have the following two ways to execute this example:
(1) Execute this code directly.
$ python allennlp_simple.py
(2) Execute through CLI.
$ STUDY_NAME=`optuna create-study --direction maximize --storage sqlite:///example.db`
$ optuna study optimize allennlp_simple.py objective --n-trials=100 --study-name $STUDY_NAME \
--storage sqlite:///example.db
"""
import os
import pkg_resources
import random
import shutil
import allennlp
import allennlp.data
import allennlp.models
import allennlp.modules
import numpy
import torch
import optuna
from optuna.integration import AllenNLPPruningCallback
DEVICE = -1 # If you want to use GPU, use DEVICE = 0.
MAX_DATA_SIZE = 3000
MODEL_DIR = os.path.join(os.getcwd(), "result")
TARGET_METRIC = "accuracy"
class SubsampledDatasetReader(allennlp.data.dataset_readers.TextClassificationJsonReader):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _read(self, datapath):
data = list(super()._read(datapath))
random.shuffle(data)
yield from data[:MAX_DATA_SIZE]
def prepare_data():
glove_indexer = allennlp.data.token_indexers.SingleIdTokenIndexer(lowercase_tokens=True)
tokenizer = allennlp.data.tokenizers.whitespace_tokenizer.WhitespaceTokenizer()
reader = SubsampledDatasetReader(
token_indexers={"tokens": glove_indexer}, tokenizer=tokenizer,
)
train_dataset = reader.read(
"https://s3-us-west-2.amazonaws.com/allennlp/datasets/imdb/train.jsonl"
)
valid_dataset = reader.read(
"https://s3-us-west-2.amazonaws.com/allennlp/datasets/imdb/dev.jsonl"
)
vocab = allennlp.data.Vocabulary.from_instances(train_dataset)
train_dataset.index_with(vocab)
valid_dataset.index_with(vocab)
return train_dataset, valid_dataset, vocab
def create_model(vocab, trial):
dropout = trial.suggest_float("dropout", 0, 0.5)
output_dim = trial.suggest_int("output_dim", 16, 128)
max_filter_size = trial.suggest_int("max_filter_size", 3, 6)
num_filters = trial.suggest_int("num_filters", 16, 128)
encoder = allennlp.modules.seq2vec_encoders.CnnEncoder(
ngram_filter_sizes=range(1, max_filter_size),
num_filters=num_filters,
embedding_dim=50,
output_dim=output_dim,
)
embedding = allennlp.modules.Embedding(
embedding_dim=50,
trainable=True,
pretrained_file="https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.50d.txt.gz", # NOQA
vocab=vocab,
)
embedder = allennlp.modules.text_field_embedders.BasicTextFieldEmbedder({"tokens": embedding})
model = allennlp.models.BasicClassifier(
text_field_embedder=embedder, seq2vec_encoder=encoder, dropout=dropout, vocab=vocab,
)
return model
def objective(trial):
train_dataset, valid_dataset, vocab = prepare_data()
model = create_model(vocab, trial)
if DEVICE > -1:
model.to(torch.device("cuda:{}".format(DEVICE)))
lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
data_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=64, collate_fn=allennlp.data.allennlp_collate
)
validation_data_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=64, collate_fn=allennlp.data.allennlp_collate
)
serialization_dir = os.path.join(MODEL_DIR, "trial_{}".format(trial.number))
trainer = allennlp.training.GradientDescentTrainer(
model=model,
optimizer=optimizer,
data_loader=data_loader,
validation_data_loader=validation_data_loader,
validation_metric="+" + TARGET_METRIC,
patience=None, # `patience=None` since it could conflict with AllenNLPPruningCallback
num_epochs=50,
cuda_device=DEVICE,
serialization_dir=serialization_dir,
epoch_callbacks=[AllenNLPPruningCallback(trial, "validation_" + TARGET_METRIC)],
)
metrics = trainer.train()
return metrics["best_validation_" + TARGET_METRIC]
if __name__ == "__main__":
if pkg_resources.parse_version(allennlp.__version__) < pkg_resources.parse_version("1.0.0"):
raise RuntimeError("AllenNLP>=1.0.0 is required for this example.")
random.seed(41)
torch.manual_seed(41)
numpy.random.seed(41)
pruner = optuna.pruners.HyperbandPruner()
study = optuna.create_study(direction="maximize", pruner=pruner)
study.optimize(objective, n_trials=50, timeout=600)
print("Number of finished trials: ", len(study.trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
shutil.rmtree(MODEL_DIR)
| 33.133758 | 113 | 0.716455 | 649 | 5,202 | 5.546995 | 0.360555 | 0.023333 | 0.0075 | 0.010833 | 0.147222 | 0.112222 | 0.076667 | 0.076667 | 0.076667 | 0.065 | 0 | 0.016321 | 0.175509 | 5,202 | 156 | 114 | 33.346154 | 0.823036 | 0.16436 | 0 | 0.018868 | 0 | 0.028302 | 0.106368 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04717 | false | 0 | 0.113208 | 0 | 0.198113 | 0.04717 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a98b5e99f3f08c40346d868f154e3474a9471fd | 703 | py | Python | main/increasing-order-search-tree/increasing-order-search-tree.py | EliahKagan/old-practice-snapshot | 1b53897eac6902f8d867c8f154ce2a489abb8133 | [
"0BSD"
] | null | null | null | main/increasing-order-search-tree/increasing-order-search-tree.py | EliahKagan/old-practice-snapshot | 1b53897eac6902f8d867c8f154ce2a489abb8133 | [
"0BSD"
] | null | null | null | main/increasing-order-search-tree/increasing-order-search-tree.py | EliahKagan/old-practice-snapshot | 1b53897eac6902f8d867c8f154ce2a489abb8133 | [
"0BSD"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def increasingBST(self, root: TreeNode) -> TreeNode:
head = TreeNode(None)
link = head
def dfs(subroot : TreeNode) -> None:
nonlocal link
while subroot is not None:
dfs(subroot.left)
subroot.left = None
link.right = subroot
link = link.right
subroot = subroot.right
link.right = None
dfs(root)
return head.right
| 26.037037 | 56 | 0.479374 | 69 | 703 | 4.826087 | 0.405797 | 0.081081 | 0.096096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.445235 | 703 | 26 | 57 | 27.038462 | 0.853846 | 0.211949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a9922af72c1516621850ee44f2b5c699c589d4b | 2,715 | py | Python | src/lex.py | urlordjames/pointysnake | cac720068d55ffec8e7b220285e460faf6ceaa3e | [
"MIT"
] | 1 | 2020-07-12T14:20:58.000Z | 2020-07-12T14:20:58.000Z | src/lex.py | urlordjames/pointysnake | cac720068d55ffec8e7b220285e460faf6ceaa3e | [
"MIT"
] | 5 | 2020-07-16T15:58:41.000Z | 2020-07-24T01:42:59.000Z | src/lex.py | urlordjames/pointysnake | cac720068d55ffec8e7b220285e460faf6ceaa3e | [
"MIT"
] | null | null | null | def lex(filename):
f = open(filename, "r")
src = f.read()
f.close()
lines = src.split("\n")
newlines = []
for line in lines:
if len(line) < 1 or line[0] == "#":
continue
newlines.append(line)
tokens = []
for line in newlines:
tokens.append(tokenizeln(line))
return tokens
import re
tokens = {
"^function$": ["functiondefine"],
"^assert$": ["assert"],
"^{$": ["functionstart"],
"^}$": ["functionterminate"],
"^,$": ["argseperate"],
"^int$": ["type", "int"],
"^str$": ["type", "str"],
"^bool$": ["type", "bool"],
"^if$": ["ifdefine"],
"^while$": ["whiledefine"],
"^[a-z]+\\($": ["function"],
"^\\d+$": ["int"],
"^\".*\"$": ["str"],
"^\\($": ["argopen"],
"^\\)$": ["argend"],
"^var $": ["setvar"],
"^staticvar $": ["setstaticvar"],
"^[a-zA-Z]+ $": ["assignvar"],
"^=$": ["assignop"],
"^true|false$": ["bool"],
"^[ |\t]$": ["ignore"]
}
def tokenizeln(line):
buffer = ""
matches = []
for char in line:
buffer += char
for token in tokens.keys():
match = re.search(token, buffer)
if not match is None:
matches.append([match, tokens[token]])
buffer = ""
break
tokenized = []
for match in matches:
if match[1][0] == "ignore":
continue
elif match[1][0] == "function":
tokenized.append([match[1][0], match[0].group()[:-1]])
continue
elif match[1][0] == "str":
tokenized.append([match[1][0], match[0].group()[1:-1]])
continue
elif match[1][0] == "int":
previoustoken = tokenized[len(tokenized) - 1]
if previoustoken[0] == "int":
previoustoken[1] = int(str(previoustoken[1]) + match[0].group())
else:
tokenized.append([match[1][0], int(match[0].group())])
continue
elif match[1][0] == "bool":
tokenized.append([match[1][0], match[0].group()])
continue
elif match[1][0] == "assignvar":
try:
oldthing = tokenized[len(tokenized) - 2][0]
except:
continue
if not (oldthing == "setvar" or oldthing == "setstaticvar"):
continue
varname = match[0].group()[:-1]
tokenized.append([match[1][0], varname])
tokens.update({f"^{varname}$": [tokenized[len(tokenized) - 3][0][3:], varname]})
continue
tokenized.append(match[1])
return tokenized
if __name__ == "__main__":
print(lex("../tests/printstr.psn"))
| 28.882979 | 92 | 0.467403 | 270 | 2,715 | 4.67037 | 0.318519 | 0.057098 | 0.061063 | 0.099921 | 0.199841 | 0.149881 | 0.118953 | 0.118953 | 0.053925 | 0 | 0 | 0.023745 | 0.317495 | 2,715 | 93 | 93 | 29.193548 | 0.656773 | 0 | 0 | 0.130952 | 0 | 0 | 0.150645 | 0.007735 | 0 | 0 | 0 | 0 | 0.011905 | 1 | 0.02381 | false | 0 | 0.011905 | 0 | 0.059524 | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a9abd84e1be3db2d14bf1598d2908aacba99eaf | 417 | py | Python | DailyChallenge/LC_198.py | iphyer/LeetcodeSummary | ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9 | [
"MIT"
] | null | null | null | DailyChallenge/LC_198.py | iphyer/LeetcodeSummary | ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9 | [
"MIT"
] | null | null | null | DailyChallenge/LC_198.py | iphyer/LeetcodeSummary | ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9 | [
"MIT"
] | null | null | null | class Solution:
def rob(self, nums: List[int]) -> int:
# DP
# dp[i]: the max value until house i
dp = [0] * len(nums)
N = len(nums)
if N == 1: return nums[0]
if N == 2: return max(nums[0], nums[1])
dp[0] = nums[0]
dp[1] = max(nums[0], nums[1])
for i in range(2, N):
dp[i] = max(dp[i-2] + nums[i], dp[i-1])
return dp[N-1]
| 29.785714 | 51 | 0.446043 | 72 | 417 | 2.583333 | 0.347222 | 0.064516 | 0.086022 | 0.129032 | 0.139785 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057252 | 0.371703 | 417 | 13 | 52 | 32.076923 | 0.652672 | 0.088729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a9d1bf580afd0ee0a24298894018f257f2969f5 | 351 | py | Python | MyGame/code/constants.py | Chad474/2dPyGame | 22d2c19a5407fa4b539b772facfc5c08e6860ddd | [
"MIT"
] | null | null | null | MyGame/code/constants.py | Chad474/2dPyGame | 22d2c19a5407fa4b539b772facfc5c08e6860ddd | [
"MIT"
] | null | null | null | MyGame/code/constants.py | Chad474/2dPyGame | 22d2c19a5407fa4b539b772facfc5c08e6860ddd | [
"MIT"
] | null | null | null | import pygame
TITLE = 'Gravity'
BLOCK_SIZE = 32
SCREEN_WIDTH = 1024 # 32 * 32
SCREEN_HEIGHT = 576 # 32* 18
SCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
COLOR_KEY = (200,0,200)
MAX_FPS = 60
def split(frame, x, y, w, h):
img = pygame.Surface([w, h])
img.blit(frame, (0,0), (x, y, w, h))
return img
| 18.473684 | 64 | 0.612536 | 57 | 351 | 3.631579 | 0.596491 | 0.028986 | 0.028986 | 0.038647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 0.242165 | 351 | 18 | 65 | 19.5 | 0.672932 | 0.039886 | 0 | 0 | 0 | 0 | 0.022222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a9d7cda48b1e296206076d3d46cfc71b4de07c2 | 1,852 | py | Python | ytdlscript.py | antsareflying/ytdlscript | e03e965bfab1b7148982ea9fef04d06b6b5a84c1 | [
"MIT"
] | null | null | null | ytdlscript.py | antsareflying/ytdlscript | e03e965bfab1b7148982ea9fef04d06b6b5a84c1 | [
"MIT"
] | null | null | null | ytdlscript.py | antsareflying/ytdlscript | e03e965bfab1b7148982ea9fef04d06b6b5a84c1 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 Moon Seongmu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import subprocess
import sys
import os
def sortlength(e):
return len(e)
if len(sys.argv) != 2:
print("Usage: python ytdlscript <yt link>")
sys.exit()
if os.getcwd() != "C:\\Users\\seong\\archive\\public\\videos":
print("not in public videos directory")
sys.exit()
ytlink = sys.argv[1]
ytdl_command = f"yt-dlp --write-description --write-info-json --write-annotations --write-sub --sub-langs en --write-thumbnail --write-comments --no-playlist --verbose --download-archive C:/users/seong/archive/public/videos/downloadedarchive.txt -a C:/users/seong/archive/public/videos/downloadlinks.txt -f bv*+ba/b --merge-output-format mkv -o \"%(playlist_title)s/%(upload_date)s-%(title)s-%(id)s.%(ext)s\" {ytlink}"
subprocess.run(ytdl_command, check=True)
| 38.583333 | 418 | 0.759179 | 287 | 1,852 | 4.885017 | 0.560976 | 0.062767 | 0.023538 | 0.038516 | 0.064194 | 0.064194 | 0 | 0 | 0 | 0 | 0 | 0.003778 | 0.142549 | 1,852 | 47 | 419 | 39.404255 | 0.879093 | 0.576674 | 0 | 0.142857 | 0 | 0.071429 | 0.572351 | 0.224806 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0.071429 | 0.357143 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a9dcbe44706cc17ddb463f1054c5280c08ef119 | 36,033 | py | Python | grid_generator/body_fitted_grid_generator.py | Mayu14/2D_comp_viscos | a62633d8684b218b52e39d47a13717a1edfa4a46 | [
"MIT"
] | 1 | 2020-05-08T18:00:28.000Z | 2020-05-08T18:00:28.000Z | grid_generator/body_fitted_grid_generator.py | Mayu14/2D_comp_viscos | a62633d8684b218b52e39d47a13717a1edfa4a46 | [
"MIT"
] | null | null | null | grid_generator/body_fitted_grid_generator.py | Mayu14/2D_comp_viscos | a62633d8684b218b52e39d47a13717a1edfa4a46 | [
"MIT"
] | 1 | 2020-02-20T09:26:27.000Z | 2020-02-20T09:26:27.000Z | # coding: utf-8
from math import sqrt
from scipy import interpolate
from scipy.spatial import Delaunay
import numpy as np
from numpy.linalg import norm
from naca_4digit_test import Naca_4_digit, Naca_5_digit
from joukowski_wing import joukowski_wing_complex, karman_trefftz_wing_complex
import matplotlib.pyplot as plt
import os
# 物体表面の複素座標を取得する
def get_complex_coords(type, size, center_x=-0.08, center_y=0.08, naca4="0012"):
def reshape_z(z):
if z[0] != z[z.shape[0] - 1]:
return np.concatenate([z, z[0].reshape(-1)]), z.shape[0] + 1
else:
return z, z.shape[0]
# 極端に距離の近い制御点を除去する
def adjust_length(z):
len = np.zeros_like(z, dtype=float)
len[:z.shape[0] - 1] = np.abs(z[1:] - z[:z.shape[0] - 1])
len[z.shape[0] - 1] = np.abs(z[0] - z[z.shape[0] - 1])
average_len = np.average(len)
put_out = lambda x, count: np.hstack((x[:count], x[count + 1]))
count = z.shape[0] - 2
while count > 0:
if len[count] < 0.1 * average_len:
z = put_out(z, count)
count -= 1
return z
if type == 0:
t = np.linspace(start=0, stop=2.0 * np.pi, num=size + 1)
z = np.exp(1j * t)[:size]
elif type == 1:
z = joukowski_wing_complex(size, center_x, center_y)
elif type == 2:
z = karman_trefftz_wing_complex(size, center_x, center_y)
elif type == 3:
naca = Naca_4_digit(int_4=naca4, attack_angle_deg=0.0, resolution=size, quasi_equidistant=False)
z = naca.transform2complex()
elif type == 4:
naca = Naca_5_digit(int_5=naca4, attack_angle_deg=0.0, resolution=size, quasi_equidistant=False,
length_adjust=True)
z = naca.transform2complex()
else:
print("type error")
exit()
z = adjust_length(z)
if type < 3:
return reshape_z(z)
else:
return z, z.shape[0]
# 物体の代表長さを得る(x,y方向どちらかが最大と仮定しており,最長部長さを求めているわけでないことに注意されたし)
def get_model_length(z, both=False):
def get_length(x):
return np.max(x) - np.min(x)
x = np.real(z)
y = np.imag(z)
if both:
return get_length(x), get_length(y)
else:
return max(get_length(x), get_length(y))
# 物体の中心位置を返す
def get_model_center(z):
return np.average(np.real(z)), np.average(np.imag(z))
# 格子の外部境界(分割数は物体と同じの,物体形状の長軸長さのmagnification倍の円を返す)
def get_outer_boundary(z1, magnification=5, equidistant=False):
model_length = get_model_length(z1)
center_x, center_y = get_model_center(z1)
zc = center_x + 1j * center_y
radius = model_length * magnification
if equidistant == False:
# 法線の角度
delta2 = get_delta2(z1)
theta1 = np.angle(delta2 / (np.abs(delta2) * 1j))
theta1 = np.where(theta1 > 0, theta1, theta1 + 2.0 * np.pi)
# 物体を円に変換したときの換算角度
theta2 = get_length_rate(z1) * 2.0 * np.pi
average_theta = np.sort(0.5 * (theta1 + theta2))
z3 = zc + radius * np.exp(1j * average_theta)
return z3[::-1] # Clock Wise and -1
else:
z3 = zc + radius * np.exp(1j * np.linspace(0, 2.0 * np.pi, z1.shape[0] + 1))
return z3[1:]
# そこまでの累積長さが全体の長さに占める割合を返す
def get_length_rate(z1, output_total_length=False):
size = z1.shape[0]
delta1 = np.zeros_like(z1, dtype=complex)
delta1[:size - 1] = z1[1:] - z1[:size - 1]
delta1[size - 1] = z1[0] - z1[size - 1]
len = np.abs(delta1)
total_len = np.sum(len)
len_rate = np.zeros_like(z1, dtype=float)
accumulated_len = 0.0
for i in range(size):
len_rate[i] = accumulated_len / total_len
accumulated_len += len[i]
if output_total_length:
return len_rate, total_len
else:
return len_rate
# 1点飛ばしでの座標の差分を返す
def get_delta2(z1):
size = z1.shape[0]
delta2 = np.zeros_like(z1, dtype=complex)
delta2[0] = z1[1] - z1[size - 1]
for i in range(1, size - 1):
delta2[i] = z1[i + 1] - z1[i - 1]
delta2[size - 1] = z1[0] - z1[size - 2]
return delta2
# 物体と外周を結ぶ線分を返す
def get_connect_z1_to_z3(z1, z3, resolution=None, magnification=10):
if resolution == None:
resolution = z1.shape[0]
else:
resolution = resolution
inner_end = z1[np.argmax(np.real(z1))] # 内側のx方向最大位置
outer_end = z3[np.argmax(np.real(z3))] # 外側のx方向最大位置
exp_end = np.log(magnification) # 指定倍率:magnificationに達する指数関数上の位置:magnification = exp(exp_end) + 1
delta_x = np.exp(np.linspace(0, exp_end, resolution - 2, dtype=complex)) # 指数関数の等間隔サンプリング
raw_length = np.sum(delta_x) # 等間隔サンプリングされた微小長さの総和
delta_x = (outer_end - inner_end) / raw_length * delta_x # 内から外への長さにスケーリング&方向を付ける
z2 = np.zeros(resolution, dtype=complex)
z2[0] = inner_end
z2[resolution - 1] = outer_end
for k in range(1, resolution - 1):
z2[k] = z2[k - 1] + delta_x[k - 1]
return z2
def deduplication(z, array_list=None):
def put_out(x, count):
return np.hstack((x[:count], x[count + 1]))
def put_out_bound(x):
return x[:x.shape[0] - 1]
size = z.shape[0]
if z[size - 1] == z[0]:
size -= 1
z = put_out_bound(z)
if array_list != None:
for i in range(len(array_list)):
array_list[i] = put_out_bound(array_list[i])
count = size - 2
while count > 0:
if z[count] == z[count + 1]:
z = put_out(z, count)
if array_list != None:
for i in range(len(array_list)):
array_list[i] = put_out(array_list[i], count)
count -= 1
if array_list == None:
return z
else:
return z, array_list
def Tri2vtk(path, fname, Tri_points, Tri_simplices):
fname = path + fname + ".vtk"
with open(fname, 'w') as f:
point_number = Tri_points.shape[0]
cell_number = Tri_simplices.shape[0]
cell_vertex_number = 4 * Tri_simplices.shape[0]
#cell_vertex_number = str(2 * 4 * (xi_max) * (eta_max - 1))
f.write("# vtk DataFile Version 3.0\n")
f.write("Unstructured Grid tri example\n")
f.write("ASCII\nDATASET UNSTRUCTURED_GRID\n")
f.write("POINTS " + str(point_number) + " double\n")
# point coordinates
for i in range(point_number):
f.write(str(Tri_points[i, 0]) + " " + str(Tri_points[i, 1]) + " 0.0\n")
# cell structure
f.write("CELLS " + str(cell_number) + " " + str(cell_vertex_number) + "\n")
for i in range(cell_number):
f.write("3 " + str(Tri_simplices[i, 0]) + " " + str(Tri_simplices[i, 1]) + " " + str(Tri_simplices[i, 2]) + "\n")
# cell types
f.write("CELL_TYPES " + str(cell_number) + "\n")
[f.write("5\n") for i in range(cell_number)]
# p1とp2, p3とp4が線分をなすとして
def line_intersect(p1, p2, p3, p4):
flag = 0
for i in range(2):
if (p1[i] >= p2[i]):
if((p1[i] < p3[i] and p1[i] < p4[i]) or (p2[i] > p3[i] and p2[i] > p4[i])):
flag = 1
break
else:
if((p2[i] < p3[i] and p2[i] < p4[i]) or (p1[i] > p3[i] and p1[i] > p4[i])):
flag = 1
break
if flag == 1:
return False
c1 = (p3[0] - p4[0]) * (p1[1] - p3[1]) + (p3[1] - p4[1]) * (p3[0] - p1[0])
c2 = (p3[0] - p4[0]) * (p2[1] - p3[1]) + (p3[1] - p4[1]) * (p3[0] - p2[0])
c3 = (p1[0] - p2[0]) * (p3[1] - p1[1]) + (p1[1] - p2[1]) * (p1[0] - p3[0])
c4 = (p1[0] - p2[0]) * (p4[1] - p1[1]) + (p1[1] - p2[1]) * (p1[0] - p4[0])
return c3 * c4 < 0 and c1 * c2 < 0
def point_intersect(p0, p1, p2, p3):
sign1 = np.sign(np.cross(p1 - p0, p2 - p0))
sign2 = np.sign(np.cross(p2 - p0, p3 - p0))
sign3 = np.sign(np.cross(p3 - p0, p1 - p0))
return (sign1 == sign2) and (sign2 == sign3)
def renumbering(z):
len_x, len_y = get_model_length(z, both = True)
if len_x > len_y:
size = z.shape[0]
z = np.concatenate([z[int(size/4):], z[:int(size/4)]])
return z
def plot_complex(z):
plt.plot(np.real(z), np.imag(z), "x")
return
def set_long_axis_direction(z1):
if (np.max(np.real(z1)) - np.min(np.real(z1))) > np.max(np.imag(z1)) - np.min(np.imag(z1)):
return True
else:
return False
def split_surface(z):
direction = set_long_axis_direction(z)
if direction:
start = np.argmin(np.imag(z))
end = np.argmax(np.imag(z))
else:
start = np.argmin(np.real(z))
end = np.argmax(np.real(z))
if start < end:
z_upper = z[start:end + 1]
z_lower = np.concatenate([z[end:], z[:start + 1]])
else:
z_upper = z[end:start + 1]
z_lower = np.concatenate([z[start:], z[:end + 1]])
return z_upper, z_lower
def get_equidistant_curve(z2, add=0, rate=0.5, high_dens=True):
def func2(x, rate):
a = 3.0 / (2 + rate)
b = a * rate
return np.where(x < 1 / 6, a * x,
np.where(x < 2 / 6, b * (x - 1 / 6) + a / 6,
np.where(x < 4 / 6, a * (x - 2 / 6) + (a + b) / 6,
np.where(x < 5 / 6, b * (x - 4 / 6) + (3 * a + b) / 6,
a * (x - 5 / 6) + (3 * a + 2 * b) / 6))))
t, total_len = get_length_rate(z2, output_total_length=True)
fx = interpolate.PchipInterpolator(np.hstack((t, np.array([1.0]))), np.real(np.hstack((z2, z2[0]))))
fy = interpolate.PchipInterpolator(np.hstack((t, np.array([1.0]))), np.imag(np.hstack((z2, z2[0]))))
if high_dens:
equidistant_t = func2(np.linspace(0, 1, z2.shape[0] + add + 1)[:z2.shape[0] + add], rate)
else:
equidistant_t = np.linspace(0, 1, z2.shape[0] + add + 1)[:z2.shape[0] + add]
return fx(equidistant_t) + 1j * fy(equidistant_t)
def make_grid_seko(z1, path="", fname="sample", mg2=True, vtk=True, bdm=True, trianglation=True):
z1 = renumbering(z1)
plot_complex(z1)
plot_complex(z1[:10])
xi_max = z1.shape[0]
eta_max = z1.shape[0] # int(0.5 * z1.shape[0])
def make_point(z):
return np.vstack([np.real(z), np.imag(z)]).T
def convert_complex2real(comp):
return np.vstack([np.real(comp), np.imag(comp)]).flatten()
def dot_product_c(z1, z2):
return np.real(z1 * np.conjugate(z2))
def cross_product_c(z1, z2):
return -np.real(z1 * 1j * z2)
# 物体表面を押し出す(η格子線の複素座標,オフセット方向(外側or内側),最大のオフセット量,オフセット倍率(遠方領域で1以上の値を与えて計算領域を効率的に広げる))
def offset_surface(z, outer=False, max_incremental=0.1, accel=1.0, restriction=True, del_wedge=160, min_theta_output=False, long_axis="x"):
size = z.shape[0]
delta = np.zeros(size, dtype=complex)
delta[0] = z[1] - z[size - 1]
for i in range(1, size - 1):
delta[i] = z[i + 1] - z[i - 1]
delta[size - 1] = z[0] - z[size - 2]
delta1 = np.zeros(size, dtype=complex)
delta1[:size - 1] = z[1:] - z[:size - 1]
delta1[size - 1] = z[0] - z[size - 1]
len1 = np.abs(delta1)
theta = np.zeros(size)
for i in range(size - 1):
ab_ab = dot_product_c(-delta1[i], delta1[i + 1]) / (len1[i] * len1[i + 1]) # arccosのoverflow対策
if abs(ab_ab) > 1.0:
theta[i + 1] = 0.0
else:
theta[i + 1] = np.arccos(ab_ab)
ab_ab = dot_product_c(-delta1[size - 1], delta1[0]) / (len1[size - 1] * len1[0])
if abs(ab_ab) > 1.0:
theta[0] = 0.0
else:
theta[0] = np.arccos(dot_product_c(-delta1[size - 1], delta1[0]) / (len1[size - 1] * len1[0]))
outside = np.where(cross_product_c(np.concatenate([z[1:], [z[0]]]), z) > 0, True, False)
# sum(pi - theta) ~ 2.0*piで円に近づいたとみなす
theta_flag = 0
if np.min(theta) > 0.95 * np.pi:
theta_flag = 1
phai = np.arctan2(np.imag(-delta1), np.real(-delta1))
angle = phai + np.pi - 0.5 * theta
if long_axis == "x":
coef = 1.3 + 1j
else:
coef = 1.0 + 1j * 1.5
normal = np.exp(1j * angle) * coef
"""
# 格子の裏返り防止(隣接する格子点から外向きξ方向へ伸びる2つの格子線がなす角度が90°以上開いている場合に,新しいη格子線が既存のη格子線と被らないように角度を修正する)
def prevent_turn_over_cell(i, imp1, downwind=True):
dot_normal = np.real(normal[i] * np.conj(normal[imp1])) # 隣接する格子線同士の内積を取る
flag = 0
if dot_normal < 0: # 内積が負のとき修正対象
flag = 1
angle1 = np.angle(normal[i])
angle0 = np.angle(normal[imp1])
if angle1 < angle0:
angle1 += 2.0 * np.pi
if downwind: # ξが小さい側を修正する場合
normal[i] -= (angle1 - angle0) / 100 * 5 # 角度差の5%だけ寄せる
else: # ξが大きい側を変更する場合(風下と風上とを2回セットとして修正する)
normal[i] += (angle1 - angle0) / 100 * 5 # 角度差の5%だけ寄せる
return flag
flag = 0
count = 0
tmp_normal = normal.copy()
while flag != 0: # flag=0となるまで格子の裏返り防止処理を掛ける
flag = 0
count += 1
flag += prevent_turn_over_cell(0, xi_max - 1)
for i in range(1, size):
flag += prevent_turn_over_cell(i, i - 1)
for i in range(size - 1):
flag += prevent_turn_over_cell(i, i + 1, downwind=False)
flag += prevent_turn_over_cell(xi_max - 1, 0, downwind=False)
if count == 1000:
flag = True
if flag:
normal = tmp_normal
"""
# 凹部から点を削除
# if restriction:
dmask = np.where(theta > del_wedge / 180 * np.pi, True, False)
if restriction == False:
# 解像度が厳しいところに点を追加
chk = np.sort(np.argwhere(np.where(theta < 0.95 * np.pi, True, False)))[::-1]
if chk.shape[0] != 0:
dmask = np.where(theta == theta, True, False)
for id in chk.flatten():
if dmask[id]:
if outside[id]:
z = np.concatenate([z[:id], [z[id]], [z[id]], [z[id]], z[id:]])
normal = np.concatenate([normal[:id], [0.5 * (normal[id-1] + normal[id])], [normal[id]], [0.5 * (normal[id] + normal[id+1])], [normal[id+1]], normal[id+1:]])
dmask = np.concatenate([dmask[:id], [dmask[id]], [dmask[id]], [dmask[id]], dmask[id:]])
outside = np.concatenate([outside[:id], [outside[id]], [outside[id]], [outside[id]], outside[id:]])
if np.min(np.abs(delta)) > 0.1 * np.average(np.abs(delta)):
incremental = accel * min(min(2.0 / np.pi * np.min(np.abs(delta)), np.average(np.abs(delta))),
max_incremental) # obj sizeとboundaryサイズを均等に分割したときの幅で置換すべき(0.1)
else:
incremental = accel * 0.5 * np.average(np.abs(delta)) # obj sizeとboundaryサイズを均等に分割したときの幅で置換すべき(0.1)
# print(z.shape, ((z - normal * incremental) * dmask).shape)
if min_theta_output:
if outer == True:
return (z - normal * incremental)[dmask], theta_flag
else:
return (z + normal * incremental)[dmask], theta_flag
else:
if outer == True:
return (z - normal * incremental)[dmask]
else:
return (z + normal * incremental)[dmask]
def equidistant_offset(z2, max_incremental, accel, add=0, restriction=True, rate=0.5, min_theta_output=False):
if min_theta_output:
z2, theta_flag = offset_surface(z2, outer=True, max_incremental=max_incremental, accel=accel, restriction=restriction, min_theta_output=min_theta_output)
return get_equidistant_curve(z2, add, rate), theta_flag
else:
z2 = offset_surface(z2, outer = True, max_incremental = max_incremental, accel = accel,
restriction = restriction, min_theta_output = min_theta_output)
return get_equidistant_curve(z2, add, rate)
get_object_center = lambda z2: np.average(np.real(z2)) + 1j * np.average(np.imag(z2))
magnification = 5.0
max_incremental = 10.0 # radius1 * (magnification - 1) / eta_max
accel_parameter = 1.3 # 物体遠方領域でオフセット量を増やす際の割合
# accel量の設定
def set_accel(j, accel_parameter):
if j < int(eta_max / 2):
return 1.0
else:
return accel_parameter
z1_eq = get_equidistant_curve(z1)
pts_x = []
pts_y = []
pts_x.append(np.real(z1_eq))
pts_y.append(np.imag(z1_eq))
xi_max += 2
# z2 = equidistant_offset(z1_eq, max_incremental, accel=1.0, add = 2)
z2= equidistant_offset(z1_eq, max_incremental, accel=1.0, restriction = False, rate = 0.5)
# z2 = offset_surface(z1_eq, True, max_incremental, accel = 1.0, restriction = False)
z2_eq = z2
# z2 = np.hstack((z2[1:], z2[0]))
def get_im1_im0_ip1_ip2(i, size):
if i == 0:
return size - 1, 0, 1, 2
elif i == size - 1:
return size - 2, size - 1, 0, 1
elif i == size - 2:
return size - 3, size - 2, size - 1, 0
else:
return i - 1, i, i + 1, i + 2
def safe_concatenate(z2, size, im0, ip1, ip2):
if ip2 == 0:
return np.concatenate([z2[:im0], [0.5 * (z2[im0] + z2[ip1])]])
elif ip1 == 0:
return np.concatenate([z2[ip2:im0], [0.5 * (z2[im0] + z2[ip1])]])
elif im0 == 0:
return np.concatenate([[0.5 * (z2[im0] + z2[ip1])], z2[ip2:]])
else:
return np.concatenate([z2[:im0], [0.5 * (z2[im0] + z2[ip1])], z2[ip2:]])
def merge_edge(z2):
size = z2.shape[0]
z2_xy = make_point(z2)
merge = np.zeros(size)
for i in range(size):
im1, im0, ip1, ip2 = get_im1_im0_ip1_ip2(i, size)
if (dot_product_c(z2[ip2] - z2[im1], z2[ip1] - z2[im0]) / (np.abs(z2[ip2] - z2[im1]) * np.abs(z2[ip1] - z2[im0])) < 0.0) or (
line_intersect(z2_xy[im1], z2_xy[ip2], z2_xy[im0], z2_xy[ip1])):
merge[i] = 1
for i in range(size - 1, -1, -1):
if merge[i] == 1:
im1, im0, ip1, ip2 = get_im1_im0_ip1_ip2(i, size)
z2 = safe_concatenate(z2, size, im0, ip1, ip2)
size -= 1
return z2
def delete_edge(z2, param=0.6):
size = z2.shape[0]
flag = 1
const = 0
while flag != 0:
flag = 0
delta = np.concatenate([z2[1:] - z2[:size - 1], [z2[0] - z2[size - 1]]])
len = np.abs(delta)
if const == 0:
ave_len = np.average(len)
const = 1
for i in range(size):
if len[i] < param * ave_len:
flag = 1
im1, im0, ip1, ip2 = get_im1_im0_ip1_ip2(i, size)
z2 = safe_concatenate(z2, size, im0, ip1, ip2)
size -= 1
break
return z2
# print("calc base grid-line")
model_length = get_model_length(z1)
theta_j = 0
for j in range(1, eta_max):
# print(j, z2.shape)
pts_x.append(np.real(z2))
pts_y.append(np.imag(z2))
plot_complex(z2)
if theta_j == 0:
restrict = False
else:
restrict = True
z2_equidistant, theta_flag = equidistant_offset(z2=z2, max_incremental=max_incremental, accel=set_accel(j, accel_parameter), add=0, rate = min(0.05 * j + 0.8, 1.0), restriction=restrict, min_theta_output = True)
# z2_orthogonal, m_theta = offset_surface(z2, outer=True, max_incremental=max_incremental, accel=set_accel(j, accel_parameter))
if theta_j == 0 and theta_flag == 1:
theta_j = j
fix_z2 = merge_edge(z2_equidistant) # (1.0 - mix_rate) * z2_orthogonal + mix_rate * z2_equidistant
# delta_j__ = np.hstack((z2[1:] - z2[:z2.shape[0] - 1], z2[0] - z2[z2.shape[0] - 1]))
# delta_jp1 = np.hstack((fix_z2[1:] - fix_z2[:fix_z2.shape[0] - 1], fix_z2[0] - fix_z2[fix_z2.shape[0] - 1]))
"""
if np.any(np.real(delta_j__ * np.conj(delta_jp1)) < 0):
mix_rate = 1.0 - max(-0.18 * np.exp(-float(j - 2)), 0.7 - 0.1 * flag)
print(mix_rate)
if mix_rate > 0.9:
z2 = fix_z2
break
else:
flag = -1
"""
z2 = delete_edge(fix_z2)
plot_complex(z2)
if (np.max(np.real(z2)) - np.min(np.real(z2)) > 40.0 * model_length):
eta_max = j + 2
break
z3 = z2
pts_x.append(np.real(z3))
pts_y.append(np.imag(z3))
"""
# ここまででおおよその点の追加が終了
# このまま三角形を形成すると段階では物体を貫通していたり,都合のよくない線が存在している可能性がある
# そこで、物体を三角形領域に分割し,その三角形群と格子線の接触判定を行う
def obj_trianglization(z1):
z_u, z_l = split_surface(z1)
new_z_eq = np.concatenate([z_u, z_l[1:z_l.shape[0]-1]])
size = new_z_eq.shape[0]
def set_r(i):
return i
def set_l(i, size):
return size - i # -1ではない
cell_total = new_z_eq.shape[0] - 2
tri_point = np.vstack([np.real(new_z_eq), np.imag(new_z_eq)]).T
tri_block = np.zeros((cell_total, 3), dtype=int)
process = 0
for i in range(int(cell_total/2)):
tri_block[2 * i, :] = np.array([set_r(i), set_r(i+1), set_l(i + 1, size)])
tri_block[2 * i + 1, :] = np.array([set_r(i + 1), set_l(i + 1, size), set_l(i + 2, size)])
process += 2
if process != cell_total:
i = int(cell_total/2)
tri_block[cell_total - 1, :] = np.array([set_r(i + 1), set_l(i + 1, size), set_l(i + 2, size)])
return tri_point, tri_block
"""
"""
print("object triangulation")
# 物体内部の三角形化
obj_tri_pts, obj_tri_spx = obj_trianglization(z1)
# plt.triplot(obj_tri_pts[:, 0], obj_tri_pts[:, 1], obj_tri_spx)
# plt.show()
"""
"""
print("outside triangulation")
# 物体外部+内部の三角形化
points2D = np.vstack([np.array(pts_x[1]).flatten(), np.array(pts_y[1]).flatten()]).T # η=1線は独立させておく
for i in range(2, eta_max):
points2D = np.concatenate([points2D, np.vstack([np.array(pts_x[i]).flatten(), np.array(pts_y[i]).flatten()]).T])
Tri2 = Delaunay(points2D)
plt.triplot(Tri2.points[:, 0], Tri2.points[:, 1], Tri2.simplices)
plt.xlim(-0.1, 1.1)
plt.ylim(-0.1, 1.1)
plt.show()
print("delete bad triangle")
# 品質の悪いセルを除去する
def check_aspect(pts3, param=1.0/3):
l2 = norm(np.array([(pts3[1, :] - pts3[0, :]), (pts3[2, :] - pts3[1, :]), (pts3[0, :] - pts3[2, :])]), axis = 1)
return (np.min(l2) / np.max(l2) < param) , np.argmin(l2)
calc_Area2 = lambda pts3: np.cross(pts3[1, :] - pts3[0, :], pts3[2, :] - pts3[0, :]) # 実際は1/2にする必要がある
minimum_edge = lambda pts3: np.argmin(np.array([norm(pts3[1, :] - pts3[0, :]), norm(pts3[2, :] - pts3[1, :]), norm(pts3[0, :] - pts3[2, :])]))
minimum_area2_half = np.average(np.abs(z1_eq[1:] - z1_eq[:z1_eq.shape[0] - 1])) ** 2
total_add_cell = Tri2.simplices.shape[0]
total_add_point = points2D.shape[0]
remove_point = np.zeros(total_add_point)
for iCell in range(total_add_cell):
if all(Tri2.simplices[iCell] > xi_max): # η=1線の格子点を含まず
area2 = calc_Area2(Tri2.points[Tri2.simplices[iCell]])
if (area2 < 0.1 * minimum_area2_half): # 物体近傍のセル面積の半分以下の面積しか持たない要素
target = minimum_edge(Tri2.points[Tri2.simplices[iCell]])
if Tri2.simplices[iCell][target] > Tri2.simplices[iCell][(target + 1) % 3]:
remove_point[Tri2.simplices[iCell][target]] = 1
else:
remove_point[Tri2.simplices[iCell][(target + 1) % 3]] = 1
else:
chk, target = check_aspect(Tri2.points[Tri2.simplices[iCell]], param = 0.3)
if chk:
remove_point[Tri2.simplices[iCell][target]] = 1
remove_point = np.where(remove_point == 1, False, True) # Falseを削除,Trueを残す
# plt.triplot(Tri2.points[:,0], Tri2.points[:,1], Tri2.simplices)
points2D = points2D[remove_point]
Tri2 = Delaunay(points2D)
total_add_point = points2D.shape[0]
plt.triplot(Tri2.points[:, 0], Tri2.points[:, 1], Tri2.simplices)
plt.xlim(-0.1, 1.1)
plt.ylim(-0.1, 1.1)
plt.show()
"""
# print("genearate eta1")
# 追加する点の総数が分かった時点で,物体表面→η=1線の格子を先に切る
# 物体表面のη=0線と物体表面から少し外側のη=1格子線を三角形で繋ぐ
def eta_next(z1_eq, z2_eq, cum_p):
num0 = z1_eq.shape[0]
num1 = z2_eq.shape[0]
edge_mask = np.ones((num0, num1), dtype=int)
length = np.zeros((num0, num1), dtype=float)
for i in range(num0):
for j in range(num1):
length[i, j] = np.abs(z1_eq[i] - z2_eq[j])
z1_xy = make_point(z1_eq)
z2_xy = make_point(z2_eq)
# print("0th step")
# 0.物体表面における辺の長さの最大値と比較して長すぎるものを除外
ave = np.max(np.abs(z1_eq[1:] - z1_eq[:z1_eq.shape[0] - 1]))
for i in range(num0):
for j in range(num1):
if length[i, j] > 4.0 * ave:
edge_mask[i, j] = 0
# print("1st step")
# 1.そもそも物体表面と交差してるのを除外
for i in range(num0):
for j in range(num1):
for k in range(num0):
kp1 = k + 1
if kp1 == num0:
kp1 = 0
if (i != k and i != kp1):
if line_intersect(z1_xy[i], z2_xy[j], z1_xy[k], z1_xy[kp1]):
edge_mask[i, j] = 0
# print("2nd step")
# 2.線分同士で交差してるのを除外(bluteforth)
for i in range(num0): # p1
for j in range(num1): # p2
if edge_mask[i, j] != 0: # まだ除外されてない線分p1-p2のうち
for k in range(num0): # p3
if k != i: # p1 = p3は交差してないことにする
for l in range(num1): # p4
if l != j: # p2 = p4も交差してないことにする
if(edge_mask[k, l] != 0): # p3-p4がまだ除外されていなければ
if line_intersect(z1_xy[i], z2_xy[j], z1_xy[k], z2_xy[l]): # 交差してたら長い方を消す
if length[i, j] > length[k, l]:
edge_mask[i, j] = 0
else:
edge_mask[k, l] = 0
# print("3rd step")
# 3.残った辺から三角形を構築
simplices = [] # 最終的な格子に登録する用
for i in range(num0): # p1
for j in range(num1): # p3
if edge_mask[i, j] == 1: # 辺p1-p3が生き残っていたときに
jp1 = j + 1
if jp1 == num1:
jp1 = 0
if edge_mask[i, jp1] == 1:
simplices.append([i + cum_p, j + cum_p + num0, jp1 + cum_p + num0])
for i in range(num1): # p2
for j in range(num0): # p1
if edge_mask[j, i] == 1: # 辺p1-p2が生き残っており
jp1 = j + 1
if jp1 == num0:
jp1 = 0
if edge_mask[jp1, i] == 1: # 次に生き残っている辺p2-p3について
simplices.append([i + cum_p + num0, j + cum_p, jp1 + cum_p])
simplices = (np.array(simplices))
new_edge = []
for i in range(num0):
for j in range(num1):
if edge_mask[i, j] == 1:
new_edge.append([i, j])
new_edge = np.array(new_edge)
return simplices, new_edge
cum_p = 0
total_simplices, new_edge = eta_next(z1_eq, z2_eq, cum_p)
cum_p = z1_eq.shape[0]
total_point = np.vstack([np.real(z1_eq), np.imag(z1_eq)]).T
total_point = np.concatenate([total_point, np.vstack([np.real(z2_eq), np.imag(z2_eq)]).T])
eta_max = len(pts_x)
for j in range(1, eta_max-1):
print(str(j) + "_line")
z1_eq = pts_x[j] + 1j * pts_y[j]
z2_eq = pts_x[j+1] + 1j * pts_y[j+1]
total_point = np.concatenate([total_point, np.vstack([np.real(z2_eq), np.imag(z2_eq)]).T])
simplices, new_edge = eta_next(z1_eq, z2_eq, cum_p)
cum_p += z1_eq.shape[0]
total_simplices = np.concatenate([total_simplices, simplices])
Tri2vtk(path = path, fname = fname, Tri_points = total_point, Tri_simplices = total_simplices)
"""
obj_center = np.array([np.real(get_object_center(z2_eq)), np.imag(get_object_center(z2_eq))])
for j in range(theta_j, eta_max - 1):
z1_eq = pts_x[j] + 1j * pts_y[j]
z2_eq = pts_x[j+1] + 1j * pts_y[j+1]
total_point = np.concatenate([total_point, np.vstack([np.real(z2_eq), np.imag(z2_eq)]).T])
local_point = np.concatenate([np.vstack([np.real(z2_eq), np.imag(z2_eq)]).T, [obj_center]])
last_pt = z2_eq.shape[0]
Tri_j = Delaunay(local_point)
mask = ~(Tri_j.simplices == last_pt).any(axis = 1)
total_simplices = np.concatenate([total_simplices, Tri_j.simplices[mask, :] + cum_p])
cum_p += z1_eq.shape[0]
plt.triplot(total_point[:, 0], total_point[:, 1], total_simplices)
plt.xlim(0.9, 1.1)
plt.ylim(0.45,0.55)
plt.show()
plt.triplot(total_point[:, 0], total_point[:, 1], total_simplices)
plt.show()
"""
"""
new_edge_num = new_edge.shape[0]
z1_xy = make_point(z1_eq)
z2_xy = make_point(z2_eq)
# ここまで物体側
print("extract cell near body")
# 物体外部+内部の格子について物体近傍の三角形のみを取り出す(物体の大きさの1.1倍程度のboudanry box)
len_x, len_y = get_model_length(z1, both=True)
center = get_object_center(z1)
if len_x > 10.0 * len_y:
param_x = 1.0
param_y = 2.0
elif len_y > 10.0 * len_x:
param_x = 2.0
param_y = 1.0
else:
param_x = 1.0
param_y = 1.0
min_x = np.real(center) - param_x * len_x
max_x = np.real(center) + param_x * len_x
min_y = np.imag(center) - param_y * len_y
max_y = np.imag(center) + param_y * len_y
total_add_cell = Tri2.simplices.shape[0]
checK_target = np.zeros(total_add_cell)
for iCell in range(total_add_cell):
pts = Tri2.points[Tri2.simplices[iCell, :]]
for iPoint in range(3):
if (min_x < pts[iPoint, 0] < max_x) and (min_y < pts[iPoint, 1] < max_y):
checK_target[iCell] = 1
mask = np.where(checK_target == 1, True, False)
def tri_intersect(pts3, obj_tri_pts, obj_tri_spx):
pattern = np.array([[0,1], [1,2], [2,0]])
cross = 0
i = 0
for tri in obj_tri_spx:
obj_pts3 = obj_tri_pts[tri]
for out_pat in pattern:
for obj_pat in pattern:
chk = line_intersect(pts3[out_pat[0]], pts3[out_pat[1]], obj_pts3[obj_pat[0]], obj_pts3[obj_pat[1]])
if chk:
cross = 1
break
chk2 = point_intersect(pts[i % 3], obj_pts3[0], obj_pts3[1], obj_pts3[2])
i += 1
if chk2:
cross = 1
break
if cross == 1:
break
if cross == 1:
break
if cross == 0:
for iEdge in range(new_edge_num):
i = new_edge[iEdge, 0]
j = new_edge[iEdge, 1]
for out_pat in pattern:
chk = line_intersect(pts3[out_pat[0]], pts3[out_pat[1]], z1_xy[i], z2_xy[j])
if chk:
cross = 1
break
if cross == 1:
break
return cross == 1
print("judge traiangle intersect")
# 物体を通過する線分を保有する三角形要素 (&新規追加する辺と接触する三角形要素)の除去(これを行うためのマスク作成)
for iCell in range(total_add_cell):
if mask[iCell]:
pts3 = Tri2.points[Tri2.simplices[iCell], :]
if tri_intersect(pts3, obj_tri_pts, obj_tri_spx) == False:
mask[iCell] = False
# 4.最後方にη=0の格子点を追加
pointj0 = np.vstack([np.real(z1_eq).flatten(), np.imag(z1_eq).flatten()]).T
grid_pts = np.concatenate([points2D, pointj0])
grid_simplices = np.concatenate([Tri2.simplices[~mask, :], simplices])
Tri2vtk(path=path, fname=fname, Tri_points=grid_pts, Tri_simplices=grid_simplices)
"""
return
def make_grid(fname, type, size=100, naca4="0012", center_x=0.08, center_y=0.08, mayugrid2=False, vtk=False, bdm=False,
trianglation=True, path=""):
z1, size = get_complex_coords(type=type, center_x=center_x, center_y=center_y, naca4=naca4, size=size)
z1 = deduplication(z1)[::-1]
make_grid_seko(z1, path, fname, mayugrid2, vtk, bdm, trianglation)
def main():
z1, size = get_complex_coords(type=3, naca4="2831", size=100)
# z1, size = get_complex_coords(type=0, center_x=0.08, center_y=0.3, naca4="4912", size=100)
z1 = deduplication(z1)[::-1]
plot_complex(z1)
make_grid_seko(z1)
# plt.plot(np.real(z1), np.imag(z1))
# plt.show()
def makeGridLoop():
header = "NACA"
path = "G:\\Toyota\\Data\\grid_vtk\\NACA4\\"
fin_path = "G:\\Toyota\\Data\\grid_vtk\\NACA4_finish\\"
i1 = 9
print(i1)
# for i1 in range(start + 1, start+500, 2):
for i2 in range(5, 10):
for i34 in range(1, 40):
naca4 = str(i1) + str(i2) + str(i34).zfill(2)
fname = header + naca4
if os.path.exists(fin_path + fname + ".vtk") == False:
print(fname)
make_grid(fname, type=3, naca4=naca4, path = path, size=50)
def output_coords_csv(fname = "NACA", type = 3, size = 200, naca4 = "0411", center_x = 0.08, center_y = 0.08):
if type == 3:
path = "G:\\Toyota\\Data\\grid_vtk\\NACA4_csv_HD\\"
for i1 in range(10):
for i2 in range(10):
for i34 in range(1,100):
naca4 = str(i1) + str(i2) + str(i34).zfill(2)
z1, gomi = get_complex_coords(type = type, center_x = center_x, center_y = center_y, naca4 = naca4, size = size)
z_u, z_l = split_surface(deduplication(z1)[::-1])
# z_u, z_l = split_surface(z1[::-1])
new_z_eq = np.concatenate([z_u, z_l[1:z_l.shape[0] - 1]])
z1 = get_equidistant_curve(new_z_eq, high_dens = False)
fname = "NACA" + naca4
np.savetxt(path + fname + "_x.csv", np.real(z1), delimiter=",")
np.savetxt(path + fname + "_y.csv", np.imag(z1), delimiter = ",")
elif type == 4:
path = "G:\\Toyota\\Data\\grid_vtk\\NACA5_csv_HD\\"
head_int3 = [210, 220, 230, 240, 250, 221, 231, 241, 251]
for int3 in head_int3:
for i45 in range(1, 100):
naca5 = str(int3) + str(i45).zfill(2)
z1, gomi = get_complex_coords(type = type, center_x = center_x, center_y = center_y, naca4 = naca5,
size = size)
z_u, z_l = split_surface(deduplication(z1)[::-1])
new_z_eq = np.concatenate([z_u, z_l[1:z_l.shape[0] - 1]])
z1 = get_equidistant_curve(new_z_eq, high_dens = False)
fname = "NACA" + naca5
np.savetxt(path + fname + "_x.csv", np.real(z1), delimiter = ",")
np.savetxt(path + fname + "_y.csv", np.imag(z1), delimiter = ",")
if __name__ == '__main__':
# main()
# makeGridLoop()
output_coords_csv(type = 4) | 38.537968 | 219 | 0.532512 | 5,149 | 36,033 | 3.568266 | 0.096329 | 0.016981 | 0.007838 | 0.014369 | 0.41893 | 0.351657 | 0.288684 | 0.241496 | 0.191313 | 0.169216 | 0 | 0.062446 | 0.322704 | 36,033 | 935 | 220 | 38.537968 | 0.690309 | 0.062193 | 0 | 0.25 | 0 | 0 | 0.016842 | 0.00663 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073308 | false | 0 | 0.016917 | 0.015038 | 0.18985 | 0.007519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a9e2fd106422503a1e5fac850661d45c2df7299 | 4,238 | py | Python | cmp/gnfa.py | kikeXD/Grammar-Analyser | ca8f35c0e36e3d8181dab78bb0231e101f953437 | [
"MIT"
] | 1 | 2020-02-13T16:52:57.000Z | 2020-02-13T16:52:57.000Z | cmp/gnfa.py | kikeXD/Grammar-Analyser | ca8f35c0e36e3d8181dab78bb0231e101f953437 | [
"MIT"
] | null | null | null | cmp/gnfa.py | kikeXD/Grammar-Analyser | ca8f35c0e36e3d8181dab78bb0231e101f953437 | [
"MIT"
] | 1 | 2020-02-13T16:44:26.000Z | 2020-02-13T16:44:26.000Z | from cmp.regex import Regex
from cmp.nfa_dfa import NFA
import pydot
from pprint import pprint
class GNFA:
def __init__(self, nfa :NFA):
self.nfa = nfa
self.states = nfa.states + 2
self.start = 0
self.finals = [nfa.states + 1]
self.transitions = { state: {} for state in range(self.states) }
for start in range(nfa.states):
for finish in range(nfa.states):
if self._regexes(start, finish) == '':
continue
try:
self.transitions[start + 1][Regex(self._regexes(start, finish))].append(finish + 1)
except KeyError:
self.transitions[start + 1][Regex(self._regexes(start, finish))] = [finish + 1]
# for start, dic in nfa.transitions.items():
# for regex, destinations in dic.items():
# for d in destinations:
# try:
# self.transitions[start + 1][self._regexes(start, d)].append(d + 1)
# except KeyError:
# self.transitions[start + 1][self._regexes(start, d)] = [d + 1]
# self.transitions[start + 1][self._regexes(start, ) Regex(str(regex))] = [i + 1 for i in destinations]
# pprint(nfa.transitions)
for start, dic in nfa.transitions.items():
if len(dic.items()) == 0:
# print(start)
uni = [i + 1 for i in range(nfa.states)]
# print(uni)
self.transitions[start + 1][Regex('~')] = uni
else:
s = set()
for regex, destinations in dic.items():
for item in destinations:
s.add(item)
# print(s)
uni = set([i for i in range(nfa.states)])
dif = uni.difference(s)
for state in dif:
self.transitions[start + 1][Regex('~')] = [state + 1]
self.transitions[0][Regex('ε')] = [nfa.start + 1]
self.transitions[0][Regex('~')] = [i + 1 for i in range(nfa.states + 1) if i != nfa.start]
for i in range(nfa.states):
if not i in nfa.finals:
self.transitions[i + 1][Regex('~')] = [nfa.states + 1]
else:
self.transitions[i + 1][Regex('ε')] = [nfa.states + 1]
# pprint(self.transitions)
# print(self.transitions)
# for (origin, regex), destinations in transitions.items():
# assert hasattr(destinations, '__iter__'), 'Invalid collection of states'
# self.transitions[origin][regex] = destinations
def _regexes(self, i, j):
regexes = ''
for regex, value in self.nfa.transitions[i].items():
if j in value:
if regexes == '':
regexes = str(regex)
else:
regexes += '|' + str(regex)
continue
return regexes
def epsilon_transitions(self, state):
assert state in self.transitions, 'Invalid state'
try:
return self.transitions[state]['']
except KeyError:
return ()
def graph(self):
G = pydot.Dot(rankdir='LR', margin=0.1)
G.add_node(pydot.Node('start', shape='plaintext', label='', width=0, height=0))
for start, dic in self.transitions.items():
for regex, destination in dic.items():
tran = 'ε' if regex.regular_exp == '' else regex.regular_exp
G.add_node(pydot.Node(start, shape='circle', style='bold' if start in self.finals else ''))
for end in destination:
G.add_node(pydot.Node(end, shape='circle', style='bold' if end in self.finals else ''))
G.add_edge(pydot.Edge(start, end, label=tran, labeldistance=2))
G.add_edge(pydot.Edge('start', self.start, label='', style='dashed'))
return G
def _repr_svg_(self):
try:
return self.graph().create_svg().decode('utf8')
except:
pass
def _repr_png_(self):
# try:
return self.graph()
# except:
# pass | 37.175439 | 119 | 0.51345 | 489 | 4,238 | 4.386503 | 0.186094 | 0.125874 | 0.065268 | 0.068531 | 0.386946 | 0.265268 | 0.217716 | 0.100699 | 0.044755 | 0 | 0 | 0.012141 | 0.35866 | 4,238 | 114 | 120 | 37.175439 | 0.777042 | 0.173903 | 0 | 0.135135 | 0 | 0 | 0.020684 | 0 | 0 | 0 | 0 | 0 | 0.013514 | 1 | 0.081081 | false | 0.013514 | 0.054054 | 0.013514 | 0.22973 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a9e301a01427b553ef254d21db7775e0f228100 | 4,203 | py | Python | python/venv/lib/python2.7/site-packages/openstackclient/tests/image/v2/fakes.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | python/venv/lib/python2.7/site-packages/openstackclient/tests/image/v2/fakes.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | python/venv/lib/python2.7/site-packages/openstackclient/tests/image/v2/fakes.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from openstackclient.tests import fakes
from openstackclient.tests import utils
from openstackclient.tests.identity.v3 import fakes as identity_fakes
image_id = '0f41529e-7c12-4de8-be2d-181abb825b3c'
image_name = 'graven'
image_owner = 'baal'
image_protected = False
image_visibility = 'public'
IMAGE = {
'id': image_id,
'name': image_name,
'owner': image_owner,
'protected': image_protected,
'visibility': image_visibility,
}
IMAGE_columns = tuple(sorted(IMAGE))
IMAGE_data = tuple(IMAGE[x] for x in sorted(IMAGE))
member_status = 'pending'
MEMBER = {
'member_id': identity_fakes.project_id,
'image_id': image_id,
'status': member_status,
}
# Just enough v2 schema to do some testing
IMAGE_schema = {
"additionalProperties": {
"type": "string"
},
"name": "image",
"links": [
{
"href": "{self}",
"rel": "self"
},
{
"href": "{file}",
"rel": "enclosure"
},
{
"href": "{schema}",
"rel": "describedby"
}
],
"properties": {
"id": {
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", # noqa
"type": "string",
"description": "An identifier for the image"
},
"name": {
"type": [
"null",
"string"
],
"description": "Descriptive name for the image",
"maxLength": 255
},
"owner": {
"type": [
"null",
"string"
],
"description": "Owner of the image",
"maxLength": 255
},
"protected": {
"type": "boolean",
"description": "If true, image will not be deletable."
},
"self": {
"type": "string",
"description": "(READ-ONLY)"
},
"schema": {
"type": "string",
"description": "(READ-ONLY)"
},
"size": {
"type": [
"null",
"integer"
],
"description": "Size of image file in bytes (READ-ONLY)"
},
"status": {
"enum": [
"queued",
"saving",
"active",
"killed",
"deleted",
"pending_delete"
],
"type": "string",
"description": "Status of the image (READ-ONLY)"
},
"visibility": {
"enum": [
"public",
"private"
],
"type": "string",
"description": "Scope of image accessibility"
},
}
}
class FakeImagev2Client(object):
def __init__(self, **kwargs):
self.images = mock.Mock()
self.images.resource_class = fakes.FakeResource(None, {})
self.image_members = mock.Mock()
self.image_members.resource_class = fakes.FakeResource(None, {})
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
class TestImagev2(utils.TestCommand):
def setUp(self):
super(TestImagev2, self).setUp()
self.app.client_manager.image = FakeImagev2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
| 27.116129 | 121 | 0.520343 | 414 | 4,203 | 5.188406 | 0.415459 | 0.0554 | 0.011639 | 0.013966 | 0.111266 | 0.084264 | 0.048883 | 0.048883 | 0.01257 | 0.01257 | 0 | 0.020349 | 0.34523 | 4,203 | 154 | 122 | 27.292208 | 0.760174 | 0.145848 | 0 | 0.232 | 0 | 0.008 | 0.25399 | 0.034444 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016 | false | 0 | 0.032 | 0 | 0.064 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aa1225f975c24c5462e87cbb4bbe01830ca0112 | 2,557 | py | Python | wwyfcs/utils/create_examples.py | shc558/wwyfcs | 05ca6c94f59f7317e4e597d3df18f549dcadf7c1 | [
"MIT"
] | 1 | 2021-03-24T18:00:03.000Z | 2021-03-24T18:00:03.000Z | wwyfcs/utils/create_examples.py | shc558/wwyfcs | 05ca6c94f59f7317e4e597d3df18f549dcadf7c1 | [
"MIT"
] | null | null | null | wwyfcs/utils/create_examples.py | shc558/wwyfcs | 05ca6c94f59f7317e4e597d3df18f549dcadf7c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
import os
import pandas as pd
from sklearn.model_selection import train_test_split
# load data and tag lines with source characters' names
def load_data(args):
data = pd.read_csv(args.file_path)
data['id:data'] = data[args.id_colname]+':'+data[args.data_colname]
return data
# create response sets where each row includes n previous responses as context
def extract_dialogues(df, args):
    dialogue_chains = []
    n = args.len_context
    for i in range(n, len(df[args.data_colname])):
        # optionally keep only responses spoken by the specified character
        if args.character and df[args.id_colname][i] != args.character:
            continue
        prev = i - 1 - n  # include the current response and the previous n responses
        row = [df[args.data_colname][j] for j in range(i, prev, -1)]
        dialogue_chains.append(row)
    columns = ['response', 'context'] + ['context/' + str(i) for i in range(n - 1)]
    df = pd.DataFrame.from_records(dialogue_chains, columns=columns)
    return df.dropna().reset_index(drop=True)
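# Layout sketch of the returned frame (toy values, len_context = 2):
#   response    context     context/0
#   "line 3"    "line 2"    "line 1"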
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', type=str,
help='Path to row data.')
parser.add_argument('--data_colname', type=str,
help='Name of the data field.')
parser.add_argument('--id_colname', type=str,
help='Name of the ID field.')
parser.add_argument('--output_dir', type=str,
default=None, help='Dir to output data')
parser.add_argument('--character', type=str,
default=None,help='Name of the character to extract.')
parser.add_argument('--len_context', type=int,
default = 9, help='Number of previous lines to use as context')
parser.add_argument('--eval_size', type=float,
default = 0.1, help='fraction to use as evaluation set')
args = parser.parse_args()
extracted = extract_dialogues(load_data(args), args)
    # split the full extracted set; a fixed random_state keeps the split
    # reproducible, and renaming avoids shadowing the builtin eval
    train_df, eval_df = train_test_split(extracted, test_size=args.eval_size, random_state=42)
    output_dir = args.output_dir if args.output_dir else os.getcwd()
    train_df.to_csv(os.path.join(output_dir, 'train_examples.csv'), index=False)
    eval_df.to_csv(os.path.join(output_dir, 'eval_examples.csv'), index=False)
if __name__ == "__main__":
main()
| 36.014085 | 96 | 0.678138 | 381 | 2,557 | 4.393701 | 0.293963 | 0.037634 | 0.071087 | 0.026284 | 0.297491 | 0.215054 | 0.20908 | 0.158303 | 0.133811 | 0.133811 | 0 | 0.006705 | 0.183418 | 2,557 | 70 | 97 | 36.528571 | 0.795019 | 0.095424 | 0 | 0.214286 | 0 | 0 | 0.164716 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0 | 0.071429 | 0 | 0.160714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aa1a697528836b8f497782c36ef0f059f0914be | 2,445 | py | Python | dottyDrawings.py | riyap/DottyDrawings | 33691c390ef939507e6499c56ab3e0f33485734f | [
"MIT"
] | null | null | null | dottyDrawings.py | riyap/DottyDrawings | 33691c390ef939507e6499c56ab3e0f33485734f | [
"MIT"
] | null | null | null | dottyDrawings.py | riyap/DottyDrawings | 33691c390ef939507e6499c56ab3e0f33485734f | [
"MIT"
] | null | null | null | from tkinter import *
color = 'black'
def red():
global color
color = 'red'
def orange():
global color
color = 'orange'
def yellow():
global color
color = 'yellow'
def green():
    global color
    color = 'green'
def lime_green():
    global color
    color = 'lime green'
def light_blue ():
global color
color = 'light blue'
def blue ():
global color
color = 'blue'
def purple():
global color
color = 'purple'
def pink():
global color
color = 'pink'
def brown():
global color
color = 'brown'
def black():
global color
color = 'black'
def gray():
global color
color = 'gray'
tk = Tk()
tk.title( "Dotty Drawings")
frame = Frame(tk)
frame.pack()
r = Button(frame, bg = 'red', command = red, width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'orange', command = orange , width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'yellow', command = yellow , width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'lime green', command = lime_green, width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'green', command = green, width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'light blue', command =light_blue , width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'blue', command = blue, width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'purple', command = purple , width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'pink', command = pink, width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'brown', command = brown , width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'black', command = black , width = 5)
r.pack(side = LEFT)
r = Button(frame, bg = 'gray', command = gray , width = 5)
r.pack(side = LEFT)
canvas = Canvas(tk, width = 500, height = 300, bg = 'white', cursor = 'dot')
canvas.pack(expand = YES, fill = BOTH)
def paint(event):
global color
x1, y1 = (event.x -5), (event.y -5)
x2, y2 = (event.x +5), (event.y +5)
canvas.create_oval(x1,y1,x2,y2, fill = color, outline = color)
canvas.bind("<B1-Motion>", paint)
def clear():
canvas.delete(ALL)
clear = Button(frame, text = "Clear", command = clear, width = 5)
clear.pack(side = RIGHT)
def erase():
global color
color = 'white'
e = Button(frame, text = 'Erase', command = erase, width = 5)
e.pack(side = RIGHT)
tk.mainloop()
| 26.010638 | 77 | 0.592229 | 349 | 2,445 | 4.13467 | 0.197708 | 0.106722 | 0.144144 | 0.116424 | 0.296604 | 0.296604 | 0.264033 | 0.264033 | 0.264033 | 0.264033 | 0 | 0.018043 | 0.251943 | 2,445 | 93 | 78 | 26.290323 | 0.770913 | 0.009816 | 0 | 0.333333 | 0 | 0 | 0.08122 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.011905 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aa5ff8d79fad0cf1844b76d7b3b8225587726b3 | 1,248 | py | Python | search/nlp/qu/alpha_numbers.py | octabytes/search | 750124d2de0e349249e3183daccc83ba5a82af36 | [
"Apache-2.0"
] | null | null | null | search/nlp/qu/alpha_numbers.py | octabytes/search | 750124d2de0e349249e3183daccc83ba5a82af36 | [
"Apache-2.0"
] | null | null | null | search/nlp/qu/alpha_numbers.py | octabytes/search | 750124d2de0e349249e3183daccc83ba5a82af36 | [
"Apache-2.0"
] | null | null | null | alpha_numbers = [
{
"alpha": "one",
"number": 1
},
{
"alpha": "two",
"number": 2
},
{
"alpha": "three",
"number": 3
},
{
"alpha": "four",
"number": 4
},
{
"alpha": "five",
"number": 5
},
{
"alpha": "six",
"number": 6
},
{
"alpha": "seven",
"number": 7
},
{
"alpha": "eight",
"number": 8
},
{
"alpha": "nine",
"number": 9
},
{
"alpha": "ten",
"number": 10
},
{
"alpha": "first",
"number": 1
},
{
"alpha": "second",
"number": 2
},
{
"alpha": "third",
"number": 3
},
{
"alpha": "fourth",
"number": 4
},
{
"alpha": "fifth",
"number": 5
},
{
"alpha": "sixth",
"number": 6
},
{
"alpha": "seventh",
"number": 7
},
{
"alpha": "eighth",
"number": 8
},
{
"alpha": "ninth",
"number": 9
},
{
"alpha": "tenth",
"number": 10
}
]
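# Convenience sketch: a word -> number lookup derived from the table above,
# handy when mapping query tokens to numerals.
alpha_to_number = {a["alpha"]: a["number"] for a in alpha_numbers}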
numeric_words = [a["alpha"] for a in alpha_numbers]
| 14.682353 | 51 | 0.310096 | 91 | 1,248 | 4.21978 | 0.417582 | 0.0625 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034591 | 0.490385 | 1,248 | 84 | 52 | 14.857143 | 0.569182 | 0 | 0 | 0.240964 | 0 | 0 | 0.255609 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aa66e4a88e8039f0db550652234c2a41fa7e240 | 2,510 | py | Python | pdfmerge3/pdfmerge3.py | marciojmo/pdfmerge3 | f1fa598000dd304fc85bcb60bb72e35e1121feea | [
"MIT"
] | null | null | null | pdfmerge3/pdfmerge3.py | marciojmo/pdfmerge3 | f1fa598000dd304fc85bcb60bb72e35e1121feea | [
"MIT"
] | null | null | null | pdfmerge3/pdfmerge3.py | marciojmo/pdfmerge3 | f1fa598000dd304fc85bcb60bb72e35e1121feea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Merge pdf files into a single file.
EXAMPLES
pdfmerge3 -o output.pdf
Merges all pdf files from the current folder in lexicographic order and put the result in output.pdf
pdfmerge3 -o output.pdf file1.pdf file2.pdf file3.pdf
Merges file1.pdf, file2.pdf, file3.pdf (in this order) and put the result in output.pdf
"""
import os
import argparse
from PyPDF2 import PdfFileMerger
def merge(output_file="output.pdf", files_to_merge=None):
"""
Merge pdf files into a single file.
:param output_file: The result file.
:param files_to_merge: The list of files to merge. If empty, all pdf files in the current directory will be
used and merged in lexicographic order.
"""
if not files_to_merge:
# Load all pdf files from the current directory
files_to_merge = sorted([x for x in os.listdir() if x.endswith(".pdf")])
if not output_file.endswith(".pdf"):
output_file += ".pdf"
# Simple error checking...
# Check if there is at least two files to merge
if len(files_to_merge) < 2:
print("Error: At least two pdf files are required to merge.")
return
# Check if the files to merge exists and if they are pdf files
errors = []
for f in files_to_merge:
if not os.path.isfile(f):
errors.append("File {0} doesn't exist.".format(f))
    if errors:
        print("Error: ")
        for err in errors:
            print(" " + err)
        return
    # Check if the output filename conflicts with an existing file
    # (os.path.exists also covers output paths outside the current directory)
    if os.path.exists(output_file):
print("Output filename {0} conflicts with existing files".format(output_file))
return
# Merging files
merger = PdfFileMerger()
print("Merging files...")
for f in files_to_merge:
merger.append(open(f, 'rb'))
with open(output_file, 'wb') as fout:
merger.write(fout)
print("DONE!")
def parse_args():
""" Arguments parser """
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("-o", "--output", metavar="output_file")
parser.add_argument("files_to_merge", nargs='*', metavar="files_to_merge")
return parser.parse_args()
def main():
""" Command line entry point """
args = parse_args()
if args.output:
merge(args.output, args.files_to_merge or None)
else:
merge(files_to_merge=args.files_to_merge or None)
| 30.987654 | 111 | 0.657371 | 360 | 2,510 | 4.466667 | 0.327778 | 0.069652 | 0.11194 | 0.026119 | 0.18408 | 0.18408 | 0.073383 | 0.038557 | 0 | 0 | 0 | 0.006828 | 0.241434 | 2,510 | 80 | 112 | 31.375 | 0.83771 | 0.347809 | 0 | 0.116279 | 0 | 0 | 0.146465 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.069767 | 0 | 0.232558 | 0.139535 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aa707c8a85977c3cb01e58f2e7d0ab3b1f10872 | 2,685 | py | Python | openGaussBase/testcase/KEYWORDS/restrict/Opengauss_Function_Keyword_Restrict_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/KEYWORDS/restrict/Opengauss_Function_Keyword_Restrict_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/KEYWORDS/restrict/Opengauss_Function_Keyword_Restrict_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
#-- @testpoint: the openGauss keyword "restrict" (non-reserved) used as a directory object name
'''
import unittest
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class Hostname(unittest.TestCase):
def setUp(self):
logger.info("------------------------ Opengauss_Function_Keyword_Restrict_Case0020 开始执行--------------------------")
# 关键字作为目录对象名不带双引号 - 成功
def test_restrict_1(self):
SqlMdg = commonsh.execut_db_sql('''create directory restrict as '/tmp/';
drop directory restrict;''')
logger.info(SqlMdg)
self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg)
self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg)
    # keyword as a directory object name with double quotes - succeeds
def test_restrict_2(self):
SqlMdg = commonsh.execut_db_sql('''create directory "restrict" as '/tmp/';
drop directory "restrict";''')
logger.info(SqlMdg)
self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg)
self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg)
    # keyword as a directory object name with single quotes - expected error
def test_restrict_3(self):
SqlMdg = commonsh.execut_db_sql('''drop directory if exists 'restrict';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
SqlMdg = commonsh.execut_db_sql(''' create directory 'restrict' as '/tmp/';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
    # keyword as a directory object name with backticks - expected error
def test_restrict_4(self):
SqlMdg = commonsh.execut_db_sql('''drop directory if exists \`restrict\`;''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
SqlMdg = commonsh.execut_db_sql('''create directory \`restrict\` as '/tmp/';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
def tearDown(self):
        logger.info('------------------------ Opengauss_Function_Keyword_Restrict_Case0020 finished --------------------------') | 36.283784 | 124 | 0.665922 | 316 | 2,685 | 5.509494 | 0.370253 | 0.045951 | 0.082711 | 0.119472 | 0.541068 | 0.541068 | 0.541068 | 0.541068 | 0.479035 | 0.479035 | 0 | 0.009781 | 0.200372 | 2,685 | 74 | 125 | 36.283784 | 0.800652 | 0.220112 | 0 | 0.368421 | 0 | 0 | 0.28071 | 0.096695 | 0 | 0 | 0 | 0 | 0.210526 | 1 | 0.157895 | false | 0 | 0.105263 | 0 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aa7f5994534e2c541d01b757ecf3f65fc58d25f | 17,858 | py | Python | acwingcli/__main__.py | jasonsun0310/acwingcli | ef5de599b8fceeb16563aa4f2ac10db106b374da | [
"MIT"
] | null | null | null | acwingcli/__main__.py | jasonsun0310/acwingcli | ef5de599b8fceeb16563aa4f2ac10db106b374da | [
"MIT"
] | null | null | null | acwingcli/__main__.py | jasonsun0310/acwingcli | ef5de599b8fceeb16563aa4f2ac10db106b374da | [
"MIT"
] | null | null | null | import json
import acwingcli.actions as actions
import sys
import os
import acwingcli.commandline_writer as cmdwrite
# import acwingcli
import argparse
import subprocess
import acwingcli.update as update
import websocket
import threading
import psutil
import acwingcli.utils as utils
import time
import glob
from time import sleep
from colorama import Fore, Back, Style, init
from acwingcli.utils import *
from multiprocessing import Pool, TimeoutError, Lock, Value
from multiprocessing.connection import Client
from .headers import socket_header
from .login import prepare_session, make_runcode_header, trait_finished_running
from .readfile import get_string_from_file
from .login import submit, get_submission, display_submission_result
from .persistent_session import runserver
from functools import reduce
import acwingcli.persistent_session as localserver
ap = argparse.ArgumentParser()
ap.add_argument('-S', '--submit', help = 'submit file')
ap.add_argument('-f', '--files', nargs = '+', help = 'submit file')
ap.add_argument('-r', '--run', help = 'run code')
ap.add_argument('-g', '--get', help = 'get problem')
ap.add_argument('-s', '--serversubmit', help = 'run code')
ap.add_argument('-setup', action = 'store_true')
ap.add_argument('-login', action = 'store_true')
ap.add_argument('-clean', action = 'store_true')
ap.add_argument('-all', action = 'store_true')
ap.add_argument('-debug', action = 'store_true')
ap.add_argument('-initserver', action = 'store_true')
ap.add_argument('-updateproblems', action = 'store_true')
ap.add_argument('-stopserver', action = 'store_true')
ap.add_argument('-runserver', action = 'store_true')
ap.add_argument('-debuginfo', action = 'store_true')
def make_submission_url(url):
return url.replace('content', 'content/submission')
def camelize(words):
return ' '.join(list(map(lambda x : x[0].capitalize() + x[1:].lower(), words.split('_'))))
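# e.g. camelize('WRONG_ANSWER') -> 'Wrong Answer'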
def has_valid_testcase(response: dict):
if 'testcase_input' in response.keys() and 'testcase_output' in response.keys() and \
min(len(response['testcase_input']), len(response['testcase_output'])) >= 1:
return True
else:
return False
def display_judge_status(response : dict, problem_id:str):
import acwingcli.config as config
url = config.problem_cache[eval(problem_id)]['submission_link']
if response['status'] == 'ACCEPTED':
cmdwrite.judge_status('[✓] Judge Status: {}\n'.format('Accepted'), color = Fore.GREEN)
display_submission_result(response, url)
return True
elif response['status'] in {'WRONG_ANSWER', 'MEMORY_LIMITED_EXCEEDED', 'TIME_LIMIT_EXCEEDED', 'RUNTIME_ERROR', 'COMPILE_ERROR'}:
cmdwrite.judge_status('[✗] Judge Status: {}\n'.format(camelize(response['status'])), color = Fore.RED)
display_submission_result(response, url)
if has_valid_testcase(response):
update.testcases(problem_id, response['testcase_input'], response['testcase_output'])
return True
else:
cmdwrite.judge_status('[-] Judge Status: {}'.format(camelize(response['status'])), color = Fore.YELLOW)
return False
def display_run_status(response:dict, problem_id:str, test_data:str, expected_answer:str):
if response['status'] == 'FINISHED':
if response.get('stdout', '').strip() == expected_answer.strip():
cmdwrite.judge_status('[✓] Judge Status: ' + 'Correct' + ' (' + camelize(response['status']) + ')',
linebreak = True, color = Fore.GREEN)
cmdwrite.judge_status('[✓] Testcase: ' + test_data.strip(), linebreak = True, color = Fore.GREEN)
cmdwrite.judge_status('[✓] Answer: ' + response.get('stdout', 'N/A').strip(), linebreak = True, color = Fore.GREEN)
cmdwrite.judge_status('[✓] Expected answer: ' + expected_answer.strip(), linebreak = True, color = Fore.GREEN)
else:
cmdwrite.judge_status('[✗] Judge Status: ' + 'Wrong' + ' (' + camelize(response['status']) + ')',
linebreak = True, color = Fore.RED)
cmdwrite.judge_status('[✗] Testcase: ' + test_data.strip(), linebreak = True, color = Fore.RED)
cmdwrite.judge_status('[✗] Answer: ' + response.get('stdout', 'N/A').strip(), linebreak = True, color = Fore.RED)
cmdwrite.judge_status('[✗] Expected answer: ' + expected_answer.strip(), linebreak = True, color = Fore.RED)
elif response['status'] == 'COMPILE_ERROR':
sys.stdout.write(Fore.RED + Style.BRIGHT + '\r[✗] Judge Status: ' + camelize(response['status']) + '\n' + Style.RESET_ALL)
sys.stdout.flush()
cmdwrite.judge_status('[✗] Testcase: ' + test_data.strip(), linebreak = True, color = Fore.RED)
cmdwrite.judge_status('[✗] Expected answer: ' + expected_answer.strip(), linebreak = True, color = Fore.RED)
cmdwrite.judge_status(response.get('compilation_log', '').strip(), linebreak = True, color = Fore.RED)
elif response['status'] in {'MEMORY_LIMITED_EXCEEDED', 'TIME_LIMIT_EXCEEDED', 'RUNTIME_ERROR'}:
sys.stdout.write(Fore.RED + Style.BRIGHT + '\r[✗] Judge Status: ' + camelize(response['status']) + '\n' + Style.RESET_ALL)
sys.stdout.flush()
cmdwrite.judge_status('[✗] Testcase: ' + test_data.strip(), linebreak = True, color = Fore.RED)
cmdwrite.judge_status('[✗] Answer: ' + response.get('stdout', 'N/A').strip(), linebreak = True, color = Fore.RED)
cmdwrite.judge_status('[✗] Expected answer: ' + expected_answer.strip(), linebreak = True, color = Fore.RED)
cmdwrite.judge_status('[✗] Stderr: ' + response.get('stderr', 'N/A').strip(), linebreak = True, color = Fore.RED)
else:
sys.stdout.write(Fore.YELLOW + Style.BRIGHT + '\r[-] Judge Status: ' + response['status'] + Style.RESET_ALL)
sys.stdout.flush()
return False
return True
def log_run_status(response:dict, problem_id:str, test_data:str, expected_answer:str):
if response['status'] == 'FINISHED':
if response.get('stdout', '').strip() == expected_answer.strip():
return '\n'.join([Fore.GREEN + Style.BRIGHT,
'[✓] Judge Status: Correct ({})'.format(camelize(response['status'])),
'[✓] Testcase: {}'.format(test_data.strip()),
'[✓] Answer: {}'.format(response.get('stdout', 'N/A').strip()),
'[✓] Expected: {}'.format(expected_answer.strip()),
'[✓] StdErr: {}'.format(response.get('stderr', 'N/A').strip())]) + Style.RESET_ALL
else:
return '\n'.join([Fore.RED + Style.BRIGHT,
'[✗] Judge Status: Wrong ({})'.format(camelize(response['status'])),
'[✗] Testcase: {}'.format(test_data.strip()),
'[✗] Answer: {}'.format(response.get('stdout', 'N/A').strip()),
'[✗] Expected: {}'.format(expected_answer.strip()),
'[✗] StdErr: {}'.format(response.get('stderr', 'N/A').strip())]) + Style.RESET_ALL
elif response['status'] == 'COMPILE_ERROR':
with early_terminate.get_lock():
early_terminate.value = True
return '\n'.join([Fore.RED + Style.BRIGHT,
'[✗] Judge Status: {}'.format(camelize(response['status'])),
'[✗] Testcase: {}'.format(test_data.strip()),
'[✗] Expected: {}'.format(expected_answer.strip()),
'[✗] {}'.format(response.get('compilation_log', '').strip())]) + Style.RESET_ALL
elif response['status'] in {'MEMORY_LIMITED_EXCEEDED', 'TIME_LIMIT_EXCEEDED', 'RUNTIME_ERROR'}:
return '\n'.join([Fore.RED + Style.BRIGHT,
'[✗] Judge Status: {}'.format(camelize(response['status'])),
'[✗] Testcase: {}'.format(test_data.strip()),
'[✗] Answer: {}'.format(response.get('stdout', 'N/A').strip()),
'[✗] Expected: {}'.format(expected_answer.strip()),
'[✗] StdErr: {}'.format(response.get('stderr', 'N/A').strip())]) + Style.RESET_ALL
else:
return 'UNKNOWN STATUS'
def display_debug_message(response):
cmdwrite.server_debug(response['local_debug_message'])
def runcode(problem_id, code, input_data, output_data):
if early_terminate.value == True:
return
try:
s, cook = prepare_session()
import acwingcli.config as c
url = c.problem_cache[eval(problem_id)]['link']
ws = websocket.create_connection('wss://www.acwing.com/wss/chat/',
header = socket_header,
cookie = cook)
ws.settimeout(20)
ws.send(json.dumps(make_runcode_header(url, code, input_data)))
try:
while True:
message = ws.recv()
                if trait_finished_running(message) is not None:
ws.close()
with finished_cases.get_lock():
finished_cases.value += 1
cmdwrite.progress(str(finished_cases.value) + '/' + str(total_cases))
return log_run_status(json.loads(message), problem_id, input_data, output_data)
        except Exception:
            # socket timeout or malformed message; stop waiting for this testcase
            pass
ws.close()
    except Exception:
        # re-raise unchanged: exception objects have no .message in Python 3
        raise
def serverrun_single(problem_id, code, input_data, output_data):
import acwingcli.config as config
url = config.problem_cache[eval(problem_id)]['link']
address = ('localhost', 6001)
conn = Client(address, authkey=b'1234')
local_server_message = {'activity' : 'run',
'url' : url,
'code' : code,
'input_data' : input_data}
conn.send(json.dumps(local_server_message))
while True:
early_exit = False
if conn.poll(20) == True:
response = json.loads(conn.recv())
if 'local_debug_message' in response.keys():
display_debug_message(response)
elif 'status' in response.keys():
early_exit = display_run_status(response, problem_id, input_data, output_data)
else:
sys.stdout.write(Fore.GREEN + Style.BRIGHT + 'TIME OUT' + Style.RESET_ALL)
sys.stdout.flush()
if early_exit == True:
break
conn.close()
def serversubmit(problem_id, code):
total_attempt = 0
while total_attempt <= 5:
total_attempt += 1
try:
import acwingcli.config as config
url = config.problem_cache[eval(problem_id)]['link']
address = ('localhost', 6001)
conn = Client(address, authkey=b'1234')
local_server_message = {'activity' : 'send',
'url' : url,
'code' : code}
conn.send(json.dumps(local_server_message))
except ConnectionRefusedError:
if len(utils.get_acwing_server_process()) == 0:
cmdwrite.status('No server found. Initializing server...')
subprocess.Popen(['acwingcli', '-runserver'], stdout=subprocess.PIPE)
time.sleep(1)
else:
if total_attempt > 5:
cmdwrite.status('local client maxed out attempt')
return
else:
break
while True:
early_exit = False
if conn.poll(20) == True:
response = json.loads(conn.recv())
if 'local_debug_message' in response.keys():
display_debug_message(response)
elif 'status' in response.keys():
early_exit = display_judge_status(response, problem_id)
else:
cmdwrite.client_debug('server time out')
if early_exit == True:
break
conn.close()
def global_context(lock_, total_cases_, finished_cases_, early_terminate_):
global lock
global total_cases
global finished_cases
global early_terminate
lock = lock_
total_cases = total_cases_
finished_cases = finished_cases_
early_terminate = early_terminate_
def shutdown_server():
try:
address = ('localhost', 6001)
conn = Client(address, authkey=b'1234')
local_server_message = { 'activity' : 'stopserver' }
conn.send(json.dumps(local_server_message))
while True:
early_exit = False
if conn.poll(20) == True:
cmdwrite.client_debug('shutdown message sent, waiting for response')
response = json.loads(conn.recv())
if 'local_debug_message' in response.keys():
display_debug_message(response)
if (response['local_debug_message'] == 'close'):
early_exit = True
cmdwrite.client_debug('server shutdown confirmation received')
if early_exit == True:
break
else:
cmdwrite.client_debug('no server shutdown confirmation received, exit')
break
except ConnectionRefusedError:
cmdwrite.client_debug('no server connection, exit')
except EOFError:
cmdwrite.client_debug('no server connection, exit')
def main():
args = vars(ap.parse_args())
if args.get('debuginfo') == True:
import acwingcli.commandline_dispatcher as cmd_dispatcher
cmd_dispatcher.debuginfo()
return
if args.get('login') == True:
prepare_session()
return
if args.get('setup') == True:
import acwingcli.config as config
config.setup_assistant()
return
if args.get('clean') == True:
actions.clean()
return
    if args.get('submit') is not None:
code = get_string_from_file(args['submit']).decode('utf-8')
submit('https://www.acwing.com/problem/content/description/1/', code)
    elif args.get('run') is not None:
file_abspath = os.path.abspath(args['run'])
code = get_string_from_file(file_abspath).decode('utf-8')
problem_id = get_problem_id_from_file(args['run'])
        if args.get('files') is not None:
N = len(args.get('files'))
l = Lock()
total_cases = N
finished_cases = Value('i', 0)
early_terminate = Value('i', 0)
all_samples = list(map(lambda f : [get_string_from_file(f).decode('utf-8'),
get_string_from_file(f.replace('in', 'out')).decode('utf-8')],
args['files']))
funcall_args = [(problem_id, code, sample_in, sample_out,) for sample_in, sample_out in all_samples]
with Pool(min(N, 20), initializer = global_context, initargs = (l, total_cases, finished_cases, early_terminate)) as pool:
                try:
                    exec_result = pool.starmap(runcode, funcall_args)
                    for r in exec_result:
                        print(r)
                except Exception as e:
                    # exception objects carry no .message attribute in Python 3
                    print(e)
                # the "with Pool(...)" context manager terminates the pool on
                # exit, so no explicit close()/terminate()/del is needed here
elif args.get('all') == True:
owd = os.getcwd()
os.chdir(os.path.dirname(file_abspath))
# os.chdir(utils.get_or_create_problem_folder(problem_id))
files = glob.glob('sample*.in')
N = len(files)
l = Lock()
total_cases = N
finished_cases = Value('i', 0)
early_terminate = Value('i', 0)
all_samples = list(map(lambda f : [get_string_from_file(f).decode('utf-8'),
get_string_from_file(f.replace('in', 'out')).decode('utf-8')],
files))
funcall_args = [(problem_id, code, sample_in, sample_out,) for sample_in, sample_out in all_samples]
with Pool(min(N, 20), initializer = global_context, initargs = (l, total_cases, finished_cases, early_terminate)) as pool:
                try:
                    exec_result = pool.starmap(runcode, funcall_args)
                    for r in exec_result:
                        print(r)
                except Exception as e:
                    print(e)
                # the "with Pool(...)" context manager terminates the pool on
                # exit, so no explicit close()/terminate()/del is needed here
os.chdir(owd)
    elif args.get('serversubmit') is not None:
serversubmit(get_problem_id_from_file(args['serversubmit']),
get_string_from_file(os.path.abspath(args['serversubmit'])).decode('utf-8'))
    elif args.get('initserver'):
p = subprocess.Popen(['acwingcli', '-runserver'], stdout=subprocess.PIPE)
    elif args.get('get') is not None:
import acwingcli.problembook as problembook
problembook.get_problem(args['get'])
    elif args.get('updateproblems'):
problem_list()
elif args.get('stopserver') == True:
print('stopserver received')
shutdown_server()
utils.clean_up_server_processes()
    elif args.get('runserver'):
runserver()
if __name__ == '__main__':
main()
| 46.264249 | 134 | 0.582708 | 2,002 | 17,858 | 5.039461 | 0.132368 | 0.034889 | 0.033898 | 0.032709 | 0.584994 | 0.541084 | 0.457924 | 0.421548 | 0.398553 | 0.381207 | 0 | 0.004536 | 0.284018 | 17,858 | 385 | 135 | 46.384416 | 0.781636 | 0.004088 | 0 | 0.478632 | 0 | 0 | 0.140198 | 0.005567 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0.002849 | 0.099715 | 0.005698 | 0.196581 | 0.014245 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aa9303e674b4308db627878870a3d77956ca97a | 9,760 | py | Python | main.py | Readix/TrafficMonitoring | eaaf63b4973d44de37b7593eb4507e65b24b4e95 | [
"MIT"
] | null | null | null | main.py | Readix/TrafficMonitoring | eaaf63b4973d44de37b7593eb4507e65b24b4e95 | [
"MIT"
] | null | null | null | main.py | Readix/TrafficMonitoring | eaaf63b4973d44de37b7593eb4507e65b24b4e95 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import json
import os
import re
import yolo.darknet.darknet as darknet
import haversine
import imutils as imu
from tracker.sort import *
# Point and Polygon are used in the zone tests below but are not defined in
# this file; assuming they come from shapely
from shapely.geometry import Point, Polygon
#--------------------------------------------
'''
res_path - path to:
- calibration.npy
- perspective_matrix.npy
- mask.png
- sides.png
areas_path - path to json file with areas points
config_path, meta_path, weight_path - paths for yolo's data
url - path to video file or url of video stream
'''
res_path = 'resources/'
areas_path = 'areas.json'
config_path = 'yolo/configs/yolo.cfg'
meta_path = 'yolo/configs/yolo.data'
weight_path = 'yolo/configs/yolo.weights'
url = 'video/sample.mp4'
#--------------------------------------------
with open(areas_path) as fh:
config = json.load(fh)
net_main = darknet.load_net_custom(config_path.encode('ascii'), weight_path.encode('ascii'), 0, 1)
meta_main = darknet.load_meta(meta_path.encode('ascii'))
colors = {
'': (0, 0, 0),
'car': (0, 255, 255),
'mini_bus': (255, 0, 0),
'bus': (255, 0, 255),
'truck': (0, 0, 255),
'tram': (203, 192, 255),
'trolleybus': (0, 255, 0),
}
width = darknet.network_width(net_main)
height = darknet.network_height(net_main)
darknet_image = darknet.make_image(width, height, 3)
mask = cv2.imread(res_path + 'mask.jpg')
areas = config['areas']
persp_mtx = np.load(res_path + 'perspective_matrix.npy')
sides_png = cv2.resize(cv2.imread(res_path + 'sides.png'), (110, 55))
counters = dict()
for side in ['from', 'to']:
counters[side] = dict()
for area in areas:
counters[side][area['description']] = set()
camera_data = {
'cars_tracks': {},
'sort_tracker': Sort(),
}
def convert_back(x, y, w, h):
x1 = int(round(x - (w / 2)))
y1 = int(round(y - (h / 2)))
x2 = int(round(x + (w / 2)))
y2 = int(round(y + (h / 2)))
return x1, y1, x2, y2
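# e.g. convert_back(10, 10, 4, 6) -> (8, 7, 12, 13): converts a center/size
# box to corner coordinates.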
def get_detections(image):
frame_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (darknet.network_width(net_main), darknet.network_height(net_main)), interpolation=cv2.INTER_LINEAR)
darknet_frame = cv2.addWeighted(frame_resized, 1, mask, 1, 0, 3)
darknet.copy_image_from_bytes(darknet_image, darknet_frame.tobytes())
detections = darknet.detect_image(net_main, meta_main, darknet_image, thresh=0.15)
converted_detections = []
for detection in detections:
x, y, w, h, score, obj_type = detection[2][0], detection[2][1], detection[2][2], detection[2][3], detection[1], detection[0]
x1, y1, x2, y2 = convert_back(float(x), float(y), float(w), float(h))
converted_detections.append([x1 / width, y1 / height, x2 / width, y2 / height, score, obj_type.decode()])
return converted_detections
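# Note: get_detections returns corner coordinates normalized to [0, 1], so the
# caller can scale each box to whatever resolution it draws at.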
def analysis_and_display(detections, image):
cars_tracks = camera_data['cars_tracks']
sort_tracker = camera_data['sort_tracker']
trackers = sort_tracker.update(np.array(
list(map(lambda d: [*d[:-1]], detections))))
filtered_detections = []
for tracker in trackers:
x1, y1, x2, y2 = tracker[0], tracker[1], tracker[2], tracker[3]
cx1 = x1 + (x2 - x1) / 2
cy1 = y1 + (y2 - y1) / 2
min_detection = None
min_dist = -1.0
for detection in detections:
cx2 = detection[0] + (detection[2] - detection[0]) / 2
cy2 = detection[1] + (detection[3] - detection[1]) / 2
dist = ((cx2 - cx1) ** 2 + (cy2 - cy1) ** 2) ** 0.5
if min_dist == -1 or dist < min_dist:
min_dist = dist
min_detection = detection
if min_detection is not None:
filtered_detections.append({
'id': int(tracker[4]),
'x1': x1,
'y1': y1,
'x2': x2,
'y2': y2,
'score': min_detection[4],
'type': min_detection[5]
})
for detection in filtered_detections:
x1, y1, x2, y2 = detection['x1'], detection['y1'], detection['x2'], detection['y2']
cx = x1 + (x2 - x1) / 2
cy = y1 + (y2 - y1) / 2
original = np.array([((0.7, 0.4), (cx, cy))], dtype=np.float32)
converted = cv2.perspectiveTransform(original, persp_mtx)
detection['lat'] = float(converted[0][1][0])
detection['lng'] = float(converted[0][1][1])
point = Point(cx, cy)
for area in areas:
points = list(map(lambda p: (p['x'], p['y']), area['in']))
polygon = Polygon(points)
if polygon.contains(point):
detection['zone'] = area['description'] + ' in'
detection['zone_side'] = area['description']
detection['zone_direction'] = 'in'
points = list(map(lambda p: (p['x'], p['y']), area['out']))
polygon = Polygon(points)
if polygon.contains(point):
detection['zone'] = area['description'] + ' out'
detection['zone_side'] = area['description']
detection['zone_direction'] = 'out'
car_id = detection['id']
if car_id in cars_tracks:
car_tracks = cars_tracks[car_id]
else:
cars_tracks[car_id] = car_tracks = []
car_track = detection.copy()
car_track['millis'] = millis
car_tracks.append(car_track)
if len(car_tracks) > 7:
lat1, lng1, millis1 = car_tracks[-8]['lat'], car_tracks[-8]['lng'], car_tracks[-8]['millis']
lat2, lng2, millis2 = car_tracks[-1]['lat'], car_tracks[-1]['lng'], car_tracks[-1]['millis']
dist = haversine.haversine((lat1, lng1), (lat2, lng2))
dt = (millis2 - millis1) / 3600000
car_tracks[-1]['speed'] = dist / dt
detection['speed'] = dist / dt
now = time.time()
new_cars = []
for car_id in list(cars_tracks):
car_tracks = cars_tracks[car_id]
last_car_track = car_tracks[-1]
seconds = int(last_car_track["millis"] / 1000)
if now - seconds > 3:
del cars_tracks[car_id]
zone_from = None
zone_to = None
for car_track in car_tracks:
if "zone_direction" in car_track and "zone_side" in car_track:
if car_track["zone_direction"] == "in":
zone_from = car_track["zone_side"]
if car_track["zone_direction"] == "out":
zone_to = car_track["zone_side"]
new_cars.append({
'id': car_id,
"seconds": seconds,
"from": zone_from,
"to": zone_to,
"type": last_car_track["type"]
})
cv2.rectangle(image, (0, 0), (220, 150), (0,0,0), -1)
for new_car in new_cars:
if new_car['to'] is not None:
counters['to'][new_car['to']].add(new_car['id'])
if new_car['from'] is not None:
counters['from'][new_car['from']].add(new_car['id'])
cv2.putText(image, 'From', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255, 255, 255], 1)
cv2.putText(image, 'To', (130, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255, 255, 255], 1)
for i, side in enumerate(counters['to']):
text = side + ': ' + str(len(counters['to'][side]))
cv2.putText(image, text, (130, 50 + 30*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255, 255, 255], 1)
for i, side in enumerate(counters['from']):
text = side + ': ' + str(len(counters['from'][side]))
cv2.putText(image, text, (20, 50 + 30*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255, 255, 255], 1)
for detection in filtered_detections:
h, w, _ = image.shape
cv2.rectangle(image, (int(detection['x1'] * w), int(detection['y1'] * h)), (int(detection['x2'] * w), int(detection['y2'] * h)), colors[detection['type']], 1)
if 'speed' in detection:
cv2.putText(image, str(round(detection['speed'], 1)), (int(detection['x1'] * w), int(detection['y1'] * h - 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255, 255, 255], 1)
image[:sides_png.shape[0],-sides_png.shape[1]:] = sides_png
for area in areas:
h, w, _ = image.shape
contur_out = np.array(list(map(lambda p: (int(p['x'] * w), int(p['y'] * h)), area['out'])), np.int32)
contur_in = np.array(list(map(lambda p: (int(p['x'] * w), int(p['y'] * h)), area['in'])), np.int32)
cv2.polylines(image,[contur_out.reshape((-1,1,2))], True, (0, 0, 255))
cv2.polylines(image,[contur_in.reshape((-1,1,2))], True, (255, 0, 0))
cv2.imshow('frame', image)
ch = 0xFF & cv2.waitKey(1)
if ch == 27:
exit(0)
if __name__ == '__main__':
frame_skip_count = 25 / 8
frame_ind = 0
calibration_data = np.load(res_path + 'calibration.npy', allow_pickle=True)
mtx = calibration_data[0]
dist = calibration_data[1]
    # open the capture up front instead of relying on the first read raising
    # a NameError into a bare except
    cap = cv2.VideoCapture(url)
    while True:
        try:
            ret, frame_read = cap.read()
            if frame_read is None or not ret:
                cap = cv2.VideoCapture(url)
                continue
        except cv2.error:
            # reading failed outright; reopen the stream and retry
            cap = cv2.VideoCapture(url)
            continue
frame_ind += 1
if frame_ind < frame_skip_count:
continue
frame_ind = 0
millis = round(time.time() * 1000)
resized_image = cv2.resize(frame_read, (1920, 1080))
h, w = resized_image.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w, h))
frame_read = cv2.undistort(resized_image, mtx, dist, None, newcameramtx)
resized_image = imu.resize(frame_read, width=960)
detections = get_detections(resized_image)
analysis_and_display(detections, resized_image)
| 38.27451 | 175 | 0.569365 | 1,308 | 9,760 | 4.087156 | 0.18578 | 0.023569 | 0.01459 | 0.019641 | 0.235503 | 0.150767 | 0.139169 | 0.139169 | 0.095024 | 0.095024 | 0 | 0.054065 | 0.266598 | 9,760 | 254 | 176 | 38.425197 | 0.692791 | 0.009016 | 0 | 0.125 | 0 | 0 | 0.070502 | 0.00957 | 0 | 0 | 0.000425 | 0 | 0 | 1 | 0.014423 | false | 0 | 0.043269 | 0 | 0.067308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1aad579bb647de889b27f15e5b9e096cbdc98efc | 1,400 | py | Python | 05_topic_modeling/topic_modeling.py | alyonavyshnevska/text_visualization_course | 882a49290edf98640d20805ade14d6dfa7903e51 | [
"MIT"
] | null | null | null | 05_topic_modeling/topic_modeling.py | alyonavyshnevska/text_visualization_course | 882a49290edf98640d20805ade14d6dfa7903e51 | [
"MIT"
] | 1 | 2019-04-23T01:23:17.000Z | 2019-05-01T15:53:20.000Z | 05_topic_modeling/topic_modeling.py | alyonavyshnevska/text_visualization_course | 882a49290edf98640d20805ade14d6dfa7903e51 | [
"MIT"
] | null | null | null | from gensim.models.ldamodel import LdaModel as ldamodel
from gensim import corpora
import os
import pprint
from gensim.test.utils import datapath
import pyLDAvis
import pyLDAvis.gensim
from clean_data import clean as clean
def train_model(texts):
# turn our tokenized documents into a id <-> term dictionary
dictionary = corpora.Dictionary(texts)
# convert tokenized documents into a document-term matrix
corpus = [dictionary.doc2bow(text) for text in texts]
    # Load a potentially pretrained model from disk; datapath() always returns
    # a path string, so check that the file actually exists before loading.
    model_file = datapath("model_small")
    if os.path.exists(model_file):
        lda_model = ldamodel.load(model_file)
    else:
        # train model
        lda_model = ldamodel(corpus, num_topics=10, id2word=dictionary)
pprint.pprint(lda_model.top_topics(corpus, topn=5))
# Save model to disk.
temp_file = datapath("model_small")
lda_model.save(temp_file)
return lda_model, corpus, dictionary
def visualize_pyldavis(lda_model, corpus, dictionary):
prepared = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary)
pyLDAvis.save_html(prepared, 'vis_topic_model_02.html')
pyLDAvis.show(prepared)
if __name__ == '__main__':
#list of docs as lists of strings
texts = clean('voted-kaggle-dataset.csv')
lda_model, corpus, dictionary = train_model(texts)
# print(lda_model.show_topics())
visualize_pyldavis(lda_model, corpus, dictionary) | 30.434783 | 71 | 0.730714 | 184 | 1,400 | 5.369565 | 0.407609 | 0.080972 | 0.07085 | 0.121457 | 0.135628 | 0.082996 | 0 | 0 | 0 | 0 | 0 | 0.00613 | 0.184286 | 1,400 | 46 | 72 | 30.434783 | 0.859019 | 0.182857 | 0 | 0 | 0 | 0 | 0.077397 | 0.041337 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.269231 | 0 | 0.384615 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ab294b9434a7343b63cc5b761a5e72a521a3292 | 3,681 | py | Python | pylons/decorators/secure.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 118 | 2015-01-04T06:55:14.000Z | 2022-01-14T08:32:41.000Z | pylons/decorators/secure.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 21 | 2015-01-03T02:16:28.000Z | 2021-03-24T06:10:57.000Z | pylons/decorators/secure.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 53 | 2015-01-04T03:21:08.000Z | 2021-08-04T20:52:01.000Z | """Security related decorators"""
import logging
import urlparse
from decorator import decorator
try:
import webhelpers.html.secure_form as secure_form
except ImportError:
import webhelpers.pylonslib.secure_form as secure_form
from pylons.controllers.util import abort, redirect
from pylons.decorators.util import get_pylons
__all__ = ['authenticate_form', 'https']
log = logging.getLogger(__name__)
csrf_detected_message = (
"Cross-site request forgery detected, request denied. See "
"http://en.wikipedia.org/wiki/Cross-site_request_forgery for more "
"information.")
def authenticated_form(params):
submitted_token = params.get(secure_form.token_key)
return submitted_token is not None and \
submitted_token == secure_form.authentication_token()
@decorator
def authenticate_form(func, *args, **kwargs):
"""Decorator for authenticating a form
This decorator uses an authorization token stored in the client's
session for prevention of certain Cross-site request forgery (CSRF)
attacks (See
http://en.wikipedia.org/wiki/Cross-site_request_forgery for more
information).
For use with the ``webhelpers.html.secure_form`` helper functions.
"""
request = get_pylons(args).request
if authenticated_form(request.params):
try:
del request.POST[secure_form.token_key]
except KeyError:
del request.GET[secure_form.token_key]
return func(*args, **kwargs)
else:
log.warn('Cross-site request forgery detected, request denied: %r '
'REMOTE_ADDR: %s' % (request, request.remote_addr))
abort(403, detail=csrf_detected_message)
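# Typical use on a controller action (a sketch; controller and action names
# are assumed):
#
#     class AccountController(BaseController):
#         @authenticate_form
#         def save(self):
#             ...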
def https(url_or_callable=None):
"""Decorator to redirect to the SSL version of a page if not
currently using HTTPS. Apply this decorator to controller methods
(actions).
Takes a url argument: either a string url, or a callable returning a
string url. The callable will be called with no arguments when the
decorated method is called. The url's scheme will be rewritten to
https if necessary.
Non-HTTPS POST requests are aborted (405 response code) by this
decorator.
Example:
.. code-block:: python
# redirect to HTTPS /pylons
@https('/pylons')
def index(self):
do_secure()
# redirect to HTTPS /auth/login, delaying the url() call until
# later (as the url object may not be functional when the
# decorator/method are defined)
@https(lambda: url(controller='auth', action='login'))
def login(self):
do_secure()
# redirect to HTTPS version of myself
@https()
def get(self):
do_secure()
"""
def wrapper(func, *args, **kwargs):
"""Decorator Wrapper function"""
request = get_pylons(args).request
if request.scheme.lower() == 'https':
return func(*args, **kwargs)
if request.method.upper() == 'POST':
# don't allow POSTs (raises an exception)
abort(405, headers=[('Allow', 'GET')])
if url_or_callable is None:
url = request.url
elif callable(url_or_callable):
url = url_or_callable()
else:
url = url_or_callable
# Ensure an https scheme, which also needs a host
parts = urlparse.urlparse(url)
url = urlparse.urlunparse(('https', parts[1] or request.host) +
parts[2:])
log.debug('Redirecting non-https request: %s to: %s',
request.path_info, url)
redirect(url)
return decorator(wrapper)
| 32.008696 | 75 | 0.653355 | 460 | 3,681 | 5.108696 | 0.376087 | 0.038298 | 0.034043 | 0.048936 | 0.182979 | 0.164255 | 0.093617 | 0.05617 | 0.05617 | 0.05617 | 0 | 0.00401 | 0.254822 | 3,681 | 114 | 76 | 32.289474 | 0.852716 | 0.38006 | 0 | 0.153846 | 0 | 0 | 0.135808 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ab4bfddddae575d022743618b16597b55734005 | 393 | py | Python | ntu_data.py | qiuwch/lt | 3a36f325a70a37f8152e8f62387628f6dabcabeb | [
"MIT"
] | null | null | null | ntu_data.py | qiuwch/lt | 3a36f325a70a37f8152e8f62387628f6dabcabeb | [
"MIT"
] | null | null | null | ntu_data.py | qiuwch/lt | 3a36f325a70a37f8152e8f62387628f6dabcabeb | [
"MIT"
] | null | null | null |
import os
import glob
seq_id = 'S001'
# cam_id = 'C001'
cam_id = '*'
pid = 'P001'
rid = 'R001'
aid = 'A007'
data_root = './data/NTU'
seq_path = '{seq_id}{cam_id}{pid}{rid}{aid}_rgb/img_00001.jpg'.format(**locals())
seq_path = os.path.join(data_root, seq_path)
print(seq_path)
def get_images(path):
files = glob.glob(path)
return files
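# e.g. get_images(seq_path) with cam_id = '*' returns the matching first-frame
# paths across every camera for the chosen sequence/person/repetition/action.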
images = get_images(seq_path)
print(images) | 16.375 | 81 | 0.676845 | 66 | 393 | 3.787879 | 0.469697 | 0.14 | 0.064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05988 | 0.150127 | 393 | 24 | 82 | 16.375 | 0.688623 | 0.038168 | 0 | 0 | 0 | 0 | 0.202128 | 0.130319 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ab581d03430d3b70b00302350af75c95eb520dd | 6,876 | py | Python | pydax/_schema.py | cclauss/pydax | 75c7d7041041043695d693c0f36110a3f4cd1d9c | [
"Apache-2.0"
] | 11 | 2020-11-12T21:51:49.000Z | 2021-07-12T15:47:09.000Z | pydax/_schema.py | cclauss/pydax | 75c7d7041041043695d693c0f36110a3f4cd1d9c | [
"Apache-2.0"
] | 138 | 2020-11-14T01:35:08.000Z | 2021-07-22T05:52:29.000Z | pydax/_schema.py | cclauss/pydax | 75c7d7041041043695d693c0f36110a3f4cd1d9c | [
"Apache-2.0"
] | 5 | 2020-12-03T22:04:39.000Z | 2021-07-13T17:03:53.000Z | #
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"Schema parsing and loading functionality."
from abc import ABC
from copy import deepcopy
from typing import Any, Dict, Union
import requests
import yaml
from . import typing as typing_
from ._schema_retrieval import retrieve_schema_file
SchemaDict = Dict[str, Any]
class SchemaCollection(ABC):
"""Abstract class that provides functionality to load and export a schema collection.
:param url_or_path: URL or path to a schema file.
:param tls_verification: When set to ``True``, verify the remote link is https and whether the TLS certificate is
valid. When set to a path to a file, use this file as a CA bundle file. When set to ``False``, allow http links
and do not verify any TLS certificates. Ignored if ``url_or_path`` is a local path.
:raises ValueError: An error occurred when parsing ``url_or_path`` as either a URL or path.
:raises InsecureConnectionError: The connection is insecure. See ``tls_verification`` for more details.
"""
def __init__(self, url_or_path: Union[typing_.PathLike, str], *,
tls_verification: Union[bool, typing_.PathLike] = True) -> None:
"""Constructor method.
"""
self._schema_collection: SchemaDict = self._load_retrieved_schema_file(
retrieve_schema_file(url_or_path,
tls_verification=tls_verification))
# The URL or path from which the schema was retrieved
self._retrieved_url_or_path: Union[typing_.PathLike, str] = url_or_path
def _load_retrieved_schema_file(self, schema_file_content: str) -> SchemaDict:
"""Safely loads retrieved schema file.
:param schema: Retrieved schema file content.
:return: Nested dictionary representation of a schema.
"""
return yaml.safe_load(schema_file_content)
def export_schema(self, *keys: str) -> SchemaDict:
"""Returns a copy of a loaded schema collection. Should be used for debug purposes only.
:param keys: The sequence of keys that leads to the portion of the schemata to be exported.
:return: Copy of the schema dictionary.
Example:
>>> schema_collection = DatasetSchemaCollection('./tests/schemata/datasets.yaml')
>>> schema_collection.export_schema('datasets', 'noaa_jfk', '1.1.4')
{'name': 'NOAA Weather Data – JFK Airport'...}
"""
schema: SchemaDict = self._schema_collection
for k in keys:
schema = schema[k]
return deepcopy(schema)
@property
def retrieved_url_or_path(self) -> Union[typing_.PathLike, str]:
"""The URL or path from which the schema was retrieved.
Example:
>>> schema_collection = DatasetSchemaCollection('./tests/schemata/datasets.yaml')
>>> schema_collection.retrieved_url_or_path
'./tests/schemata/datasets.yaml'
"""
return self._retrieved_url_or_path
class DatasetSchemaCollection(SchemaCollection):
"""Dataset schema class that inherits functionality from :class:`SchemaCollection`.
"""
# We have this class here because we reserve the potential to put specific dataset schema code here
pass
class FormatSchemaCollection(SchemaCollection):
"""Format schema class that inherits functionality from :class:`SchemaCollection`.
"""
# We have this class here because we reserve the potential to put specific format schema code here
pass
class LicenseSchemaCollection(SchemaCollection):
"""License schema class that inherits functionality from :class:`SchemaCollection`.
:param spdx_json_url: URL to the spdx json license file.
"""
def __init__(self, *args: Any, spdx_json_url: str = 'https://spdx.org/licenses/licenses.json', **kwargs: Any):
"Constructor Method."
super().__init__(*args, **kwargs)
self.spdx_license_json: dict = requests.get(spdx_json_url, stream=True).json()
def get_license_name(self, identifier: str) -> str:
"""Get the name of the license from its identifier. If not found in the license schema file, turn to SPDX
license database instead.
:param identifier: The identifier of the license.
:return: Name of the license.
"""
if identifier in self._schema_collection['licenses']:
return self._schema_collection['licenses'][identifier]['name']
else: # look up spdx database
# This is not efficient, but it's OK for now -- the database is not very large
spdx_licenses = self.spdx_license_json['licenses']
for license in spdx_licenses:
if license['licenseId'] == identifier:
return license['name']
raise ValueError(f'Unknown license {identifier}')
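        # e.g. get_license_name('MIT') -> 'MIT License' (resolved via the SPDX
        # database when the identifier is absent from the schema file)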
class SchemaCollectionManager():
"""Stores all loaded schema collections in :attr:`schema_collections`.
:param kwargs: Schema name and schema instance key-value pairs.
Example:
>>> dataset_schemata = DatasetSchemaCollection('./tests/schemata/datasets.yaml')
>>> schema_collection_manager = SchemaCollectionManager(datasets=dataset_schemata)
>>> license_schemata = LicenseSchemaCollection('./tests/schemata/licenses.yaml')
>>> schema_collection_manager.add_schema_collection('licenses', license_schemata)
>>> schema_collection_manager.schema_collections
{'datasets':..., 'licenses':...}
"""
def __init__(self, **kwargs: SchemaCollection) -> None:
"""Constructor method
"""
self.schema_collections: Dict[str, SchemaCollection] = {}
for name, val in kwargs.items():
self.add_schema_collection(name, val)
def add_schema_collection(self, name: str, val: SchemaCollection) -> None:
"""Store :class:`SchemaCollection` instances in a dictionary. If a schema with the same name as ``name`` is
already stored, it is overridden.
:param name: Schema collection name.
:param val: :class:`SchemaCollection` instance.
"""
if not isinstance(val, SchemaCollection):
raise TypeError('val must be a SchemaCollection instance.')
self.schema_collections[name] = val
| 39.517241 | 119 | 0.681792 | 839 | 6,876 | 5.445769 | 0.281287 | 0.059532 | 0.027577 | 0.015758 | 0.181659 | 0.150361 | 0.150361 | 0.122784 | 0.109433 | 0.109433 | 0 | 0.002077 | 0.22993 | 6,876 | 173 | 120 | 39.745665 | 0.860623 | 0.547993 | 0 | 0.037736 | 0 | 0 | 0.073602 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.150943 | false | 0.037736 | 0.132075 | 0 | 0.471698 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ab79890c8715d44cb5a582ad20c54a470f52918 | 1,259 | py | Python | turngeneration/forms.py | jbradberry/django-turn-generation | dbfec9d0addbff2d8d54597b7520e171938c9107 | [
"MIT"
] | null | null | null | turngeneration/forms.py | jbradberry/django-turn-generation | dbfec9d0addbff2d8d54597b7520e171938c9107 | [
"MIT"
] | null | null | null | turngeneration/forms.py | jbradberry/django-turn-generation | dbfec9d0addbff2d8d54597b7520e171938c9107 | [
"MIT"
] | 1 | 2019-12-12T19:36:15.000Z | 2019-12-12T19:36:15.000Z | from django.contrib.contenttypes.models import ContentType
from django import forms
from . import models
class PauseForm(forms.ModelForm):
class Meta:
model = models.Pause
fields = ('reason',)
def clean(self):
cleaned_data = super(PauseForm, self).clean()
if not self.instance.generator.allow_pauses:
raise forms.ValidationError("Pauses are not enabled.")
if models.Pause.objects.filter(
generator=self.instance.generator,
content_type=ContentType.objects.get_for_model(self.instance.agent),
object_id=self.instance.agent.pk
).exists():
raise forms.ValidationError("You have already paused.")
return cleaned_data
class ReadyForm(forms.ModelForm):
class Meta:
model = models.Ready
fields = ()
def clean(self):
cleaned_data = super(ReadyForm, self).clean()
if models.Ready.objects.filter(
generator=self.instance.generator,
content_type=ContentType.objects.get_for_model(self.instance.agent),
object_id=self.instance.agent.pk
).exists():
raise forms.ValidationError("You are already marked as ready.")
return cleaned_data
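# A minimal usage sketch for the forms above (hypothetical view wiring; the
# ``generator`` and ``agent`` objects are assumed to be resolved by the caller):
#
#     form = PauseForm(request.POST,
#                      instance=models.Pause(generator=generator, agent=agent))
#     if form.is_valid():
#         form.save()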
| 29.27907 | 80 | 0.652105 | 141 | 1,259 | 5.730496 | 0.368794 | 0.10396 | 0.084158 | 0.056931 | 0.55198 | 0.55198 | 0.398515 | 0.398515 | 0.398515 | 0.398515 | 0 | 0 | 0.253376 | 1,259 | 42 | 81 | 29.97619 | 0.859574 | 0 | 0 | 0.451613 | 0 | 0 | 0.067514 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.096774 | 0 | 0.354839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ab98a6d7dd06d056ddf62a198dd9ded5f6d2846 | 2,047 | py | Python | bot.py | technicalwritingEditor/Discord2FB-Bot | e5e10329ea2056adf451f352c4d582dc281281d1 | [
"MIT"
] | 3 | 2018-06-03T00:58:27.000Z | 2021-10-06T09:41:11.000Z | bot.py | technicalwritingEditor/Discord2FB-Bot | e5e10329ea2056adf451f352c4d582dc281281d1 | [
"MIT"
] | null | null | null | bot.py | technicalwritingEditor/Discord2FB-Bot | e5e10329ea2056adf451f352c4d582dc281281d1 | [
"MIT"
] | 1 | 2018-07-13T19:45:33.000Z | 2018-07-13T19:45:33.000Z | import discord
from discord.ext import commands
import facebook
from discord.ext.commands import Bot
Client = discord.Client()
bot = commands.Bot(command_prefix = "/") #Tells what the prefix before every command should be.
@bot.event
async def on_ready():
'''
Confirmation that the bot is now active.
Every time you run the script, this
message is printed as a confirmation.
'''
print("Confirmation that "+ bot.user.name + "(" + bot.user.id + ") is running now for you!")
@bot.command(pass_context=True)
async def post(ctx, link_post: str):
'''
Command to help share a post on facebook.
Make sure to pass the link of the post as an argument
example: /post facebook.com
MAKE SURE YOUR ACCESS TOKEN IS FOR V2.7
'''
graph = facebook.GraphAPI(access_token = "<ENTER YOUR ACCESS TOKEN HERE>", version="2.7") #Creating object for GraphAPI.
graph.put_object(parent_object = "me", connection_name = "feed", message = "#PyconIndia #PyconIndia2018 #PyCon :D ", link = link_post) #Posts on your behalf on facebook.
await bot.say(":smiley: Done posting it! Glad to help you! :smiley:") #Sends message to user on discord once posted
@bot.command()
async def info():
'''
Gives necessary info about the bot
'''
embed = discord.Embed(title="FBPost Bot", description = "Post on Facebook from Discord using this bot!")
embed.add_field(name="Author", value="Rahul Arulkumaran")
await bot.say(embed=embed)
bot.remove_command('help')
@bot.command()
async def help():
'''
Gives the list and highlights
of what each bot command does
'''
embed = discord.Embed(title="FBPost Bot", description = "Post on Facebook from Discord using this bot!")
embed.add_field(name="/post <post_link>", value="This command posts the given link as your status on Facebook!")
embed.add_field(name="/info", value="Gives a little info about the bot", inline=False)
embed.add_field(name="/help", value="Gives this message", inline=False)
await bot.say(embed=embed)
if(__name__ == "__main__"):
bot.run("<ADD_YOUR_BOT_TOKEN>") #Do not change this token
| 37.218182 | 170 | 0.72936 | 318 | 2,047 | 4.613208 | 0.408805 | 0.034083 | 0.035446 | 0.046353 | 0.159509 | 0.130879 | 0.130879 | 0.130879 | 0.130879 | 0.130879 | 0 | 0.004598 | 0.149976 | 2,047 | 54 | 171 | 37.907407 | 0.838506 | 0.089888 | 0 | 0.206897 | 0 | 0 | 0.332406 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.034483 | 0.137931 | 0 | 0.137931 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ab9ac6dd4bb10c4ea54f939af124df3ccd014f8 | 561 | py | Python | tool/click_demo.py | KEVINYZY/python-tutorial | ae43536908eb8af56c34865f52a6e8644edc4fa3 | [
"Apache-2.0"
] | 2 | 2021-01-04T10:44:44.000Z | 2022-02-13T07:53:41.000Z | tool/click_demo.py | zm79287/python-tutorial | d0f7348e1da4ff954e3add66e1aae55d599283ee | [
"Apache-2.0"
] | null | null | null | tool/click_demo.py | zm79287/python-tutorial | d0f7348e1da4ff954e3add66e1aae55d599283ee | [
"Apache-2.0"
] | 2 | 2019-02-28T07:53:30.000Z | 2021-07-28T07:11:20.000Z | # -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief:
import click
@click.command()
@click.option('--count', default=1, help="num")
@click.option('--rate', type=float, help='rate')
@click.option('--gender', type=click.Choice(['man', 'woman']), default='man', help='select sex')
@click.option('--center', type=str, nargs=2, help='center of circle')
def hello(count, rate, gender, center):
print("count num:", count)
print("rate:", rate)
print("gender:", gender)
print("center:", center)
if __name__ == "__main__":
hello()
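# Example invocation (assuming this file is saved as click_demo.py):
#
#   $ python click_demo.py --count 2 --rate 0.5 --gender woman --center 1 2
#   count num: 2
#   rate: 0.5
#   gender: woman
#   center: ('1', '2')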
| 25.5 | 96 | 0.632799 | 74 | 561 | 4.689189 | 0.527027 | 0.126801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012371 | 0.135472 | 561 | 21 | 97 | 26.714286 | 0.703093 | 0.110517 | 0 | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.153846 | 0.307692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1abbc4f259e0af349f90eba1f861bc51de4ccaec | 2,780 | py | Python | vertigo/datasets/fetchers.py | rmarkello/vertigo | 35c79faf3a62b9b3941f0c989640c2f5de8f819e | [
"Apache-2.0"
] | null | null | null | vertigo/datasets/fetchers.py | rmarkello/vertigo | 35c79faf3a62b9b3941f0c989640c2f5de8f819e | [
"Apache-2.0"
] | null | null | null | vertigo/datasets/fetchers.py | rmarkello/vertigo | 35c79faf3a62b9b3941f0c989640c2f5de8f819e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Functions for fetching datasets from the internet
"""
from collections import namedtuple
import os.path as op
from .osf import _get_data_dir, _get_dataset_info
from .utils import _fetch_files
ANNOT = namedtuple('Surface', ('lh', 'rh'))
def fetch_fsaverage(version='fsaverage', data_dir=None, url=None, resume=True,
verbose=1):
"""
Downloads files for fsaverage FreeSurfer template
Parameters
----------
version : str, optional
One of {'fsaverage', 'fsaverage3', 'fsaverage4', 'fsaverage5',
'fsaverage6'}. Default: 'fsaverage'
data_dir : str or os.PathLike, optional
Path to use as data directory. If not specified, will check for
environmental variable 'NNT_DATA'; if that is not set, will use
`~/nnt-data` instead. Default: None
url : str, optional
URL from which to download data. Default: None
resume : bool, optional
Whether to attempt to resume partial download, if possible. Default:
True
verbose : int, optional
Modifies verbosity of download, where higher numbers mean more updates.
Default: 1
Returns
-------
filenames : dict
Dictionary with keys ['surf'] where corresponding values are length-2
lists of downloaded template files (each list composed of files for the
left and right hemisphere).
References
----------
"""
from ..freesurfer import _check_fs_subjid # avoid circular import
versions = [
'fsaverage', 'fsaverage3', 'fsaverage4', 'fsaverage5', 'fsaverage6'
]
if version not in versions:
raise ValueError('The version of fsaverage requested "{}" does not '
'exist. Must be one of {}'.format(version, versions))
dataset_name = 'tpl-fsaverage'
keys = ['orig', 'white', 'smoothwm', 'pial', 'inflated', 'sphere']
data_dir = _get_data_dir(data_dir=data_dir)
info = _get_dataset_info(dataset_name)[version]
if url is None:
url = info['url']
opts = {
'uncompress': True,
'md5sum': info['md5'],
'move': '{}.tar.gz'.format(dataset_name)
}
filenames = [
op.join(version, 'surf', '{}.{}'.format(hemi, surf))
for surf in keys for hemi in ['lh', 'rh']
]
try:
data_dir = _check_fs_subjid(version)[1]
data = [op.join(data_dir, f) for f in filenames]
except FileNotFoundError:
data = _fetch_files(data_dir, resume=resume, verbose=verbose,
files=[(op.join(dataset_name, f), url, opts)
for f in filenames])
data = [ANNOT(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
return dict(zip(keys, data))
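# A minimal usage sketch (the 'pial' key comes from the ``keys`` list above;
# the exact file paths depend on the resolved data directory):
#
#     surfaces = fetch_fsaverage(version='fsaverage5')
#     lh_pial, rh_pial = surfaces['pial']  # each value is an ANNOT(lh, rh) namedtuple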
| 31.235955 | 79 | 0.611871 | 337 | 2,780 | 4.940653 | 0.439169 | 0.042042 | 0.012012 | 0.046847 | 0.058859 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009369 | 0.270504 | 2,780 | 88 | 80 | 31.590909 | 0.811637 | 0.375899 | 0 | 0 | 0 | 0 | 0.147826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.131579 | 0 | 0.184211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1abc0cd17b3be692c4ae6a95012e1e744129a64f | 6,798 | py | Python | rdkit/ML/ModelPackage/UnitTestPackage.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 1,609 | 2015-01-05T02:41:13.000Z | 2022-03-30T21:57:24.000Z | rdkit/ML/ModelPackage/UnitTestPackage.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 3,412 | 2015-01-06T12:13:33.000Z | 2022-03-31T17:25:41.000Z | rdkit/ML/ModelPackage/UnitTestPackage.py | bp-kelley/rdkit | e0de7c9622ce73894b1e7d9568532f6d5638058a | [
"BSD-3-Clause"
] | 811 | 2015-01-11T03:33:48.000Z | 2022-03-28T11:57:49.000Z | #
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
""" unit tests for the model and descriptor packager """
import os
import random
import unittest
from xml.dom import minidom
from xml.etree import ElementTree as ET
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem import Descriptors
from rdkit.ML.Composite import Composite
from rdkit.ML.Data import DataUtils
from rdkit.ML.Descriptors.MoleculeDescriptors import MolecularDescriptorCalculator
from rdkit.ML.ModelPackage import Packager, PackageUtils
from rdkit.ML.ModelPackage.Packager import ModelPackage
from io import BytesIO
import pickle
def feq(a, b, tol=1e-4):
return abs(a - b) <= tol
class TestCase(unittest.TestCase):
def setUp(self):
self.dataDir = os.path.join(RDConfig.RDCodeDir, 'ML/ModelPackage/test_data')
self.testD = [
# NOTE: the confidences here can be twitchy due to changes in descriptors:
('Fc1ccc(NC(=O)c2cccnc2Oc3cccc(c3)C(F)(F)F)c(F)c1', 0, 0.8),
# (r'CN/1(=C\C=C(/C=C1)\C\2=C\C=N(C)(Cl)\C=C2)Cl',0,0.70),
(r'NS(=O)(=O)c1cc(ccc1Cl)C2(O)NC(=O)c3ccccc32', 1, 0.70),
]
def _loadPackage(self):
with open(os.path.join(self.dataDir, 'Jan9_build3_pkg.pkl'), 'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
io = BytesIO(buf)
pkg = pickle.load(io)
return pkg
def _verify(self, pkg, testD):
for smi, pred, conf in testD:
m = Chem.MolFromSmiles(smi)
self.assertTrue(m is not None, 'SMILES: %s failed\n' % (smi))
p, c = pkg.Classify(m)
assert p == pred, 'bad prediction (%d) for smiles %s' % (p, smi)
assert feq(c, conf), 'bad confidence (%f) for smiles %s' % (c, smi)
def _verify2(self, pkg, testD):
for smi, pred, conf in testD:
m = Chem.MolFromSmiles(smi)
self.assertTrue(m is not None, 'SMILES: %s failed\n' % (smi))
p, c = pkg.Classify(m)
assert p == pred, 'bad prediction (%d) for smiles %s' % (p, smi)
assert feq(c, conf), 'bad confidence (%f) for smiles %s' % (c, smi)
p, c = pkg.Classify(m)
assert p == pred, 'bad prediction (%d) for smiles %s' % (p, smi)
assert feq(c, conf), 'bad confidence (%f) for smiles %s' % (c, smi)
def testBuild(self):
# """ tests building and screening a packager """
with open(os.path.join(self.dataDir, 'Jan9_build3_calc.dsc'), 'r') as calcTF:
buf = calcTF.read().replace('\r\n', '\n').encode('utf-8')
calcTF.close()
calc = pickle.load(BytesIO(buf))
with open(os.path.join(self.dataDir, 'Jan9_build3_model.pkl'), 'rb') as modelF:
model = pickle.load(modelF)
pkg = Packager.ModelPackage(descCalc=calc, model=model)
self._verify(pkg, self.testD)
def testLoad(self):
# """ tests loading and screening a packager """
pkg = self._loadPackage()
self._verify(pkg, self.testD)
def testLoad2(self):
# """ tests loading and screening a packager 2 """
pkg = self._loadPackage()
self._verify2(pkg, self.testD)
def testPerm1(self):
# """ tests the descriptor remapping stuff in a packager """
pkg = self._loadPackage()
calc = pkg.GetCalculator()
names = calc.GetDescriptorNames()
ref = {}
DataUtils.InitRandomNumbers((23, 42))
for smi, _, _ in self.testD:
for desc in names:
fn = getattr(Descriptors, desc, lambda x: 777)
m = Chem.MolFromSmiles(smi)
ref[desc] = fn(m)
for _ in range(5):
perm = list(names)
random.shuffle(perm, random=random.random)
m = Chem.MolFromSmiles(smi)
for desc in perm:
fn = getattr(Descriptors, desc, lambda x: 777)
val = fn(m)
assert feq(val, ref[desc], 1e-4), '%s: %s(%s): %f!=%f' % (str(perm), smi, desc, val,
ref[desc])
def testPerm2(self):
# """ tests the descriptor remapping stuff in a packager """
pkg = self._loadPackage()
calc = pkg.GetCalculator()
names = calc.GetDescriptorNames()
DataUtils.InitRandomNumbers((23, 42))
perm = list(names)
random.shuffle(perm, random=random.random)
calc.simpleList = perm
calc.descriptorNames = perm
pkg.Init()
self._verify(pkg, self.testD)
def test_ModelPackage(self):
pkg = self._loadPackage()
self.assertTrue(isinstance(pkg.GetCalculator(), MolecularDescriptorCalculator))
pkg.SetCalculator('calculator')
self.assertEqual(pkg.GetCalculator(), 'calculator')
self.assertTrue(isinstance(pkg.GetModel(), Composite.Composite))
pkg.SetModel('model')
self.assertEqual(pkg.GetModel(), 'model')
self.assertEqual(pkg.GetDataset(), None)
pkg.SetDataset('dataset')
self.assertEqual(pkg.GetDataset(), 'dataset')
self.assertEqual(pkg.GetNotes(), 'General purpose model built from PhysProp data')
pkg.SetNotes('notes')
self.assertEqual(pkg.GetNotes(), 'notes')
# There seems to be a difference between Python 2 and 3. The next assert works in Python 3,
# but fails in Python 2
# self.assertFalse(hasattr(pkg, '_supplementalData'))
self.assertEqual(pkg.GetSupplementalData(), [])
self.assertTrue(hasattr(pkg, '_supplementalData'))
delattr(pkg, '_supplementalData')
pkg.AddSupplementalData('supp1')
self.assertTrue(hasattr(pkg, '_supplementalData'))
self.assertEqual(pkg.GetSupplementalData(), ['supp1'])
pkg.AddSupplementalData('supp2')
self.assertEqual(pkg.GetSupplementalData(), ['supp1', 'supp2'])
pkg = ModelPackage()
self.assertFalse(pkg._initialized)
pkg.Init()
self.assertFalse(pkg._initialized)
def test_PackageUtils(self):
pkg = self._loadPackage()
xml = PackageUtils.PackageToXml(
pkg, dataPerformance=[('label', ['accuracy', 'avgCorrect', 'avgIncorrect']), ],
recommendedThreshold=0.2, classDescriptions=[('a', 'texta'), ('b', 'textb')],
modelType='model type', modelOrganism='model organism')
s = prettyXML(xml.getroot())
self.assertIn('<RDModelInfo>', s)
def prettyXML(xml):
s = ET.tostring(xml, encoding='utf-8')
tree = minidom.parseString(s)
return tree.toprettyxml(indent=' ')
if __name__ == '__main__': # pragma: nocover
unittest.main()
| 38.191011 | 104 | 0.597823 | 811 | 6,798 | 4.963009 | 0.286067 | 0.017391 | 0.040248 | 0.02087 | 0.354783 | 0.325217 | 0.306584 | 0.24 | 0.228075 | 0.175155 | 0 | 0.016084 | 0.268314 | 6,798 | 177 | 105 | 38.40678 | 0.793124 | 0.101059 | 0 | 0.328358 | 0 | 0.007463 | 0.119521 | 0.022164 | 0 | 0 | 0 | 0 | 0.186567 | 1 | 0.097015 | false | 0 | 0.11194 | 0.007463 | 0.238806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1abce33b89f98cb52f1c4e73fa1fee092929d3eb | 9,545 | py | Python | plugins/logger/standardout/standard_out.py | Ghostkeeper/Luna | 0dfc8694538c9d1ad3941602de2d4b6b815db657 | [
"CC0-1.0"
] | null | null | null | plugins/logger/standardout/standard_out.py | Ghostkeeper/Luna | 0dfc8694538c9d1ad3941602de2d4b6b815db657 | [
"CC0-1.0"
] | null | null | null | plugins/logger/standardout/standard_out.py | Ghostkeeper/Luna | 0dfc8694538c9d1ad3941602de2d4b6b815db657 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Implements the logger plug-in interface.
"""
import ctypes #For printing in colour on Windows machines.
import datetime #For putting timestamps alongside each message.
import standardout.buffer_info #To store the state of the console on Windows.
#Set up the default state of the Windows StdOut handle.
try:
_win_kernel = ctypes.WinDLL("kernel32")
except AttributeError:
_win_kernel = None
if _win_kernel: #We're on Windows with a usable kernel32.
_standard_out_handle = _win_kernel.GetStdHandle(-11) #-11 is the flag for standard output in the Windows API.
_default_console_attributes = standardout.buffer_info.BufferInfo() #GetConsoleScreenBufferInfo needs the handle plus an output buffer, not the flag.
_win_kernel.GetConsoleScreenBufferInfo(_standard_out_handle, ctypes.byref(_default_console_attributes))
#Test whether changing the colour actually works.
_original_buffer_state = standardout.buffer_info.BufferInfo()
_win_kernel.GetConsoleScreenBufferInfo(_standard_out_handle, ctypes.byref(_original_buffer_state))
_win_kernel.SetConsoleTextAttribute(_standard_out_handle, 1) #Change to dark blue so we know it's not equal to the user's preferred state.
_pre_buffer_state = standardout.buffer_info.BufferInfo()
_win_kernel.GetConsoleScreenBufferInfo(_standard_out_handle, ctypes.byref(_pre_buffer_state))
_win_kernel.SetConsoleTextAttribute(_standard_out_handle, 4) #Change to dark red.
_post_buffer_state = standardout.buffer_info.BufferInfo()
_win_kernel.GetConsoleScreenBufferInfo(_standard_out_handle, ctypes.byref(_post_buffer_state))
_win_kernel.SetConsoleTextAttribute(_standard_out_handle, _original_buffer_state.wAttributes) #Change back to user's preference.
if _pre_buffer_state.wAttributes == _post_buffer_state.wAttributes: #Didn't change.
_win_kernel = None #We have the Windows kernel, but this terminal doesn't support changes to it. Fall back to ANSI.
def critical(message, title="Critical", stack_trace=None, exception=None):
"""
Logs a new critical message.
A timestamp is added alongside the message. If a title is provided, it is
written before the message. If a stack trace is provided, it is printed
after the message.
:param message: The message string.
:param title: A header for the message.
:param stack_trace: A trace of the call stack where the message originated,
as a list of ``FrameInfo`` objects, most recent frame first.
:param exception: An exception that was raised, if any.
"""
formatted = datetime.datetime.strftime(datetime.datetime.now(), "[%H:%M:%S] ") #Format the date and time.
if title != "Critical": #Only include the title if it is special, because the default is already indicated by the colour.
formatted += title + ": "
formatted += message
_colour_print(formatted, "magenta")
if stack_trace:
_print_stack_trace(stack_trace, exception)
def debug(message, title="Debug", stack_trace=None, exception=None):
"""
Logs a new debug message.
A timestamp is added alongside the message. If a title is provided, it is
written before the message. If a stack trace is provided, it is printed
after the message.
:param message: The message string.
:param title: A header for the message.
:param stack_trace: A trace of the call stack where the message originated,
as a list of ``FrameInfo`` objects, most recent frame first.
:param exception: An exception that was raised, if any.
"""
formatted = datetime.datetime.strftime(datetime.datetime.now(), "[%H:%M:%S] ") #Format the date and time.
if title != "Debug": #Only include the title if it is special, because the default is already indicated by the colour.
formatted += title + ": "
formatted += message
_colour_print(formatted, "blue")
if stack_trace:
_print_stack_trace(stack_trace, exception)
def error(message, title="Error", stack_trace=None, exception=None):
"""
Logs a new error message.
A timestamp is added alongside the message. If a title is provided, it is
written before the message. If a stack trace is provided, it is printed
after the message.
:param message: The message string.
:param title: A header for the message.
:param stack_trace: A trace of the call stack where the message originated,
as a list of ``FrameInfo`` objects, most recent frame first.
:param exception: An exception that was raised, if any.
"""
formatted = datetime.datetime.strftime(datetime.datetime.now(), "[%H:%M:%S] ") #Format the date and time.
if title != "Error": #Only include the title if it is special, because the default is already indicated by the colour.
formatted += title + ": "
formatted += message
_colour_print(formatted, "red")
if stack_trace:
_print_stack_trace(stack_trace, exception)
def info(message, title="Information", stack_trace=None, exception=None):
"""
Logs a new information message.
A timestamp is added alongside the message. If a title is provided, it is
written before the message. If a stack trace is provided, it is printed
after the message.
:param message: The message string.
:param title: A header for the message.
:param stack_trace: A trace of the call stack where the message originated,
as a list of ``FrameInfo`` objects, most recent frame first.
:param exception: An exception that was raised, if any.
"""
formatted = datetime.datetime.strftime(datetime.datetime.now(), "[%H:%M:%S] ") #Format the date and time.
if title != "Information": #Only include the title if it is special, because the default is already indicated by the colour.
formatted += title + ": "
formatted += message
_colour_print(formatted, "green")
if stack_trace:
_print_stack_trace(stack_trace, exception)
def warning(message, title="Warning", stack_trace=None, exception=None):
"""
Logs a new warning message.
A timestamp is added alongside the message. If a title is provided, it is
written before the message. If a stack trace is provided, it is printed
after the message.
:param message: The message string.
:param title: A header for the message.
:param stack_trace: A trace of the call stack where the message originated,
as a list of ``FrameInfo`` objects, most recent frame first.
:param exception: An exception that was raised, if any.
"""
formatted = datetime.datetime.strftime(datetime.datetime.now(), "[%H:%M:%S] ") #Format the date and time.
if title != "Warning": #Only include the title if it is special, because the default is already indicated by the colour.
formatted += title + ": "
formatted += message
_colour_print(formatted, "yellow")
if stack_trace:
_print_stack_trace(stack_trace, exception)
_win_colour_codes = {
"red": 12,
"yellow": 14,
"green": 10,
"cyan": 11,
"blue": 9,
"magenta": 13,
"dark_red": 4,
"dark_yellow": 6,
"dark_green": 2,
"dark_cyan": 3,
"dark_blue": 1,
"dark_magenta": 5,
"black": 0,
"dark_grey": 8,
"light_grey": 7,
"white": 15
}
"""
The colour codes for I/O streams in the Windows API.
"""
_ansi_colour_codes = {
"red": "\033[38m",
"yellow": "\033[33m",
"green": "\033[32m",
"cyan": "\033[36m",
"blue": "\033[34m",
"magenta": "\033[35m",
"black": "\033[30m",
"white": "\033[37m"
}
"""
The colour codes in the ANSI-specification for escape codes.
"""
def _colour_print(message, colour="default"):
"""
Prints a message with specified colour-coding.
The colour needs to be in the ``_win_colour_codes`` dictionary as well as
the ``_ansi_colour_codes`` dictionary in order to be supported by both
terminals. If a colour code is provided that is not supported by a terminal,
that message will show up in the default colour for that terminal.
:param message: The text to print.
:param colour: The colour of the message to display. If the colour is not
supported, the default colour is used.
"""
if _win_kernel:
buffer_state = standardout.buffer_info.BufferInfo()
_win_kernel.GetConsoleScreenBufferInfo(_standard_out_handle, ctypes.byref(buffer_state)) #Store the old state of the output channel so we can restore it afterwards.
if colour in _win_colour_codes:
_win_kernel.SetConsoleTextAttribute(_standard_out_handle, _win_colour_codes[colour]) #Set the colour of the terminal to the desired colour.
#Else, don't set the colour (so it stays default).
print(message)
_win_kernel.SetConsoleTextAttribute(_standard_out_handle, buffer_state.wAttributes) #Reset to old state.
else: #Hope we have an ANSI-enabled console.
if colour in _ansi_colour_codes:
print(_ansi_colour_codes[colour] + message + "\033[m") #Start code, then message, then revert to default colour.
else:
print(message) #Stay on default colour.
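# A minimal usage sketch for the helper above (colour names come from the
# ``_win_colour_codes``/``_ansi_colour_codes`` dictionaries defined earlier):
#
#     _colour_print("Build finished.", "green")  # coloured on both back-ends
#     _colour_print("Plain message")             # unknown/default colour falls through unchanged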
def _print_stack_trace(stack_trace, exception=None):
"""
Prints a formatted stack trace.
The stack trace is formatted similarly to how Python formats its stack
trace.
:param stack_trace: A stack trace, as a list of ``FrameInfo`` objects
resulting from ``inspect.getouterframes`` or ``inspect.getinnerframes``,
most recent frame first.
:param exception: An exception that was raised, if any.
"""
print("Stack trace:")
for frame in stack_trace:
print("\tFile \"{file_name}\", line {line_number}, in {function}".format(file_name=frame.filename, line_number=frame.lineno, function=frame.function))
for line in frame.code_context:
print("\t\t" + line.strip())
if exception:
print("\t" + exception.__class__.__name__ + ": " + str(exception)) | 42.995495 | 227 | 0.753064 | 1,418 | 9,545 | 4.929478 | 0.188999 | 0.055794 | 0.02432 | 0.018598 | 0.58598 | 0.58226 | 0.550072 | 0.550072 | 0.499285 | 0.492704 | 0 | 0.00997 | 0.148874 | 9,545 | 222 | 228 | 42.995496 | 0.850443 | 0.531378 | 0 | 0.3 | 0 | 0 | 0.111032 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063636 | false | 0 | 0.027273 | 0 | 0.090909 | 0.172727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1abe388e69a825f7770361eb67cf1c3461ead767 | 2,168 | py | Python | notes/algo-ds-practice/problems/dp/double_helix.py | Anmol-Singh-Jaggi/interview-notes | 65af75e2b5725894fa5e13bb5cd9ecf152a0d652 | [
"MIT"
] | 6 | 2020-07-05T05:15:19.000Z | 2021-01-24T20:17:14.000Z | notes/algo-ds-practice/problems/dp/double_helix.py | Anmol-Singh-Jaggi/interview-notes | 65af75e2b5725894fa5e13bb5cd9ecf152a0d652 | [
"MIT"
] | null | null | null | notes/algo-ds-practice/problems/dp/double_helix.py | Anmol-Singh-Jaggi/interview-notes | 65af75e2b5725894fa5e13bb5cd9ecf152a0d652 | [
"MIT"
] | 2 | 2020-09-14T06:46:37.000Z | 2021-06-15T09:17:21.000Z | '''
Two finite, strictly increasing, integer sequences are given.
Any common integer between the two sequences constitute an intersection point.
Take for example the following two sequences where intersection points are printed in bold:
First= 3 5 7 9 20 25 30 40 55 56 57 60 62
Second= 1 4 7 11 14 25 44 47 55 57 100
You can "walk" over these two sequences in the following way:
You may start at the beginning of any of the two sequences. Now start moving forward.
At each intersection point, you have the choice of either continuing with the same sequence you’re currently on, or switching to the other sequence.
The objective is finding a path that produces the maximum sum of data you walked over.
In the above example, the largest possible sum is 450, which is the result of adding 3, 5, 7, 9, 20, 25, 44, 47, 55, 56, 57, 60,and 62.
SOLUTION:
Use DP:
Let arrs[0] be the first array and arrs[1] be the second.
If arrs[k][i] is present at index 'idx' in arrs[1-k], then
we can either jump or not jump:
ans[k][i] = max(ans[k][i+1], ans[1-k][idx+1]) + arrs[k][i]
else:
ans[k][i] = ans[k][i+1] + arrs[k][i]
'''
import sys
def double_helix(arrs, cache, indices, k, i):
if i >= len(arrs[k]):
return 0
elem = arrs[k][i]
if cache[k][i] is not None:
return cache[k][i]
ans = None
if elem in indices[1 - k]:
ans1 = double_helix(arrs, cache, indices, k, i + 1)
ans2 = double_helix(arrs, cache, indices, 1 - k, indices[1 - k][elem] + 1)
ans = max(ans1, ans2) + elem
else:
ans = double_helix(arrs, cache, indices, k, i + 1) + elem
cache[k][i] = ans
return ans
def main():
sys.setrecursionlimit(10000000)
arr1 = [1, 3, 4, 5]
arr2 = [5, 6, 7, 8, 9, 10]
arrs = [arr1, arr2]
cache = [{}, {}]
indices = [{}, {}]
for i in range(len(arr1)):
indices[0][arr1[i]] = i
cache[0][i] = None
for i in range(len(arr2)):
indices[1][arr2[i]] = i
cache[1][i] = None
ans1 = double_helix(arrs, cache, indices, 0, 0)
ans2 = double_helix(arrs, cache, indices, 1, 0)
ans = max(ans1, ans2)
print(ans)
if __name__ == "__main__":
main()
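# Expected output for the arrays in main() above: 53. Hand check: walk arr1
# (1 + 3 + 4 + 5), switch at the shared value 5 into arr2, and continue with
# 6 + 7 + 8 + 9 + 10.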
| 31.42029 | 152 | 0.627768 | 377 | 2,168 | 3.578249 | 0.371353 | 0.017791 | 0.066716 | 0.088955 | 0.171979 | 0.151223 | 0.113417 | 0.044477 | 0 | 0 | 0 | 0.076308 | 0.250461 | 2,168 | 68 | 153 | 31.882353 | 0.752615 | 0.506458 | 0 | 0 | 0 | 0 | 0.00754 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.028571 | 0 | 0.171429 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1abfc9f54d479adbe3c36baf00a9a7fac7331057 | 40,724 | py | Python | libcst/matchers/_matcher_base.py | dendisuhubdy/LibCST | 1acf2c83d132d4f665dd5de3684330463cbd361e | [
"Apache-2.0"
] | null | null | null | libcst/matchers/_matcher_base.py | dendisuhubdy/LibCST | 1acf2c83d132d4f665dd5de3684330463cbd361e | [
"Apache-2.0"
] | null | null | null | libcst/matchers/_matcher_base.py | dendisuhubdy/LibCST | 1acf2c83d132d4f665dd5de3684330463cbd361e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import collections.abc
import re
from dataclasses import fields
from enum import Enum, auto
from typing import (
Callable,
Dict,
Generic,
List,
Mapping,
NoReturn,
Optional,
Pattern,
Sequence,
Type,
TypeVar,
Union,
cast,
)
import libcst
import libcst.metadata as meta
from libcst import MaybeSentinel, RemovalSentinel
class DoNotCareSentinel(Enum):
"""
A sentinel that is used in matcher classes to indicate that a caller
does not care what this value is. We recommend that you do not use this
directly, and instead use the :func:`DoNotCare` helper. You do not
need to use this for concrete matcher attributes since :func:`DoNotCare`
is already the default.
"""
DEFAULT = auto()
def __repr__(self) -> str:
return "DoNotCare()"
_MatcherT = TypeVar("_MatcherT", covariant=True)
_CallableT = TypeVar("_CallableT", bound="Callable", covariant=True)
_BaseMatcherNodeSelfT = TypeVar("_BaseMatcherNodeSelfT", bound="BaseMatcherNode")
_OtherNodeT = TypeVar("_OtherNodeT")
_MetadataValueT = TypeVar("_MetadataValueT")
_METADATA_MISSING_SENTINEL = object()
class BaseMatcherNode:
"""
Base class that all concrete matchers subclass from. :class:`OneOf` and
:class:`AllOf` also subclass from this in order to allow them to be used in
any place that a concrete matcher is allowed. This means that, for example,
you can call :func:`matches` with a concrete matcher, or a :class:`OneOf` with
several concrete matchers as options.
"""
def __or__(
self: _BaseMatcherNodeSelfT, other: _OtherNodeT
) -> "OneOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]]":
# Without a cast, pyre thinks that the below OneOf is type OneOf[object]
# even though it has the types passed into it.
return cast(
OneOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]], OneOf(self, other)
)
def __and__(
self: _BaseMatcherNodeSelfT, other: _OtherNodeT
) -> "AllOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]]":
# Without a cast, pyre thinks that the below AllOf is type AllOf[object]
# even though it has the types passed into it.
return cast(
AllOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]], AllOf(self, other)
)
def __invert__(self: _BaseMatcherNodeSelfT) -> "_BaseMatcherNodeSelfT":
return cast(_BaseMatcherNodeSelfT, InverseOf(self))
def DoNotCare() -> DoNotCareSentinel:
"""
Used when you want to match exactly one node, but you do not care what node it is.
Useful inside sequences such as a :class:`libcst.matchers.Call`'s args attribute.
You do not need to use this for concrete matcher attributes since :func:`DoNotCare`
is already the default.
For example, the following matcher would match against any function calls with
three arguments, regardless of the arguments themselves and regardless of the
function name that we were calling::
m.Call(args=[m.DoNotCare(), m.DoNotCare(), m.DoNotCare()])
"""
return DoNotCareSentinel.DEFAULT
class OneOf(Generic[_MatcherT], BaseMatcherNode):
"""
Matcher that matches any one of its options. Useful when you want to match
against one of several options for a single node. You can also construct a
:class:`OneOf` matcher by using Python's bitwise or operator with concrete
matcher classes.
For example, you could match against ``True``/``False`` like::
m.OneOf(m.Name("True"), m.Name("False"))
Or you could use the shorthand, like::
m.Name("True") | m.Name("False")
Note that a :class:`OneOf` matcher can be used anywhere you are defining
a matcher attribute. So, an alternate form to the first example looks like::
m.Name(m.OneOf("True", "False"))
A downside to the alternate form is that you can no longer use Python's
bitwise or operator to construct the :class:`OneOf` since it is not defined
for strings. However, the upside is that it is more concise. We do not
recommend any one form over the other, and leave it up to you to decide what
is best for your use case.
"""
def __init__(self, *options: Union[_MatcherT, "OneOf[_MatcherT]"]) -> None:
actual_options: List[_MatcherT] = []
for option in options:
if isinstance(option, AllOf):
raise Exception("Cannot use AllOf and OneOf in combination!")
elif isinstance(option, OneOf):
actual_options.extend(option.options)
else:
actual_options.append(option)
if len(actual_options) < 2:
raise Exception("Must provide at least two options to OneOf!")
self._options: Sequence[_MatcherT] = tuple(actual_options)
@property
def options(self) -> Sequence[_MatcherT]:
"""
The normalized list of options that we can choose from to satisfy a
:class:`OneOf` matcher. If any of these matchers are true, the
:class:`OneOf` matcher will also be considered a match.
"""
return self._options
def __or__(self, other: _OtherNodeT) -> "OneOf[Union[_MatcherT, _OtherNodeT]]":
# Without a cast, pyre thinks that the below OneOf is type OneOf[object]
# even though it has the types passed into it.
return cast(OneOf[Union[_MatcherT, _OtherNodeT]], OneOf(self, other))
def __and__(self, other: _OtherNodeT) -> NoReturn:
raise Exception("Cannot use AllOf and OneOf in combination!")
def __invert__(self) -> "AllOf[_MatcherT]":
# Invert using De Morgan's Law so we don't have to complicate types.
return cast(AllOf[_MatcherT], AllOf(*[DoesNotMatch(m) for m in self._options]))
def __repr__(self) -> str:
return f"OneOf({', '.join([repr(o) for o in self._options])})"
class AllOf(Generic[_MatcherT], BaseMatcherNode):
"""
Matcher that matches all of its options. Useful when you want to match
against a concrete matcher and a :class:`MatchIfTrue` at the same time. Also
useful when you want to match against a concrete matcher and a
:func:`DoesNotMatch` at the same time. You can also construct a
:class:`AllOf` matcher by using Python's bitwise and operator with concrete
matcher classes.
For example, you could match against ``True`` in a roundabout way like::
m.AllOf(m.Name(), m.Name("True"))
Or you could use the shorthand, like::
m.Name() & m.Name("True")
Similar to :class:`OneOf`, this can be used in place of any concrete matcher.
Real-world cases where :class:`AllOf` is useful are hard to come by but they
are still provided for the limited edge cases in which they make sense. In
the example above, we are redundantly matching against any LibCST
:class:`~libcst.Name` node as well as LibCST :class:`~libcst.Name` nodes that
have the ``value`` of ``True``. We could drop the first option entirely and
get the same result. Often, if you are using a :class:`AllOf`,
you can refactor your code to be simpler.
For example, the following matches any function call to ``foo``, and
any function call which takes zero arguments::
m.AllOf(m.Call(func=m.Name("foo")), m.Call(args=()))
This could be refactored into the following equivalent concrete matcher::
m.Call(func=m.Name("foo"), args=())
"""
def __init__(self, *options: Union[_MatcherT, "AllOf[_MatcherT]"]) -> None:
actual_options: List[_MatcherT] = []
for option in options:
if isinstance(option, OneOf):
raise Exception("Cannot use AllOf and OneOf in combination!")
elif isinstance(option, AllOf):
actual_options.extend(option.options)
else:
actual_options.append(option)
if len(actual_options) < 2:
raise Exception("Must provide at least two options to AllOf!")
self._options: Sequence[_MatcherT] = tuple(actual_options)
@property
def options(self) -> Sequence[_MatcherT]:
"""
The normalized list of options that we can choose from to satisfy a
:class:`AllOf` matcher. If all of these matchers are true, the
:class:`AllOf` matcher will also be considered a match.
"""
return self._options
def __or__(self, other: _OtherNodeT) -> NoReturn:
raise Exception("Cannot use AllOf and OneOf in combination!")
def __and__(self, other: _OtherNodeT) -> "AllOf[Union[_MatcherT, _OtherNodeT]]":
# Without a cast, pyre thinks that the below AllOf is type AllOf[object]
# even though it has the types passed into it.
return cast(AllOf[Union[_MatcherT, _OtherNodeT]], AllOf(self, other))
def __invert__(self) -> "OneOf[_MatcherT]":
# Invert using De Morgan's Law so we don't have to complicate types.
return cast(OneOf[_MatcherT], OneOf(*[DoesNotMatch(m) for m in self._options]))
def __repr__(self) -> str:
return f"AllOf({', '.join([repr(o) for o in self._options])})"
class InverseOf(Generic[_MatcherT]):
"""
Matcher that inverts the match result of its child. You can also construct a
:class:`InverseOf` matcher by using Python's bitwise invert operator with concrete
matcher classes or any special matcher.
Note that you should refrain from constructing a :class:`InverseOf` directly, and
should instead use the :func:`DoesNotMatch` helper function.
For example, the following matches against any identifier that isn't
``True``/``False``::
m.DoesNotMatch(m.OneOf(m.Name("True"), m.Name("False")))
Or you could use the shorthand, like::
~(m.Name("True") | m.Name("False"))
"""
def __init__(self, matcher: _MatcherT) -> None:
self._matcher: _MatcherT = matcher
@property
def matcher(self) -> _MatcherT:
"""
The matcher that we will evaluate and invert. If this matcher is true, then
:class:`InverseOf` will be considered not a match, and vice-versa.
"""
return self._matcher
def __or__(self, other: _OtherNodeT) -> "OneOf[Union[_MatcherT, _OtherNodeT]]":
# Without a cast, pyre thinks that the below OneOf is type OneOf[object]
# even though it has the types passed into it.
return cast(OneOf[Union[_MatcherT, _OtherNodeT]], OneOf(self, other))
def __and__(self, other: _OtherNodeT) -> "AllOf[Union[_MatcherT, _OtherNodeT]]":
# Without a cast, pyre thinks that the below AllOf is type AllOf[object]
# even though it has the types passed into it.
return cast(AllOf[Union[_MatcherT, _OtherNodeT]], AllOf(self, other))
def __getattr__(self, key: str) -> object:
# We lie about types to make InverseOf appear transparent. So, its conceivable
# that somebody might try to dereference an attribute on the _MatcherT wrapped
# node and become surprised that it doesn't work.
return getattr(self._matcher, key)
def __invert__(self) -> _MatcherT:
return self._matcher
def __repr__(self) -> str:
return f"DoesNotMatch({repr(self._matcher)})"
class MatchIfTrue(Generic[_CallableT]):
"""
Matcher that matches if its child callable returns ``True``. The child callable
should take one argument which is the attribute on the LibCST node we are
trying to match against. This is useful if you want to do complex logic to
determine if an attribute should match or not. One example of this is the
:func:`MatchRegex` matcher build on top of :class:`MatchIfTrue` which takes a
regular expression and matches any string attribute where a regex match is found.
For example, to match on any identifier spelled with the letter ``e``::
m.Name(value=m.MatchIfTrue(lambda value: "e" in value))
This can be used in place of any concrete matcher as long as it is not the
root matcher. Calling :func:`matches` directly on a :class:`MatchIfTrue` is
redundant since you can just call the child callable directly with the node
you are passing to :func:`matches`.
"""
def __init__(self, func: _CallableT) -> None:
# Without a cast, pyre thinks that self.func is not a function, even though
# it recognizes that it is a _CallableT bound to Callable.
self._func: Callable[..., bool] = cast(Callable[..., bool], func)
@property
def func(self) -> Callable[..., bool]:
"""
The function that we will call with a LibCST node in order to determine
if we match. If the function returns ``True`` then we consider ourselves
to be a match.
"""
return self._func
def __or__(
self, other: _OtherNodeT
) -> "OneOf[Union[MatchIfTrue[_CallableT], _OtherNodeT]]":
# Without a cast, pyre thinks that the below OneOf is type OneOf[object]
# even though it has the types passed into it.
return cast(
OneOf[Union[MatchIfTrue[_CallableT], _OtherNodeT]], OneOf(self, other)
)
def __and__(
self, other: _OtherNodeT
) -> "AllOf[Union[MatchIfTrue[_CallableT], _OtherNodeT]]":
# Without a cast, pyre thinks that the below AllOf is type AllOf[object]
# even though it has the types passed into it.
return cast(
AllOf[Union[MatchIfTrue[_CallableT], _OtherNodeT]], AllOf(self, other)
)
def __invert__(self) -> "MatchIfTrue[_CallableT]":
# Construct a wrapped version of MatchIfTrue for typing simplicity.
# Without the cast, pyre doesn't seem to think the lambda is valid.
return MatchIfTrue(cast(_CallableT, lambda val: not self._func(val)))
def __repr__(self) -> str:
# pyre-ignore Pyre doesn't believe that functions have a repr.
return f"MatchIfTrue({repr(self._func)})"
def MatchRegex(regex: Union[str, Pattern[str]]) -> MatchIfTrue[Callable[[str], bool]]:
"""
Used as a convenience wrapper to :class:`MatchIfTrue` which allows for
matching a string attribute against a regex. ``regex`` can be any regular
expression string or a compiled ``Pattern``. This uses Python's re module
under the hood and is compatible with syntax documented on
`docs.python.org <https://docs.python.org/3/library/re.html>`_.
For example, to match against any identifier that is at least one character
long and only contains alphabetical characters::
m.Name(value=m.MatchRegex(r'[A-Za-z]+'))
This can be used in place of any string literal when constructing a concrete
matcher.
"""
def _match_func(value: object) -> bool:
if isinstance(value, str):
# pyre-ignore Pyre doesn't think a 'Pattern' can be passed to fullmatch.
return bool(re.fullmatch(regex, value))
else:
return False
return MatchIfTrue(_match_func)
class MatchMetadata:
"""
Matcher that looks up the metadata on the current node using the provided
metadata provider and compares the value on the node against the value provided
to :class:`MatchMetadata`.
For example, to match against any function call which has one parameter which
is used in a load expression context::
m.Call(
args=[
m.Arg(
m.MatchMetadata(
meta.ExpressionContextProvider,
meta.ExpressionContext.LOAD,
)
)
]
)
To match against any :class:`~libcst.Name` node for the identifier ``foo``
which is the target of an assignment::
m.Name(
value="foo",
metadata=m.MatchMetadata(
meta.ExpressionContextProvider,
meta.ExpressionContext.STORE,
)
)
This can be used in place of any concrete matcher as long as it is not the
root matcher. Calling :func:`matches` directly on a :class:`MatchMetadata` is
redundant since you can just check the metadata on the root node that you
are passing to :func:`matches`.
"""
def __init__(
self,
key: Type[meta.BaseMetadataProvider[_MetadataValueT]],
value: _MetadataValueT,
) -> None:
self.key: Type[meta.BaseMetadataProvider[_MetadataValueT]] = key
self.value: _MetadataValueT = value
def __or__(self, other: _OtherNodeT) -> "OneOf[Union[MatchMetadata, _OtherNodeT]]":
# Without the cast, pyre doesn't know this is valid
return cast(OneOf[Union[MatchMetadata, _OtherNodeT]], OneOf(self, other))
def __and__(self, other: _OtherNodeT) -> "AllOf[Union[MatchMetadata, _OtherNodeT]]":
# Without the cast, pyre doesn't know this is valid
return cast(AllOf[Union[MatchMetadata, _OtherNodeT]], AllOf(self, other))
def __invert__(self) -> "MatchMetadata":
# We intentionally lie here, for the same reason given in the documentation
# for DoesNotMatch.
return cast(MatchMetadata, InverseOf(self))
def __repr__(self) -> str:
return f"MatchMetadata(key={repr(self.key)}, value={repr(self.value)})"
class _BaseWildcardNode:
"""
A typing-only class for internal helpers in this module to be able to
specify that they take a wildcard node type.
"""
pass
class AtLeastN(Generic[_MatcherT], _BaseWildcardNode):
"""
Matcher that matches ``n`` or more of a particular matcher in a sequence.
:class:`AtLeastN` defaults to matching against the :func:`DoNotCare` matcher,
so if you do not specify a concrete matcher as a child, :class:`AtLeastN`
will match only by count.
For example, this will match all function calls with at least 3 arguments::
m.Call(args=[m.AtLeastN(n=3)])
This will match all function calls with 3 or more integer arguments::
m.Call(args=[m.AtLeastN(n=3, matcher=m.Arg(m.Integer()))])
You can combine sequence matchers with concrete matchers and special matchers
and it will behave as you expect. For example, this will match all function
calls that have 2 or more integer arguments, followed by any arbitrary
argument::
m.Call(args=[m.AtLeastN(n=2, matcher=m.Arg(m.Integer())), m.DoNotCare()])
And finally, this will match all function calls that have at least 5
arguments, the final one being an integer::
m.Call(args=[m.AtLeastN(n=4), m.Arg(m.Integer())])
"""
def __init__(
self,
matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT,
*,
n: int,
) -> None:
if n < 0:
raise Exception(f"{self.__class__.__name__} n attribute must be positive")
self._n: int = n
self._matcher: Union[_MatcherT, DoNotCareSentinel] = matcher
@property
def n(self) -> int:
"""
The number of nodes in a row that must match :attr:`AtLeastN.matcher` for
this matcher to be considered a match. If there are less than ``n`` matches,
this matcher will not be considered a match. If there are equal to or more
than ``n`` matches, this matcher will be considered a match.
"""
return self._n
@property
def matcher(self) -> Union[_MatcherT, DoNotCareSentinel]:
"""
The matcher which each node in a sequence needs to match.
"""
return self._matcher
def __or__(self, other: object) -> NoReturn:
raise Exception(f"AtLeastN cannot be used in a OneOf matcher")
def __and__(self, other: object) -> NoReturn:
raise Exception(f"AtLeastN cannot be used in an AllOf matcher")
def __invert__(self) -> NoReturn:
raise Exception("Cannot invert an AtLeastN matcher!")
def __repr__(self) -> str:
if self._n == 0:
return f"ZeroOrMore({repr(self._matcher)})"
else:
return f"AtLeastN({repr(self._matcher)}, n={self._n})"
def ZeroOrMore(
matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT
) -> AtLeastN[Union[_MatcherT, DoNotCareSentinel]]:
"""
Used as a convenience wrapper to :class:`AtLeastN` when ``n`` is equal to ``0``.
Use this when you want to match against any number of nodes in a sequence.
For example, this will match any function call with zero or more arguments, as
long as all of the arguments are integers::
m.Call(args=[m.ZeroOrMore(m.Arg(m.Integer()))])
This will match any function call where the first argument is an integer and
it doesn't matter what the rest of the arguments are::
m.Call(args=[m.Arg(m.Integer()), m.ZeroOrMore()])
"""
return cast(AtLeastN[Union[_MatcherT, DoNotCareSentinel]], AtLeastN(matcher, n=0))
class AtMostN(Generic[_MatcherT], _BaseWildcardNode):
"""
Matcher that matches ``n`` or less of a particular matcher in a sequence.
:class:`AtMostN` defaults to matching against the :func:`DoNotCare` matcher,
so if you do not specify a concrete matcher as a child, :class:`AtMostN` will
match only by count.
For example, this will match all function calls with 3 or fewer arguments::
m.Call(args=[m.AtMostN(n=3)])
This will match all function calls with 0, 1 or 2 string arguments::
m.Call(args=[m.AtMostN(n=2, matcher=m.Arg(m.SimpleString()))])
You can combine sequence matchers with concrete matchers and special matchers
and it will behave as you expect. For example, this will match all function
calls that have 0, 1 or 2 string arguments, followed by an arbitrary
argument::
m.Call(args=[m.AtMostN(n=2, matcher=m.Arg(m.SimpleString())), m.DoNotCare()])
And finally, this will match all function calls that have at least 2
arguments, the final one being a string::
m.Call(args=[m.AtMostN(n=2), m.Arg(m.SimpleString())])
"""
def __init__(
self,
matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT,
*,
n: int,
) -> None:
if n < 0:
raise Exception(f"{self.__class__.__name__} n attribute must be positive")
self._n: int = n
self._matcher: Union[_MatcherT, DoNotCareSentinel] = matcher
@property
def n(self) -> int:
"""
The number of nodes in a row that must match :attr:`AtLeastN.matcher` for
this matcher to be considered a match. If there are less than or equal to
``n`` matches, then this matcher will be considered a match. Any more than
``n`` matches in a row and this matcher will stop matching and be considered
not a match.
"""
return self._n
@property
def matcher(self) -> Union[_MatcherT, DoNotCareSentinel]:
"""
The matcher which each node in a sequence needs to match.
"""
return self._matcher
def __or__(self, other: object) -> NoReturn:
raise Exception(f"AtMostN cannot be used in a OneOf matcher")
def __and__(self, other: object) -> NoReturn:
raise Exception(f"AtMostN cannot be used in an AllOf matcher")
def __invert__(self) -> NoReturn:
raise Exception("Cannot invert an AtMostN matcher!")
def __repr__(self) -> str:
if self._n == 1:
return f"ZeroOrOne({repr(self._matcher)})"
else:
return f"AtMostN({repr(self._matcher)}, n={self._n})"
def ZeroOrOne(
matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT
) -> AtMostN[Union[_MatcherT, DoNotCareSentinel]]:
"""
Used as a convenience wrapper to :class:`AtMostN` when ``n`` is equal to ``1``.
This is effectively a maybe clause.
For example, this will match any function call with zero or one integer
argument::
m.Call(args=[m.ZeroOrOne(m.Arg(m.Integer()))])
This will match any function call that has two or three arguments, and
the first and last arguments are strings::
m.Call(args=[m.Arg(m.SimpleString()), m.ZeroOrOne(), m.Arg(m.SimpleString())])
"""
return cast(AtMostN[Union[_MatcherT, DoNotCareSentinel]], AtMostN(matcher, n=1))
def DoesNotMatch(obj: _OtherNodeT) -> _OtherNodeT:
"""
Matcher helper that inverts the match result of its child. You can also invert a
matcher by using Python's bitwise invert operator on concrete matchers or any
special matcher.
For example, the following matches against any identifier that isn't
``True``/``False``::
m.DoesNotMatch(m.OneOf(m.Name("True"), m.Name("False")))
Or you could use the shorthand, like::
~(m.Name("True") | m.Name("False"))
This can be used in place of any concrete matcher as long as it is not the
root matcher. Calling :func:`matches` directly on a :func:`DoesNotMatch` is
redundant since you can invert the return of :func:`matches` using a bitwise not.
"""
# This type is a complete, dirty lie, but there's no way to recursively apply
# a parameter to each type inside a Union that may be in a _OtherNodeT.
# However, given the way InverseOf works (it will unwrap itself if
# inverted again), and the way we apply De Morgan's law for OneOf and AllOf,
# this lie ends up getting us correct typing. Anywhere a node is valid, using
# DoesNotMatch(node) is also valid.
#
# ~MatchIfTrue is still MatchIfTrue
# ~OneOf[x] is AllOf[~x]
# ~AllOf[x] is OneOf[~x]
# ~~x is x
#
# So, under all circumstances, since OneOf/AllOf are both allowed in every
# instance, and given that inverting MatchIfTrue is still MatchIfTrue,
# and inverting an inverted value returns us the original, its clear that
# there are no operations we can possibly do that bring us outside of the
# types specified in the concrete matchers as long as we lie that DoesNotMatch
# returns the value passed in.
if isinstance(obj, (BaseMatcherNode, MatchIfTrue, MatchMetadata, InverseOf)):
# We can use the overridden __invert__ in this case. Pyre doesn't think
# we can though, and casting doesn't fix the issue.
# pyre-ignore All three types above have overridden __invert__.
inverse = ~obj
else:
# We must wrap in a InverseOf.
inverse = InverseOf(obj)
return cast(_OtherNodeT, inverse)
def _sequence_matches( # noqa: C901
nodes: Sequence[Union[MaybeSentinel, libcst.CSTNode]],
matchers: Sequence[
Union[
BaseMatcherNode,
_BaseWildcardNode,
MatchIfTrue[Callable[..., bool]],
MatchMetadata,
DoNotCareSentinel,
]
],
metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object],
) -> bool:
if not nodes and not matchers:
# Base case, empty lists always match
return True
if not nodes and matchers:
# Base case, we have one or more matcher that wasn't matched
return all(
(isinstance(m, AtLeastN) and m.n == 0) or isinstance(m, AtMostN)
for m in matchers
)
if nodes and not matchers:
# Base case, we have nodes left that don't match any matcher
return False
# Recursive case, nodes and matchers LHS matches
node = nodes[0]
matcher = matchers[0]
if isinstance(matcher, DoNotCareSentinel):
# We don't care about the value for this node.
return _sequence_matches(nodes[1:], matchers[1:], metadata_lookup)
elif isinstance(matcher, _BaseWildcardNode):
if isinstance(matcher, AtMostN):
if matcher.n > 0:
# First, assume that this does match a node (greedy).
# Consume one node since it matched this matcher.
if _attribute_matches(
nodes[0], matcher.matcher, metadata_lookup
) and _sequence_matches(
nodes[1:],
[AtMostN(matcher.matcher, n=matcher.n - 1), *matchers[1:]],
metadata_lookup,
):
return True
# Finally, assume that this does not match the current node.
# Consume the matcher but not the node.
if _sequence_matches(nodes, matchers[1:], metadata_lookup):
return True
elif isinstance(matcher, AtLeastN):
if matcher.n > 0:
# Only match if we can consume one of the matches, since we still
# need to match N nodes.
if _attribute_matches(
nodes[0], matcher.matcher, metadata_lookup
) and _sequence_matches(
nodes[1:],
[AtLeastN(matcher.matcher, n=matcher.n - 1), *matchers[1:]],
metadata_lookup,
):
return True
else:
# First, assume that this does match a node (greedy).
# Consume one node since it matched this matcher.
if _attribute_matches(
nodes[0], matcher.matcher, metadata_lookup
) and _sequence_matches(nodes[1:], matchers, metadata_lookup):
return True
# Now, assume that this does not match the current node.
# Consume the matcher but not the node.
if _sequence_matches(nodes, matchers[1:], metadata_lookup):
return True
else:
# There are no other types of wildcard consumers, but we're making
# pyre happy with that fact.
raise Exception(f"Logic error unrecognized wildcard {type(matcher)}!")
elif _matches(node, matcher, metadata_lookup):
# These values match directly
return _sequence_matches(nodes[1:], matchers[1:], metadata_lookup)
# Failed recursive case, no match
return False
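# Illustrative trace of the wildcard recursion above (hand-checked): matching
# nodes [a, b] against [ZeroOrMore()] takes the greedy AtLeastN branch twice,
# consuming ``a`` and then ``b`` while keeping the wildcard, after which the
# empty-nodes base case accepts the leftover AtLeastN(n=0) matcher.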
_AttributeValueT = Optional[Union[MaybeSentinel, libcst.CSTNode, str, bool]]
_AttributeMatcherT = Optional[Union[BaseMatcherNode, DoNotCareSentinel, str, bool]]
def _attribute_matches( # noqa: C901
node: Union[_AttributeValueT, Sequence[_AttributeValueT]],
matcher: Union[_AttributeMatcherT, Sequence[_AttributeMatcherT]],
metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object],
) -> bool:
if isinstance(matcher, DoNotCareSentinel):
# We don't care what this is, so don't penalize a non-match.
return True
if isinstance(matcher, InverseOf):
# Return the opposite evaluation
return not _attribute_matches(node, matcher.matcher, metadata_lookup)
if isinstance(matcher, MatchIfTrue):
# We should only return if the matcher function is true.
return matcher.func(node)
if matcher is None:
# Should exactly be None
return node is None
if isinstance(matcher, str):
# Should exactly match matcher text
return node == matcher
if isinstance(matcher, bool):
# Should exactly match matcher bool
return node is matcher
if isinstance(node, collections.abc.Sequence):
# Given we've generated the types for matchers based on LibCST, we know that
# this is true unless the node is badly constructed and types were ignored.
node = cast(
Sequence[Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode]], node
)
if isinstance(matcher, OneOf):
# We should compare against each of the sequences in the OneOf
for m in matcher.options:
if isinstance(m, collections.abc.Sequence):
# Should match the sequence of requested nodes
if _sequence_matches(node, m, metadata_lookup):
return True
                elif isinstance(m, MatchIfTrue):
                    # The predicate applies to the whole sequence; keep trying
                    # the remaining options if it fails. Note OneOf itself has
                    # no func attribute, only the MatchIfTrue option does.
                    if m.func(node):
                        return True
elif isinstance(matcher, AllOf):
# We should compare against each of the sequences in the AllOf
for m in matcher.options:
if isinstance(m, collections.abc.Sequence):
# Should match the sequence of requested nodes
if not _sequence_matches(node, m, metadata_lookup):
return False
                elif isinstance(m, MatchIfTrue):
                    # Every option must hold, so a failing predicate means the
                    # AllOf cannot match.
                    if not m.func(node):
                        return False
else:
# The value in the AllOf wasn't a sequence, it can't match.
return False
# We passed the checks above for each node, so we passed.
return True
elif isinstance(matcher, collections.abc.Sequence):
# We should assume that this matcher is a sequence to compare. Given
# the way we generate match classes, this should be true unless the
# match is badly constructed and types were ignored.
return _sequence_matches(
node,
cast(
Sequence[
Union[
BaseMatcherNode,
_BaseWildcardNode,
MatchIfTrue[Callable[..., bool]],
DoNotCareSentinel,
]
],
matcher,
),
metadata_lookup,
)
# We exhausted our possibilities, there's no match
return False
# Base case, should match node via matcher. We know the type of node is
# correct here because we generate matchers directly off of LibCST nodes,
# so the only way it is wrong is if the node was badly constructed and
# types were ignored.
return _matches(
cast(Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode], node),
cast(Union[BaseMatcherNode, MatchIfTrue, MatchMetadata], matcher),
metadata_lookup,
)
def _metadata_matches(
node: libcst.CSTNode,
metadata: Union[
MatchMetadata,
AllOf[MatchMetadata],
OneOf[MatchMetadata],
InverseOf[MatchMetadata],
],
metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object],
) -> bool:
if isinstance(metadata, OneOf):
return any(
_metadata_matches(node, metadata, metadata_lookup)
for metadata in metadata.options
)
elif isinstance(metadata, AllOf):
return all(
_metadata_matches(node, metadata, metadata_lookup)
for metadata in metadata.options
)
elif isinstance(metadata, InverseOf):
return not _metadata_matches(node, metadata.matcher, metadata_lookup)
else:
actual_value = metadata_lookup(metadata.key, node)
return actual_value == metadata.value
def _node_matches(
node: libcst.CSTNode,
matcher: Union[
BaseMatcherNode,
MatchIfTrue[Callable[..., bool]],
MatchMetadata,
InverseOf[
Union[BaseMatcherNode, MatchIfTrue[Callable[..., bool]], MatchMetadata]
],
],
metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object],
) -> bool:
    # If this is an InverseOf, then invert the result.
if isinstance(matcher, InverseOf):
return not _node_matches(node, matcher.matcher, metadata_lookup)
# Now, check if this is a lambda matcher.
if isinstance(matcher, MatchIfTrue):
return matcher.func(node)
if isinstance(matcher, MatchMetadata):
return _metadata_matches(node, matcher, metadata_lookup)
# Now, check that the node and matcher classes are the same.
if node.__class__.__name__ != matcher.__class__.__name__:
return False
# Now, check that the children match for each attribute.
for field in fields(matcher):
if field.name == "_metadata":
            # We don't care about this field; it's a dataclasses implementation detail.
continue
elif field.name == "metadata":
# Special field we respect for matching metadata on a particular node.
desired = getattr(matcher, field.name)
if isinstance(desired, DoNotCareSentinel):
# We don't care about this
continue
if not _metadata_matches(node, desired, metadata_lookup):
return False
else:
desired = getattr(matcher, field.name)
actual = getattr(node, field.name)
if not _attribute_matches(actual, desired, metadata_lookup):
return False
# We didn't find a non-match in the above loop, so it matches!
return True
def _matches(
node: Union[MaybeSentinel, libcst.CSTNode],
matcher: Union[
BaseMatcherNode,
MatchIfTrue[Callable[..., bool]],
MatchMetadata,
InverseOf[
Union[BaseMatcherNode, MatchIfTrue[Callable[..., bool]], MatchMetadata]
],
],
metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object],
) -> bool:
if isinstance(node, MaybeSentinel):
# We can't possibly match on a maybe sentinel, so it only matches if
        # the matcher we have is an InverseOf.
return isinstance(matcher, InverseOf)
# Now, evaluate the matcher node itself.
if isinstance(matcher, OneOf):
return any(
_node_matches(node, matcher, metadata_lookup) for matcher in matcher.options
)
elif isinstance(matcher, AllOf):
return all(
_node_matches(node, matcher, metadata_lookup) for matcher in matcher.options
)
else:
return _node_matches(node, matcher, metadata_lookup)
def _construct_metadata_fetcher_null() -> Callable[
[meta.ProviderT, libcst.CSTNode], object
]:
def _fetch() -> object:
return _METADATA_MISSING_SENTINEL
return _fetch
def _construct_metadata_fetcher_dependent(
dependent_class: libcst.MetadataDependent,
) -> Callable[[meta.ProviderT, libcst.CSTNode], object]:
def _fetch(provider: meta.ProviderT, node: libcst.CSTNode) -> object:
return dependent_class.get_metadata(provider, node, _METADATA_MISSING_SENTINEL)
return _fetch
def _construct_metadata_fetcher_wrapper(
wrapper: libcst.MetadataWrapper,
) -> Callable[[meta.ProviderT, libcst.CSTNode], object]:
metadata: Dict[meta.ProviderT, Mapping[libcst.CSTNode, object]] = {}
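    # Resolve each provider lazily and cache the mapping, so repeated lookups
    # against the same wrapper do not re-run metadata resolution.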
def _fetch(provider: meta.ProviderT, node: libcst.CSTNode) -> object:
if provider not in metadata:
metadata[provider] = wrapper.resolve(provider)
return metadata.get(provider, {}).get(node, _METADATA_MISSING_SENTINEL)
return _fetch
def matches(
node: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode],
matcher: BaseMatcherNode,
*,
metadata_resolver: Optional[
Union[libcst.MetadataDependent, libcst.MetadataWrapper]
] = None,
) -> bool:
"""
Given an arbitrary node from a LibCST tree, and an arbitrary matcher, returns
``True`` if the node matches the shape defined by the matcher. Note that the node
can also be a :class:`~libcst.RemovalSentinel` or a :class:`~libcst.MaybeSentinel`
in order to use matches directly on transform results and node attributes. In these
cases, :func:`matches` will always return ``False``.
The matcher can be any concrete matcher that subclasses from :class:`BaseMatcherNode`,
or a :class:`OneOf`/:class:`AllOf` special matcher. It cannot be a
:class:`MatchIfTrue` or :func:`DoesNotMatch` matcher since this is redundant. It
    cannot be an :class:`AtLeastN` or :class:`AtMostN` matcher because these types are
wildcards which can only be used inside sequences.
"""
if isinstance(node, RemovalSentinel):
# We can't possibly match on a removal sentinel, so it doesn't match.
return False
if isinstance(matcher, (AtLeastN, AtMostN, MatchIfTrue)):
# We can't match this, since these matchers are forbidden at top level.
# These are not subclasses of BaseMatcherNode, but in the case that the
# user is not using type checking, this should still behave correctly.
return False
if metadata_resolver is None:
fetcher = _construct_metadata_fetcher_null()
elif isinstance(metadata_resolver, libcst.MetadataWrapper):
fetcher = _construct_metadata_fetcher_wrapper(metadata_resolver)
else:
fetcher = _construct_metadata_fetcher_dependent(metadata_resolver)
return _matches(node, matcher, fetcher)
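# Example usage (a minimal sketch, assuming this module is installed as
# libcst.matchers):
#
#     import libcst
#     import libcst.matchers as m
#
#     call = libcst.parse_expression("foo(1)")
#     m.matches(call, m.Call(func=m.Name("foo")))  # -> True
#     m.matches(call, m.Call(func=m.Name("bar")))  # -> False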
| 38.637571 | 90 | 0.651336 | 5,146 | 40,724 | 5.054023 | 0.108045 | 0.017225 | 0.005191 | 0.005383 | 0.512227 | 0.475584 | 0.415757 | 0.37946 | 0.354852 | 0.336858 | 0 | 0.001931 | 0.262597 | 40,724 | 1,053 | 91 | 38.674264 | 0.864136 | 0.45499 | 0 | 0.503145 | 0 | 0 | 0.080959 | 0.031031 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138365 | false | 0.002096 | 0.016771 | 0.052411 | 0.371069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ac0100cb9b2c279cea35555f4fa8cbff672edda | 4,911 | py | Python | visualize/loss_function.py | Drchen-AI/NN_DL_tensorflow2.0 | 3bf26a0b48e2aa78eecb3910104738612c176678 | [
"MIT"
] | null | null | null | visualize/loss_function.py | Drchen-AI/NN_DL_tensorflow2.0 | 3bf26a0b48e2aa78eecb3910104738612c176678 | [
"MIT"
] | null | null | null | visualize/loss_function.py | Drchen-AI/NN_DL_tensorflow2.0 | 3bf26a0b48e2aa78eecb3910104738612c176678 | [
"MIT"
] | null | null | null | # coding:utf8
import torch
from torch import nn, optim  # nn: neural-network layers, optim: optimizers
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchvision import transforms, datasets
from visdom import Visdom  # visualization client
import time
import numpy as np
# visualization app
viz = Visdom()
# hyperparameters
BATCH_SIZE = 40
LR = 1e-3
EPOCH = 2
# decide whether to use the GPU
USE_GPU = True
if USE_GPU:
gpu_status = torch.cuda.is_available()
else:
gpu_status = False
transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])
# load the datasets
train_dataset = datasets.MNIST('../data', True, transform, download=True)
test_dataset = datasets.MNIST('../data', False, transform)
train_loader = DataLoader(train_dataset, BATCH_SIZE, True)
# to speed up evaluation, shrink the test set from 10000 to 1500 samples
test_data = torch.unsqueeze(test_dataset.test_data, 1)[:1500]
test_label = test_dataset.test_labels[:1500]
# visualize a sample of the test data with visdom
viz.images(test_data[:100], nrow=10)
#viz.images(test_data[:100], nrow=10)
# pause 0.5 s so the visualization windows do not overlap
time.sleep(0.5)
if gpu_status:
test_data = test_data.cuda()
test_data = Variable(test_data, volatile=True).float()  # volatile is a legacy pre-0.4 PyTorch flag
# create a line-plot window
line = viz.line(np.arange(10))
# build the CNN
class CNN(nn.Module):
def __init__(self, in_dim, n_class):
super(CNN, self).__init__()
self.conv = nn.Sequential(
            # channels: feature depth; padding: image border padding; kernel_size: 5x5 filter
nn.Conv2d(in_channels=in_dim, out_channels=16,kernel_size=5,stride=1, padding=2),
nn.ReLU(),
            # spatial downsampling: 28x28 -> 14x14
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(16, 32, 3, 1, 1),
nn.ReLU(),
            # 14x14 -> 7x7
nn.MaxPool2d(2)
)
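        # After two 2x2 max-poolings, a 28x28 input shrinks to 7x7 with 32
        # channels, which gives the 32*7*7 flattened size used below.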
self.fc = nn.Sequential(
nn.Linear(32*7*7, 120),
nn.Linear(120, n_class)
)
def forward(self, x):
out = self.conv(x)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
net = CNN(1,10)
if gpu_status :
net = net.cuda()
#print("#"*26, "使用gpu", "#"*26)
else:
#print("#" * 26, "使用cpu", "#" * 26)
pass
# loss function and optimizer setup
loss_f = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=LR)
# record the start time
start_time = time.time()
# data points collected for visualization
time_p, tr_acc, ts_acc, loss_p = [], [], [], []
# create a text window for training metrics
text = viz.text("<h1>convolution Nueral Network</h1>")
for epoch in range(EPOCH):
    # training is mini-batched, so the reported loss is a per-batch mean; accumulate loss/acc over batches
sum_loss, sum_acc, sum_step = 0., 0., 0.
for i, (tx, ty) in enumerate(train_loader, 1):
if gpu_status:
tx, ty = tx.cuda(), ty.cuda()
tx = Variable(tx)
ty = Variable(ty)
out = net(tx)
loss = loss_f(out, ty)
#print(tx.size())
#print(ty.size())
#print(out.size())
sum_loss += loss.item()*len(ty)
#print(sum_loss)
pred_tr = torch.max(out,1)[1]
sum_acc += sum(pred_tr==ty).item()
sum_step += ty.size(0)
        # backpropagation and parameter update
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # visualize the metrics every 40 batches
if i % 40 == 0:
if gpu_status:
test_data = test_data.cuda()
test_out = net(test_data)
print(test_out.size())
            # when running on the GPU the outputs are CUDA tensors; move them to the CPU with .cpu() before comparing
pred_ts = torch.max(test_out, 1)[1].cpu().data.squeeze()
print(pred_ts.size())
rightnum = pred_ts.eq(test_label.view_as(pred_ts)).sum().item()
#rightnum =sum(pred_tr==ty).item()
# sum_acc += sum(pred_tr==ty).item()
acc = rightnum/float(test_label.size(0))
print("epoch: [{}/{}] | Loss: {:.4f} | TR_acc: {:.4f} | TS_acc: {:.4f} | Time: {:.1f}".format(epoch+1, EPOCH,
sum_loss/(sum_step), sum_acc/(sum_step), acc, time.time()-start_time))
            # visualization section
time_p.append(time.time()-start_time)
tr_acc.append(sum_acc/sum_step)
ts_acc.append(acc)
loss_p.append(sum_loss/sum_step)
viz.line(X=np.column_stack((np.array(time_p), np.array(time_p), np.array(time_p))),
Y=np.column_stack((np.array(loss_p), np.array(tr_acc), np.array(ts_acc))),
win=line,
opts=dict(legend=["Loss", "TRAIN_acc", "TEST_acc"]))
            # visdom text windows accept HTML markup
viz.text("<p style='color:red'>epoch:{}</p><br><p style='color:blue'>Loss:{:.4f}</p><br>"
"<p style='color:BlueViolet'>TRAIN_acc:{:.4f}</p><br><p style='color:orange'>TEST_acc:{:.4f}</p><br>"
"<p style='color:green'>Time:{:.2f}</p>".format(epoch, sum_loss/sum_step, sum_acc/sum_step, acc,
time.time()-start_time),
win=text)
sum_loss, sum_acc, sum_step = 0., 0., 0.
| 32.959732 | 122 | 0.576461 | 667 | 4,911 | 4.073463 | 0.296852 | 0.032389 | 0.023187 | 0.023923 | 0.172617 | 0.155318 | 0.142805 | 0.094222 | 0.080972 | 0.038278 | 0 | 0.037883 | 0.268988 | 4,911 | 148 | 123 | 33.182432 | 0.718942 | 0.131134 | 0 | 0.12 | 0 | 0.03 | 0.085877 | 0.048261 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0.01 | 0.08 | 0 | 0.12 | 0.03 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ac1d6d3fc31da110e9581cf6f625215065fc6d5 | 3,737 | py | Python | Car.py | molenathyhoangxuannguyen/Math_Quiz_1-Car | cbcd31bf12e13fe3a31725a46df9af14214b77c2 | [
"MIT"
] | null | null | null | Car.py | molenathyhoangxuannguyen/Math_Quiz_1-Car | cbcd31bf12e13fe3a31725a46df9af14214b77c2 | [
"MIT"
] | null | null | null | Car.py | molenathyhoangxuannguyen/Math_Quiz_1-Car | cbcd31bf12e13fe3a31725a46df9af14214b77c2 | [
"MIT"
] | null | null | null | #Written by Thy H. Nguyen
import turtle
from math import pi
import random
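# Turtle-graphics math quiz: draws a row of circle figures between two marked
# endpoints A and B, then asks the user to compute the length of segment AB.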
def main():
wns = turtle.Screen()
def banh_xe(bichthuy):
bichthuy.shape("circle")
bichthuy.shapesize(0.1,0.1)
bichthuy.speed(10)
bichthuy.pensize(6)
def ve_hinh_tron(number,cao):
for i in range(number):
bichthuy.right(1)
bichthuy.forward(cao)
def ve_tron(solieu, chieucao):
for i in range(solieu):
bichthuy.left(1)
bichthuy.backward(chieucao)
ve_hinh_tron(360, 0.5)
ve_tron(90, 0.5)
bichthuy.left(90)
bichthuy.forward(40)
bichthuy.right(90)
ve_hinh_tron(180,2.5)
bichthuy.right(90)
bichthuy.forward(40)
bichthuy.right(-90)
ve_hinh_tron(360, 0.5)
ve_tron(180, 0.5)
bichthuy.left(90)
bichthuy.forward((900/pi)-80-(360/pi))
wns.bgcolor("#e6ffff")
def annyong(mauu, yuri):
ngoc = turtle.Turtle()
ngoc.color("#000080")
ngoc.fillcolor(yuri)
ngoc.penup()
ngoc.forward(mauu)
ngoc.pendown()
ngoc.begin_fill()
banh_xe(ngoc)
ngoc.end_fill()
lan_nay_1 = random.choice(["#00e600", "#00cc7a"])
lan_nay_2 = random.choice(["#00e600", "#00cc7a"])
lan_nay_3 = random.choice(["#00e600", "#00cc7a"])
lan_nay_4 = random.choice(["#00e600", "#00cc7a"])
lan_nay_5 = random.choice(["#00e600", "#00cc7a"])
lan_nay_6 = random.choice(["#00e600", "#00cc7a"])
lan_nay_7 = random.choice(["#00e600", "#00cc7a"])
lan_nay_8 = random.choice(["#00e600", "#00cc7a"])
annyong(3 * 150, lan_nay_1)
annyong(1 * 150, lan_nay_2)
annyong(-1 * 150, lan_nay_3)
annyong(-3 * 150, lan_nay_4)
annyong(-4 * 150, lan_nay_5)
annyong(-2 * 150, lan_nay_6)
annyong(0*150, lan_nay_7)
annyong(2*150, lan_nay_8)
def nut_that(brother):
abcxyz = turtle.Turtle()
abcxyz.color("#ffff00")
abcxyz.shape("circle")
abcxyz.shapesize(0.1, 0.1)
abcxyz.pensize(6)
abcxyz.penup()
abcxyz.forward(brother)
abcxyz.right(90)
abcxyz.forward(90/pi)
abcxyz.pendown()
abcxyz.dot()
nut_that(-4*150-90/pi)
nut_that(3*150+90/pi+900/pi-180/pi-80)
conrua = turtle.Turtle()
conrua.color("#ffff00")
conrua.shape("circle")
conrua.shapesize(0.1, 0.1)
conrua.pensize(6)
conrua.penup()
conrua.forward(-4*150-90/pi)
conrua.right(90)
conrua.forward(90/pi)
conrua.left(90)
conrua.pendown()
conrua.forward(((900/pi)-(80+180/pi))*8+180/pi)
def dau_cuoi_tuong_ung(chieu_rong, chu_cai):
thy = turtle.Turtle()
thy.color("#660066")
thy.pensize(6)
thy.penup()
thy.forward(chieu_rong)
thy.right(90)
thy.forward(90)
thy.pendown()
thy.write(chu_cai, move=False, align="center", font=("TimesNewRoman", 40, "bold"))
dau_cuoi_tuong_ung(-4*150-45,"A")
dau_cuoi_tuong_ung(4*150+50, "B")
answer = wns.numinput("Can you calculate the length of AB ?", "Your answer: ", default=None, minval=0,
maxval=10000000)
def written(soluong, vietchu):
jordan = turtle.Turtle()
jordan.color("#660066")
jordan.pensize(6)
jordan.penup()
jordan.left(90)
jordan.forward(soluong)
jordan.right(90)
jordan.forward(0)
jordan.pendown()
jordan.write(vietchu, move=False, align="center", font =("TimesNewRoman",40,"bold") )
written(210, "Can you calculate the length of AB ?")
written(150, answer)
wns.exitonclick()
main()
| 27.07971 | 107 | 0.578539 | 498 | 3,737 | 4.214859 | 0.24498 | 0.045736 | 0.068604 | 0.091472 | 0.331586 | 0.264412 | 0.14626 | 0.098142 | 0.041925 | 0.041925 | 0 | 0.106383 | 0.270538 | 3,737 | 137 | 108 | 27.277372 | 0.66361 | 0.006422 | 0 | 0.070796 | 0 | 0 | 0.082435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070796 | false | 0 | 0.026549 | 0 | 0.097345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ac3f96b97737e3eb3f7b8408474c5d059166aee | 442 | py | Python | functions/odd_and_even_sum.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | functions/odd_and_even_sum.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | functions/odd_and_even_sum.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | 1 | 2022-01-14T17:12:44.000Z | 2022-01-14T17:12:44.000Z | number = input()
def get_sums(type_numbers, number):
result = 0
if type_numbers == "even":
result = sum(list(map(int, filter(lambda x: int(x) % 2 == 0, number))))
elif type_numbers == "odd":
result = sum(list(map(int, filter(lambda x: int(x) % 2 == 1, number))))
return result
evens_sum = get_sums("even", number)
odds_sum = get_sums("odd", number)
print(f"Odd sum = {odds_sum}, Even sum = {evens_sum}")
| 24.555556 | 79 | 0.615385 | 68 | 442 | 3.852941 | 0.397059 | 0.080153 | 0.099237 | 0.122137 | 0.282443 | 0.282443 | 0.282443 | 0.282443 | 0.282443 | 0.282443 | 0 | 0.014409 | 0.214932 | 442 | 17 | 80 | 26 | 0.740634 | 0 | 0 | 0 | 0 | 0 | 0.131519 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ac8d5589ceca70f9e89f1d4ba9ffbf6e4b2d824 | 1,257 | py | Python | yandex_algorithm2/home1c.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
] | null | null | null | yandex_algorithm2/home1c.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
] | null | null | null | yandex_algorithm2/home1c.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
] | null | null | null | '''
As is well known, the two most common date formats are the European one
(day first, then month, then year) and the American one (month first,
then day, then year). A system administrator changed the date on one of
the backups and now wants to restore it. But he did not check which
format the system uses. Can he manage without this information?
In other words, you are given the record of some valid date. Determine
whether the date is identified unambiguously from this record, even
without additional information about the format.
Input format:
The first line of the input contains three integers x, y and z
(1 ≤ x ≤ 31, 1 ≤ y ≤ 31, 1970 ≤ z ≤ 2069). It is guaranteed that in at
least one of the formats the record x.y.z denotes a valid date.
Output format:
Print 1 if the date is determined unambiguously, and 0 otherwise.
'''
def helper(l):
first = l[0]
second = l[1]
if first == second:
return 1
if first <= 12 and second <= 12:
return 0
elif first > 12 and second <= 12:
return 1
elif first <= 12 and second > 12:
return 1
else:
return 0
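# Illustrative cases (the list holds [x, y, z]; z is ignored by helper):
#   helper([5, 5, 2000])  -> 1  (5.5 reads the same in both formats)
#   helper([13, 5, 2000]) -> 1  (13 exceeds 12, so it can only be a day)
#   helper([3, 4, 2000])  -> 0  (March 4 and April 3 are both valid)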
if __name__ == "__main__":
l = list(map(int, input().split()))
result = helper(l)
print(result)
| 18.761194 | 374 | 0.686555 | 200 | 1,257 | 4.315 | 0.565 | 0.024334 | 0.034762 | 0.05562 | 0.095017 | 0.095017 | 0.067207 | 0.067207 | 0 | 0 | 0 | 0.036765 | 0.242641 | 1,257 | 66 | 375 | 19.045455 | 0.861345 | 0.679395 | 0 | 0.294118 | 0 | 0 | 0.020253 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.352941 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1acc2f32cc9a7a1f5f4e8cb7660bd8849614cb8a | 3,035 | py | Python | app/models/users.py | dwcaraway/govly | c3a134c2d8ae911c0ab05d9b96014a7c18bfac45 | [
"MIT"
] | 5 | 2018-03-14T18:55:35.000Z | 2021-10-04T00:16:38.000Z | app/models/users.py | dwcaraway/govly | c3a134c2d8ae911c0ab05d9b96014a7c18bfac45 | [
"MIT"
] | null | null | null | app/models/users.py | dwcaraway/govly | c3a134c2d8ae911c0ab05d9b96014a7c18bfac45 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
vitals.models.users
~~~~~~~~~~~~~~~~~~~
:author: Dave Caraway
:copyright: © 2014-2015, Fog Mine LLC
:license: Proprietary, see LICENSE for more details.
"""
import base64
import os
from flask_security import RoleMixin, UserMixin
from ..framework.sql import (
db,
Model,
ReferenceColumn,
)
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('roles.id')))
class Role(RoleMixin, Model):
__tablename__ = "roles"
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
bitmask = db.Column(db.SmallInteger, unique=True)
class Connection(Model):
__tablename__ = "connections"
user_id = ReferenceColumn("users")
provider_id = db.Column(db.String(255))
provider_user_id = db.Column(db.String(255))
access_token = db.Column(db.String(255))
secret = db.Column(db.String(255))
display_name = db.Column(db.String(255))
full_name = db.Column(db.String(255))
profile_url = db.Column(db.String(512))
image_url = db.Column(db.String(512))
rank = db.Column(db.Integer)
class Invite(Model):
__tablename__ = "invitations"
invitor_id = db.Column(db.Integer, db.ForeignKey('users.id'))
invitee_id = db.Column(db.Integer, db.ForeignKey('users.id'))
invitee_email = db.Column(db.String(128), unique=True, nullable=False)
token = db.Column(db.String(255), unique=True, nullable=False)
created = db.Column(db.DateTime(), default=db.func.now())
def generate_secret():
"""Generate a random string used for salts and secret keys."""
return base64.b64encode(os.urandom(48)).decode('utf-8')
class User(UserMixin, Model):
__tablename__ = "users"
email = db.Column(db.String(128), unique=True, nullable=False)
password = db.Column(db.String(120))
first_name = db.Column(db.String(120), nullable=False)
last_name = db.Column(db.String(120), nullable=False)
active = db.Column(db.Boolean(), default=True)
secret = db.Column(db.String(64), default=generate_secret)
created = db.Column(db.DateTime(), default=db.func.now())
confirmed_at = db.Column(db.DateTime())
last_login_at = db.Column(db.DateTime())
current_login_at = db.Column(db.DateTime())
last_login_ip = db.Column(db.String(100))
current_login_ip = db.Column(db.String(100))
login_count = db.Column(db.Integer)
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
connections = db.relationship('Connection',
backref=db.backref('user', lazy='joined'), cascade='all')
invitations = db.relationship('Invite', backref='invitor', foreign_keys='Invite.invitor_id')
invited_by = db.relationship('Invite', uselist=False, backref='invitee', foreign_keys='Invite.invitee_id')
def reset_secret(self):
self.secret = generate_secret()
self.save()
| 32.634409 | 110 | 0.677759 | 408 | 3,035 | 4.911765 | 0.301471 | 0.127745 | 0.149701 | 0.151697 | 0.402196 | 0.337325 | 0.221058 | 0.168663 | 0.132735 | 0.091816 | 0 | 0.02874 | 0.163097 | 3,035 | 92 | 111 | 32.98913 | 0.759843 | 0.076771 | 0 | 0.032787 | 0 | 0 | 0.071506 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0.016393 | 0.065574 | 0 | 0.819672 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1acc37fde77112dc03495f8a8ad528c34e34685e | 7,273 | py | Python | data.py | jgoppert/iekf_analysis | d41ad34b37ef2636e20680accf399ea4a9332811 | [
"BSD-3-Clause"
] | 5 | 2018-01-16T06:46:38.000Z | 2019-06-19T10:17:12.000Z | data.py | jgoppert/iekf_analysis | d41ad34b37ef2636e20680accf399ea4a9332811 | [
"BSD-3-Clause"
] | null | null | null | data.py | jgoppert/iekf_analysis | d41ad34b37ef2636e20680accf399ea4a9332811 | [
"BSD-3-Clause"
] | null | null | null | from transforms3d.taitbryan import quat2euler
import matplotlib.pyplot as plt
import numpy as np
from util import X, Xe
class Data(object):
"""
Data object for sim data
"""
def __init__(self):
self.x = []
self.J = []
self.K_mag = []
self.K_gps = []
self.K_accel = []
self.K_lidar = []
self.K_baro = []
self.mag_fault = []
self.gps_fault = []
self.accel_fault = []
self.lidar_fault = []
self.baro_fault = []
self.dx = []
self.xh = []
self.y = []
self.Jh = []
self.t = []
self.P = []
self.euler = None
self.euler_est = None
def finalize(self):
"""
Turn lists to arrays, prepare for plotting
"""
data = self.__dict__
for key in data:
data[key] = np.array(data[key])
try:
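            # transforms3d's taitbryan.quat2euler returns (yaw, pitch, roll),
            # so index 0 is yaw and index 2 is roll in the arrays below.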
self.euler = np.array([quat2euler(qi)
for qi in self.x[:, X.q_nb_0: X.q_nb_3 + 1]])
self.euler_est = np.array([
quat2euler(qi)
for qi in self.xh[:, X.q_nb_0: X.q_nb_3 + 1]])
self.agl_est = self.xh[:, X.terrain_alt] - (-self.xh[:, X.pos_D])
self.agl = self.x[:, X.terrain_alt] - (-self.x[:, X.pos_D])
except IndexError as e:
print(e)
def __repr__(self):
return repr(self.__dict__)
def plot_est(self, i, color, name):
plt.plot(self.t, self.xh[:, i], color + '-', label=name + '-est')
plt.plot(self.t, self.x[:, i], color + '--', label=name)
def plot_est_stddev(self, i, i_error, color, name):
plt.plot(self.t,
self.xh[:, i] + np.sqrt(self.P[:, i_error]), color + '-.')
plt.plot(self.t,
self.xh[:, i] - np.sqrt(self.P[:, i_error]), color + '-.')
def analysis(self):
"""
Show plots of data
"""
plt.rcParams['lines.linewidth'] = 2
plt.figure(figsize=(15, 10))
plt.title('euler angles')
plt.plot(self.t,
np.rad2deg(self.euler_est[:, 2]), 'r-', label='roll-est')
plt.plot(self.t, np.rad2deg(self.euler[:, 2]), 'r--', label='roll')
plt.plot(self.t,
np.rad2deg(self.euler_est[:, 1]), 'g-', label='pitch-est')
plt.plot(self.t, np.rad2deg(self.euler[:, 1]), 'g--', label='pitch')
plt.plot(self.t,
np.rad2deg(self.euler_est[:, 0]), 'b-', label='yaw-est')
plt.plot(self.t, np.rad2deg(self.euler[:, 0]), 'b--', label='yaw')
plt.legend(loc='best', ncol=3)
plt.xlabel('t, sec')
plt.ylabel('deg')
plt.grid()
plt.figure(figsize=(15, 10))
plt.subplot(221)
plt.title('position')
self.plot_est(X.pos_N, 'r', 'N')
self.plot_est(X.pos_E, 'g', 'E')
self.plot_est(X.pos_D, 'b', 'D')
axis = plt.gca().axis()
self.plot_est_stddev(X.pos_N, Xe.pos_N, 'r', 'N')
self.plot_est_stddev(X.pos_E, Xe.pos_E, 'g', 'E')
self.plot_est_stddev(X.pos_D, Xe.pos_D, 'b', 'D')
plt.axis(axis)
plt.legend(loc='best', ncol=3)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(222)
plt.title('velocity')
self.plot_est(X.vel_N, 'r', 'N')
self.plot_est(X.vel_E, 'g', 'E')
self.plot_est(X.vel_D, 'b', 'D')
axis = plt.gca().axis()
self.plot_est_stddev(X.vel_N, Xe.vel_N, 'r', 'N')
self.plot_est_stddev(X.vel_E, Xe.vel_E, 'g', 'E')
self.plot_est_stddev(X.vel_D, Xe.vel_D, 'b', 'D')
plt.axis(axis)
plt.legend(loc='best', ncol=3)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(223)
plt.title('gyro bias')
self.plot_est(X.gyro_bias_bx, 'r', 'X')
self.plot_est(X.gyro_bias_by, 'g', 'Y')
self.plot_est(X.gyro_bias_bz, 'b', 'Z')
axis = plt.gca().axis()
self.plot_est_stddev(X.gyro_bias_bx, Xe.gyro_bias_bx, 'r', 'X')
self.plot_est_stddev(X.gyro_bias_by, Xe.gyro_bias_by, 'g', 'Y')
self.plot_est_stddev(X.gyro_bias_bz, Xe.gyro_bias_bz, 'b', 'Z')
plt.axis(axis)
plt.legend(loc='best', ncol=3)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(224)
plt.title('accel scale')
self.plot_est(X.accel_scale, 'r', '')
axis = plt.gca().axis()
self.plot_est_stddev(X.accel_scale, Xe.accel_scale, 'r', '')
plt.axis(axis)
plt.xlabel('t, sec')
plt.grid()
plt.figure(figsize=(15, 10))
plt.subplot(221)
plt.title('agl')
plt.plot(self.t, self.agl, '--')
plt.plot(self.t, self.agl_est, '-')
plt.xlabel('t, sec')
plt.grid()
plt.figure(figsize=(15, 10))
plt.subplot(221)
plt.title('terrain alt')
self.plot_est(X.terrain_alt, 'b', '')
axis = plt.gca().axis()
self.plot_est_stddev(X.terrain_alt, Xe.terrain_alt, 'r', '')
plt.axis(axis)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(222)
plt.title('baro bias')
self.plot_est(X.baro_bias, 'b', '')
axis = plt.gca().axis()
self.plot_est_stddev(X.baro_bias, Xe.baro_bias, 'r', '')
plt.axis(axis)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(223)
plt.title('Invariants')
plt.plot(self.t, self.J, '--')
plt.gca().set_prop_cycle(None)
plt.plot(self.t, self.Jh, '-')
plt.xlabel('t, sec')
plt.grid()
plt.figure(figsize=(15, 5))
plt.title('rotation std dev.')
plt.plot(self.t, np.rad2deg(np.sqrt(self.P[:, Xe.rot_bx])), label='N')
plt.plot(self.t, np.rad2deg(np.sqrt(self.P[:, Xe.rot_by])), label='E')
plt.plot(self.t, np.rad2deg(np.sqrt(self.P[:, Xe.rot_bz])), label='D')
plt.xlabel('t, sec')
plt.ylabel('deg')
plt.legend(loc='best', ncol=3)
plt.grid()
plt.figure(figsize=(15, 15))
plt.subplot(321)
plt.title('K mag')
plt.plot(self.t, self.K_mag)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(322)
plt.title('K gps')
plt.plot(self.t, self.K_gps)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(323)
plt.title('K accel')
plt.plot(self.t, self.K_accel)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(324)
plt.title('K_baro')
plt.plot(self.t, self.K_baro)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(325)
plt.title('K_lidar')
plt.plot(self.t, self.K_lidar)
plt.xlabel('t, sec')
plt.grid()
plt.figure(figsize=(15, 5))
plt.title('faults')
plt.plot(self.t, self.lidar_fault, label='lidar', alpha=0.5)
plt.plot(self.t, self.accel_fault, label='accel', alpha=0.5)
plt.plot(self.t, self.mag_fault, label='mag', alpha=0.5)
plt.plot(self.t, self.gps_fault, label='gps', alpha=0.5)
plt.plot(self.t, self.baro_fault, label='baro', alpha=0.5)
plt.gca().set_ylim([-1, 2])
plt.xlabel('t, sec')
plt.legend(loc='best', ncol=3)
plt.grid()
| 31.621739 | 78 | 0.514643 | 1,076 | 7,273 | 3.339219 | 0.131041 | 0.038965 | 0.082661 | 0.090175 | 0.641247 | 0.600612 | 0.554411 | 0.509045 | 0.35931 | 0.276092 | 0 | 0.021381 | 0.299051 | 7,273 | 229 | 79 | 31.759825 | 0.683405 | 0.011825 | 0 | 0.364583 | 0 | 0 | 0.057271 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.020833 | 0.005208 | 0.0625 | 0.005208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1acfcd0292220871f431e4e473efb911e7c66800 | 1,098 | py | Python | conary_test/repositorytest/filecontentstest.py | sassoftware/conary | d418968acd5e11ee17ed6d91ca395ea10a040222 | [
"Apache-2.0"
] | 43 | 2015-03-31T01:37:10.000Z | 2021-11-14T16:26:48.000Z | conary_test/repositorytest/filecontentstest.py | sassoftware/conary | d418968acd5e11ee17ed6d91ca395ea10a040222 | [
"Apache-2.0"
] | 9 | 2015-06-10T16:39:41.000Z | 2020-01-27T16:35:01.000Z | conary_test/repositorytest/filecontentstest.py | sassoftware/conary | d418968acd5e11ee17ed6d91ca395ea10a040222 | [
"Apache-2.0"
] | 9 | 2015-04-07T08:12:37.000Z | 2020-01-26T09:54:18.000Z | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, tempfile, unittest
from conary.repository.filecontents import FromFile
class FileContentsTest(unittest.TestCase):
def testFile(self):
(fd, name) = tempfile.mkstemp()
os.close(fd)
try:
f = open(name, "w")
f.write("hello")
f.close()
f = open(name, "r")
fc = FromFile(f)
assert(fc.get().read() == "hello")
assert(fc.get().read() == "hello")
finally:
os.unlink(name)
| 28.153846 | 74 | 0.640255 | 146 | 1,098 | 4.815068 | 0.657534 | 0.085349 | 0.036984 | 0.045519 | 0.056899 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00489 | 0.255009 | 1,098 | 38 | 75 | 28.894737 | 0.854523 | 0.504554 | 0 | 0.125 | 0 | 0 | 0.032197 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ad07d6aab73d167ba977ffa3a12a00f276ccca7 | 2,582 | py | Python | global_finprint/annotation/urls/assignment.py | GlobalFinPrint/global_finprint | 8a91ceaaed42aaa716d8c9f27518ba673ebf351c | [
"Apache-2.0"
] | null | null | null | global_finprint/annotation/urls/assignment.py | GlobalFinPrint/global_finprint | 8a91ceaaed42aaa716d8c9f27518ba673ebf351c | [
"Apache-2.0"
] | 6 | 2020-06-05T18:42:32.000Z | 2022-01-13T00:48:57.000Z | global_finprint/annotation/urls/assignment.py | GlobalFinPrint/global_finprint | 8a91ceaaed42aaa716d8c9f27518ba673ebf351c | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url, include
from django.views.decorators.csrf import csrf_exempt
from global_finprint.annotation.views.assignment import VideoAutoAssignView, ManageAssignmentView, ObservationListView, \
AssignmentListView, AssignmentListTbodyView, AssignmentModalBodyView, UnassignModalBodyView, AssignMultipleVideosModel,\
AssignMultipleVideoToAnnotators, RestrictFilterDropDown, AssignedAnnotatorPopup, VideoCountForAutoAssignView,\
TotalVideoCountForAutoAssignment, EditMeasurablesInline, MeasurableDelete
from global_finprint.annotation.views.compare import AssignmentCompareView, AssignmentDetailView
urlpatterns = [
url(r"^$", AssignmentListView.as_view(), name='assignment_list'),
url(r"^search$", AssignmentListTbodyView.as_view(), name='assignment_search'),
url(r"^review/(?P<assignment_id>\d+)$", ObservationListView.as_view(), name='assignment_review'),
url(r"^manage/(?P<assignment_id>\d+)$", ManageAssignmentView.as_view(), name='assignment_manage'),
url(r"^modal/(?P<set_id>\d+)$", AssignmentModalBodyView.as_view(), name='assignment_modal'),
url(r"^unassign_modal/(?P<assignment_id>\d+)$", UnassignModalBodyView.as_view(), name='unassign_modal'),
url(r"^auto$", VideoAutoAssignView.as_view(), name='auto_assign'),
url(r"^detail/(?P<assignment_id>\d+)$", AssignmentDetailView.as_view(), name='get_assignment_detail'),
url(r"^compare/(?P<set_id>\d+)$", AssignmentCompareView.as_view(), name='assignment_compare'),
url(r"^master/", include('global_finprint.annotation.urls.master')),
url(r"^measurables/edit/(?P<evt_id>\d+)$", csrf_exempt(EditMeasurablesInline.as_view()),
name='edit_measurables_inline'),
url(r"^measurables/delete/(?P<measurable_id>\d+)$", csrf_exempt(MeasurableDelete.as_view()),
name='master_measurable_delete'),
url(r"^assign_selected_videos$", csrf_exempt(AssignMultipleVideosModel.as_view()),
name='assign_selected_videos'),
url(r"^save_multi_video_assignment$", csrf_exempt(AssignMultipleVideoToAnnotators.as_view()),
name='multi_video_assignment'),
url(r"^filter_change$",csrf_exempt(RestrictFilterDropDown.as_view()),
name='restrict_reefs_sets_based_on_trip'),
url(r"^assigned_annotator/(?P<set_id>\d+)$",csrf_exempt(AssignedAnnotatorPopup.as_view()),
name='assigned anotator details per video'),
url(r"^auto_count$", VideoCountForAutoAssignView.as_view(), name='auto_assign_count'),
url(r"^total_count$",TotalVideoCountForAutoAssignment.as_view(),name='total_assign_count'),
]
| 52.693878 | 124 | 0.755616 | 282 | 2,582 | 6.659574 | 0.280142 | 0.038339 | 0.090522 | 0.063898 | 0.056443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093726 | 2,582 | 48 | 125 | 53.791667 | 0.802564 | 0 | 0 | 0 | 0 | 0 | 0.30519 | 0.20488 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.121212 | 0 | 0.121212 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ad0ae27acc8dbfede485ff88f989be820ffa27e | 3,512 | py | Python | pytext/data/test/data_test.py | NunoEdgarGFlowHub/pytext | 2358b2d7c8c4e6800c73f4bd1c9731723e503ed6 | [
"BSD-3-Clause"
] | 1 | 2019-02-25T01:50:03.000Z | 2019-02-25T01:50:03.000Z | pytext/data/test/data_test.py | NunoEdgarGFlowHub/pytext | 2358b2d7c8c4e6800c73f4bd1c9731723e503ed6 | [
"BSD-3-Clause"
] | null | null | null | pytext/data/test/data_test.py | NunoEdgarGFlowHub/pytext | 2358b2d7c8c4e6800c73f4bd1c9731723e503ed6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from pytext.common.constants import Stage
from pytext.data import Data, RawBatcher, types
from pytext.data.sources.data_source import SafeFileWrapper
from pytext.data.sources.tsv import TSVDataSource
from pytext.data.tensorizers import LabelTensorizer, WordTensorizer
from pytext.utils.test_utils import import_tests_module
tests_module = import_tests_module()
class DataTest(unittest.TestCase):
def setUp(self):
self.data_source = TSVDataSource(
SafeFileWrapper(tests_module.test_file("train_dense_features_tiny.tsv")),
SafeFileWrapper(tests_module.test_file("test_dense_features_tiny.tsv")),
eval_file=None,
field_names=["label", "slots", "text", "dense"],
schema={"text": types.Text, "label": types.Label},
)
self.tensorizers = {
"tokens": WordTensorizer(column="text"),
"labels": LabelTensorizer(column="label"),
}
def test_create_data_no_batcher_provided(self):
data = Data(self.data_source, self.tensorizers)
batches = list(data.batches(Stage.TRAIN))
# We should have made at least one non-empty batch
self.assertTrue(batches)
batch, tensors = next(iter(batches))
self.assertTrue(batch)
self.assertTrue(tensors)
def test_create_batches(self):
data = Data(self.data_source, self.tensorizers, RawBatcher(batch_size=16))
batches = list(data.batches(Stage.TRAIN))
self.assertEqual(1, len(batches))
batch, batch_tensors = next(iter(batches))
self.assertEqual(set(self.tensorizers), set(batch_tensors))
tokens, seq_lens = batch_tensors["tokens"]
self.assertEqual((10,), seq_lens.size())
self.assertEqual((10,), batch_tensors["labels"].size())
self.assertEqual(10, len(batch))
example = next(iter(batch))
self.assertEqual({"text", "label"}, set(example))
def test_create_batches_different_tensorizers(self):
tensorizers = {"tokens": WordTensorizer(column="text")}
data = Data(self.data_source, tensorizers, RawBatcher(batch_size=16))
batches = list(data.batches(Stage.TRAIN))
self.assertEqual(1, len(batches))
batch, batch_tensors = next(iter(batches))
self.assertEqual({"tokens"}, set(batch_tensors))
tokens, seq_lens = batch_tensors["tokens"]
self.assertEqual((10,), seq_lens.size())
self.assertEqual(10, len(batch))
example = next(iter(batch))
self.assertEqual({"text", "label"}, set(example))
    def test_data_initializes_tensorizers(self):
tensorizers = {
"tokens": WordTensorizer(column="text"),
"labels": LabelTensorizer(column="label"),
}
with self.assertRaises(AttributeError):
# verify WordTensorizer isn't in an initialized state yet
tensorizers["tokens"].vocab
Data(self.data_source, tensorizers)
# Tensorizers should have been initialized
self.assertEqual(49, len(tensorizers["tokens"].vocab))
self.assertEqual(7, len(tensorizers["labels"].labels))
class RawBatcherTest(unittest.TestCase):
def test_raw_batcher(self):
data = range(10)
batcher = RawBatcher(batch_size=3)
self.assertEqual(
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]], list(batcher.batchify(data))
)
| 39.909091 | 85 | 0.660308 | 402 | 3,512 | 5.634328 | 0.293532 | 0.092715 | 0.030905 | 0.031788 | 0.471082 | 0.418543 | 0.370861 | 0.370861 | 0.339514 | 0.339514 | 0 | 0.011983 | 0.215831 | 3,512 | 87 | 86 | 40.367816 | 0.810458 | 0.067198 | 0 | 0.304348 | 0 | 0 | 0.060226 | 0.017426 | 0 | 0 | 0 | 0 | 0.26087 | 1 | 0.086957 | false | 0 | 0.115942 | 0 | 0.231884 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ad0bb118d7657c0226c9703cf3e0988adf2f733 | 1,765 | py | Python | main.py | GrDaniel/duplicates_finder | edb5bb601494f50425850ce5bcfb4e37955541ba | [
"MIT"
] | null | null | null | main.py | GrDaniel/duplicates_finder | edb5bb601494f50425850ce5bcfb4e37955541ba | [
"MIT"
] | null | null | null | main.py | GrDaniel/duplicates_finder | edb5bb601494f50425850ce5bcfb4e37955541ba | [
"MIT"
] | null | null | null | import os
from itertools import chain
from hashlib import sha256
class DuplicatesRemover(object):
def __init__(self, search_path: str, file_type: str):
self.path = search_path
self.f_type = file_type
    def find_duplicates(self):
        file_paths = self.collect_file_paths()
        files_size = self.calcucalate_files_size(file_paths)
        files_with_identical_size = self.find_files_with_identical_size(files_size)
        files_hashes = self.calculate_file_hash(files_with_identical_size)
        # group paths by content hash; groups of more than one are duplicates
        grouped = {}
        for path, f_hash in files_hashes.items():
            grouped.setdefault(f_hash.hexdigest(), []).append(path)
        return [paths for paths in grouped.values() if len(paths) > 1]
    def collect_file_paths(self):
        file_tree = os.walk(self.path)
        file_paths = []
        for root_dir, dirs, files in file_tree:
            for file_name in files:
                # only collect files with the requested extension
                if file_name.endswith(self.f_type):
                    file_paths.append(f"{root_dir}/{file_name}")
        return file_paths
@staticmethod
def calcucalate_files_size(file_paths):
return {file_path: os.path.getsize(file_path) for file_path in file_paths}
def find_files_with_identical_size(self, files_size):
"""
:param files_size: {file_path: file_size}
rev_dict: {file_size1: {file1_path, file2_path}, file_size2: {file5_path, file12_path}...}
:return set(file1_path, file2_path)
"""
rev_dict = {}
for f_path, f_size in files_size.items():
rev_dict.setdefault(f_size, set()).add(f_path)
result = set(chain.from_iterable(values for key, values in rev_dict.items() if len(values) > 1))
return result
@staticmethod
def calculate_file_hash(files_path):
result = {}
for path in files_path:
with open(path, "rb") as file:
f_read = file.read()
f_hash = sha256(f_read)
result[path] = f_hash
return result
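# Example usage (hypothetical path and extension):
#   remover = DuplicatesRemover("/home/user/photos", ".jpg")
#   for group in remover.find_duplicates():
#       print(group)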
| 33.942308 | 104 | 0.648159 | 238 | 1,765 | 4.457983 | 0.281513 | 0.076343 | 0.067861 | 0.082941 | 0.131951 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012298 | 0.26289 | 1,765 | 51 | 105 | 34.607843 | 0.803228 | 0.095184 | 0 | 0.108108 | 0 | 0 | 0.015424 | 0.014139 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162162 | false | 0 | 0.081081 | 0.027027 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46b8e3c90bcd926a8f712d20d77daa3692772f66 | 6,095 | py | Python | test/unit/test_network_util.py | shannonxtreme/DeepReg | 373f6c28fed1d7376d5c39340b08a3814804efb2 | [
"Apache-2.0"
] | null | null | null | test/unit/test_network_util.py | shannonxtreme/DeepReg | 373f6c28fed1d7376d5c39340b08a3814804efb2 | [
"Apache-2.0"
] | null | null | null | test/unit/test_network_util.py | shannonxtreme/DeepReg | 373f6c28fed1d7376d5c39340b08a3814804efb2 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
Tests for deepreg/model/network/util
"""
import pytest
import tensorflow as tf
import deepreg.model.network.util as util
from deepreg.model.backbone import global_net, local_net, u_net
def test_wrong_inputs():
"""
Function to test wrong input types passed to build backbone func
"""
# Wrong image_size type: int, vs tuple, Fail
with pytest.raises(ValueError) as err_info:
util.build_backbone(
image_size=1, out_channels=1, model_config={}, method_name="ddf"
)
assert "image_size must be tuple of length 3" in str(err_info.value)
# Wrong out_channels type: str, vs int, Fail
with pytest.raises(ValueError) as err_info:
util.build_backbone(
image_size=(1, 2, 3), out_channels="", model_config={}, method_name="ddf"
)
assert "out_channels must be int >=1" in str(err_info.value)
# Wrong model_config type: list, vs dic, Fail
with pytest.raises(ValueError) as err_info:
util.build_backbone(
image_size=(1, 2, 3), out_channels=1, model_config=[], method_name="ddf"
)
assert "model_config must be a dict having key 'backbone'" in str(err_info.value)
# Wrong method_name
with pytest.raises(ValueError) as err_info:
util.build_backbone(
image_size=(1, 2, 3),
out_channels=1,
model_config={"backbone": "local"},
method_name="wrong",
)
assert (
"method name has to be one of ddf/dvf/conditional/affine in build_backbone"
in str(err_info.value)
)
def test_value_raised_if_wrong_method():
"""
    Checking that ValueError is raised if the method name string is not
    one of the accepted method names.
"""
# expect ddf, dvf or conditional, Fail
with pytest.raises(ValueError):
util.build_backbone(
image_size=(1, 2, 3), out_channels=1, model_config={}, method_name=""
)
def test_value_raised_if_unknown_config():
"""
    Checking that ValueError is raised if the backbone config string
    is unknown.
"""
# expect local, global or unet
with pytest.raises(ValueError):
util.build_backbone(
image_size=(1, 2, 3),
out_channels=1,
model_config={"backbone": "random"},
method_name="ddf",
)
def test_global_return():
"""
Testing that build_backbone func returns an object
of type GlobalNet from backbone module when initialised
with the associated GlobalNet config.
"""
out = util.build_backbone(
image_size=(1, 2, 3),
out_channels=1,
model_config={
"backbone": "global",
"global": {"num_channel_initial": 4, "extract_levels": [1, 2, 3]},
},
method_name="ddf",
)
assert isinstance(
out,
type(global_net.GlobalNet([1, 2, 3], 4, 4, [1, 2, 3], "he_normal", "sigmoid")),
)
def test_local_return():
"""
Testing that build_backbone func returns an object
of type LocalNet from backbone module when initialised
with the associated LocalNet config.
"""
out = util.build_backbone(
image_size=(1, 2, 3),
out_channels=1,
model_config={
"backbone": "local",
"local": {"num_channel_initial": 4, "extract_levels": [1, 2, 3]},
},
method_name="ddf",
)
assert isinstance(
out,
type(local_net.LocalNet([1, 2, 3], 4, 4, [1, 2, 3], "he_normal", "sigmoid")),
)
def test_unet_return():
"""
Testing that build_backbone func returns an object
    of type UNet from the backbone module when initialised
with the associated UNet config.
"""
out = util.build_backbone(
image_size=(1, 2, 3),
out_channels=1,
model_config={
"backbone": "unet",
"unet": {"num_channel_initial": 4, "depth": 4},
},
method_name="ddf",
)
assert isinstance(out, type(u_net.UNet([1, 2, 3], 4, 4, 4, "he_normal", "sigmoid")))
def test_wrong_inputs_build_inputs():
"""
    Function to test wrong input types passed to build_inputs func
"""
# Wrong image_size type: int, vs tuple, Fail
with pytest.raises(Exception):
util.build_inputs(
moving_image_size=1,
fixed_image_size=(),
index_size=1,
batch_size=1,
labeled=True,
)
# Wrong fixed_images type: int, vs tuple, Fail
with pytest.raises(Exception):
util.build_inputs(
moving_image_size=(),
fixed_image_size=1,
index_size=1,
batch_size=1,
labeled=True,
)
# Wrong index_size type: list, vs int, Fail
with pytest.raises(Exception):
util.build_inputs(
moving_image_size=(),
fixed_image_size=(),
index_size=[],
batch_size=1,
labeled=True,
)
# Wrong batch_size type: list, vs int, Fail
with pytest.raises(Exception):
util.build_inputs(
moving_image_size=(),
fixed_image_size=(),
index_size=1,
batch_size=[],
labeled=True,
)
def test_return_types_build_inputs():
"""
Test that returns 5 items of type tf.keras.inputs.
"""
out = util.build_inputs(
moving_image_size=(1, 2, 3),
fixed_image_size=(1, 2, 3),
index_size=1,
batch_size=1,
labeled=True,
)
# Asserting all items tf.keras.inputs - Pass
assert all(isinstance(item, type(tf.keras.Input(1))) for item in out)
mov_im, fixed_im, mov_l, fixed_l, indices = util.build_inputs(
moving_image_size=(1, 2, 3),
fixed_image_size=(1, 2, 3),
index_size=1,
batch_size=1,
labeled=False,
)
# Asserting all items bar mov_l and fixed_l tf.keras.inputs - Pass
assert all(
isinstance(item, type(tf.keras.Input(1)))
for item in [mov_im, fixed_im, indices]
)
assert all(isinstance(item, type(None)) for item in [mov_l, fixed_l])
| 29.587379 | 88 | 0.597539 | 790 | 6,095 | 4.403797 | 0.165823 | 0.03593 | 0.016384 | 0.037942 | 0.735556 | 0.688991 | 0.643001 | 0.619143 | 0.582351 | 0.546996 | 0 | 0.022001 | 0.29155 | 6,095 | 205 | 89 | 29.731707 | 0.783696 | 0.210664 | 0 | 0.50365 | 0 | 0 | 0.092908 | 0.005605 | 0 | 0 | 0 | 0 | 0.072993 | 1 | 0.058394 | false | 0 | 0.029197 | 0 | 0.087591 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46ba3880bb819e029eff762b12b3a9362a955d15 | 4,841 | py | Python | code/tmp_rtrip/test/test_importlib/test_locks.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 24 | 2018-01-23T05:28:40.000Z | 2021-04-13T20:52:59.000Z | code/tmp_rtrip/test/test_importlib/test_locks.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 17 | 2017-12-21T18:32:31.000Z | 2018-12-18T17:09:50.000Z | code/tmp_rtrip/test/test_importlib/test_locks.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | null | null | null | from . import util as test_util
init = test_util.import_importlib('importlib')
import sys
import unittest
import weakref
from test import support
try:
import threading
except ImportError:
threading = None
else:
from test import lock_tests
if threading is not None:
class ModuleLockAsRLockTests:
locktype = classmethod(lambda cls: cls.LockType('some_lock'))
test__is_owned = None
test_try_acquire = None
test_try_acquire_contended = None
test_with = None
test_timeout = None
test_release_save_unacquired = None
test_repr = None
test_locked_repr = None
LOCK_TYPES = {kind: splitinit._bootstrap._ModuleLock for kind,
splitinit in init.items()}
Frozen_ModuleLockAsRLockTests, Source_ModuleLockAsRLockTests = (test_util
.test_both(ModuleLockAsRLockTests, lock_tests.RLockTests, LockType=
LOCK_TYPES))
else:
LOCK_TYPES = {}
class Frozen_ModuleLockAsRLockTests(unittest.TestCase):
pass
class Source_ModuleLockAsRLockTests(unittest.TestCase):
pass
if threading is not None:
class DeadlockAvoidanceTests:
def setUp(self):
try:
self.old_switchinterval = sys.getswitchinterval()
support.setswitchinterval(1e-06)
except AttributeError:
self.old_switchinterval = None
def tearDown(self):
if self.old_switchinterval is not None:
sys.setswitchinterval(self.old_switchinterval)
def run_deadlock_avoidance_test(self, create_deadlock):
NLOCKS = 10
locks = [self.LockType(str(i)) for i in range(NLOCKS)]
pairs = [(locks[i], locks[(i + 1) % NLOCKS]) for i in range(NLOCKS)
]
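            # Each worker takes a (lock[i], lock[(i+1) % N]) pair, forming a
            # ring; with NTHREADS == NLOCKS a deadlock cycle is possible,
            # while one fewer thread leaves a gap that breaks the cycle.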
if create_deadlock:
NTHREADS = NLOCKS
else:
NTHREADS = NLOCKS - 1
barrier = threading.Barrier(NTHREADS)
results = []
def _acquire(lock):
"""Try to acquire the lock. Return True on success,
False on deadlock."""
try:
lock.acquire()
except self.DeadlockError:
return False
else:
return True
def f():
a, b = pairs.pop()
ra = _acquire(a)
barrier.wait()
rb = _acquire(b)
results.append((ra, rb))
if rb:
b.release()
if ra:
a.release()
lock_tests.Bunch(f, NTHREADS).wait_for_finished()
self.assertEqual(len(results), NTHREADS)
return results
def test_deadlock(self):
results = self.run_deadlock_avoidance_test(True)
nb_deadlocks = results.count((True, False))
self.assertGreaterEqual(nb_deadlocks, 1)
self.assertEqual(results.count((True, True)), len(results) -
nb_deadlocks)
def test_no_deadlock(self):
results = self.run_deadlock_avoidance_test(False)
self.assertEqual(results.count((True, False)), 0)
self.assertEqual(results.count((True, True)), len(results))
DEADLOCK_ERRORS = {kind: splitinit._bootstrap._DeadlockError for kind,
splitinit in init.items()}
Frozen_DeadlockAvoidanceTests, Source_DeadlockAvoidanceTests = (test_util
.test_both(DeadlockAvoidanceTests, LockType=LOCK_TYPES,
DeadlockError=DEADLOCK_ERRORS))
else:
DEADLOCK_ERRORS = {}
class Frozen_DeadlockAvoidanceTests(unittest.TestCase):
pass
class Source_DeadlockAvoidanceTests(unittest.TestCase):
pass
class LifetimeTests:
@property
def bootstrap(self):
return self.init._bootstrap
def test_lock_lifetime(self):
name = 'xyzzy'
self.assertNotIn(name, self.bootstrap._module_locks)
lock = self.bootstrap._get_module_lock(name)
self.assertIn(name, self.bootstrap._module_locks)
wr = weakref.ref(lock)
del lock
support.gc_collect()
self.assertNotIn(name, self.bootstrap._module_locks)
self.assertIsNone(wr())
def test_all_locks(self):
support.gc_collect()
self.assertEqual(0, len(self.bootstrap._module_locks), self.
bootstrap._module_locks)
Frozen_LifetimeTests, Source_LifetimeTests = test_util.test_both(LifetimeTests,
init=init)
@support.reap_threads
def test_main():
support.run_unittest(Frozen_ModuleLockAsRLockTests,
Source_ModuleLockAsRLockTests, Frozen_DeadlockAvoidanceTests,
Source_DeadlockAvoidanceTests, Frozen_LifetimeTests,
Source_LifetimeTests)
if __name__ == '__main__':
test_main()
| 30.639241 | 79 | 0.622185 | 492 | 4,841 | 5.890244 | 0.256098 | 0.019324 | 0.032781 | 0.041408 | 0.221532 | 0.133195 | 0.115942 | 0.063492 | 0 | 0 | 0 | 0.002947 | 0.299112 | 4,841 | 157 | 80 | 30.834395 | 0.851164 | 0.01384 | 0 | 0.16 | 0 | 0 | 0.006524 | 0 | 0 | 0 | 0 | 0 | 0.08 | 1 | 0.088 | false | 0.032 | 0.072 | 0.008 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46bc10fa6ebc50719d08ff180aa6f6a2e590a585 | 9,053 | py | Python | archived_code/sed_test_AI_64_8.py | jacob975/deep_learning | 52a5073589cf78aeadfde8ea51f687bc497a059b | [
"MIT"
] | null | null | null | archived_code/sed_test_AI_64_8.py | jacob975/deep_learning | 52a5073589cf78aeadfde8ea51f687bc497a059b | [
"MIT"
] | 10 | 2018-03-14T08:44:12.000Z | 2018-11-13T13:45:53.000Z | archived_code/sed_test_AI_64_8.py | jacob975/deep_learning | 52a5073589cf78aeadfde8ea51f687bc497a059b | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''
Abstract:
    This is a code for testing an AI with given sed data.
Usage:
    sed_test_AI_64_8.py [images] [labels] [coords] [directory] [AI]
Editor and Practitioner:
Jacob975
##################################
# Python3 #
# This code is made in python3 #
##################################
20180225
####################################
update log
20180225 version alpha 1:
    the code works well.
20180226 version alpha 2:
    the AI can be chosen.
20180412 version alpha 3:
    1. make directory an argument
20180414 version alpha 4:
1. add funcs for print precision and recall-rate.
20180523 version alpha 5:
    1. delete some plot functions that had rarely been used for a long time.
20180601 version alpha 6:
1. delete some functions never used in this code.
2. add a func to save predicted labels.
'''
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
import time
from sys import argv
from save_lib import save_cls_pred, save_cls_true, save_arrangement, save_coords, save_label_pred
from load_lib import print_precision, print_recall_rate
import astro_mnist
import os
# We also need PrettyTensor.
import prettytensor as pt
def plot_confusion_matrix(cls_pred):
# This is called from print_test_accuracy() below.
# cls_pred is an array of the predicted class-number for
# all images in the test-set.
# Get the true classifications for the test-set.
cls_true = data.test.cls
# Get the confusion matrix using sklearn.
cm = confusion_matrix(y_true=cls_true,
y_pred=cls_pred)
# Print the confusion matrix as text.
print(cm)
print_precision(y_true = cls_true, y_pred = cls_pred)
print_recall_rate(y_true = cls_true, y_pred = cls_pred)
def print_test_accuracy(show_confusion_matrix=False):
# For all the images in the test-set,
# calculate the predicted classes and whether they are correct.
correct, cls_pred = predict_cls_test()
# save cls_pred and cls_true
save_cls_pred(images_name[:-4], directory, cls_pred)
save_cls_true(images_name[:-4], directory, data.test.cls)
# Classification accuracy and the number of correct classifications.
acc, num_correct = cls_accuracy(correct)
# Number of images being classified.
num_images = len(correct)
# Print the accuracy.
msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
print(msg.format(acc, num_correct, num_images))
# Plot the confusion matrix, if desired.
if show_confusion_matrix:
print("Confusion Matrix:")
plot_confusion_matrix(cls_pred=cls_pred)
def predict_cls(images, labels, cls_true):
# Number of images.
num_images = len(images)
# Allocate an array for the predicted classes which
# will be calculated in batches and filled into this array.
    cls_pred = np.zeros(shape=num_images, dtype=int)  # the builtin int works on all NumPy versions (np.int was removed)
# Now calculate the predicted classes for the batches.
# We will just iterate through all the batches.
# There might be a more clever and Pythonic way of doing this.
# The starting index for the next batch is denoted i.
i = 0
while i < num_images:
# The ending index for the next batch is denoted j.
j = min(i + batch_size, num_images)
# Create a feed-dict with the images and labels
# between index i and j.
feed_dict = {x: images[i:j, :],
y_true: labels[i:j, :]}
# Calculate the predicted class using TensorFlow.
cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)
# Set the start-index for the next batch to the
# end-index of the current batch.
i = j
# Create a boolean array whether each image is correctly classified.
correct = (cls_true == cls_pred)
return correct, cls_pred
def predict_label(images, labels):
# Number of images.
num_images = len(images)
# initialize
label_pred = np.zeros(num_images*3).reshape((num_images, 3))
feed_dict = {x: images[:,:], y_true: labels[:,:]}
# process
label_pred = session.run(y_pred, feed_dict=feed_dict)
return label_pred
def predict_cls_test():
return predict_cls(images = data.test.images,
labels = data.test.labels,
cls_true = data.test.cls)
def cls_accuracy(correct):
# Calculate the number of correctly classified images.
# When summing a boolean array, False means 0 and True means 1.
correct_sum = correct.sum()
# Classification accuracy is the number of correctly classified
# images divided by the total number of images in the test-set.
acc = float(correct_sum) / len(correct)
return acc, correct_sum
#--------------------------------------------
# main code
if __name__ == "__main__":
VERBOSE = 0
# measure times
start_time = time.time()
directory = argv[4]
#-----------------------------------
# Load Data
images_name = argv[1]
labels_name = argv[2]
coords_name = argv[3]
AI_saved_dir = argv[5]
data, tracer, coords = astro_mnist.read_data_sets(images_name, labels_name, coords_name, train_weight = 0, validation_weight = 0, test_weight = 1)
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
data.test.cls = np.argmax(data.test.labels, axis=1)
# save arrangement and coords
failure = save_arrangement(images_name[:-4], directory, data, tracer)
if not failure:
print ("tracer and data is saved.")
failure = save_coords(images_name[:-4], directory, coords)
if not failure:
print ("coords are saved.")
#-----------------------------------
# Data dimension
    # Unlike the MNIST tutorial this code was adapted from, the image size is read from the data.
img_size = len(data.test.images[0])
print ("image size = {0}".format(img_size))
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * 1
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, 1)
# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = 1
    # Number of classes: 3 SED classes here, not the 10 digits of the MNIST tutorial.
    num_classes = 3
#-----------------------------------
# Get the true classes for those images.
data.test.cls = np.argmax(data.test.labels, axis=1)
# Get the first images from the test-set.
images = data.test.images[0:9]
# Get the true classes for those images.
cls_true = data.test.cls[0:9]
#-----------------------------------
# Tensorflow Graph
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, 1, num_channels])
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, axis=1)
#-----------------------------------
# PrettyTensor Implementation
x_pretty = pt.wrap(x_image)
with pt.defaults_scope(activation_fn=tf.nn.relu6):
y_pred, loss = x_pretty.\
flatten().\
fully_connected(size = 64, name='layer_fc1').\
fully_connected(size = 64, name='layer_fc2').\
fully_connected(size = 64, name='layer_fc3').\
fully_connected(size = 64, name='layer_fc4').\
fully_connected(size = 64, name='layer_fc5').\
fully_connected(size = 64, name='layer_fc6').\
fully_connected(size = 64, name='layer_fc7').\
fully_connected(size = 64, name='layer_fc8').\
softmax_classifier(num_classes=num_classes, labels=y_true)
y_pred_cls = tf.argmax(y_pred, axis=1)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#-----------------------------------
# Saver
saver = tf.train.Saver()
print ("AI:{0}".format(AI_saved_dir))
if not os.path.exists(AI_saved_dir):
print ("No AI can be restore, please check folder ./checkpoints")
exit()
save_path = os.path.join(AI_saved_dir, 'best_validation')
#-----------------------------------
# Tensorflow run
session = tf.Session()
# restore previous weight
saver.restore(sess=session, save_path=save_path)
batch_size = 512
print ("batch_size = {0}".format(batch_size))
# test the restored AI, show confusion matrix and example_errors
# and save the cls of prediction
print_test_accuracy(show_confusion_matrix=True)
# save labels of prediction
label_pred = predict_label(data.test.images, data.test.labels)
save_label_pred(images_name[:-4], directory, label_pred)
session.close()
#-----------------------------------
# measuring time
elapsed_time = time.time() - start_time
print ("Exiting Main Program, spending ", elapsed_time, "seconds.")
| 37.102459 | 150 | 0.642218 | 1,259 | 9,053 | 4.444797 | 0.24305 | 0.020014 | 0.025733 | 0.028592 | 0.201573 | 0.145818 | 0.061115 | 0.028234 | 0.023946 | 0.013581 | 0 | 0.022062 | 0.218933 | 9,053 | 243 | 151 | 37.255144 | 0.76934 | 0.372363 | 0 | 0.05 | 0 | 0 | 0.071505 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.091667 | 0.008333 | 0.175 | 0.158333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46bc78afaeb62cb0bbad5b4d159a408a129ad787 | 677 | py | Python | compare_lists/compare.py | Esukhia/text_utils | 562065d4dedba127f8aaeee03ca3d9f071805f62 | [
"MIT"
] | 1 | 2017-01-26T22:37:57.000Z | 2017-01-26T22:37:57.000Z | compare_lists/compare.py | Esukhia/tibtext_utils | 562065d4dedba127f8aaeee03ca3d9f071805f62 | [
"MIT"
] | null | null | null | compare_lists/compare.py | Esukhia/tibtext_utils | 562065d4dedba127f8aaeee03ca3d9f071805f62 | [
"MIT"
] | null | null | null | from PyTib.common import open_file, write_file
import os
monlam = open_file('input/monlam1_total_corrected.txt').split('\n')
monlam_entries = [a.split(' | ')[0] for a in monlam]
monlam_entries = [a.rstrip('་') for a in monlam_entries]
monlam_dict = {a: True for a in monlam_entries}
non_monlam = {}
in_path = 'input/user_vocabs'
for f in os.listdir(in_path):
content = open_file('{}/{}'.format(in_path, f)).replace('\n', ' ').split(' ')
words = [a.rstrip('་') for a in content]
for word in words:
if word not in monlam_dict:
non_monlam[word] = True
non_words = '\n'.join(list(non_monlam.keys()))
write_file('output/non_monlam.txt', non_words)
| 35.631579 | 81 | 0.675037 | 110 | 677 | 3.963636 | 0.381818 | 0.119266 | 0.055046 | 0.082569 | 0.137615 | 0.06422 | 0 | 0 | 0 | 0 | 0 | 0.003521 | 0.161004 | 677 | 18 | 82 | 37.611111 | 0.760563 | 0 | 0 | 0 | 0 | 0 | 0.131462 | 0.079764 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46bc7d4de06ca22e044fe05c60711faebec83394 | 801 | py | Python | lustre/minification.py | half-cambodian-hacker-man/lustre | 93e2196a962cafcfd7fa0be93a6b0d563c46ba75 | [
"MIT"
] | 3 | 2020-09-06T02:21:09.000Z | 2020-09-30T00:05:54.000Z | lustre/minification.py | videogame-hacker/lustre | 93e2196a962cafcfd7fa0be93a6b0d563c46ba75 | [
"MIT"
] | null | null | null | lustre/minification.py | videogame-hacker/lustre | 93e2196a962cafcfd7fa0be93a6b0d563c46ba75 | [
"MIT"
] | null | null | null | import typing
from starlette.responses import Response, HTMLResponse
from starlette.templating import _TemplateResponse
try:
import htmlmin
except ImportError:
htmlmin = None
def setup_html_minification(
response_classes=[HTMLResponse, _TemplateResponse], **minification_config
):
assert htmlmin is not None, "htmlmin must be installed to use HTML minification"
original_render = Response.render
def minify_and_render(self, content: typing.Any) -> bytes:
if isinstance(content, str):
minified_content = htmlmin.minify(content, **minification_config)
return original_render(self, minified_content)
return original_render(self, content)
for response_class in response_classes:
response_class.render = minify_and_render
| 28.607143 | 84 | 0.750312 | 90 | 801 | 6.466667 | 0.488889 | 0.072165 | 0.051546 | 0.082474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.188514 | 801 | 27 | 85 | 29.666667 | 0.895385 | 0 | 0 | 0 | 0 | 0 | 0.062422 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46c8ae10d53e8ce4d08d1a08d67812ca60bf79fe | 5,407 | py | Python | VirusTotalAVBot/modules/virustotal.py | kenanismayilov335/VirusTotal-File-Scan-Bot | 369e8bba10ce40cf9e9bdaeba018496e8509cd8d | [
"MIT"
] | null | null | null | VirusTotalAVBot/modules/virustotal.py | kenanismayilov335/VirusTotal-File-Scan-Bot | 369e8bba10ce40cf9e9bdaeba018496e8509cd8d | [
"MIT"
] | null | null | null | VirusTotalAVBot/modules/virustotal.py | kenanismayilov335/VirusTotal-File-Scan-Bot | 369e8bba10ce40cf9e9bdaeba018496e8509cd8d | [
"MIT"
] | 6 | 2020-11-01T17:46:27.000Z | 2022-03-01T14:34:17.000Z | import hashlib
import logging
import os
import requests
import time
from VirusTotalAVBot import VT_API
logger = logging.getLogger("VirusTotal Methods")
api_base_url = "https://www.virustotal.com/api/v3"
header = {'x-apikey': VT_API}
def vthash(filehash: str):
"""Returns the analysis data class for a file in VirusTotal's database"""
endpoint_path = f'/files/{filehash}'
endpoint = f"{api_base_url}{endpoint_path}"
r = requests.get(endpoint, headers=header)
if r.status_code == 404 and r.json()['error']['code'] == 'NotFoundError':
return None
elif r.status_code == 200:
return analysisdata(r)
def replytofile(path: str, message):
"""Coordinates the process of searching if the file already exists on VirusTotal's database, or needs to be
uploaded for analysis"""
response = ''
md5hash = findhash(path)
endpoint_path = f'/files/{md5hash}'
endpoint = f"{api_base_url}{endpoint_path}"
r = requests.get(endpoint, headers=header)
if r.status_code == 200:
av_data = r.json()['data']['attributes']['last_analysis_results']
response = simplifiedview(av_data, md5hash)
elif r.status_code == 404:
endpoint_path = '/files'
file = open(path, 'rb')
files = {'file': file}
message.edit_text("File is uploading to VirusTotal.")
if os.path.getsize(path) < 33554432:
requests.post(f'{api_base_url}{endpoint_path}', files=files, headers=header)
else:
requests.post(uploadurl(), files=files, headers=header)
file.close()
del file
message.edit_text("File has been uploaded to VirusTotal and is being analysed (90 seconds)")
time.sleep(90)
endpoint_path = f'/files/{md5hash}'
endpoint = f"{api_base_url}{endpoint_path}"
r = requests.get(endpoint, headers=header)
if analysisdata(r) is None:
return None
else:
av_data = analysisdata(r)
response = simplifiedview(av_data, md5hash)
return response
def simplifiedview(av_data: dict, filehash: str) -> str:
"""Builds and returns a simplified string containing basic information about the analysis"""
neg_detections = 0
pos_detections = 0
error_detections = 0
for engine in av_data:
if av_data[engine]['category'] == 'malicious' or av_data[engine]['category'] == 'suspicious':
neg_detections += 1
elif av_data[engine]['category'] == 'undetected':
pos_detections += 1
elif av_data[engine]['category'] == 'timeout' or av_data[engine]['category'] == 'type-unsupported' \
or av_data[engine]['category'] == 'failure':
error_detections += 1
vt_url = f'https://www.virustotal.com/gui/file/{filehash}'
response = f"__VirusTotal Analysis Summary__:\n\nHash: `{filehash}`\n\nLink: [Click Here]({vt_url})\n\n❌" \
f" **Negative: {neg_detections}**\n\n✅ Positive: {pos_detections}\n\n⚠ " \
f"Error/Unsupported File: {error_detections}"
return response
def detailedview(av_data: dict, filehash: str) -> str:
"""Builds and returns a string containing detailed information regarding the analysis for each antivirus engine"""
vt_url = f'https://www.virustotal.com/gui/file/{filehash}'
response = f"__VirusTotal Analysis Summary__:\n\nHash: `{filehash}`\n\nLink: [Click Here]({vt_url})\n\n"
for engine in av_data:
if av_data[engine]['category'] == 'malicious' or av_data[engine]['category'] == 'suspicious':
response = response + f"❌ **{av_data[engine]['engine_name']}: {av_data[engine]['result']}**\n"
for engine in av_data:
if av_data[engine]['category'] == 'undetected':
response = response + f"✅ {av_data[engine]['engine_name']}: Undetected\n"
for engine in av_data:
if av_data[engine]['category'] == 'timeout' or av_data[engine]['category'] == 'type-unsupported' \
or av_data[engine]['category'] == 'failure':
response = response + f"⚠ {av_data[engine]['engine_name']}: Unsupported File\n"
return response
def uploadurl() -> str:
"""This method generates a special URL to upload files larger than 32MB. Do note that URLs generated from this
endpoint will be one time use only."""
r = requests.get(f"{api_base_url}/files/upload_url", headers=header)
res = r.json()
return res['data']
def findhash(path: str) -> str:
"""Calculates the MD5 Hash for the path specified"""
h = hashlib.md5()
filefrompath = open(path, 'rb')
with filefrompath as file:
chunk = file.read(1024)
while len(chunk) > 0:
h.update(chunk)
chunk = file.read(1024)
filehash = h.hexdigest()
filefrompath.close()
del filefrompath
return filehash
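# Example use of the chunked hashing helper together with the lookup above
# (the path is hypothetical):
#
#     md5 = findhash('/tmp/sample.bin')
#     report = vthash(md5)  # None if VirusTotal has never seen the file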
def analysisdata(r):
"""Method to identify if analysis results are available in the json output"""
if 'data' in r.json():
if 'attributes' in r.json()['data']:
if 'last_analysis_results' in r.json()['data']['attributes']:
if r.json()['data']['attributes']['last_analysis_results']:
av_data = r.json()['data']['attributes']['last_analysis_results']
return av_data
else:
return None
else:
return None
| 32.572289 | 118 | 0.630294 | 688 | 5,407 | 4.832849 | 0.258721 | 0.050526 | 0.057744 | 0.07218 | 0.41594 | 0.355188 | 0.348271 | 0.321805 | 0.321805 | 0.295338 | 0 | 0.011879 | 0.2371 | 5,407 | 165 | 119 | 32.769697 | 0.792727 | 0.120769 | 0 | 0.330189 | 0 | 0.018868 | 0.281316 | 0.095329 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066038 | false | 0 | 0.056604 | 0 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46cd1e657ec4409874672e0022455763ebebbf47 | 1,868 | py | Python | CODES/S18 - Selenium WebDriver -_ Working With Web Elements/10-HiddenElements.py | PacktPublishing/-Selenium-WebDriver-With-Python-3.x---Novice-To-Ninja-v- | 7be863a0a9c8da7e31a413742da92c2fcfd0b38a | [
"MIT"
] | 11 | 2019-05-17T00:54:17.000Z | 2021-11-12T22:12:18.000Z | CODES/S18 - Selenium WebDriver -_ Working With Web Elements/10-HiddenElements.py | PacktPublishing/-Selenium-WebDriver-With-Python-3.x---Novice-To-Ninja-v- | 7be863a0a9c8da7e31a413742da92c2fcfd0b38a | [
"MIT"
] | null | null | null | CODES/S18 - Selenium WebDriver -_ Working With Web Elements/10-HiddenElements.py | PacktPublishing/-Selenium-WebDriver-With-Python-3.x---Novice-To-Ninja-v- | 7be863a0a9c8da7e31a413742da92c2fcfd0b38a | [
"MIT"
] | 12 | 2019-06-17T00:56:01.000Z | 2021-09-29T11:38:53.000Z | from selenium import webdriver
import time
class HiddenElements():
def testLetsKodeIt(self):
baseUrl = "https://letskodeit.teachable.com/pages/practice"
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(baseUrl)
driver.implicitly_wait(2)
# Find the state of the text box
textBoxElement = driver.find_element_by_id("displayed-text")
textBoxState = textBoxElement.is_displayed() # True if visible, False if hidden
# Exception if not present in the DOM
print("Text is visible? " + str(textBoxState))
time.sleep(2)
# Click the Hide button
driver.find_element_by_id("hide-textbox").click()
# Find the state of the text box
textBoxState = textBoxElement.is_displayed()
print("Text is visible? " + str(textBoxState))
time.sleep(2)
# Added code to scroll up because the element was hiding behind the top navigation menu
# You will learn about scrolling in future lecture
driver.execute_script("window.scrollBy(0, -150);")
# Click the Show button
driver.find_element_by_id("show-textbox").click()
# Find the state of the text box
textBoxState = textBoxElement.is_displayed()
print("Text is visible? " + str(textBoxState))
time.sleep(2)
# Browser Close
driver.quit()
def testExpedia(self):
baseUrl = "http://www.expedia.com"
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(baseUrl)
driver.implicitly_wait(3)
driver.find_element_by_id("tab-flight-tab").click()
drpdwnElement = driver.find_element_by_id("flight-age-select-1")
print("Element visible? " + str(drpdwnElement.is_displayed()))
ff = HiddenElements()
ff.testLetsKodeIt()
ff.testExpedia() | 33.963636 | 95 | 0.654711 | 223 | 1,868 | 5.376682 | 0.426009 | 0.041701 | 0.070892 | 0.079233 | 0.477064 | 0.42452 | 0.379483 | 0.359466 | 0.359466 | 0.323603 | 0 | 0.007092 | 0.245182 | 1,868 | 55 | 96 | 33.963636 | 0.843262 | 0.189507 | 0 | 0.428571 | 0 | 0 | 0.15492 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.057143 | 0 | 0.142857 | 0.114286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46cd3a5d74191e0c2f0c767df32bfdb8ff7e82a4 | 1,091 | py | Python | WPC_CSV2XLSX.py | ChenKuanSun/WPECCrawler | 9b98fe7da96768fe102c1c494281103cf66aa6ff | [
"MIT"
] | null | null | null | WPC_CSV2XLSX.py | ChenKuanSun/WPECCrawler | 9b98fe7da96768fe102c1c494281103cf66aa6ff | [
"MIT"
] | null | null | null | WPC_CSV2XLSX.py | ChenKuanSun/WPECCrawler | 9b98fe7da96768fe102c1c494281103cf66aa6ff | [
"MIT"
] | null | null | null | #CodeMod from Trinh Nguyen http://www.dangtrinh.com/2013/10/python-convert-csv-to-excel.html
import csv
import os
from openpyxl import Workbook
def csv_to_excel(csv_path, excel_path):
    csv_file = open(csv_path, encoding='utf-8-sig')  # utf-8-sig strips the BOM that Excel adds to Chinese CSVs
    csv.register_dialect('comma', delimiter=",")  # fields are comma-delimited
reader = csv.reader(csv_file, dialect='comma')
wb = Workbook()
ws = wb.worksheets[0]
for row_index, row in enumerate(reader):
for column_index, cell in enumerate(row):
            # openpyxl no longer accepts a coordinate string in ws.cell();
            # address the cell by explicit 1-based row/column indices instead.
            ws.cell(row=row_index + 1, column=column_index + 1).value = cell
wb.save(filename = excel_path)
csv_file.close()
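# Example use of the converter on its own (paths are hypothetical):
#
#     csv_to_excel('input/data.csv', 'output/data.xlsx')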
WPC_PATH = "./01_WPCOutput/"
csv_list = os.listdir(WPC_PATH)
for i, name in enumerate(csv_list, start=1):
    print("{}. {}".format(i, name))
cp = WPC_PATH + csv_list[int(input("Which file is the CSV? Enter its number:\n")) - 1]  # int() is safer than eval() on user input
while cp.find(".csv") < 0:
    cp = WPC_PATH + csv_list[int(input("That is not a CSV file! Which one?\n")) - 1]
print("Loading file...." + cp)
ep = WPC_PATH+'myexcel.xlsx'
csv_to_excel(cp, ep)
print("Complete file>" + ep)
| 34.09375 | 92 | 0.71769 | 186 | 1,091 | 4.037634 | 0.424731 | 0.046605 | 0.039947 | 0.04261 | 0.066578 | 0.066578 | 0.066578 | 0 | 0 | 0 | 0 | 0.018809 | 0.122823 | 1,091 | 31 | 93 | 35.193548 | 0.765935 | 0.109074 | 0 | 0 | 0 | 0 | 0.144479 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.137931 | 0 | 0.172414 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46d17d184f4c0040c0f1034f4461fae1fd05d6e6 | 1,103 | py | Python | {{cookiecutter.company_name}}-cli/setup.py | farooq-teqniqly/cli-cookiecutter | e9a8262d5f11d6bfc313e906fad4e683950e52c3 | [
"MIT"
] | null | null | null | {{cookiecutter.company_name}}-cli/setup.py | farooq-teqniqly/cli-cookiecutter | e9a8262d5f11d6bfc313e906fad4e683950e52c3 | [
"MIT"
] | null | null | null | {{cookiecutter.company_name}}-cli/setup.py | farooq-teqniqly/cli-cookiecutter | e9a8262d5f11d6bfc313e906fad4e683950e52c3 | [
"MIT"
] | null | null | null | import setuptools
import os
# Change to the setup.py directory to read files relative to it.
cwd = os.getcwd()
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="{{cookiecutter.company_name}}-CLI",
    version="1.0",  # setuptools expects the version as a string
author="{{cookiecutter.company_name}}",
author_email="{{cookiecutter.author_email}}",
description="{{cookiecutter.cli_description}}",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_namespace_packages(exclude=("tests",)),
include_package_data=True,
install_requires=["click==7.1.2"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
entry_points={"console_scripts": ["{{cookiecutter.cli_executable_name}}={{cookiecutter.company_name}}software.cli.main:cli"]}
)
# Pop back to the original directory.
os.chdir(cwd)
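# A typical local workflow for the generated package (not prescribed by this
# template):
#
#     pip install .
#     {{cookiecutter.cli_executable_name}} --help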
| 31.514286 | 129 | 0.700816 | 135 | 1,103 | 5.533333 | 0.585185 | 0.080321 | 0.092369 | 0.072289 | 0.074967 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008529 | 0.149592 | 1,103 | 34 | 130 | 32.441176 | 0.787846 | 0.088849 | 0 | 0 | 0 | 0 | 0.376248 | 0.209581 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46d1b76cc358cb882e976013873aa0558d4d7221 | 2,382 | py | Python | tests/__init__.py | deniskorobicyn/kozmic-ci | 0af754b81891722824c6bea85154590f15931030 | [
"BSD-3-Clause"
] | 1 | 2021-06-05T18:36:13.000Z | 2021-06-05T18:36:13.000Z | tests/__init__.py | deniskorobicyn/kozmic-ci | 0af754b81891722824c6bea85154590f15931030 | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | deniskorobicyn/kozmic-ci | 0af754b81891722824c6bea85154590f15931030 | [
"BSD-3-Clause"
] | null | null | null | import os
import collections
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.webtest import TestApp, get_scopefunc
from kozmic import create_app, db
from . import factories
class SQLAlchemyMixin(object):
@property
def db(self):
return self.app.extensions['sqlalchemy'].db
def create_database(self):
self.db.session = self.db.create_scoped_session({
'scopefunc': get_scopefunc(),
})
self.db.create_all()
def drop_database(self):
self.db.drop_all()
class SQLAlchemyFixtureMixin(object):
def get_fixtures(self):
return getattr(self, 'FIXTURES', [])
def load_fixtures(self):
for fixture in self.get_fixtures():
if callable(fixture):
models_to_merge = fixture()
if isinstance(models_to_merge, db.Model):
models_to_merge = [models_to_merge]
elif isinstance(fixture, collections.Iterable):
models_to_merge = fixture
elif isinstance(fixture, self.db.Model):
models_to_merge = [fixture]
else:
raise Exception(
'Don\'t know how to handle fixture of {} type: {}.'.format(
type(fixture), fixture))
for model in models_to_merge:
self.db.session.merge(model)
self.db.session.commit()
self.db.session.remove()
class WebTestMixin(object):
def create_app(self):
config = os.environ.get('KOZMIC_CONFIG', 'kozmic.config.TestingConfig')
return create_app(config)
def setup_app_and_ctx(self):
self.app = self.create_app()
self.ctx = self.app.app_context()
self.ctx.push()
self.w = TestApp(self.app)
def teardown_app_and_ctx(self):
self.ctx.pop()
def login(self, user_id):
with self.w.session_transaction() as sess:
sess['user_id'] = user_id
class TestCase(WebTestMixin, SQLAlchemyMixin, SQLAlchemyFixtureMixin):
def setup_method(self, method):
self.setup_app_and_ctx()
self.drop_database()
self.create_database()
factories.setup(self.db.session)
self.load_fixtures()
def teardown_method(self, method):
self.db.session.rollback()
factories.reset()
self.teardown_app_and_ctx()
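# A minimal sketch of how a concrete test module might build on TestCase
# (the factory and route names are hypothetical):
#
#     class TestHome(TestCase):
#         FIXTURES = [lambda: factories.UserFactory.build()]
#
#         def test_index_works(self):
#             response = self.w.get('/', status='*')
#             assert response.status_code in (200, 302)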
| 29.407407 | 79 | 0.617968 | 278 | 2,382 | 5.107914 | 0.291367 | 0.042254 | 0.064085 | 0.042254 | 0.06831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.281276 | 2,382 | 80 | 80 | 29.775 | 0.829439 | 0 | 0 | 0 | 0 | 0 | 0.032746 | 0.011335 | 0 | 0 | 0 | 0 | 0 | 1 | 0.174603 | false | 0 | 0.095238 | 0.031746 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46d2957beb8300483d8b056de5349aadf1799cfd | 6,046 | py | Python | coding/python/tkinter_canvas_image.py | jujumo/memento | 9879c74d7b9c64ba2e2a1d8bae20e2d353ccd7bd | [
"MIT"
] | 1 | 2019-08-05T17:53:33.000Z | 2019-08-05T17:53:33.000Z | coding/python/tkinter_canvas_image.py | jujumo/memento | 9879c74d7b9c64ba2e2a1d8bae20e2d353ccd7bd | [
"MIT"
] | null | null | null | coding/python/tkinter_canvas_image.py | jujumo/memento | 9879c74d7b9c64ba2e2a1d8bae20e2d353ccd7bd | [
"MIT"
] | null | null | null | import argparse
import logging
import sys
import numpy as np
from PIL import Image, ImageTk
import tkinter as tk
logger = logging.getLogger('tkcanvasimage')
logger.addHandler(logging.StreamHandler(sys.stdout))
class TkDrawable:
def __init__(self, scaling_factor=1.0, position=[0, 0]):
self._canvas = None
self._canvas_id = []
self._scaling_factor = scaling_factor
self._position = position
def bind_canvas(self, canvas):
self._canvas = canvas
self.scaling_factor = canvas.scaling_factor
@property
def scaling_factor(self):
return self._scaling_factor
@scaling_factor.setter
def scaling_factor(self, scaling_factor):
self._scaling_factor = scaling_factor
@property
def position(self):
return self._position
@position.setter
def position(self, position):
self._position = position
def clear(self):
for cid in self._canvas_id:
self._canvas.delete(cid)
self._canvas_id = []
def draw(self):
raise NotImplementedError()
def redraw(self):
self.clear()
self.draw()
class TkImage(TkDrawable):
def __init__(self, scale=1.0, position=[0, 0]):
super().__init__(scale, position)
self._image_pil = None
self._image_tk = None
@property
def resolution(self):
image_size = [int(j) for j in self._image_pil.size]
return image_size
@property
def apparent_size(self):
if self._image_pil is None:
return None
scaled_image_size = [int(j * self.scaling_factor) for j in self._image_pil.size]
return scaled_image_size
def from_pil(self, image_pil):
self._image_pil = image_pil
self.redraw()
return self
def from_numpy(self, image_np):
if not isinstance(image_np, np.ndarray):
raise ValueError()
""" expect numpy image """
image = image_np[:, :, [2, 1, 0]] # BGR -> RGB
image = Image.fromarray(image)
self.from_pil(image)
def draw(self):
""" rasterize _image_tk at the given scale """
try:
if self._canvas is None:
raise ValueError
if self._image_pil is None:
raise ValueError
assert self._canvas is not None
apparent_size = self.apparent_size
scaled_image = self._image_pil.resize(apparent_size)
self._image_tk = ImageTk.PhotoImage(scaled_image)
image_id = self._canvas.create_image(*self.position, image=self._image_tk, anchor='nw')
self._canvas_id.append(image_id)
except ValueError:
self._image_tk = None
self.clear()
class TkScatter(TkDrawable):
def __init__(self, radius=5, scale=1.0, position=[0, 0]):
super().__init__(scale, position)
self._radius = radius
self._data = None
def from_numpy(self, data_np):
        self._data = data_np.astype(float)  # the np.float alias was removed from NumPy; the builtin is equivalent
return self
def draw(self):
try:
if self._canvas is None:
raise ValueError
coords = self._data.transpose()
bb = np.hstack([coords-self._radius, coords+self._radius])
bb *= self.scaling_factor
color = 'chartreuse'
for coord in bb:
c = coord.astype(int).tolist()
point_id = self._canvas.create_oval(*c, outline=color, fill=color)
self._canvas_id.append(point_id)
except ValueError:
self.clear()
class CanvasImageDisplay(tk.Canvas):
MAGNIFICATION_WHEEL = 1.1
item_count = 0
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
all(self.bind(e, self._on_wheel) for e in ['<Button-4>', '<Button-5>', '<MouseWheel>'])
all(self.bind(e, self._on_drag) for e in ['<Button-1>', '<ButtonRelease-1>', '<B1-Motion>'])
self.bind("<Configure>", self.on_resize)
self._scaling_factor = 1.0
        self._drawables = {}  # maps item id -> TkDrawable
def _on_wheel(self, event):
factor = 1.0
if self.scaling_factor > 0.2 and (event.num == 5 or event.delta == -120):
factor = 1. / self.MAGNIFICATION_WHEEL
elif self._scaling_factor < 8.0 and (event.num == 4 or event.delta == 120):
factor = self.MAGNIFICATION_WHEEL
self.zoom(factor)
def _on_drag(self, event):
if tk.EventType.ButtonPress == event.type:
# self._viewport['drag_from'] = np.array([event.x, event.y])
self.scan_mark(event.x, event.y)
elif tk.EventType.ButtonRelease == event.type:
pass
elif tk.EventType.Motion == event.type:
self.scan_dragto(event.x, event.y, gain=1)
def on_resize(self, event):
pass
    def to_image_coord(self, pixel_coord):
        # NOTE: assumes an image drawable exposing a `scale` attribute has been
        # attached as self._image (nothing in this class sets it yet).
        return [int(x / self._image.scale) for x in pixel_coord]

    def from_image_coord(self, image_coord):
        return [int(x * self._image.scale) for x in image_coord]
@property
def scaling_factor(self):
return self._scaling_factor
def zoom(self, magnification):
# magnification = scaling_factor / self._scaling_factor
self._scaling_factor *= magnification
super().scale('all', 0, 0, magnification, magnification)
for drawable in self._drawables.values():
drawable.scaling_factor = self.scaling_factor
self.redraw()
def add_drawable(self, drawable):
drawable.bind_canvas(self)
item_id = self.item_count
self._drawables[item_id] = drawable
self.item_count += 1
self.redraw(item_id)
return item_id
def redraw(self, item_id='all'):
# draw image
        logger.debug('redraw %s', item_id)
if item_id == 'all':
for drawable in self._drawables.values():
drawable.redraw()
elif item_id in self._drawables:
self._drawables[item_id].redraw()
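# A minimal usage sketch (the image path is hypothetical): wire the canvas and
# a drawable together inside a Tk root window. The drawable is added first so
# it is bound to the canvas before its first redraw.
#
#     if __name__ == '__main__':
#         root = tk.Tk()
#         canvas = CanvasImageDisplay(root, width=640, height=480)
#         canvas.pack(fill='both', expand=True)
#         img = TkImage()
#         canvas.add_drawable(img)
#         img.from_pil(Image.open('example.png'))
#         root.mainloop()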
| 30.846939 | 100 | 0.610155 | 754 | 6,046 | 4.644562 | 0.192308 | 0.089092 | 0.072816 | 0.034266 | 0.23301 | 0.191319 | 0.159909 | 0.137065 | 0.073672 | 0.044546 | 0 | 0.010405 | 0.284651 | 6,046 | 195 | 101 | 31.005128 | 0.799306 | 0.032087 | 0 | 0.287582 | 0 | 0 | 0.0215 | 0 | 0 | 0 | 0 | 0 | 0.006536 | 1 | 0.183007 | false | 0.013072 | 0.039216 | 0.03268 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46d4677b37669cdaf6bf286e4bbbce299572bd25 | 1,485 | py | Python | oddsgym/envs/meta.py | OryJonay/soccer_odds_env | b69401518003099ca355c812f2b26775abc25754 | [
"Apache-2.0"
] | 11 | 2020-03-10T10:13:53.000Z | 2022-02-06T19:06:27.000Z | oddsgym/envs/meta.py | OryJonay/soccer_odds_env | b69401518003099ca355c812f2b26775abc25754 | [
"Apache-2.0"
] | 1 | 2020-04-11T14:14:17.000Z | 2020-04-11T14:14:17.000Z | oddsgym/envs/meta.py | OryJonay/soccer_odds_env | b69401518003099ca355c812f2b26775abc25754 | [
"Apache-2.0"
] | 1 | 2020-10-05T01:20:28.000Z | 2020-10-05T01:20:28.000Z | from .base import BaseOddsEnv
from .base_percentage import BasePercentageOddsEnv
from .daily_bets import DailyOddsEnv, DailyPercentageOddsEnv
class MetaEnvBuilder(type):
def __new__(cls, name, bases, attr):
def safe_get(attribute):
if attribute in attr:
return attr[attribute]
for base in bases:
if getattr(base, attribute, None):
return getattr(base, attribute)
new_bases = list(bases)
percentage_env, daily_env = 'Percentage' in name, 'Daily' in name
docstring = (f'Environment for {safe_get("sport")} betting{", grouped by date," if daily_env else ""}'
f' with a {"non fixed" if percentage_env else "fixed"} bet size.\n\n '
f'.. versionadded:: {safe_get("versionadded")}\n ')
if percentage_env and daily_env:
new_bases += [DailyPercentageOddsEnv]
elif daily_env:
new_bases += [DailyOddsEnv]
elif percentage_env:
new_bases += [BasePercentageOddsEnv]
else:
new_bases += [BaseOddsEnv]
attr['percentage_env'] = percentage_env
attr['daily_env'] = daily_env
attr['odds_columns'] = safe_get('odds_column_names') + ['date'] * daily_env
attr['odds_column_names'] = safe_get('odds_column_names')
attr['__doc__'] = docstring
return super(MetaEnvBuilder, cls).__new__(cls, name, tuple(new_bases), attr)
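# A sketch of how the metaclass assembles an environment class from its name
# (the attribute values are illustrative, not taken from a real subclass):
#
#     class SoccerDailyPercentageOddsEnv(metaclass=MetaEnvBuilder):
#         sport = 'soccer'
#         versionadded = '0.1.0'
#         odds_column_names = ['home', 'draw', 'away']
#
# The 'Daily' and 'Percentage' tokens in the class name select the base
# environment, and the class docstring is generated automatically.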
| 43.676471 | 110 | 0.617508 | 166 | 1,485 | 5.253012 | 0.337349 | 0.06422 | 0.037844 | 0.036697 | 0.050459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.280808 | 1,485 | 33 | 111 | 45 | 0.816479 | 0 | 0 | 0 | 0 | 0 | 0.214141 | 0.018855 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46dce8abd822089ad9a46a0110574671f9f528d4 | 1,133 | py | Python | auto/appium/demo.py | wuhongnpm/Gardenia | a07b34ddfff8c058b5bfc9ee5832e59c86e7e276 | [
"MIT"
] | 1 | 2019-05-01T08:03:06.000Z | 2019-05-01T08:03:06.000Z | basic/auto/appium/demo.py | wuhongnpm/Python | 1b0d576c8c04db6214b627bbe5530643b1f85da0 | [
"MIT"
] | null | null | null | basic/auto/appium/demo.py | wuhongnpm/Python | 1b0d576c8c04db6214b627bbe5530643b1f85da0 | [
"MIT"
] | null | null | null | # This sample code uses the Appium python client
# pip install Appium-Python-Client
# Then you can paste this into a file and simply run with Python
#adb shell dumpsys window | findstr mCurrentFocus
#adb shell getprop ro.product.model
from appium import webdriver
caps = {}
caps["platformName"] = "Android"
caps["deviceName"] = "CJ-JD-70"
caps["appPackage"] = "com.ccl.appstore"
caps["appActivity"] = "com.ccl.appstore.Main2Activity"
caps["autoGrantPermissions"] = True
driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
# Add an implicit wait
driver.implicitly_wait(1)
"""
# Adding a TouchAction gesture:
TouchAction(driver).long_press().move_to().release().perform();
driver.swipe()
"""
el1 = driver.find_element_by_xpath("//android.widget.Button[@content-desc=\"Subscriptions\"]/android.widget.ImageView")
el1.click()
el2 = driver.find_element_by_xpath("//android.widget.Button[@content-desc=\"Library\"]/android.widget.ImageView")
el2.click()
el3 = driver.find_element_by_accessibility_id("Search")
el3.click()
el4 = driver.find_element_by_id("com.google.android.youtube:id/search_edit_text")
el4.click()
el4.send_keys("wuhong")
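# A more robust alternative to the fixed implicit wait above is an explicit
# wait for a specific element; a sketch (the MobileBy import path varies with
# the Appium client version, and the locator is reused from above for
# illustration):
#
#     from selenium.webdriver.support.ui import WebDriverWait
#     from selenium.webdriver.support import expected_conditions as EC
#     from appium.webdriver.common.mobileby import MobileBy
#
#     WebDriverWait(driver, 10).until(
#         EC.presence_of_element_located((MobileBy.ACCESSIBILITY_ID, "Search")))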
driver.quit() | 31.472222 | 119 | 0.761695 | 156 | 1,133 | 5.410256 | 0.615385 | 0.047393 | 0.080569 | 0.090047 | 0.127962 | 0.127962 | 0.127962 | 0.127962 | 0.127962 | 0.127962 | 0 | 0.016362 | 0.082966 | 1,133 | 36 | 120 | 31.472222 | 0.795958 | 0.203001 | 0 | 0 | 0 | 0 | 0.427136 | 0.258794 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46e01dbf680d02bed46b7756ac1cd294cfc51cef | 2,589 | py | Python | weallcode/forms.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 15 | 2019-05-04T00:24:00.000Z | 2021-08-21T16:34:05.000Z | weallcode/forms.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 73 | 2019-04-24T15:53:42.000Z | 2021-08-06T20:41:41.000Z | weallcode/forms.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 20 | 2019-04-26T20:13:08.000Z | 2021-06-21T14:53:21.000Z | from django import forms
from django.conf import settings
from captcha.fields import ReCaptchaField
from captcha.widgets import ReCaptchaV3
from coderdojochi.util import email
class ContactForm(forms.Form):
widths = (
("name", "small-6"),
("email", "small-6"),
("interest", "small-6"),
("phone", "small-6"),
("message", "small-12"),
("captcha", ""),
)
captcha = ReCaptchaField(
widget=ReCaptchaV3,
)
name = forms.CharField(
max_length=100,
label="Full Name",
)
email = forms.EmailField(
max_length=200,
label="Email Address",
widget=forms.TextInput(
attrs={
"type": "email",
"placeholder": "email@example.com",
},
),
)
interest = forms.ChoiceField(
choices=[
("volunteer", "Volunteer"),
("donate", "Donate"),
("sponsor", "Sponsor"),
("collaborate", "Collaborate"),
("other", "Other"),
],
label="Topic of Interest",
)
phone = forms.CharField(
max_length=20,
label="Phone Number",
widget=forms.TextInput(
attrs={
"type": "tel",
"placeholder": "+1 555 555-5555",
"minlength": "10",
"maxlength": "20",
},
),
)
message = forms.CharField(
label="Message",
widget=forms.Textarea(
attrs={
"placeholder": "Enter your message",
"minlength": 25,
"maxlength": 500,
},
),
)
def as_grid(self):
return "".join([self.field_html(f[0], f[1]) for f in self.widths])
def field_html(self, field_name, field_classes):
field = self[field_name]
if field_classes == "":
return f"{field}{field.errors}"
return f"<div class='cell {field_classes}'>{field.label_tag()}{field}{field.errors}</div>"
def send_email(self):
# send email using the self.cleaned_data dictionary
data = self.cleaned_data
email(
subject=f"{data['name']} | We All Code Contact Form",
recipients=[settings.CONTACT_EMAIL],
reply_to=[f"{data['name']}<{data['email']}>"],
template_name="contact_email",
merge_global_data={
"interest": data["interest"],
"message": data["message"],
"phone": data["phone"],
},
)
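    # A minimal sketch of wiring this form into a view (the view, route and
    # template names are hypothetical):
    #
    #     def contact_view(request):
    #         form = ContactForm(request.POST or None)
    #         if request.method == 'POST' and form.is_valid():
    #             form.send_email()
    #             return redirect('home')
    #         return render(request, 'contact.html', {'form': form})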
| 25.89 | 98 | 0.499034 | 241 | 2,589 | 5.273859 | 0.410788 | 0.018883 | 0.026751 | 0.036192 | 0.045633 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022754 | 0.354963 | 2,589 | 99 | 99 | 26.151515 | 0.738323 | 0.018926 | 0 | 0.096386 | 0 | 0 | 0.22498 | 0.045311 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036145 | false | 0 | 0.060241 | 0.012048 | 0.228916 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46e0903e5bdf7bf970df5689667d22a05b2773ae | 4,411 | py | Python | examples/ex4_mathsop/myhdl/construct.py | cfelton/alt.hdl | 80cdefd20d4fd46e3a1b6116d4b2090135fe1cdf | [
"MIT"
] | 19 | 2015-01-01T18:37:28.000Z | 2021-11-26T14:33:37.000Z | examples/ex4_mathsop/myhdl/construct.py | cfelton/alt.hdl | 80cdefd20d4fd46e3a1b6116d4b2090135fe1cdf | [
"MIT"
] | null | null | null | examples/ex4_mathsop/myhdl/construct.py | cfelton/alt.hdl | 80cdefd20d4fd46e3a1b6116d4b2090135fe1cdf | [
"MIT"
] | 1 | 2017-07-04T13:15:17.000Z | 2017-07-04T13:15:17.000Z |
from myhdl import *
ggens = []
gclock = None #Signal(bool(0))
greset = None #ResetSignal(0, active=0, async=True)
def init(clock=None, reset=None):
global ggens,gclock,greset
gclock,greset = clock,reset
ggens = []
return ggens
def end(g=None, dump=False):
global ggens
if dump:
for gg in ggens:
print(" %s -> %s : %s" % (gg.func.func_code.co_name,
gg.func.func_code.co_varnames,
gg.objlist))
g = ggens
# @todo: need global gens stack
ggens = None
# @todo: ???? do some checking ????
return g
#=========================================#
def _m_mul(x, y, z):
@always_comb
def rtl_mul():
z.next = x * y
return rtl_mul
def _m_add(x, y, z):
@always_comb
def rtl_add():
z.next = x + y
return rtl_add
def _m_dff(x, y, load=None, clock=None, reset=None):
# @todo: if we really want this to be "construction"
# this should use low-level primitives, the
# behavioral description can be used for simulation
# but otherwise dff should be used?
    global gclock, greset
clock = gclock if clock is None else clock
reset = greset if reset is None else reset
@always_seq(clock.posedge, reset=reset)
def rtl_dff_load():
if load: y.next = x
@always_seq(clock.posedge, reset=reset)
def rtl_dff():
y.next = x
g = rtl_dff if load is None else rtl_dff_load
return g
#=========================================#
class Wire(object):
def __init__(self, val):
assert isinstance(val, (SignalType, Reg, Wire))
_val = val if isinstance(val, SignalType) else val.d
self.d = Signal(_val.val)
def __add__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = 2*_max
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_add(self.d, od, z.d)
ggens.append(g)
return z
def __call__(self):
return self.d
def __radd__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = 2*_max
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_add(self.d, od, z.d)
ggens.append(g)
return z
def __mul__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = _max**2
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_mul(self.d, od, z.d)
ggens.append(g)
return z
#=========================================#
#=========================================#
class Reg(object):
# @todo: init=None, next=None
def __init__(self, next=None):
# @todo: if it is None it will need to be assigned
# later with @when decorator construct.when
# if it is None no generator created
if next is None:
self._load = Signal(bool(1))
else:
assert isinstance(next, (SignalType, Reg, Wire))
_next = next if isinstance(next, SignalType) else next.d
self.d = Signal(_next.val)
# @todo _when signal
g = _m_dff(_next, self.d)
ggens.append(g)
def __call__(self):
return self.d
def __add__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = 2*_max
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_add(self.d, od, z.d)
ggens.append(g)
return z
def __mul__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = _max**2
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_mul(self.d, od, z.d)
ggens.append(g)
return z
# @todo when decarator
# y = Reg(x)
# @y.when
# def action():
# if x > 0:
# y.update()
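# A sketch of driving the construction helpers end to end (signal widths are
# illustrative):
#
#     clock = Signal(bool(0))
#     reset = ResetSignal(0, active=0, async=True)
#     init(clock, reset)
#     x = Wire(Signal(intbv(0, min=-8, max=8)))
#     y = Reg(x * 3 + 1)   # builds a multiplier, an adder and a register
#     gens = end()         # collect the generated MyHDL generators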
| 28.642857 | 69 | 0.535706 | 614 | 4,411 | 3.679153 | 0.188925 | 0.058433 | 0.027888 | 0.034529 | 0.490483 | 0.476317 | 0.462151 | 0.425852 | 0.425852 | 0.390438 | 0 | 0.004926 | 0.30968 | 4,411 | 153 | 70 | 28.830065 | 0.736946 | 0.169803 | 0 | 0.542857 | 0 | 0 | 0.004129 | 0 | 0 | 0 | 0 | 0.006536 | 0.066667 | 1 | 0.171429 | false | 0 | 0.009524 | 0.019048 | 0.314286 | 0.009524 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46e21454a078b2e8ade7f2191dc1b15307690c29 | 5,458 | py | Python | torchvision/datasets/cifar.py | felixgwu/vision | d5eab760e60bc662961faa08a3a17deaa65d2c75 | [
"BSD-3-Clause"
] | 4 | 2018-07-22T19:20:49.000Z | 2019-04-30T01:28:58.000Z | torchvision/datasets/cifar.py | felixgwu/vision | d5eab760e60bc662961faa08a3a17deaa65d2c75 | [
"BSD-3-Clause"
] | null | null | null | torchvision/datasets/cifar.py | felixgwu/vision | d5eab760e60bc662961faa08a3a17deaa65d2c75 | [
"BSD-3-Clause"
] | 2 | 2019-04-30T01:29:02.000Z | 2019-05-01T07:36:23.000Z | from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import os
import os.path
import errno
import numpy as np
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
class CIFAR10(data.Dataset):
base_folder = 'cifar-10-batches-py'
url = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
self.root = root
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# now load the picked numpy arrays
if self.train:
self.train_data = []
self.train_labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.train_data.append(entry['data'])
if 'labels' in entry:
self.train_labels += entry['labels']
else:
self.train_labels += entry['fine_labels']
fo.close()
self.train_data = np.concatenate(self.train_data)
self.train_data = self.train_data.reshape((50000, 3, 32, 32))
else:
f = self.test_list[0][0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
def __getitem__(self, index):
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
return 50000
else:
return 10000
def _check_integrity(self):
import hashlib
root = self.root
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not os.path.isfile(fpath):
return False
md5c = hashlib.md5(open(fpath, 'rb').read()).hexdigest()
if md5c != md5:
return False
return True
def download(self):
from six.moves import urllib
import tarfile
import hashlib
root = self.root
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
if self._check_integrity():
print('Files already downloaded and verified')
return
# downloads file
if os.path.isfile(fpath) and \
hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.tgz_md5:
print('Using downloaded file: ' + fpath)
else:
print('Downloading ' + self.url + ' to ' + fpath)
urllib.request.urlretrieve(self.url, fpath)
# extract file
cwd = os.getcwd()
print('Extracting tar file')
tar = tarfile.open(fpath, "r:gz")
os.chdir(root)
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
class CIFAR100(CIFAR10):
base_folder = 'cifar-100-python'
url = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
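# Typical usage with a DataLoader (the root path is hypothetical):
#
#     import torchvision.transforms as transforms
#     trainset = CIFAR10(root='./data', train=True, download=True,
#                        transform=transforms.ToTensor())
#     loader = data.DataLoader(trainset, batch_size=64, shuffle=True)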
| 32.105882 | 96 | 0.563576 | 617 | 5,458 | 4.858995 | 0.277147 | 0.051034 | 0.030354 | 0.018679 | 0.288526 | 0.217145 | 0.172782 | 0.100734 | 0.080053 | 0.080053 | 0 | 0.075662 | 0.329241 | 5,458 | 169 | 97 | 32.295858 | 0.74324 | 0.030597 | 0 | 0.271429 | 0 | 0.014286 | 0.158592 | 0.069076 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0.007143 | 0.1 | 0 | 0.285714 | 0.042857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46e27dfea306c481f9189b43bf419b95084d4d77 | 1,780 | py | Python | project/modules/modules_helpers.py | MattiaPeiretti/MCDAS | 11a12df305949d49201af26c0d71d48be6fcb545 | [
"CC0-1.0"
] | null | null | null | project/modules/modules_helpers.py | MattiaPeiretti/MCDAS | 11a12df305949d49201af26c0d71d48be6fcb545 | [
"CC0-1.0"
] | null | null | null | project/modules/modules_helpers.py | MattiaPeiretti/MCDAS | 11a12df305949d49201af26c0d71d48be6fcb545 | [
"CC0-1.0"
] | null | null | null | import os
import multiprocessing
from tqdm import tqdm
# Custom Modules
import constants
import formulas
from dataHandler import dataHandler
from settingsHandler import SettingsHandler
from mcd_interface import MCDInterface
settings_handler = SettingsHandler()
mcd_interface = MCDInterface(settings_handler.get_setting("MCD_BASELINK"))
data_hander = dataHandler()
def load_dataset(dataset_dir):
data = {}
for filename in os.scandir(dataset_dir):
if filename.is_file():
current_file_data = []
if filename.name.endswith(".csv"):
file = str(dataset_dir + filename.name)
for row in data_hander.read_matrix_from_csv(file):
current_file_data.append([row[0], row[1], row[2]])
data[filename.name] = current_file_data
print("Read ", len(data), "files.")
return data
def execute_parallel(func, data_to_process, workers_amount=10):
with multiprocessing.Pool(workers_amount) as p:
print("Converting...")
result = list(tqdm(p.imap(func, data_to_process), total=len(data_to_process)))
return result
class workers:
# This class contains all of the different workers needed for the parallel working.
@staticmethod
def worker_k2w_converter(data):
lat = data[0]
lon = data[1]
k_value = data[2]
w_data = formulas.convert_K2W(k_value)
return (lat, lon, k_value, w_data)
@staticmethod
def worker_w_avg_calculator(data):
w2_tsurfmn = data[4]
w2_tsurfmx = data[5]
return (w2_tsurfmx + w2_tsurfmn) / 2
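# A sketch of combining the loader, a worker and the parallel runner
# (the directory name is hypothetical):
#
#     dataset = load_dataset('data/raw/')
#     for filename, rows in dataset.items():
#         converted = execute_parallel(workers.worker_k2w_converter, rows)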
def find_indexes(array, coord):
    # Return the index of the first entry matching (lat, lon), or None if absent.
    for count, item in enumerate(array):
        if item[0] == coord[0] and item[1] == coord[1]:
            return count
| 29.180328 | 87 | 0.664045 | 230 | 1,780 | 4.934783 | 0.426087 | 0.026432 | 0.039648 | 0.03348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015556 | 0.241573 | 1,780 | 60 | 88 | 29.666667 | 0.825185 | 0.053933 | 0 | 0.043478 | 0 | 0 | 0.023795 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.173913 | 0 | 0.413043 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46e3fc7bf3794e3fe125f3281b40c8afefb840e4 | 8,160 | py | Python | backend/data/connection.py | jiangyy12/application-tracking-system | 6de2d98351df65d43c09a5739c3c3dbdf3bf78d3 | [
"MIT"
] | 1 | 2021-10-17T16:00:20.000Z | 2021-10-17T16:00:20.000Z | backend/data/connection.py | jiangyy12/application-tracking-system | 6de2d98351df65d43c09a5739c3c3dbdf3bf78d3 | [
"MIT"
] | 14 | 2021-11-01T17:34:51.000Z | 2021-11-16T02:48:03.000Z | backend/data/connection.py | jiangyy12/application-tracking-system | 6de2d98351df65d43c09a5739c3c3dbdf3bf78d3 | [
"MIT"
] | 3 | 2021-11-01T18:00:49.000Z | 2021-11-16T19:57:26.000Z | import mysql.connector as conn
from mysql.connector import errorcode
Connection = conn.connect(
host="localhost",
port="3306",
user="root",
password="",
database="applicationtrackingsystem"
)
print("Connect to the local database outside method success!")
def connect():
try:
Connection = conn.connect(
host="localhost",
port="3306",
user="root",
password="",
database="applicationtrackingsystem"
)
print("Connect to the local database success!")
return Connection
except conn.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
def query():
try:
# Connection = connect()
# try:
# with open('../database/SET_DATABASE.sql', 'r') as f:
# with Connection.cursor() as cursor:
# cursor.execute(f.read(), multi=True)
# Connection.commit()
# print("Sourcing .sql file succeed!")
# except:
# print("Sourcing .sql file failed!")
query = "SELECT jobName, jobCompany, updateTime, applyStatus, job.jobId " \
"FROM job, users, application " \
"WHERE users.userId=application.userId AND job.jobId=application.jobId;"
cursor = Connection.cursor()
cursor.execute(query)
results = cursor.fetchall()
for row in results:
jobName = row[0]
jobCompany = row[1]
updateTime = row[2]
applyStatus = row[3]
jobId = row[4]
print("jobName=%s, jobCompany=%s, updateTime=%s, applyStatus=%s, jobId=%s"
% (jobName, jobCompany, updateTime, applyStatus, jobId))
return results
except conn.Error as err:
print("Query failed! Error number is: %s" %err.errno)
Connection.close()
def query_groupByCompany():
try:
# Connection = connect()
# try:
# with open('../database/SET_DATABASE.sql', 'r') as f:
# with Connection.cursor() as cursor:
# cursor.execute(f.read(), multi=True)
# Connection.commit()
# print("Sourcing .sql file succeed!")
# except:
# print("Sourcing .sql file failed!")
query = "SELECT jobCompany, count(case when applyStatus = 2 then 1 end) as Waiting," \
"count(case when applyStatus = 3 then 1 end) as Offer," \
"count(case when applyStatus = 4 then 1 end) as Rejected " \
"FROM job, application " \
"WHERE job.jobId = application.jobId " \
"GROUP BY jobCompany;"
cursor = Connection.cursor()
cursor.execute(query)
results = cursor.fetchall()
for row in results:
companyName = row[0]
Waiting = row[1]
Offer = row[2]
Rejected = row[3]
print("companyName=%s, Waiting=%s, Offer=%s, Rejected=%s"
% (companyName, Waiting, Offer, Rejected))
return results
except conn.Error as err:
print("Query failed! Error number is: %s" %err.errno)
Connection.close()
def querySchool():
try:
# Connection = connect()
# try:
# with open('../database/SET_DATABASE.sql', 'r') as f:
# with Connection.cursor() as cursor:
# cursor.execute(f.read(), multi=True)
# Connection.commit()
# print("Sourcing .sql file succeed!")
# except:
# print("Sourcing .sql file failed!")
query = "SELECT programName, programSchool, updateTime, applyStatus, program.programId " \
"FROM program, users, school " \
"WHERE users.userId=school.userId AND program.programId=school.programId;"
cursor = Connection.cursor()
cursor.execute(query)
results = cursor.fetchall()
for row in results:
programName = row[0]
programSchool = row[1]
updateTime = row[2]
applyStatus = row[3]
programId = row[4]
print("programName=%s, programSchool=%s, updateTime=%s, applyStatus=%s, programId=%s"
% (programName, programSchool, updateTime, applyStatus, programId))
return results
except conn.Error as err:
print("School Query failed! Error number is: %s" %err.errno)
Connection.close()
def queryItem():
try:
query = "SELECT jobName, jobCompany, commentTime, itemContent, job.jobId " \
"FROM job, users, item " \
"WHERE users.userId=item.userId AND job.jobId=item.jobId;"
cursor = Connection.cursor()
cursor.execute(query)
results = cursor.fetchall()
for row in results:
jobName = row[0]
jobCompany = row[1]
commentTime = row[2]
itemContent = row[3]
jobId = row[4]
print("jobName=%s, jobCompany=%s, commentTime=%s, itemContent=%s, jobId=%s"
% (jobName, jobCompany, commentTime, itemContent, jobId))
return results
except conn.Error as err:
print("Item Query failed! Error number is: %s" %err.errno)
Connection.close()
def count():
try:
query = "SELECT COUNT(*) FROM job;"
cursor = Connection.cursor()
cursor.execute(query)
result = cursor.fetchall()
for row in result:
count = row[0] + 1
return count
except conn.Error as err:
print("Query failed! Error number is: %s" % err.errno)
def countProgram():
try:
query = "SELECT COUNT(*) FROM program;"
cursor = Connection.cursor()
cursor.execute(query)
result = cursor.fetchall()
for row in result:
count = row[0] + 1
return count
except conn.Error as err:
print("Query failed! Error number is: %s" % err.errno)
def insert(tableName, data):
try:
if (tableName == 'application'):
query = "INSERT INTO application (userId, jobId, applyStatus, updateTime) " \
"VALUES (%s, %s, %s, %s);"
            # Gaolin: Since there was no user management before, I temporarily use '1' as the userId when inserting into the application table.
            # todo: record the real userId
value = ('1', data['jobId'], data['applyStatus'], data['updateTime'])
cursor = Connection.cursor()
cursor.execute(query, value)
elif (tableName == 'job'):
query = "INSERT INTO job (jobId, jobName, jobCompany, jobReleaseDate, jobClass) " \
"VALUES (%s, %s, %s, %s, %s);"
value = (data['jobId'], data['jobName'], data['jobCompany'], data['jobReleaseDate'], data['jobClass'])
cursor = Connection.cursor()
cursor.execute(query, value)
elif (tableName == 'school'):
query = "INSERT INTO school (userId, programId, applyStatus, updateTime) " \
"VALUES (%s, %s, %s, %s);"
value = ('1', data['programId'], data['applyStatus'], data['updateTime'])
cursor = Connection.cursor()
cursor.execute(query, value)
elif (tableName == 'program'):
query = "INSERT INTO program (programId, programName, programSchool, programReleaseDate, programClass) " \
"VALUES (%s, %s, %s, %s, %s);"
value = (data['programId'], data['programName'], data['programSchool'], data['programReleaseDate'], data['programClass'])
cursor = Connection.cursor()
cursor.execute(query, value)
Connection.commit()
print("Insert table %s succeed!" % tableName)
except:
print("Insert table %s failed!" % tableName)
query()
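# Example usage (a minimal sketch; it assumes Connection was opened earlier in
# this module and that the job table exists):
#
# insert('job', {'jobId': '1', 'jobName': 'Engineer', 'jobCompany': 'ACME',
#                'jobReleaseDate': '2020-01-01', 'jobClass': 'fulltime'})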
| 32.771084 | 136 | 0.552451 | 829 | 8,160 | 5.425814 | 0.170084 | 0.006225 | 0.054913 | 0.06225 | 0.576701 | 0.531125 | 0.531125 | 0.507337 | 0.477101 | 0.453535 | 0 | 0.007657 | 0.327819 | 8,160 | 248 | 137 | 32.903226 | 0.812397 | 0.126471 | 0 | 0.546012 | 0 | 0 | 0.303043 | 0.03029 | 0 | 0 | 0 | 0.004032 | 0 | 1 | 0.04908 | false | 0.018405 | 0.01227 | 0 | 0.104294 | 0.104294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46e55a727f3051ddea52da6c465bbf43323192b6 | 5,898 | py | Python | build/PureCloudPlatformClientV2/models/biography.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | libs/PureCloudPlatformClientV2/models/biography.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | libs/PureCloudPlatformClientV2/models/biography.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class Biography(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        """
        Biography - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'biography': 'str',
            'interests': 'list[str]',
            'hobbies': 'list[str]',
            'spouse': 'str',
            'education': 'list[Education]'
        }

        self.attribute_map = {
            'biography': 'biography',
            'interests': 'interests',
            'hobbies': 'hobbies',
            'spouse': 'spouse',
            'education': 'education'
        }

        self._biography = None
        self._interests = None
        self._hobbies = None
        self._spouse = None
        self._education = None

    @property
    def biography(self):
        """
        Gets the biography of this Biography.
        Personal detailed description

        :return: The biography of this Biography.
        :rtype: str
        """
        return self._biography

    @biography.setter
    def biography(self, biography):
        """
        Sets the biography of this Biography.
        Personal detailed description

        :param biography: The biography of this Biography.
        :type: str
        """
        self._biography = biography

    @property
    def interests(self):
        """
        Gets the interests of this Biography.

        :return: The interests of this Biography.
        :rtype: list[str]
        """
        return self._interests

    @interests.setter
    def interests(self, interests):
        """
        Sets the interests of this Biography.

        :param interests: The interests of this Biography.
        :type: list[str]
        """
        self._interests = interests

    @property
    def hobbies(self):
        """
        Gets the hobbies of this Biography.

        :return: The hobbies of this Biography.
        :rtype: list[str]
        """
        return self._hobbies

    @hobbies.setter
    def hobbies(self, hobbies):
        """
        Sets the hobbies of this Biography.

        :param hobbies: The hobbies of this Biography.
        :type: list[str]
        """
        self._hobbies = hobbies

    @property
    def spouse(self):
        """
        Gets the spouse of this Biography.

        :return: The spouse of this Biography.
        :rtype: str
        """
        return self._spouse

    @spouse.setter
    def spouse(self, spouse):
        """
        Sets the spouse of this Biography.

        :param spouse: The spouse of this Biography.
        :type: str
        """
        self._spouse = spouse

    @property
    def education(self):
        """
        Gets the education of this Biography.
        User education details

        :return: The education of this Biography.
        :rtype: list[Education]
        """
        return self._education

    @education.setter
    def education(self, education):
        """
        Sets the education of this Biography.
        User education details

        :param education: The education of this Biography.
        :type: list[Education]
        """
        self._education = education

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
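# Example usage (a hypothetical sketch; this block is not part of the
# generated file):
#
# bio = Biography()
# bio.biography = 'Short personal description'
# bio.interests = ['reading', 'hiking']
# print(bio.to_json())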
| 25.097872 | 77 | 0.553238 | 630 | 5,898 | 5.085714 | 0.252381 | 0.037453 | 0.093633 | 0.031211 | 0.287453 | 0.185393 | 0.169164 | 0.129838 | 0.021223 | 0 | 0 | 0.003176 | 0.359444 | 5,898 | 234 | 78 | 25.205128 | 0.844891 | 0.401831 | 0 | 0.081395 | 0 | 0 | 0.062392 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.197674 | false | 0 | 0.05814 | 0 | 0.395349 | 0.011628 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46e87d0d702a6085e4175544be3a7b236bbac27f | 3,329 | py | Python | services/dsrp-api/app/api/application/namespace.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | null | null | null | services/dsrp-api/app/api/application/namespace.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | 9 | 2020-05-06T23:29:43.000Z | 2022-03-14T22:58:17.000Z | services/dsrp-api/app/api/application/namespace.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | 3 | 2020-05-08T16:54:22.000Z | 2021-01-27T17:28:49.000Z | from flask_restplus import Namespace
from app.api.application.resources.application import ApplicationResource, ApplicationListResource, ApplicationReviewResource
from app.api.application.resources.application_estimated_cost_override import ApplicationEstimatedCostOverride
from app.api.application.resources.application_document import ApplicationDocumentListResource, ApplicationDocumentResource
from app.api.application.resources.application_status import ApplicationStatusListResource
from app.api.application.resources.application_summary import ApplicationSummaryResource
from app.api.application.resources.gen_application_docs import GenerateApplicationDocumentResource
from app.api.application.resources.payment_document import PaymentDocumentResource, PaymentDocumentListResource
from app.api.application.resources.application_approved_contracted_work import ApplicationApprovedContractedWorkResource, ApplicationApprovedContractedWorkListResource
from app.api.contracted_work.resources.contracted_work_payment import ContractedWorkPaymentInterim, ContractedWorkPaymentFinal, ContractedWorkPaymentInterimReport, AdminContractedWorkPaymentStatusChange, AdminContractedWorkPaymentAudit
api = Namespace('application', description='Application endpoints')
# General
api.add_resource(ApplicationListResource, '')
api.add_resource(ApplicationResource, '/<string:application_guid>')
api.add_resource(ApplicationSummaryResource, '/<string:application_guid>/summary')
api.add_resource(ApplicationStatusListResource, '/<string:application_guid>/status')
api.add_resource(ApplicationReviewResource, '/<string:application_guid>/review')
api.add_resource(ApplicationEstimatedCostOverride,
                 '/<string:application_guid>/work/<string:work_id>/estimated-cost-override')

# Documents
api.add_resource(GenerateApplicationDocumentResource,
                 '/<string:application_guid>/generate-doc/<string:document_type>')
api.add_resource(ApplicationDocumentResource,
                 '/<string:application_guid>/documents/<string:document_guid>')
api.add_resource(ApplicationDocumentListResource, '/<string:application_guid>/documents')
api.add_resource(PaymentDocumentResource,
                 '/<string:application_guid>/payment-doc/<string:document_guid>')
api.add_resource(PaymentDocumentListResource, '/<string:application_guid>/payment-doc')

# Contracted Work
api.add_resource(ApplicationApprovedContractedWorkResource,
                 '/<string:application_guid>/approved-contracted-work')
api.add_resource(ApplicationApprovedContractedWorkListResource, '/approved-contracted-work')
api.add_resource(ContractedWorkPaymentInterim,
                 '/<string:application_guid>/contracted-work-payment/<string:work_id>/interim')
api.add_resource(ContractedWorkPaymentFinal,
                 '/<string:application_guid>/contracted-work-payment/<string:work_id>/final')
api.add_resource(
    ContractedWorkPaymentInterimReport,
    '/<string:application_guid>/contracted-work-payment/<string:work_id>/interim-report')
api.add_resource(AdminContractedWorkPaymentStatusChange,
                 '/<string:application_guid>/contracted-work-payment/<string:work_id>/status')
api.add_resource(AdminContractedWorkPaymentAudit,
                 '/<string:application_guid>/contracted-work-payment/<string:work_id>/audit')
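# For example, once this namespace is mounted, a GET request to
# '<namespace prefix>/<application_guid>/documents' would be dispatched to
# ApplicationDocumentListResource (a sketch; the exact prefix depends on how
# the namespace is registered on the parent API).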
| 67.938776 | 235 | 0.828477 | 300 | 3,329 | 8.996667 | 0.186667 | 0.040015 | 0.093368 | 0.062245 | 0.302334 | 0.246758 | 0.105224 | 0.105224 | 0.105224 | 0.045202 | 0 | 0 | 0.079303 | 3,329 | 48 | 236 | 69.354167 | 0.880587 | 0.009913 | 0 | 0 | 0 | 0 | 0.285237 | 0.275516 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46eb67734142a3e1e31aedcc8ef44301ec624a41 | 743 | py | Python | saliency.py | Desaiakshata/Saliecy-detection-using-opencv | 8ebc9e461093bf66652ac55b36f737e9a93df787 | [
"MIT"
] | null | null | null | saliency.py | Desaiakshata/Saliecy-detection-using-opencv | 8ebc9e461093bf66652ac55b36f737e9a93df787 | [
"MIT"
] | null | null | null | saliency.py | Desaiakshata/Saliecy-detection-using-opencv | 8ebc9e461093bf66652ac55b36f737e9a93df787 | [
"MIT"
] | null | null | null | import cv2
import argparse
# Parse the input image path from the command line.
a = argparse.ArgumentParser()
a.add_argument("-i", "--image", required=True, help="input image path")
args = vars(a.parse_args())
image = cv2.imread(args["image"])

# Two methods are compared below; note that the second one overwrites the
# saliency map produced by the first.

# Static spectral residual saliency
saliency = cv2.saliency.StaticSaliencySpectralResidual_create()
(success, saliencyMap) = saliency.computeSaliency(image)
saliencyMap = (saliencyMap * 255).astype("uint8")

# Fine grained saliency (replaces the spectral residual map computed above)
saliency = cv2.saliency.StaticSaliencyFineGrained_create()
(success, saliencyMap) = saliency.computeSaliency(image)
threshMap = cv2.threshold(saliencyMap.astype("uint8"), 0, 255,
                          cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

cv2.imshow("Image", saliencyMap)
cv2.imshow("Thresh", threshMap)
cv2.waitKey(0)
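# Example invocation (assuming opencv-contrib-python is installed, since
# cv2.saliency ships with the contrib modules):
#   python saliency.py --image path/to/image.jpg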
| 29.72 | 71 | 0.755047 | 87 | 743 | 6.37931 | 0.494253 | 0.032432 | 0.068468 | 0.097297 | 0.187387 | 0.187387 | 0 | 0 | 0 | 0 | 0 | 0.031627 | 0.106326 | 743 | 24 | 72 | 30.958333 | 0.804217 | 0.078062 | 0 | 0.125 | 0 | 0 | 0.077626 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46ee6358447c89093b274edefbf4cb4da8117ae6 | 9,517 | py | Python | website/CubeToaster.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 4 | 2016-11-13T20:49:33.000Z | 2017-12-20T20:03:03.000Z | website/CubeToaster.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 5 | 2016-12-26T19:14:46.000Z | 2022-02-11T03:44:39.000Z | website/CubeToaster.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 2 | 2016-12-29T12:03:15.000Z | 2017-02-16T15:51:02.000Z | from flask import Flask, request, render_template, redirect, url_for, flash, make_response
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField, SelectField, widgets
from wtforms.validators import Required
import os
import csv
import json
import requests
# import jellyfish
#################################################################################
# Configurations
app = Flask(__name__)
app.debug = True
app.use_reloader = True
app.config['SECRET_KEY'] = 'cubetoaster'
allEventsDict = {"222" : "2x2 Cube",
"333" : "Rubik's Cube",
"333oh" : "Rubik's Cube: One-Handed",
"333bf" : "Rubik's Cube: Blindfolded",
"333fm" : "Rubik's Cube: Fewest moves",
"333ft" : "Rubik's Cube: With feet",
"333mbf": "Rubik's Cube: Multiple Blindfolded",
"444" : "4x4 Cube",
"444bf" : "4x4 Cube: Blindfolded",
"555" : "5x5 Cube",
"555bf" : "5x5 Cube: Blindfolded",
"666" : "6x6 Cube",
"777" : "7x7 Cube",
"clock" : "Rubik's Clock",
"minx" : "Megaminx",
"pyram" : "Pyraminx",
"skewb" : "Skewb",
"sq1" : "Square-1"}
startHTML = '''
<html>
<head>
<style>
    table {
        border-collapse: collapse;
        height: 100%;
        width: 100%;
    }

    table, th, td {
        border: 3px solid black;
    }

    @media print {
        table {
            page-break-after: always;
        }
    }

    .cutoffs td {
        border: 0;
        font-weight: bold;
    }

    .compName {
        font-size: 48pt;
        font-weight: bold;
    }

    .labels {
        font-size: 24pt;
        font-weight: bold;
    }

    .attempt {
        font-size: 36pt;
        font-weight: bold;
        text-align: center;
    }

    .event, .personID, .scrambler {
        font-size: 24pt;
        font-weight: bold;
        width: 60px;
    }

    .round, .heat {
        font-size: 24pt;
        font-weight: bold;
    }

    .personName {
        font-size: 40pt;
        font-weight: bold;
    }

    .attemptNumber {
        width: 60px;
    }

    .initial {
        width: 100px;
    }
</style>
</head>
<body>
'''
ao5Table = '''
<table>
    <tr>
        <th colspan="6" class="compName">competitionName</th>
    </tr>
    <tr>
        <th colspan="1" class="personID"></th>
        <th colspan="3" class="event">eventName</th>
        <th colspan="1" class="heat">G: </th>
        <th colspan="1" class="round">R: roundNumber</th>
    </tr>
    <tr>
        <th colspan="6" class="personName">competitorName</th>
    </tr>
    <tr class="labels">
        <th colspan="1" class="scrambler">Scr</th>
        <th colspan="1" class="attemptNumber">#</th>
        <th colspan="2">Results</th>
        <th colspan="1" class="initial">Judge</th>
        <th colspan="1" class="initial">Comp</th>
    </tr>
    <tr class="attempt">
        <td colspan="1"> </td>
        <td colspan="1">1</td>
        <td colspan="2"> </td>
        <td colspan="1"> </td>
        <td colspan="1"> </td>
    </tr>
    <tr class="attempt">
        <td colspan="1"> </td>
        <td colspan="1">2</td>
        <td colspan="2"> </td>
        <td colspan="1"> </td>
        <td colspan="1"> </td>
    </tr>
    <tr class="cutoffs">
        <td colspan="1"></td>
        <td colspan="1"></td>
        <td colspan="1">Cutoff: cutoffTime</td>
        <td colspan="1">Time Limit: timeLimit</td>
        <td colspan="1"></td>
        <td colspan="1"></td>
    </tr>
    <tr class="attempt">
        <td colspan="1"> </td>
        <td colspan="1">3</td>
        <td colspan="2"> </td>
        <td colspan="1"> </td>
        <td colspan="1"> </td>
    </tr>
    <tr class="attempt">
        <td colspan="1"> </td>
        <td colspan="1">4</td>
        <td colspan="2"> </td>
        <td colspan="1"> </td>
        <td colspan="1"> </td>
    </tr>
    <tr class="attempt">
        <td colspan="1"> </td>
        <td colspan="1">5</td>
        <td colspan="2"> </td>
        <td colspan="1"> </td>
        <td colspan="1"> </td>
    </tr>
    <tr class="empty">
        <td colspan="6"></td>
    </tr>
    <tr class="attempt">
        <td colspan="1"> </td>
        <td colspan="1">E</td>
        <td colspan="2"> </td>
        <td colspan="1"> </td>
        <td colspan="1"> </td>
    </tr>
</table>
'''
endHTML = '''
</body>
</html>
'''
#################################################################################
# Forms
class RegistrationForm(FlaskForm):
    # compId = StringField('Competition ID from WCA website:', validators=[Required()])
    compName = StringField('Competition name to use in scorecards:', validators=[Required()])
    roundNum = StringField('Round number:', validators=[Required()])
    event = StringField('Event:', validators=[Required()])
    cutoff = StringField('Cutoff:', validators=[Required()])
    timeLimit = StringField('Time limit:', validators=[Required()])
    names = StringField('Insert list of people separated by comma:', validators=[Required()])
    submit = SubmitField('Submit')


class WCIFInputForm(FlaskForm):
    compId = StringField('Competition ID (the same one that is used on the WCA website):', validators=[Required()])
    compInfo = StringField('Competition Info:', validators=[Required()])
    event = SelectField("Select the event you want to see a psych sheet for", choices=[
        ("222", "2x2 Cube"),
        ("333", "Rubik's Cube"),
        ("333oh", "Rubik's Cube: One-Handed"),
        ("333bf", "Rubik's Cube: Blindfolded"),
        ("333fm", "Rubik's Cube: Fewest moves"),
        ("333ft", "Rubik's Cube: With feet"),
        ("333mbf", "Rubik's Cube: Multiple Blindfolded"),
        ("444", "4x4 Cube"),
        ("444bf", "4x4 Cube: Blindfolded"),
        ("555", "5x5 Cube"),
        ("555bf", "5x5 Cube: Blindfolded"),
        ("666", "6x6 Cube"),
        ("777", "7x7 Cube"),
        ("clock", "Rubik's Clock"),
        ("minx", "Megaminx"),
        ("pyram", "Pyraminx"),
        ("skewb", "Skewb"),
        ("sq1", "Square-1")], validators=[Required()])
    submit = SubmitField('Submit')
#################################################################################
# Helper functions
# def createDataStructure(data):
#     compName = data['shortName']
#     people = data['persons']
#     compEvents = data['events']
def addScoreSheet(updatedScoreSheetTable, compName, eventName, roundNum, name, cutoff, timeLimit):
    # Fill the placeholders in the HTML template for one competitor.
    updatedScoreSheetTable = updatedScoreSheetTable.replace("competitionName", compName
                             ).replace("eventName", eventName
                             ).replace("roundNumber", roundNum
                             ).replace("cutoffTime", cutoff
                             ).replace("timeLimit", timeLimit
                             ).replace("competitorName", name)
    return updatedScoreSheetTable


def makeScoreSheets(compName, roundNum, event, names, cutoff, timeLimit):
    '''
    Creates a string of HTML that contains all of the necessary
    score sheets for the first round of the competition
    '''
    scoreSheetList = []
    updatedScoreSheetTable = ao5Table

    for num, person in enumerate(names.split(',')):
        scoreSheetList.append(addScoreSheet(updatedScoreSheetTable, compName, event, roundNum, person, cutoff, timeLimit))

    scoreSheets = "\n".join(scoreSheetList)
    return startHTML + scoreSheets + endHTML
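# Example usage (a sketch with made-up inputs; the returned HTML can be
# written to a file and printed from a browser):
#
# html_doc = makeScoreSheets('MCC Open', '1', "Rubik's Cube", 'Alice,Bob', '2:00', '10:00')
# open('scorecards.html', 'w').write(html_doc)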
###############################################################################
## Routes and view functions
@app.route('/')
def home():
    return render_template("index.html")


@app.route('/scorecards', methods=['GET', 'POST'])
def scorecards():
    form = RegistrationForm()
    if form.validate_on_submit() and request.method == 'POST':
        data = makeScoreSheets(request.form.get('compName'), request.form.get('roundNum'),
                               request.form.get('event'), request.form.get('names'),
                               request.form.get('cutoff'), request.form.get('timeLimit'))
        return render_template('view_heats.html', data=data)
    return render_template('scorecards.html', form=form)


@app.route('/wcif', methods=['GET', 'POST'])
def wcif():
    wcif_form = WCIFInputForm()
    # Debug output left in by the author.
    print(wcif_form.validate_on_submit())
    print(request.method)
    if request.method == 'POST':
        data = json.loads(request.form.get('compInfo'))
        info = []
        for person in data["persons"]:
            if "personalBests" in person:
                for event in person["personalBests"]:
                    if event["eventId"] == request.form.get("event") and event["type"] == "average":
                        # WCA results are stored in centiseconds; convert to seconds.
                        info.append((int(event["best"]) / 100, person["name"]))
        info.sort()
        return render_template("psych_sheet.html", info=info)
    return render_template("wcif.html", form=wcif_form)


if __name__ == '__main__':
    # app.run(host='0.0.0.0', port=int(os.getenv('PORT', 5001)))
    app.run()
| 32.261017 | 203 | 0.505201 | 947 | 9,517 | 5.043295 | 0.265048 | 0.061977 | 0.062814 | 0.057789 | 0.321608 | 0.266332 | 0.23995 | 0.23995 | 0.232831 | 0.232831 | 0 | 0.030683 | 0.304823 | 9,517 | 294 | 204 | 32.370748 | 0.691203 | 0.04886 | 0 | 0.282158 | 0 | 0 | 0.548914 | 0.029199 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020747 | false | 0 | 0.033195 | 0.004149 | 0.136929 | 0.012448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46ef02cf00dc2eef4d91549f2257af29dc55234b | 747 | py | Python | ex036.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | ex036.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | ex036.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | """Escreva um programa para aprovar o empréstimo bancário para a compra de uma casa.
Pergunte o valor da casa, o salário do comprador e em quantos anos ele vai pagar.
A prestação mensal não pode exceder 30% do salário ou então o empréstimo será negado."""
casa = float(input('Qual o valor da casa? R$ '))
salario = float(input('Qual o seu salário? R$ '))
anos = int(input('Em quantos anos pretende pagar o valor total? '))
mes = anos * 12
prestacao = casa / mes
GREEN = "\033[1;32m"
RED = "\033[1;31m"
END = "\033[0m"
if prestacao <= salario * (30 / 100):
print(f'{GREEN}EMPRÉSTIMO APROVADO!{END} Você irá pagar R$ {prestacao:.2f} por mês.')
else:
print(f'{RED}EMPRÉSTIMO NEGADO!{END} O valor das prestações excedeu 30% do seu salário.')
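# Worked example: a R$ 120000.00 house over 10 years gives an installment of
# 120000 / 120 = R$ 1000.00 per month; with a salary of R$ 2500.00 the 30%
# ceiling is R$ 750.00, so this loan would be denied.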
| 43.941176 | 93 | 0.702811 | 125 | 747 | 4.2 | 0.56 | 0.045714 | 0.030476 | 0.045714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045381 | 0.174029 | 747 | 16 | 94 | 46.6875 | 0.805511 | 0.333333 | 0 | 0 | 0 | 0.083333 | 0.558943 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46f1607eb413535d97110e501a2e28d97385688e | 1,972 | py | Python | src/pyconcepticon/commands/citation.py | concepticon/pyconcepticon | bd336df18545b493f59ed8c22b636ded447dede1 | [
"Apache-2.0"
] | 5 | 2019-06-04T02:17:03.000Z | 2021-12-28T01:59:16.000Z | src/pyconcepticon/commands/citation.py | concepticon/pyconcepticon | bd336df18545b493f59ed8c22b636ded447dede1 | [
"Apache-2.0"
] | 36 | 2019-02-06T11:50:21.000Z | 2021-12-28T18:43:11.000Z | src/pyconcepticon/commands/citation.py | concepticon/pyconcepticon | bd336df18545b493f59ed8c22b636ded447dede1 | [
"Apache-2.0"
] | 5 | 2019-09-18T13:34:19.000Z | 2021-12-28T02:01:44.000Z | """
Print a full bibliographic citation for a Concepticon version
"""
import html
import collections
from datetime import date
from clldutils.path import git_describe
from clldutils.jsonlib import dump
from nameparser import HumanName
def register(parser):
    parser.add_argument('--version', default=None)
    parser.add_argument('--year', default=date.today().year, type=int)


def zenodo_json(citation, version, editors):
    return collections.OrderedDict([
        ("upload_type", "dataset"),
        ("description", "<p>{}</p>".format(html.escape(citation))),
        ("alternate_identifiers",
         [{"scheme": "url", "identifier": "https://concepticon.clld.org"}]),
        ("title", "CLLD Concepticon {}".format(version.replace('v', ''))),
        ("access_right", "open"),
        ("license", {"id": "CC-BY-4.0"}),
        ("keywords", ["linguistics"]),
        ("creators", [{"name": e.name} for e in editors]),
        ("communities", [{"identifier": "calc"}, {"identifier": "clld"}, {"identifier": "dighl"}])
    ])


def run(args):
    if not args.version:  # pragma: no cover
        args.version = git_describe(args.repos.repos)
    if args.version.startswith('v'):
        args.version = args.version[1:]
    current_editors = [
        e for e in args.repos.editors
        if ((not e.end) or (int(e.end) >= args.year)) and int(e.start) <= args.year]
    editor_names = []
    for e in current_editors:
        name = HumanName(e.name)
        editor_names.append('{0.last}, {0.first} {0.middle}'.format(name).strip())
    editor_names = ' & '.join(editor_names)
    res = "{0} (eds.) {1.year}. {2.title} {1.version}. {2.description}. " \
          "{2.publisher.place}: {2.publisher.name}. Available online at {2.url}".format(
              editor_names, args, args.repos.dataset_metadata,
          )
    print(res)
    dump(
        zenodo_json(res, args.version, current_editors),
        args.repos.repos / '.zenodo.json',
        indent=4)
| 35.854545 | 98 | 0.61359 | 239 | 1,972 | 4.987448 | 0.443515 | 0.055369 | 0.015101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009609 | 0.208418 | 1,972 | 54 | 99 | 36.518519 | 0.754004 | 0.040061 | 0 | 0 | 0 | 0.045455 | 0.233422 | 0.011141 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.136364 | 0.022727 | 0.227273 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46f20ff3199dbbecb63862c20b9f0fbe6430f66b | 1,106 | py | Python | get_repos.py | Oyekunle-Mark/alpha-tower | 46d2e96553909388432ff28f2caf74a359538f5e | [
"MIT"
] | null | null | null | get_repos.py | Oyekunle-Mark/alpha-tower | 46d2e96553909388432ff28f2caf74a359538f5e | [
"MIT"
] | 1 | 2021-06-02T00:28:26.000Z | 2021-06-02T00:28:26.000Z | get_repos.py | Oyekunle-Mark/alpha-tower | 46d2e96553909388432ff28f2caf74a359538f5e | [
"MIT"
] | null | null | null | from requests import get
from parameters import build_parameters
GITHUB_API_URL = "https://api.github.com/search/repositories"
def get_repos_with_most_stars(languages, sort="stars", order="desc", stars=50000):
"""Fetches the data from the GitHub API
Arguments:
languages {list} -- the list of languages
Keyword Arguments:
sort {str} -- the sort condition (default: {"stars"})
order {str} -- the order pattern (default: {"desc"})
stars {int} -- the minimum number of stars (default: {50000})
Raises:
RuntimeError: Error that might occur while fetching data from the API
Returns:
{tuple} -- the repos and number of repos
"""
parameters = build_parameters(languages, sort, order, stars)
response = get(GITHUB_API_URL, params=parameters)
if response.status_code != 200:
raise RuntimeError(
f"An error occured while fetching the data. The status code was {response.status_code}")
repos = response.json()["items"]
repo_count = response.json()["total_count"]
return repos, repo_count
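# Example usage (a minimal sketch; it assumes build_parameters produces the
# query parameters the GitHub search API expects and that the network is
# reachable):
if __name__ == "__main__":
    repos, repo_count = get_repos_with_most_stars(["python"])
    print(f"Found {repo_count} repositories")
    for repo in repos[:3]:
        print(repo["full_name"], repo["stargazers_count"])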
| 29.891892 | 100 | 0.674503 | 140 | 1,106 | 5.221429 | 0.464286 | 0.036936 | 0.032832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015134 | 0.223327 | 1,106 | 36 | 101 | 30.722222 | 0.835856 | 0.392405 | 0 | 0 | 0 | 0 | 0.243156 | 0.035427 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46f2dabd5d215702d44606f19c384a3fe040de66 | 580 | py | Python | media.py | Daniatnuom/Programming_Foundations_with_Python | ec5c5af25c74bdb14e2dbce48b6e25b15b016b24 | [
"MIT"
] | null | null | null | media.py | Daniatnuom/Programming_Foundations_with_Python | ec5c5af25c74bdb14e2dbce48b6e25b15b016b24 | [
"MIT"
] | null | null | null | media.py | Daniatnuom/Programming_Foundations_with_Python | ec5c5af25c74bdb14e2dbce48b6e25b15b016b24 | [
"MIT"
] | null | null | null | import webbrowser
class Movie():
    """This class provides a way to store movie related information."""

    # Class variable; the guideline recommends writing constants in upper case.
    VALID_RATINGS = ['G', 'PG', 'PG-13', 'R']

    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    # Instance method that opens the trailer in the default browser.
    def show_trailer(self):
        webbrowser.open(self.trailer_youtube_url)
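# Example usage (a sketch with placeholder URLs):
if __name__ == '__main__':
    toy_story = Movie('Toy Story',
                      'A story of a boy and his toys that come to life',
                      'https://example.com/toy_story_poster.jpg',
                      'https://www.youtube.com/watch?v=example')
    toy_story.show_trailer()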
| 30.526316 | 83 | 0.77069 | 80 | 580 | 5.3375 | 0.5375 | 0.131148 | 0.084309 | 0.098361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004 | 0.137931 | 580 | 18 | 84 | 32.222222 | 0.85 | 0.331034 | 0 | 0 | 0 | 0 | 0.023747 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
46f492ce8bbd51b82ba43d37275a6e3759094db3 | 5,763 | py | Python | src/bat/renamefiles.py | armijnhemel/binaryanalysis | 1d186d1f6b711b83a6f8e0ffbcce37284ffa1c16 | [
"Apache-2.0"
] | 78 | 2016-09-29T08:26:50.000Z | 2022-02-21T23:41:23.000Z | src/bat/renamefiles.py | armijnhemel/binaryanalysis | 1d186d1f6b711b83a6f8e0ffbcce37284ffa1c16 | [
"Apache-2.0"
] | 7 | 2016-10-14T14:32:00.000Z | 2018-03-17T17:28:42.000Z | src/bat/renamefiles.py | armijnhemel/binaryanalysis | 1d186d1f6b711b83a6f8e0ffbcce37284ffa1c16 | [
"Apache-2.0"
] | 50 | 2016-10-05T06:22:38.000Z | 2022-02-03T16:08:48.000Z | #!/usr/bin/python
# Binary Analysis Tool
# Copyright 2015-2016 Armijn Hemel for Tjaldur Software Governance Solutions
# Licensed under Apache 2.0, see LICENSE file for details
import shutil
import os.path
import copy
'''
This aggregate scan traverses the unpackreports and tries to rename certain files based on properties of
unpacked files. For example:

* if a file is carved out of a larger file that contains a Linux kernel,
  rename it to something like "unpacked-linux-kernel"
* if a gzip CPIO archive is extracted from a Linux kernel and contains
  files/directories like /root or /dev, it is likely an initramfs
'''
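# A minimal sketch of the unpackreports structure this scan assumes; the real
# structure is produced by earlier BAT scans and carries more keys:
#
# unpackreports = {
#     'firmware.bin': {
#         'checksum': '...',
#         'tags': ['linuxkernel'],
#         'name': 'firmware.bin',
#         'realpath': '/tmp/scan',
#         'scans': [{'scanname': 'gzip', 'scanreports': ['firmware.bin-gzip-1']}],
#     },
# }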
def renamefiles(unpackreports, scantempdir, topleveldir, processors, scanenv, batcursors, batcons, scandebug=False, unpacktempdir=None):
    # only focus on initramfs that is also compressed for now
    kernelfiles = set()

    # known compressions for initramfs
    initramfscompressions = ['gzip']

    for r in unpackreports.keys():
        if 'checksum' in unpackreports[r]:
            if 'linuxkernel' in unpackreports[r]['tags']:
                if 'modulekernelversion' in unpackreports[r]['tags']:
                    continue
                if 'duplicate' in unpackreports[r]['tags']:
                    continue
                kernelfiles.add(r)

    # Only the fixed prefix before the first '%' in TEMPLATE is kept.
    # scanenv.get() is used here so template is always defined, even when
    # TEMPLATE is not set (the original two-line lookup could leave it unbound).
    template = scanenv.get('TEMPLATE')
    if template is not None:
        templatecutoff = template.find('%')
        template = template[:templatecutoff]

    cpiotemplate = "initramfs"

    for r in kernelfiles:
        if unpackreports[r]['scans'] != []:
            counter = 0
            for s in unpackreports[r]['scans']:
                if len(s['scanreports']) != 1:
                    counter += 1
                    continue
                renamefiles = set()
                origcpio = ''
                targetcpio = ''
                process = False
                if s['scanname'] in initramfscompressions:
                    unpackfile = s['scanreports'][0]
                    if unpackreports[unpackfile]['name'].startswith('tmp'):
                        process = True
                    else:
                        if template is not None:
                            if unpackreports[unpackfile]['name'].startswith(template):
                                process = True
                    if not process:
                        counter += 1
                        continue
                    if unpackreports[unpackfile]['scans'] != []:
                        if len(unpackreports[unpackfile]['scans']) != 1:
                            counter += 1
                            continue
                        if unpackreports[unpackfile]['scans'][0]['scanname'] == 'cpio':
                            # it is an initramfs, so it is possible to rename the file
                            # Rename on disk:
                            # 1. file
                            # 2. unpacking directory
                            # Then rename in unpackreports:
                            # 1. original file
                            # 2. any paths in scanreports (path, realpath)
                            # 3. references in parent file
                            origname = os.path.join(unpackreports[unpackfile]['realpath'], unpackreports[unpackfile]['name'])
                            targetname = os.path.join(unpackreports[unpackfile]['realpath'], cpiotemplate)
                            if not os.path.exists(targetname):
                                # on disk
                                shutil.move(origname, targetname)
                                if not "duplicate" in unpackreports[unpackfile]['tags']:
                                    origcpio = "%s-cpio-1" % origname
                                    targetcpio = "%s-cpio-1" % targetname
                                    shutil.move(origcpio, targetcpio)
                                # in unpackreports
                                unpackreports[unpackfile]['name'] = cpiotemplate
                                newunpackreportsname = os.path.join(os.path.dirname(unpackfile), cpiotemplate)
                                unpackreports[r]['scans'][counter]['scanreports'][0] = newunpackreportsname
                                renamefiles.add(unpackfile)
                                while len(renamefiles) != 0:
                                    newrenamefiles = set()
                                    for re in renamefiles:
                                        origcpio = '/%s' % os.path.basename(origcpio)
                                        targetcpio = '/%s' % os.path.basename(targetcpio)
                                        newr = re.replace(origcpio, targetcpio)
                                        realpath = copy.deepcopy(unpackreports[re]['realpath'])
                                        newrealpath = realpath.replace(origcpio, targetcpio)
                                        unpackreports[re]['realpath'] = newrealpath
                                        # recurse into files, if any
                                        if 'scans' in unpackreports[re]:
                                            for sc in unpackreports[re]['scans']:
                                                if 'scanreports' in sc:
                                                    newrenamefiles.update(sc['scanreports'])
                                                    newscanreports = []
                                                    for scr in sc['scanreports']:
                                                        newscanreports.append(scr.replace(origcpio, targetcpio))
                                                    sc['scanreports'] = newscanreports
                                        # then rename and delete the old value
                                        unpackreports[newr] = copy.deepcopy(unpackreports[re])
                                        del unpackreports[re]
                                    renamefiles = newrenamefiles
                counter += 1
| 48.838983 | 136 | 0.495402 | 487 | 5,763 | 5.862423 | 0.338809 | 0.057793 | 0.028021 | 0.021016 | 0.121191 | 0.060946 | 0.032224 | 0 | 0 | 0 | 0 | 0.008411 | 0.422349 | 5,763 | 117 | 137 | 49.25641 | 0.849204 | 0.098907 | 0 | 0.1625 | 0 | 0 | 0.065828 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0125 | false | 0 | 0.0375 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |