hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33162412afcb1a45af8ff03715ec9e96750eac9d
| 44
|
py
|
Python
|
addons/stock_zebra/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/stock_zebra/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/stock_zebra/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from tests import *
| 14.666667
| 23
| 0.568182
| 6
| 44
| 4.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.204545
| 44
| 2
| 24
| 22
| 0.685714
| 0.477273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6879e124a862a5d8525f80dd30e51e357d660f1f
| 66
|
py
|
Python
|
rulesets/routes/blueprints/__init__.py
|
jdr-tools/rulesets
|
2bbfb280c84da6ef359d47fa6c24d34b84814eeb
|
[
"MIT"
] | null | null | null |
rulesets/routes/blueprints/__init__.py
|
jdr-tools/rulesets
|
2bbfb280c84da6ef359d47fa6c24d34b84814eeb
|
[
"MIT"
] | 3
|
2018-12-19T08:16:15.000Z
|
2018-12-19T08:16:47.000Z
|
rulesets/routes/blueprints/__init__.py
|
jdr-tools/rulesets
|
2bbfb280c84da6ef359d47fa6c24d34b84814eeb
|
[
"MIT"
] | null | null | null |
from rulesets.routes.blueprints.rulesets import rulesets_blueprint
| 66
| 66
| 0.909091
| 8
| 66
| 7.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 66
| 1
| 66
| 66
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
687c56da3c8383d518959ddff88a8be0bbaa2a63
| 2,712
|
py
|
Python
|
b1.py
|
bigdraw715/IML_Assignment
|
8f3f07bf260f891e62dcc57bfbc4a740ab996f24
|
[
"Apache-2.0"
] | 1
|
2021-12-03T13:38:15.000Z
|
2021-12-03T13:38:15.000Z
|
b1.py
|
bigdraw715/IML_Assignment
|
8f3f07bf260f891e62dcc57bfbc4a740ab996f24
|
[
"Apache-2.0"
] | null | null | null |
b1.py
|
bigdraw715/IML_Assignment
|
8f3f07bf260f891e62dcc57bfbc4a740ab996f24
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
from preprocess import data_preprocess
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.linear_model import LogisticRegression
import time
X_onehot,y_onehot = data_preprocess(method="onehot")
X_class,y_class = data_preprocess(method="class")
print("Use class encoded data")
time.sleep(2)
print("One vs One calssifier:")
X = np.array(X_class)
y = np.array(y_class["category"]).astype(int)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, shuffle=True, random_state=0)
clf = OneVsOneClassifier(
LogisticRegression(max_iter = 10000)).fit(X_train, y_train)
# clf = LogisticRegression(max_iter = 10000).fit(X_train, y_train)
print("Train Accuracy:",clf.score(X_train,y_train))
print("Test Accuracy:",clf.score(X_test,y_test))
print("One vs All calssifier:")
X = np.array(X_class)
y = np.array(y_class["category"]).astype(int)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, shuffle=True, random_state=0)
clf = OneVsRestClassifier(
LogisticRegression(max_iter = 10000)).fit(X_train, y_train)
# clf = LogisticRegression(max_iter = 10000).fit(X_train, y_train)
print("Train Accuracy:",clf.score(X_train,y_train))
print("Test Accuracy:",clf.score(X_test,y_test))
print("\n Use onehot encoded data")
time.sleep(2)
print("One vs One calssifier:")
X = np.array(X_onehot)
y = np.array(y_onehot["category"]).astype(int)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, shuffle=True, random_state=0)
clf = OneVsOneClassifier(
LogisticRegression(max_iter = 10000)).fit(X_train, y_train)
# clf = LogisticRegression(max_iter = 10000).fit(X_train, y_train)
print("Train Accuracy:",clf.score(X_train,y_train))
print("Test Accuracy:",clf.score(X_test,y_test))
print("One vs All calssifier:")
X = np.array(X_onehot)
y = np.array(y_onehot["category"]).astype(int)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, shuffle=True, random_state=0)
clf = OneVsRestClassifier(
LogisticRegression(max_iter = 10000)).fit(X_train, y_train)
# clf = LogisticRegression(max_iter = 10000).fit(X_train, y_train)
print("Train Accuracy:",clf.score(X_train,y_train))
print("Test Accuracy:",clf.score(X_test,y_test))
print(
'''
##### #
##### ## #### # # # # ##
# # # # # # # # #
# # # #### #### ##### #
# ###### # # # # ### #
# # # # # # # # ### #
# # # #### # # ####### ### #####
'''
)
| 38.742857
| 66
| 0.668879
| 389
| 2,712
| 4.429306
| 0.138817
| 0.055717
| 0.048752
| 0.083575
| 0.792803
| 0.792803
| 0.792803
| 0.792803
| 0.792803
| 0.792803
| 0
| 0.025812
| 0.17146
| 2,712
| 69
| 67
| 39.304348
| 0.740988
| 0.095501
| 0
| 0.730769
| 0
| 0
| 0.139217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.288462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d7b5d36586e40fedc4321edbe1784b5edbec40a1
| 13,849
|
py
|
Python
|
bugs/gcc-function-detection/gzip/x86/test.py
|
xbabka01/retdec-regression-tests
|
1ac40cca5165740364e6f7fb72b20820eac9bc7c
|
[
"MIT"
] | 8
|
2017-12-14T14:25:17.000Z
|
2019-03-09T03:29:12.000Z
|
bugs/gcc-function-detection/gzip/x86/test.py
|
xbabka01/retdec-regression-tests
|
1ac40cca5165740364e6f7fb72b20820eac9bc7c
|
[
"MIT"
] | 10
|
2019-06-14T09:12:55.000Z
|
2021-10-01T12:15:43.000Z
|
bugs/gcc-function-detection/gzip/x86/test.py
|
xbabka01/retdec-regression-tests
|
1ac40cca5165740364e6f7fb72b20820eac9bc7c
|
[
"MIT"
] | 8
|
2019-05-10T14:59:48.000Z
|
2022-03-07T16:34:23.000Z
|
from regression_tests import *
class Test(Test):
settings = TestSettings(
input='gzip-strip',
args='-k' # TODO: matula, not sure if some functions are not called, or we just do not detect it.
)
def test_check_for_all_currently_detected_strings(self):
assert self.out_c.has_string_literal( ' %9lu %9lu ' )
assert self.out_c.has_string_literal( ' Copyright (C) 1992-1993 Jean-loup Gailly' )
assert self.out_c.has_string_literal( ' This program is free software; you can redistribute it and/or modify' )
assert self.out_c.has_string_literal( ' any later version.' )
assert self.out_c.has_string_literal( ' it under the terms of the GNU General Public License as published by' )
assert self.out_c.has_string_literal( ' the Free Software Foundation; either version 2, or (at your option)' )
assert self.out_c.has_string_literal( ' %s\\n' )
assert self.out_c.has_string_literal( ' (totals)' )
assert self.out_c.has_string_literal( ' -- replaced with %s' )
assert self.out_c.has_string_literal( ' -1 --fast compress faster' )
assert self.out_c.has_string_literal( ' -9 --best compress better' )
assert self.out_c.has_string_literal( ' -L --license display software license' )
assert self.out_c.has_string_literal( ' -N --name save or restore the original name and time stamp' )
assert self.out_c.has_string_literal( ' -S .suf --suffix .suf use suffix .suf on compressed files' )
assert self.out_c.has_string_literal( ' -V --version display version number' )
assert self.out_c.has_string_literal( ' -c --stdout write on standard output, keep original files unchanged' )
assert self.out_c.has_string_literal( ' -d --decompress decompress' )
assert self.out_c.has_string_literal( ' -f --force force overwrite of output file and compress links' )
assert self.out_c.has_string_literal( ' -h --help give this help' )
assert self.out_c.has_string_literal( ' -l --list list compressed file contents' )
assert self.out_c.has_string_literal( ' -n --no-name do not save or restore the original name and time stamp' )
assert self.out_c.has_string_literal( ' -q --quiet suppress all warnings' )
assert self.out_c.has_string_literal( ' -t --test test compressed file integrity' )
assert self.out_c.has_string_literal( ' -v --verbose verbose mode' )
assert self.out_c.has_string_literal( ' OK' )
assert self.out_c.has_string_literal( ' OK\\n' )
assert self.out_c.has_string_literal( ' do you wish to overwrite (y or n)? ' )
assert self.out_c.has_string_literal( ' file... files to (de)compress. If none given, use standard input.' )
assert self.out_c.has_string_literal( '%2ld.%1ld%%' )
assert self.out_c.has_string_literal( '%5s %08lx %11s ' )
assert self.out_c.has_string_literal( '%9ld %9ld ' )
assert self.out_c.has_string_literal( '%s %s (%s)\\n' )
assert self.out_c.has_string_literal( '%s: ' )
assert self.out_c.has_string_literal( '%s: %s already exists;' )
assert self.out_c.has_string_literal( '%s: %s already has %s suffix -- unchanged\\n' )
assert self.out_c.has_string_literal( '%s: %s and %s are the same file\\n' )
assert self.out_c.has_string_literal( '%s: %s compressed to %s\\n' )
assert self.out_c.has_string_literal( '%s: %s has %d other link%c -- unchanged\\n' )
assert self.out_c.has_string_literal( '%s: %s has flags 0x%x -- get newer version of gzip\\n' )
assert self.out_c.has_string_literal( '%s: %s has more than one entry -- unchanged\\n' )
assert self.out_c.has_string_literal( '%s: %s has more than one entry--rest ignored\\n' )
assert self.out_c.has_string_literal( '%s: %s is a a multi-part gzip file -- get newer version of gzip\\n' )
assert self.out_c.has_string_literal( '%s: %s is a directory -- ignored\\n' )
assert self.out_c.has_string_literal( '%s: %s is encrypted -- get newer version of gzip\\n' )
assert self.out_c.has_string_literal( '%s: %s is not a directory or a regular file - ignored\\n' )
assert self.out_c.has_string_literal( '%s: %s: cannot %scompress onto itself\\n' )
assert self.out_c.has_string_literal( '%s: %s: extra field of %u bytes ignored\\n' )
assert self.out_c.has_string_literal( '%s: %s: part number %u\\n' )
assert self.out_c.has_string_literal( '%s: %s: unknown method %d -- get newer version of gzip\\n' )
assert self.out_c.has_string_literal( '%s: %s: unknown suffix -- ignored\\n' )
assert self.out_c.has_string_literal( '%s: %s: warning, name truncated\\n' )
assert self.out_c.has_string_literal( '%s: %s: warning: %s%s\\n' )
assert self.out_c.has_string_literal( '%s: -Z not supported in this version\\n' )
assert self.out_c.has_string_literal( '%s: -r not supported on this system\\n' )
assert self.out_c.has_string_literal( '%s: compressed data not %s a terminal. Use -f to force %scompression.\\n' )
assert self.out_c.has_string_literal( '%s: time stamp restored\\n' )
assert self.out_c.has_string_literal( '%s:\\t%s' )
assert self.out_c.has_string_literal( '%s\\n' )
assert self.out_c.has_string_literal( '.exe' )
assert self.out_c.has_string_literal( '.tar' )
assert self.out_c.has_string_literal( '.taz' )
assert self.out_c.has_string_literal( '.tgz' )
assert self.out_c.has_string_literal( '1.2.4' )
assert self.out_c.has_string_literal( '18 Aug 93' )
assert self.out_c.has_string_literal( 'Bad table\\n' )
assert self.out_c.has_string_literal( 'Compilation options:\\n%s %s ' )
assert self.out_c.has_string_literal( 'For help, type: %s -h\\n' )
assert self.out_c.has_string_literal( 'HAVE_UNISTD_H ' )
assert self.out_c.has_string_literal( 'NO_DIR' )
assert self.out_c.has_string_literal( 'NO_MEMORY_H ' )
assert self.out_c.has_string_literal( 'UTIME' )
assert self.out_c.has_string_literal( '\\n%s: ' )
assert self.out_c.has_string_literal( '\\n%s: %s: %s\\n' )
assert self.out_c.has_string_literal( '\\n%s: %s: compressed with %d bits, can only handle %d bits\\n' )
assert self.out_c.has_string_literal( '\\n%s: %s: decompression OK, trailing garbage ignored\\n' )
assert self.out_c.has_string_literal( '\\n%s: %s: encrypted file -- use unzip\\n' )
assert self.out_c.has_string_literal( '\\n%s: %s: first entry not deflated or stored -- use unzip\\n' )
assert self.out_c.has_string_literal( '\\n%s: %s: not a valid zip file\\n' )
assert self.out_c.has_string_literal( '\\n%s: %s: not in gzip format\\n' )
assert self.out_c.has_string_literal( '\\n%s: %s: warning, unknown flags 0x%x\\n' )
assert self.out_c.has_string_literal( 'ab:cdfhH?lLmMnNqrS:tvVZ123456789' )
assert self.out_c.has_string_literal( 'argc<=0' )
assert self.out_c.has_string_literal( 'bad pack level' )
assert self.out_c.has_string_literal( "can't recover suffix\\n" )
assert self.out_c.has_string_literal( 'compressed uncompr. ratio uncompressed_name' )
assert self.out_c.has_string_literal( 'corrupt input.' )
assert self.out_c.has_string_literal( 'corrupt input. Use zcat to recover some data.' )
assert self.out_c.has_string_literal( 'corrupted input -- file name too large' )
assert self.out_c.has_string_literal( 'fstat(stdin)' )
assert self.out_c.has_string_literal( 'internal error, invalid method' )
assert self.out_c.has_string_literal( 'invalid compressed data -- Huffman code > 32 bits' )
assert self.out_c.has_string_literal( 'invalid compressed data--crc error' )
assert self.out_c.has_string_literal( 'invalid compressed data--format violated' )
assert self.out_c.has_string_literal( 'invalid compressed data--length error' )
assert self.out_c.has_string_literal( 'invalid compressed data--length mismatch' )
assert self.out_c.has_string_literal( 'len %ld, siz %ld\\n' )
assert self.out_c.has_string_literal( 'method crc date time ' )
assert self.out_c.has_string_literal( 'name too short' )
assert self.out_c.has_string_literal( 'out of memory' )
assert self.out_c.has_string_literal( 'output in compress .Z format not supported\\n' )
assert self.out_c.has_string_literal( 'read from' )
assert self.out_c.has_string_literal( 'stdout' )
assert self.out_c.has_string_literal( 'too many leaves in Huffman tree' )
assert self.out_c.has_string_literal( 'un' )
assert self.out_c.has_string_literal( 'usage: %s [-%scdfhlLnN%stvV19] [-S suffix] [file ...]\\n' )
assert self.out_c.has_string_literal( '%s: unexpected end of file\\n' )
assert self.out_c.has_string_literal( 'internal error in shorten_name' )
# Currently detected functions which have their named (from symbols) counterparts in not-stripped binary.
#
def test_check_for_all_currently_detected_functions(self):
assert self.out_c.has_func( 'function_804897c' ) #
assert self.out_c.has_func( 'entry_point' ) #
assert self.out_c.has_func( 'function_8048c70' ) #
assert self.out_c.has_func( 'function_8048c80' ) #
assert self.out_c.has_func( 'function_8048cb0' ) #
assert self.out_c.has_func( 'function_8048cf0' ) #
assert self.out_c.has_func( 'function_8048d50' ) #
assert self.out_c.has_func( 'function_8048d80' ) #
assert self.out_c.has_func( 'function_8048da1' ) #
assert self.out_c.has_func( 'function_8048e00' ) #
assert self.out_c.has_func( 'function_8048e2f' ) #
assert self.out_c.has_func( 'function_8048e64' ) #
assert self.out_c.has_func( 'function_8048f64' ) #
assert self.out_c.has_func( 'function_8049019' ) #
assert self.out_c.has_func( 'function_8049053' ) #
assert self.out_c.has_func( 'function_80491c9' ) #
assert self.out_c.has_func( 'function_8049a5e' ) #
assert self.out_c.has_func( 'function_8049d3d' ) #
assert self.out_c.has_func( 'function_804b368' ) #
assert self.out_c.has_func( 'function_804b70c' ) #
assert self.out_c.has_func( 'function_804b829' ) #
assert self.out_c.has_func( 'function_804b950' ) #
assert self.out_c.has_func( 'function_804bbf2' ) #
assert self.out_c.has_func( 'function_804c110' ) #
assert self.out_c.has_func( 'function_804c19d' ) #
assert self.out_c.has_func( 'function_804c270' ) #
assert self.out_c.has_func( 'function_804c350' ) #
assert self.out_c.has_func( 'function_804c4f5' ) #
assert self.out_c.has_func( 'function_804c660' ) #
assert self.out_c.has_func( 'function_804c6cc' ) #
assert self.out_c.has_func( 'function_804cae5' ) #
assert self.out_c.has_func( 'function_804cd1c' ) #
assert self.out_c.has_func( 'function_804cfbe' ) #
assert self.out_c.has_func( 'function_804d110' ) #
assert self.out_c.has_func( 'function_804d13d' ) #
assert self.out_c.has_func( 'function_804d216' ) #
assert self.out_c.has_func( 'function_804d23a' ) #
assert self.out_c.has_func( 'function_804d2ff' ) #
assert self.out_c.has_func( 'function_804d430' ) #
assert self.out_c.has_func( 'function_804da20' ) #
assert self.out_c.has_func( 'function_804da4a' ) #
assert self.out_c.has_func( 'function_804e048' ) #
assert self.out_c.has_func( 'function_804e43f' ) #
assert self.out_c.has_func( 'function_804e604' ) #
assert self.out_c.has_func( 'function_804e7aa' ) #
assert self.out_c.has_func( 'function_804eeaa' ) #
assert self.out_c.has_func( 'function_804efa8' ) #
assert self.out_c.has_func( 'function_804f040' ) #
assert self.out_c.has_func( 'function_804f081' ) #
assert self.out_c.has_func( 'function_804f0b4' ) #
assert self.out_c.has_func( 'basename' ) #
assert self.out_c.has_func( 'function_804f111' ) #
assert self.out_c.has_func( 'error' ) #
assert self.out_c.has_func( 'warn' ) #
assert self.out_c.has_func( 'function_804f1de' ) #
assert self.out_c.has_func( 'function_804f23d' ) #
assert self.out_c.has_func( 'function_804f2d5' ) #
assert self.out_c.has_func( 'function_804f30b' ) #
assert self.out_c.has_func( 'function_804f34c' ) #
assert self.out_c.has_func( 'function_804f3a9' ) #
assert self.out_c.has_func( 'function_804f3e8' ) #
assert self.out_c.has_func( 'function_804f47c' ) #
assert self.out_c.has_func( 'function_804f52f' ) #
assert self.out_c.has_func( 'function_804f558' ) #
assert self.out_c.has_func( 'function_8050180' ) #
assert self.out_c.has_func( 'function_8050220' ) #
assert self.out_c.has_func( 'function_8050240' ) #
assert self.out_c.has_func( 'function_8050478' ) #
assert self.out_c.has_func( 'function_8050a60' ) #
assert self.out_c.has_func( 'function_804b6b3' ) # file_read
assert self.out_c.has_func( 'function_804d592' ) # unzip
assert self.out_c.has_func( 'function_804f734' ) # lzw
assert self.out_c.has_func( 'function_804f7a0' ) # unlzw
assert self.out_c.has_func( 'function_804fd94' ) # unpack
assert self.out_c.has_func( 'function_80505eb' ) # unlzh
| 70.658163
| 125
| 0.662431
| 2,035
| 13,849
| 4.230958
| 0.173464
| 0.211382
| 0.274797
| 0.295935
| 0.712544
| 0.712544
| 0.701278
| 0.345993
| 0.254239
| 0.210453
| 0
| 0.039537
| 0.22016
| 13,849
| 195
| 126
| 71.020513
| 0.757685
| 0.016463
| 0
| 0.010526
| 0
| 0.010526
| 0.339905
| 0.002363
| 0
| 0
| 0
| 0.005128
| 0.957895
| 1
| 0.010526
| false
| 0
| 0.005263
| 0
| 0.026316
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d7bf7f7930e175c69a5da5f16a33a1472ac6496d
| 115
|
py
|
Python
|
run_test.py
|
Reuben481/tigre
|
f342aaa73da8204140fb48929c28cf2f75566a21
|
[
"BSD-3-Clause"
] | null | null | null |
run_test.py
|
Reuben481/tigre
|
f342aaa73da8204140fb48929c28cf2f75566a21
|
[
"BSD-3-Clause"
] | null | null | null |
run_test.py
|
Reuben481/tigre
|
f342aaa73da8204140fb48929c28cf2f75566a21
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
from _Ax import Ax
from tigre.Algorithms import SART
print('hello world')
| 23
| 37
| 0.808696
| 17
| 115
| 5.117647
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 115
| 4
| 38
| 28.75
| 0.887755
| 0
| 0
| 0
| 0
| 0
| 0.095652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
cc1c8f766b384e5ddcb7b4ff0ad39f829b62b3c8
| 73
|
py
|
Python
|
tests/context_processors/models.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 61,676
|
2015-01-01T00:05:13.000Z
|
2022-03-31T20:37:54.000Z
|
tests/context_processors/models.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 8,884
|
2015-01-01T00:12:05.000Z
|
2022-03-31T19:53:11.000Z
|
tests/context_processors/models.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 33,143
|
2015-01-01T02:04:52.000Z
|
2022-03-31T19:42:46.000Z
|
from django.db import models
class DebugObject(models.Model):
pass
| 12.166667
| 32
| 0.753425
| 10
| 73
| 5.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178082
| 73
| 5
| 33
| 14.6
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0bc5d96966f648cadbfe6b3cbf5fb9b714a41b19
| 44
|
py
|
Python
|
src/sacred_scripts/__init__.py
|
jhrmnn/schnetpack
|
2f96dee7d184b8db8ee610d6743570daeb3763b9
|
[
"MIT"
] | null | null | null |
src/sacred_scripts/__init__.py
|
jhrmnn/schnetpack
|
2f96dee7d184b8db8ee610d6743570daeb3763b9
|
[
"MIT"
] | null | null | null |
src/sacred_scripts/__init__.py
|
jhrmnn/schnetpack
|
2f96dee7d184b8db8ee610d6743570daeb3763b9
|
[
"MIT"
] | null | null | null |
from sacred_scripts.run_schnetpack import *
| 22
| 43
| 0.863636
| 6
| 44
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0befd131c790b6c5002f2d1c19cce8957b053cd7
| 99
|
py
|
Python
|
memories/__init__.py
|
jacobver/mem_seq2seq
|
1d87d2fb0884b825131d991e97aecc6d2bd31ce0
|
[
"MIT"
] | null | null | null |
memories/__init__.py
|
jacobver/mem_seq2seq
|
1d87d2fb0884b825131d991e97aecc6d2bd31ce0
|
[
"MIT"
] | null | null | null |
memories/__init__.py
|
jacobver/mem_seq2seq
|
1d87d2fb0884b825131d991e97aecc6d2bd31ce0
|
[
"MIT"
] | null | null | null |
import memories.memory_model
import memories.util as util
from memories.Converser import Converser
| 24.75
| 40
| 0.868687
| 14
| 99
| 6.071429
| 0.571429
| 0.329412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10101
| 99
| 3
| 41
| 33
| 0.955056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
040f971d7c3a25aa2f6b3bc174e49f099822305b
| 1,303
|
py
|
Python
|
genrl/classical/bandit/__init__.py
|
ajaysub110/JigglypuffRL
|
083fd26d05b7eac018e6db7d32c4be4587461766
|
[
"MIT"
] | null | null | null |
genrl/classical/bandit/__init__.py
|
ajaysub110/JigglypuffRL
|
083fd26d05b7eac018e6db7d32c4be4587461766
|
[
"MIT"
] | null | null | null |
genrl/classical/bandit/__init__.py
|
ajaysub110/JigglypuffRL
|
083fd26d05b7eac018e6db7d32c4be4587461766
|
[
"MIT"
] | null | null | null |
from genrl.classical.bandit.bandits import Bandit # noqa
from genrl.classical.bandit.bandits import BernoulliBandit # noqa
from genrl.classical.bandit.bandits import GaussianBandit # noqa
from genrl.classical.bandit.contextual_bandits import BernoulliCB # noqa
from genrl.classical.bandit.contextual_bandits import ContextualBandit # noqa
from genrl.classical.bandit.contextual_bandits import GaussianCB # noqa
from genrl.classical.bandit.contextual_policies import BayesianUCBCBPolicy # noqa
from genrl.classical.bandit.contextual_policies import CBPolicy # noqa
from genrl.classical.bandit.contextual_policies import EpsGreedyCBPolicy # noqa
from genrl.classical.bandit.contextual_policies import GradientCBPolicy # noqa
from genrl.classical.bandit.contextual_policies import ThompsonSamplingCBPolicy # noqa
from genrl.classical.bandit.contextual_policies import UCBCBPolicy # noqa
from genrl.classical.bandit.policies import BanditPolicy # noqa
from genrl.classical.bandit.policies import BayesianUCBPolicy # noqa
from genrl.classical.bandit.policies import EpsGreedyPolicy # noqa
from genrl.classical.bandit.policies import GradientPolicy # noqa
from genrl.classical.bandit.policies import ThompsonSamplingPolicy # noqa
from genrl.classical.bandit.policies import UCBPolicy # noqa
| 68.578947
| 87
| 0.848043
| 153
| 1,303
| 7.163399
| 0.169935
| 0.14781
| 0.29562
| 0.394161
| 0.762774
| 0.762774
| 0.729015
| 0.42427
| 0
| 0
| 0
| 0
| 0.0967
| 1,303
| 18
| 88
| 72.388889
| 0.931181
| 0.068304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0457dcd17de9810388f967ac4a353a3433d957e5
| 8,232
|
py
|
Python
|
boto/redshift/exceptions.py
|
bopopescu/debpkg_python-boto
|
06f9b6f3693ba1933be8214da69cebcd5212cd97
|
[
"MIT"
] | 15
|
2015-03-25T05:24:11.000Z
|
2021-12-18T04:24:06.000Z
|
boto/redshift/exceptions.py
|
bopopescu/debpkg_python-boto
|
06f9b6f3693ba1933be8214da69cebcd5212cd97
|
[
"MIT"
] | 1
|
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
boto/redshift/exceptions.py
|
bopopescu/debpkg_python-boto
|
06f9b6f3693ba1933be8214da69cebcd5212cd97
|
[
"MIT"
] | 10
|
2015-04-26T17:56:37.000Z
|
2020-09-24T14:01:53.000Z
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
# Exception classes for the Amazon Redshift API. Each class maps 1:1 onto a
# service error code; all inherit their behavior from JSONResponseError and
# differ only by name so callers can catch specific faults.
# FIX: the original defined ClusterNotFoundFault twice (identical bodies);
# the redundant second definition has been removed.
class ClusterNotFoundFault(JSONResponseError):
    pass
class InvalidClusterSnapshotStateFault(JSONResponseError):
    pass
class ClusterSnapshotNotFoundFault(JSONResponseError):
    pass
class ClusterSecurityGroupQuotaExceededFault(JSONResponseError):
    pass
class ReservedNodeOfferingNotFoundFault(JSONResponseError):
    pass
class InvalidSubnet(JSONResponseError):
    pass
class ClusterSubnetGroupQuotaExceededFault(JSONResponseError):
    pass
class InvalidClusterStateFault(JSONResponseError):
    pass
class InvalidClusterParameterGroupStateFault(JSONResponseError):
    pass
class ClusterParameterGroupAlreadyExistsFault(JSONResponseError):
    pass
class InvalidClusterSecurityGroupStateFault(JSONResponseError):
    pass
class InvalidRestoreFault(JSONResponseError):
    pass
class AuthorizationNotFoundFault(JSONResponseError):
    pass
class ResizeNotFoundFault(JSONResponseError):
    pass
class NumberOfNodesQuotaExceededFault(JSONResponseError):
    pass
class ClusterSnapshotAlreadyExistsFault(JSONResponseError):
    pass
class AuthorizationQuotaExceededFault(JSONResponseError):
    pass
class AuthorizationAlreadyExistsFault(JSONResponseError):
    pass
class ClusterSnapshotQuotaExceededFault(JSONResponseError):
    pass
class ReservedNodeNotFoundFault(JSONResponseError):
    pass
class ReservedNodeAlreadyExistsFault(JSONResponseError):
    pass
class ClusterSecurityGroupAlreadyExistsFault(JSONResponseError):
    pass
class ClusterParameterGroupNotFoundFault(JSONResponseError):
    pass
class ReservedNodeQuotaExceededFault(JSONResponseError):
    pass
class ClusterQuotaExceededFault(JSONResponseError):
    pass
class ClusterSubnetQuotaExceededFault(JSONResponseError):
    pass
class UnsupportedOptionFault(JSONResponseError):
    pass
class InvalidVPCNetworkStateFault(JSONResponseError):
    pass
class ClusterSecurityGroupNotFoundFault(JSONResponseError):
    pass
class InvalidClusterSubnetGroupStateFault(JSONResponseError):
    pass
class ClusterSubnetGroupAlreadyExistsFault(JSONResponseError):
    pass
class NumberOfNodesPerClusterLimitExceededFault(JSONResponseError):
    pass
class ClusterSubnetGroupNotFoundFault(JSONResponseError):
    pass
class ClusterParameterGroupQuotaExceededFault(JSONResponseError):
    pass
class ClusterAlreadyExistsFault(JSONResponseError):
    pass
class InsufficientClusterCapacityFault(JSONResponseError):
    pass
class InvalidClusterSubnetStateFault(JSONResponseError):
    pass
class SubnetAlreadyInUse(JSONResponseError):
    pass
class InvalidParameterCombinationFault(JSONResponseError):
    pass
class AccessToSnapshotDeniedFault(JSONResponseError):
    pass
class UnauthorizedOperationFault(JSONResponseError):
    pass
class SnapshotCopyAlreadyDisabled(JSONResponseError):
    pass
class ClusterNotFound(JSONResponseError):
    pass
class UnknownSnapshotCopyRegion(JSONResponseError):
    pass
class InvalidClusterSubnetState(JSONResponseError):
    pass
class ReservedNodeQuotaExceeded(JSONResponseError):
    pass
class InvalidClusterState(JSONResponseError):
    pass
class HsmClientCertificateQuotaExceeded(JSONResponseError):
    pass
class SubscriptionCategoryNotFound(JSONResponseError):
    pass
class HsmClientCertificateNotFound(JSONResponseError):
    pass
class SubscriptionEventIdNotFound(JSONResponseError):
    pass
class ClusterSecurityGroupAlreadyExists(JSONResponseError):
    pass
class HsmConfigurationAlreadyExists(JSONResponseError):
    pass
class NumberOfNodesQuotaExceeded(JSONResponseError):
    pass
class ReservedNodeOfferingNotFound(JSONResponseError):
    pass
class BucketNotFound(JSONResponseError):
    pass
class InsufficientClusterCapacity(JSONResponseError):
    pass
class InvalidRestore(JSONResponseError):
    pass
class UnauthorizedOperation(JSONResponseError):
    pass
class ClusterQuotaExceeded(JSONResponseError):
    pass
class InvalidVPCNetworkState(JSONResponseError):
    pass
class ClusterSnapshotNotFound(JSONResponseError):
    pass
class AuthorizationQuotaExceeded(JSONResponseError):
    pass
class InvalidHsmClientCertificateState(JSONResponseError):
    pass
class SNSTopicArnNotFound(JSONResponseError):
    pass
class ResizeNotFound(JSONResponseError):
    pass
class ClusterSubnetGroupNotFound(JSONResponseError):
    pass
class SNSNoAuthorization(JSONResponseError):
    pass
class ClusterSnapshotQuotaExceeded(JSONResponseError):
    pass
class AccessToSnapshotDenied(JSONResponseError):
    pass
class InvalidClusterSecurityGroupState(JSONResponseError):
    pass
class NumberOfNodesPerClusterLimitExceeded(JSONResponseError):
    pass
class ClusterSubnetQuotaExceeded(JSONResponseError):
    pass
class SNSInvalidTopic(JSONResponseError):
    pass
class ClusterSecurityGroupNotFound(JSONResponseError):
    pass
class InvalidElasticIp(JSONResponseError):
    pass
class InvalidClusterParameterGroupState(JSONResponseError):
    pass
class InvalidHsmConfigurationState(JSONResponseError):
    pass
class ClusterAlreadyExists(JSONResponseError):
    pass
class HsmConfigurationQuotaExceeded(JSONResponseError):
    pass
class ClusterSnapshotAlreadyExists(JSONResponseError):
    pass
class SubscriptionSeverityNotFound(JSONResponseError):
    pass
class SourceNotFound(JSONResponseError):
    pass
class ReservedNodeAlreadyExists(JSONResponseError):
    pass
class ClusterSubnetGroupQuotaExceeded(JSONResponseError):
    pass
class ClusterParameterGroupNotFound(JSONResponseError):
    pass
class InvalidS3BucketName(JSONResponseError):
    pass
class InvalidS3KeyPrefix(JSONResponseError):
    pass
class SubscriptionAlreadyExist(JSONResponseError):
    pass
class HsmConfigurationNotFound(JSONResponseError):
    pass
class AuthorizationNotFound(JSONResponseError):
    pass
class ClusterSecurityGroupQuotaExceeded(JSONResponseError):
    pass
class EventSubscriptionQuotaExceeded(JSONResponseError):
    pass
class AuthorizationAlreadyExists(JSONResponseError):
    pass
class InvalidClusterSnapshotState(JSONResponseError):
    pass
class ClusterParameterGroupQuotaExceeded(JSONResponseError):
    pass
class SnapshotCopyDisabled(JSONResponseError):
    pass
class ClusterSubnetGroupAlreadyExists(JSONResponseError):
    pass
class ReservedNodeNotFound(JSONResponseError):
    pass
class HsmClientCertificateAlreadyExists(JSONResponseError):
    pass
class InvalidClusterSubnetGroupState(JSONResponseError):
    pass
class SubscriptionNotFound(JSONResponseError):
    pass
class InsufficientS3BucketPolicy(JSONResponseError):
    pass
class ClusterParameterGroupAlreadyExists(JSONResponseError):
    pass
class UnsupportedOption(JSONResponseError):
    pass
class CopyToRegionDisabled(JSONResponseError):
    pass
class SnapshotCopyAlreadyEnabled(JSONResponseError):
    pass
class IncompatibleOrderableOptions(JSONResponseError):
    pass
| 17.895652
| 77
| 0.809038
| 616
| 8,232
| 10.811688
| 0.349026
| 0.343694
| 0.421622
| 0.013814
| 0.015315
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000993
| 0.143465
| 8,232
| 459
| 78
| 17.934641
| 0.943554
| 0.131195
| 0
| 0.506849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.497717
| 0.004566
| 0
| 0.502283
| 0
| 0
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
f09d91ddaea70f71b76289b850f48c08a528b370
| 29
|
py
|
Python
|
RestFlask-Hotels/MemoryMode/models/__init__.py
|
LucasBiason/FlaskStudies
|
a594846f6eaa1655267f84da73764716e22f719b
|
[
"MIT"
] | null | null | null |
RestFlask-Hotels/MemoryMode/models/__init__.py
|
LucasBiason/FlaskStudies
|
a594846f6eaa1655267f84da73764716e22f719b
|
[
"MIT"
] | null | null | null |
RestFlask-Hotels/MemoryMode/models/__init__.py
|
LucasBiason/FlaskStudies
|
a594846f6eaa1655267f84da73764716e22f719b
|
[
"MIT"
] | null | null | null |
from .hotel import HotelModel
| 29
| 29
| 0.862069
| 4
| 29
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0c79622e39856b52ad56e96ebd9fff337670348
| 782
|
py
|
Python
|
python_helper/api/test/api/src/ModuleImportsTest.py
|
SamuelJansen/python_helper
|
1cd43f9ef64cdb84d3c22e56346dc3a1096ac809
|
[
"MIT"
] | null | null | null |
python_helper/api/test/api/src/ModuleImportsTest.py
|
SamuelJansen/python_helper
|
1cd43f9ef64cdb84d3c22e56346dc3a1096ac809
|
[
"MIT"
] | null | null | null |
python_helper/api/test/api/src/ModuleImportsTest.py
|
SamuelJansen/python_helper
|
1cd43f9ef64cdb84d3c22e56346dc3a1096ac809
|
[
"MIT"
] | null | null | null |
from python_helper import log
from python_helper import ObjectHelper
from python_helper import SettingHelper
from python_helper import StringHelper
from python_helper import EnvironmentHelper
from python_helper import ReflectionHelper
from python_helper import RandomHelper
from python_helper import Constant
from python_helper import EnvironmentVariable
from python_helper import Test
from python_helper import Method
from python_helper import Function
from python_helper import ObjectHelperHelper
from python_helper import SettingHelperHelper
from python_helper import SettingHelperHelper
from python_helper import LogHelperHelper
from python_helper import RandomHelperHelper
from python_helper import FileHelper
@Test(inspectGlobals=False)
def allImportedSuccesfuly():
    """Smoke test: reaching this body means every top-level import succeeded.

    The body is intentionally empty — the module imports above are the test.
    (Name typo 'Succesfuly' kept: it is the test's public identifier.)
    """
    pass
| 34
| 45
| 0.882353
| 95
| 782
| 7.073684
| 0.263158
| 0.267857
| 0.428571
| 0.589286
| 0.154762
| 0.154762
| 0.154762
| 0.154762
| 0.154762
| 0
| 0
| 0
| 0.104859
| 782
| 22
| 46
| 35.545455
| 0.96
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| true
| 0
| 0.904762
| 0
| 0.952381
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0ccb1d4136a01f5a1a46145d3c3722cb4ea2e83
| 142
|
py
|
Python
|
tests/example/core/tests.py
|
jacklinke/django-directed
|
8ef8cd8a71e9a03a8628dce6465351f676f542ff
|
[
"Apache-2.0"
] | 2
|
2022-02-09T10:15:40.000Z
|
2022-02-22T14:11:03.000Z
|
tests/example/core/tests.py
|
jacklinke/django-directed
|
8ef8cd8a71e9a03a8628dce6465351f676f542ff
|
[
"Apache-2.0"
] | 1
|
2022-02-20T14:49:37.000Z
|
2022-02-20T14:49:37.000Z
|
tests/example/core/tests.py
|
jacklinke/django-directed
|
8ef8cd8a71e9a03a8628dce6465351f676f542ff
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from django.conf import settings
def test_account_is_configured():
    """The example 'core' app must be registered in Django's INSTALLED_APPS."""
    installed_apps = settings.INSTALLED_APPS
    assert "tests.example.core" in installed_apps
| 20.285714
| 58
| 0.809859
| 20
| 142
| 5.55
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126761
| 142
| 6
| 59
| 23.666667
| 0.895161
| 0
| 0
| 0
| 0
| 0
| 0.126761
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b12d5fac4c9dd60c1948ade841572faba00efe9
| 150
|
py
|
Python
|
BlockSim/settings.py
|
aminrd/BlockSim
|
cda6f119ab57b4db6e177a1095705c28d024c25e
|
[
"MIT"
] | null | null | null |
BlockSim/settings.py
|
aminrd/BlockSim
|
cda6f119ab57b4db6e177a1095705c28d024c25e
|
[
"MIT"
] | null | null | null |
BlockSim/settings.py
|
aminrd/BlockSim
|
cda6f119ab57b4db6e177a1095705c28d024c25e
|
[
"MIT"
] | null | null | null |
import os
# Repository root: one directory above the directory containing this file.
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
# SQLAlchemy-style connection string for the SQLite file at the repo root.
database_url = f'sqlite:///{os.path.join(root_dir, "database.db")}'
| 30
| 70
| 0.733333
| 25
| 150
| 4.12
| 0.56
| 0.23301
| 0.252427
| 0.291262
| 0.31068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 150
| 4
| 71
| 37.5
| 0.735714
| 0
| 0
| 0
| 0
| 0
| 0.326667
| 0.22
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
9bfaa1fe4ff1db73cf283a644fc9b01945728a13
| 36
|
py
|
Python
|
indra/assemblers/cag/__init__.py
|
zebulon2/indra
|
7727ddcab52ad8012eb6592635bfa114e904bd48
|
[
"BSD-2-Clause"
] | 1
|
2020-12-27T14:37:10.000Z
|
2020-12-27T14:37:10.000Z
|
indra/assemblers/cag/__init__.py
|
zebulon2/indra
|
7727ddcab52ad8012eb6592635bfa114e904bd48
|
[
"BSD-2-Clause"
] | null | null | null |
indra/assemblers/cag/__init__.py
|
zebulon2/indra
|
7727ddcab52ad8012eb6592635bfa114e904bd48
|
[
"BSD-2-Clause"
] | null | null | null |
from .assembler import CAGAssembler
| 18
| 35
| 0.861111
| 4
| 36
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5041bbcb0f65bc2c0fad6c129c93427862929a0a
| 67
|
py
|
Python
|
hackathonbaobab2020/__init__.py
|
JaimeSotomayor/hackathonbaobab2020
|
0fd527a37adc110d4118c8d87f5448c677a31bba
|
[
"MIT"
] | null | null | null |
hackathonbaobab2020/__init__.py
|
JaimeSotomayor/hackathonbaobab2020
|
0fd527a37adc110d4118c8d87f5448c677a31bba
|
[
"MIT"
] | null | null | null |
hackathonbaobab2020/__init__.py
|
JaimeSotomayor/hackathonbaobab2020
|
0fd527a37adc110d4118c8d87f5448c677a31bba
|
[
"MIT"
] | null | null | null |
from .core import *
from .execution import *
from .solver import *
| 16.75
| 24
| 0.731343
| 9
| 67
| 5.444444
| 0.555556
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 67
| 3
| 25
| 22.333333
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac9d2f9fcab843a1a40d34f769236fb2c0df0800
| 4,800
|
py
|
Python
|
tests/api/one/test_accounts.py
|
stjordanis/python-client
|
2a04351ea4da9db491fd85c8f898bb8fbab542df
|
[
"MIT"
] | 1
|
2018-12-07T22:42:06.000Z
|
2018-12-07T22:42:06.000Z
|
tests/api/one/test_accounts.py
|
stjordanis/python-client
|
2a04351ea4da9db491fd85c8f898bb8fbab542df
|
[
"MIT"
] | null | null | null |
tests/api/one/test_accounts.py
|
stjordanis/python-client
|
2a04351ea4da9db491fd85c8f898bb8fbab542df
|
[
"MIT"
] | null | null | null |
import responses
from client import ArkClient
def test_balance_calls_correct_url_with_params():
    """accounts.balance(address=...) must GET /accounts/getBalance?address=...

    NOTE(review): no @responses.activate decorator or fixture is visible in
    this chunk — mock activation is presumably configured elsewhere; verify.
    """
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/getBalance',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.balance(address='spongebob')
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == (
        'http://127.0.0.1:4002/accounts/getBalance?address=spongebob'
    )
def test_public_key_calls_correct_url_with_params():
    """accounts.public_key(address=...) must GET /accounts/getPublicKey?address=..."""
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/getPublicKey',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.public_key(address='spongebob')
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == (
        'http://127.0.0.1:4002/accounts/getPublicKey?address=spongebob'
    )
def test_delegates_calls_correct_url_with_params():
    """accounts.delegates(address=...) must GET /accounts/delegates?address=..."""
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/delegates',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.delegates(address='spongebob')
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == (
        'http://127.0.0.1:4002/accounts/delegates?address=spongebob'
    )
def test_delegates_fee_calls_correct_url():
    """accounts.delegates_fee() must GET /accounts/delegates/fee with no params."""
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/delegates/fee',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.delegates_fee()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == 'http://127.0.0.1:4002/accounts/delegates/fee'
def test_get_correct_url_with_params():
    """accounts.get(address=...) must GET /accounts?address=..."""
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.get(address='spongebob')
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == 'http://127.0.0.1:4002/accounts?address=spongebob'
def test_all_calls_correct_url_with_default_params():
    """accounts.all() with no args must default to limit=100 on /accounts/getAllAccounts."""
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/getAllAccounts',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.all()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == (
        'http://127.0.0.1:4002/accounts/getAllAccounts?limit=100'
    )
def test_all_calls_correct_url_with_passed_in_params():
    """accounts.all(limit=..., offset=...) must forward both query params.

    Params are asserted individually ('in') because their order in the query
    string is not guaranteed.
    """
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/getAllAccounts',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.all(limit=69, offset=123)
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith(
        'http://127.0.0.1:4002/accounts/getAllAccounts?'
    )
    assert 'limit=69' in responses.calls[0].request.url
    assert 'offset=123' in responses.calls[0].request.url
def test_top_calls_correct_url_with_default_params():
    """accounts.top() with no args must default to limit=100 on /accounts/top."""
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/top',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.top()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == 'http://127.0.0.1:4002/accounts/top?limit=100'
def test_top_calls_correct_url_with_passed_in_params():
    """accounts.top(limit=..., offset=...) must forward both query params."""
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/top',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.top(limit=69, offset=123)
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url.startswith('http://127.0.0.1:4002/accounts/top?')
    assert 'limit=69' in responses.calls[0].request.url
    assert 'offset=123' in responses.calls[0].request.url
def test_count_calls_correct_url():
    """accounts.count() must GET /accounts/count with no params."""
    responses.add(
        responses.GET,
        'http://127.0.0.1:4002/accounts/count',
        json={'success': True},
        status=200
    )
    client = ArkClient('http://127.0.0.1:4002', api_version='v1')
    client.accounts.count()
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == 'http://127.0.0.1:4002/accounts/count'
| 30.188679
| 95
| 0.645417
| 657
| 4,800
| 4.601218
| 0.08067
| 0.069467
| 0.079391
| 0.089315
| 0.94608
| 0.924909
| 0.924909
| 0.913993
| 0.884883
| 0.882898
| 0
| 0.100464
| 0.19125
| 4,800
| 158
| 96
| 30.379747
| 0.678259
| 0
| 0
| 0.571429
| 0
| 0
| 0.260417
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.079365
| false
| 0.015873
| 0.015873
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
acec77c98e9dde22241e1b4e2ec03f97e2bdf055
| 349
|
py
|
Python
|
nlproar/explain/importance_measures/__init__.py
|
AndreasMadsen/nlp-roar-interpretability
|
ad30f756cd744dfb05d1b57de744c5ff60d9f20c
|
[
"MIT"
] | 17
|
2021-11-04T02:15:30.000Z
|
2021-12-26T16:31:27.000Z
|
nlproar/explain/importance_measures/__init__.py
|
AndreasMadsen/nlp-roar-interpretability
|
ad30f756cd744dfb05d1b57de744c5ff60d9f20c
|
[
"MIT"
] | null | null | null |
nlproar/explain/importance_measures/__init__.py
|
AndreasMadsen/nlp-roar-interpretability
|
ad30f756cd744dfb05d1b57de744c5ff60d9f20c
|
[
"MIT"
] | 1
|
2021-11-04T10:45:25.000Z
|
2021-11-04T10:45:25.000Z
|
from .attention import AttentionImportanceMeasure
from .gradient import GradientImportanceMeasure
from .integrated_gradient import IntegratedGradientImportanceMeasure
from .mutual_information import MutualInformationImportanceMeasure
from .random import RandomImportanceMeasure
from .input_times_gradient import InputTimesGradientImportanceMeasure
| 43.625
| 69
| 0.911175
| 28
| 349
| 11.214286
| 0.571429
| 0.133758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071633
| 349
| 7
| 70
| 49.857143
| 0.969136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a16c50ac67ab1d7846d6e1d5c5e6c9ff2749df8
| 8,546
|
py
|
Python
|
UnityEngine/UI/GraphicRaycaster/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/UI/GraphicRaycaster/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/UI/GraphicRaycaster/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class GraphicRaycaster:
    """Stub of UnityEngine.UI.GraphicRaycaster for editor auto-completion.

    Appears to be auto-generated (repo path suggests udon-pie-auto-completion):
    every method is a `pass` stub whose reST docstring records the C# parameter
    and return types; no method is executable. The @overload groups mirror the
    C# overload sets — the final non-@overload def of each name is the runtime
    catch-all. Do not call these at runtime.
    """
    def __new__(cls, arg1=None):
        '''
        :returns: GraphicRaycaster
        :rtype: UnityEngine.UI.GraphicRaycaster
        '''
        pass
    @staticmethod
    def op_Implicit(arg1):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def op_Equality(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def op_Inequality(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def get_sortOrderPriority():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def get_renderOrderPriority():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def get_ignoreReversedGraphics():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def set_ignoreReversedGraphics(arg1):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        '''
        pass
    @staticmethod
    def get_blockingObjects():
        '''
        :returns: GraphicRaycaster+BlockingObjects
        :rtype: UnityEngine.GraphicRaycaster+BlockingObjects
        '''
        pass
    @staticmethod
    def set_blockingObjects(arg1):
        '''
        :param arg1: BlockingObjects
        :type arg1: UnityEngine.BlockingObjects
        '''
        pass
    @staticmethod
    def Raycast(arg1, arg2):
        '''
        :param arg1: PointerEventData
        :type arg1: UnityEngine.PointerEventData
        :param arg2: Undefined variable
        :type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
        '''
        pass
    @staticmethod
    def get_eventCamera():
        '''
        :returns: Camera
        :rtype: UnityEngine.Camera
        '''
        pass
    @staticmethod
    def ToString():
        '''
        :returns: String
        :rtype: System.String
        '''
        pass
    @staticmethod
    def IsActive():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def IsDestroyed():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def get_enabled():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def set_enabled(arg1):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        '''
        pass
    @staticmethod
    def get_transform():
        '''
        :returns: Transform
        :rtype: UnityEngine.Transform
        '''
        pass
    @staticmethod
    def get_gameObject():
        '''
        :returns: GameObject
        :rtype: UnityEngine.GameObject
        '''
        pass
    @staticmethod
    @overload
    def GetComponent(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    @overload
    def GetComponent(arg1):
        '''
        :param arg1: String
        :type arg1: System.String or str
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    def GetComponent(arg1=None):
        pass
    @staticmethod
    @overload
    def GetComponentInChildren(arg1, arg2):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :param arg2: Boolean
        :type arg2: System.Boolean or bool
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    @overload
    def GetComponentInChildren(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    def GetComponentInChildren(arg1=None, arg2=None):
        pass
    @staticmethod
    @overload
    def GetComponentsInChildren(arg1, arg2):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :param arg2: Boolean
        :type arg2: System.Boolean or bool
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInChildren(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInChildren(arg1, arg2):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: Undefined variable
        :type arg2: ListT.ListT
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInChildren(arg1):
        '''
        :param arg1: Undefined variable
        :type arg1: ListT.ListT
        '''
        pass
    @staticmethod
    def GetComponentsInChildren(arg1=None, arg2=None):
        pass
    @staticmethod
    @overload
    def GetComponentInParent(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    def GetComponentInParent(arg1=None):
        pass
    @staticmethod
    @overload
    def GetComponentsInParent(arg1, arg2):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :param arg2: Boolean
        :type arg2: System.Boolean or bool
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInParent(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInParent(arg1, arg2):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: Undefined variable
        :type arg2: ListT.ListT
        '''
        pass
    @staticmethod
    def GetComponentsInParent(arg1=None, arg2=None):
        pass
    @staticmethod
    @overload
    def GetComponents(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponents(arg1, arg2):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :param arg2: Undefined variable
        :type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
        '''
        pass
    @staticmethod
    @overload
    def GetComponents(arg1):
        '''
        :param arg1: Undefined variable
        :type arg1: ListT.ListT
        '''
        pass
    @staticmethod
    def GetComponents(arg1=None, arg2=None):
        pass
    @staticmethod
    def GetInstanceID():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def GetHashCode():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def Equals(arg1):
        '''
        :param arg1: Object
        :type arg1: System.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def get_name():
        '''
        :returns: String
        :rtype: System.String
        '''
        pass
    @staticmethod
    def set_name(arg1):
        '''
        :param arg1: String
        :type arg1: System.String or str
        '''
        pass
    @staticmethod
    def GetType():
        '''
        :returns: Type
        :rtype: System.Type
        '''
        pass
| 20.742718
| 77
| 0.540838
| 696
| 8,546
| 6.612069
| 0.094828
| 0.156454
| 0.123859
| 0.088005
| 0.763146
| 0.762495
| 0.727075
| 0.722077
| 0.607562
| 0.51369
| 0
| 0.023307
| 0.367423
| 8,546
| 411
| 78
| 20.793187
| 0.827969
| 0.378423
| 0
| 0.738854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.292994
| false
| 0.292994
| 0.025478
| 0
| 0.324841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
c580055bbc089ebf59edd6420a0ce51bf8ec2bef
| 101
|
py
|
Python
|
integration/common/openlineage/common/provider/great_expectations/__init__.py
|
tomassatka/OpenLineage
|
96b34d8ed2eb642dcc3a2ee53eca53f4455ac7c0
|
[
"Apache-2.0"
] | 746
|
2020-10-26T16:45:54.000Z
|
2022-03-31T22:49:29.000Z
|
integration/common/openlineage/common/provider/great_expectations/__init__.py
|
tomassatka/OpenLineage
|
96b34d8ed2eb642dcc3a2ee53eca53f4455ac7c0
|
[
"Apache-2.0"
] | 442
|
2020-10-26T12:34:58.000Z
|
2022-03-31T16:28:41.000Z
|
integration/common/openlineage/common/provider/great_expectations/__init__.py
|
tomassatka/OpenLineage
|
96b34d8ed2eb642dcc3a2ee53eca53f4455ac7c0
|
[
"Apache-2.0"
] | 70
|
2020-12-28T18:52:35.000Z
|
2022-03-30T06:58:09.000Z
|
from openlineage.common.provider.great_expectations.action import OpenLineageValidationAction # noqa
| 50.5
| 100
| 0.891089
| 10
| 101
| 8.9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059406
| 101
| 1
| 101
| 101
| 0.936842
| 0.039604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c5ab4ea59dfac027c47bbe9f226bf71744ff3ad9
| 10,436
|
py
|
Python
|
example/layers/attention.py
|
OpenBMB/BMCook
|
de31035e4d58d67433647e7c676f56ede7ac8477
|
[
"Apache-2.0"
] | 1
|
2022-03-29T09:06:17.000Z
|
2022-03-29T09:06:17.000Z
|
example/layers/attention.py
|
OpenBMB/BMCook
|
de31035e4d58d67433647e7c676f56ede7ac8477
|
[
"Apache-2.0"
] | null | null | null |
example/layers/attention.py
|
OpenBMB/BMCook
|
de31035e4d58d67433647e7c676f56ede7ac8477
|
[
"Apache-2.0"
] | 1
|
2022-03-30T02:25:42.000Z
|
2022-03-30T02:25:42.000Z
|
from typing import Optional
import torch
import bmtrain as bmt
import cpm_kernels.torch as ct
import math
class Attention(bmt.DistributedModule):
    """Multi-head attention over channels-first activations.

    Operates on tensors laid out as (batch, dim_model, seq_len) using
    cpm_kernels fused ops (``ct.bmm``, ``ct.mask``, ``ct.softmax``); the
    projection matmuls can run in int8 when ``int8=True``. Parameters are
    bmtrain DistributedParameters (sharded across the training group).
    """
    def __init__(self,
            dim_model : int,
            num_heads : int,
            dim_head : int,
            init_method : bmt.ParameterInitializer,
            int8=True,
            dtype=torch.half
        ):
        super().__init__()
        # Q/K/V projection weights, each (num_heads * dim_head, dim_model).
        self.project_q = bmt.DistributedParameter(
            torch.empty(num_heads * dim_head, dim_model, dtype=dtype),
            init_method=init_method)
        self.project_k = bmt.DistributedParameter(
            torch.empty(num_heads * dim_head, dim_model, dtype=dtype),
            init_method=init_method)
        self.project_v = bmt.DistributedParameter(
            torch.empty(num_heads * dim_head, dim_model, dtype=dtype),
            init_method=init_method)
        # Output projection, (dim_model, num_heads * dim_head).
        self.attention_out = bmt.DistributedParameter(
            torch.empty(dim_model, num_heads * dim_head, dtype=dtype),
            init_method=init_method)
        # NOTE(review): self.relu is never used in forward() — confirm before removing.
        self.relu = torch.nn.ReLU()
        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head
        self.int8 = int8
    def forward(self,
            hidden_q : torch.Tensor,                # (batch, dim_model, len_q)
            hidden_kv : torch.Tensor,               # (batch, dim_model, len_k)
            mask : torch.Tensor,                    # (batch, len_k, len_q)
            position_bias : Optional[torch.Tensor], # (num_heads, len_k, len_q)
        ):
        """
        Args:
            hidden_q : (batch, dim_model, len_q) fp16
            hidden_kv : (batch, dim_model, len_k) fp16
            mask : (batch, len_k, len_q) fp16
            position_bias : (num_heads, len_k, len_q) fp16
        Returns:
            out : (batch, dim_model, len_q) fp16
        """
        # bmt.inspect.record_tensor(hidden_q, "attn_x")
        batch_size = hidden_q.size(0)
        len_q = hidden_q.size(2)
        len_k = hidden_kv.size(2)
        project_q = self.project_q
        project_k = self.project_k
        project_v = self.project_v
        attention_out = self.attention_out
        # (1#batch, num_heads * dim_head, dim_model) @ (batch, dim_model, len_q) = (batch, num_heads * dim_head, len_q)
        h_q = ct.bmm(project_q.unsqueeze(0), False, hidden_q, False, int8=self.int8) #/ math.sqrt(self.dim_model)
        h_k = ct.bmm(project_k.unsqueeze(0), False, hidden_kv, False, int8=self.int8) #/ math.sqrt(self.dim_model)
        h_v = ct.bmm(project_v.unsqueeze(0), False, hidden_kv, False, int8=self.int8) #/ math.sqrt(self.dim_model)
        # view (batch * num_heads, dim_head, length)
        h_q = h_q.view(batch_size * self.num_heads, self.dim_head, -1)
        h_k = h_k.view(batch_size * self.num_heads, self.dim_head, -1)
        h_v = h_v.view(batch_size * self.num_heads, self.dim_head, -1)
        # (batch * num_heads, dim_head, len_k)T @ (batch * num_heads, dim_head, len_q) = (batch * num_heads, len_k, len_q)
        score = ct.bmm( h_k, True, h_q, False, int8=False) # use FP 16 here
        # Scaled dot-product: divide by sqrt(dim_head).
        score = score / math.sqrt(self.dim_head)
        # (batch, num_heads, len_k, len_q)
        score = score.view(batch_size, self.num_heads, len_k, len_q)
        if position_bias is not None:
            score = ct.batched_add(
                score,
                position_bias
            )
        # Mask out disallowed positions with -inf before softmax.
        # (batch, num_heads, len_k * len_q)
        masked_score = ct.mask(
            score.view(batch_size, self.num_heads, -1),
            mask.view(batch_size, -1),
            float("-inf")
        )
        # (batch * num_heads, len_k, len_q)
        masked_score = masked_score.view(batch_size * self.num_heads, len_k, len_q)
        # (batch * num_heads, len_k, len_q)
        masked_score = ct.softmax(masked_score) # softmax along len_k
        # (batch * num_heads, dim_head, len_k) @ (batch * num_heads, len_k, len_q) = (batch * num_heads, dim_head, len_q)
        attention_result = ct.bmm(h_v, False, masked_score, False, int8=False) # use FP 16 here
        attention_result = attention_result.view(batch_size, self.num_heads * self.dim_head, len_q)
        # (1#batch, dim_model, num_heads * dim_head) @ (batch, num_heads * dim_head, len_q) = (batch, dim_model, len_q)
        attention_out = ct.bmm(attention_out.unsqueeze(0), False, attention_result, False, int8=self.int8) #/ math.sqrt(self.dim_head * self.num_heads)
        return attention_out
def fixed_pos_embedding(x, seq_dim=1, seq_len=None):
dim = x.shape[-2]
if seq_len is None:
seq_len = x.shape[seq_dim]
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
sinusoid_inp = torch.einsum("j , i -> i j", torch.arange(seq_len), inv_freq).to(x.device).half()
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
    """Rotate channel pairs: (x[2i], x[2i+1]) -> (-x[2i+1], x[2i]).

    Operates on axis 1 (the channel axis in this file's (batch, dim, seq)
    layout) and returns a tensor of the same shape, with consecutive even/odd
    channels interleaved back together.
    """
    even = x[:, 0::2, :]
    odd = x[:, 1::2, :]
    paired = torch.stack((-odd, even), dim=-2)
    # Equivalent to einsum-style rearrange '... d j s -> ... (d j) s'.
    return paired.flatten(-3, -2)
def apply_rotary_pos_emb(x, sincos, offset=0):
    """Apply rotary position embedding to ``x``.

    Args:
        x: tensor of shape (batch*heads, rotary_dim, seq).
        sincos: (sin, cos) tables of shape (rotary_dim // 2, seq_total) as
            produced by ``fixed_pos_embedding``.
        offset: starting position within the tables.

    Returns:
        Tensor of the same shape as ``x`` with the rotation applied.
    """
    seq_slice = slice(offset, x.shape[-1] + offset)
    # Select the relevant positions, add a broadcast batch axis, and duplicate
    # each half-channel row so the tables align with the interleaved channels.
    sin, cos = (t[None, :, seq_slice].repeat_interleave(2, 1) for t in sincos)
    return x * cos + rotate_every_two(x) * sin
class GPTJAtt(bmt.DistributedModule):
    """GPT-J style multi-head attention.

    All activations are kept in a (batch, feature, length) layout. Rotary
    position embeddings are applied to the first ``rotary_dim`` channels of
    each head in place of an additive position bias.
    """
    def __init__(self,
            dim_model : int,
            num_heads : int,
            dim_head : int,
            init_method: bmt.ParameterInitializer,
            int8=True,
            dtype=torch.half
        ):
        super().__init__()
        # Q/K/V projections stored as (num_heads * dim_head, dim_model) so they
        # left-multiply the (batch, dim_model, length) activations via ct.bmm.
        self.q_proj = bmt.DistributedParameter(
            torch.empty(num_heads * dim_head, dim_model, dtype=dtype),
            init_method=init_method)
        self.k_proj = bmt.DistributedParameter(
            torch.empty(num_heads * dim_head, dim_model, dtype=dtype),
            init_method=init_method)
        self.v_proj = bmt.DistributedParameter(
            torch.empty(num_heads * dim_head, dim_model, dtype=dtype),
            init_method=init_method)
        # Output projection back to the model dimension.
        self.out_proj = bmt.DistributedParameter(
            torch.empty(dim_model, num_heads * dim_head, dtype=dtype),
            init_method=init_method)
        self.relu = torch.nn.ReLU()  # NOTE(review): not used in forward() -- possibly leftover
        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head
        self.int8 = int8  # use int8 matmuls for the Q/K/V/out projections
        self.rotary_dim = 64  # leading per-head channels that receive rotary embedding (hard-coded)
    def forward(self,
            hidden_q : torch.Tensor,                # (batch, dim_model, len_q)
            hidden_kv : torch.Tensor,               # (batch, dim_model, len_k)
            mask : torch.Tensor,                    # (batch, len_k, len_q)
            position_bias : Optional[torch.Tensor], # (num_heads, len_k, len_q)
        ):
        """
        Args:
            hidden_q : (batch, dim_model, len_q) fp16
            hidden_kv : (batch, dim_model, len_k) fp16
            mask : (batch, len_k, len_q) fp16
            position_bias : (num_heads, len_k, len_q) fp16 -- accepted for
                interface compatibility but currently ignored (the additive
                bias is commented out below; rotary embeddings are used instead)
        Returns:
            out : (batch, dim_model, len_q) fp16
        """
        # bmt.inspect.record_tensor(hidden_q, "attn_x")
        batch_size = hidden_q.size(0)
        len_q = hidden_q.size(2)
        len_k = hidden_kv.size(2)
        project_q = self.q_proj
        project_k = self.k_proj
        project_v = self.v_proj
        attention_out = self.out_proj
        # (1#batch, num_heads * dim_head, dim_model) @ (batch, dim_model, len_q) = (batch, num_heads * dim_head, len_q)
        h_q = ct.bmm(project_q.unsqueeze(0), False, hidden_q, False, int8=self.int8) #/ math.sqrt(self.dim_model)
        h_k = ct.bmm(project_k.unsqueeze(0), False, hidden_kv, False, int8=self.int8) #/ math.sqrt(self.dim_model)
        h_v = ct.bmm(project_v.unsqueeze(0), False, hidden_kv, False, int8=self.int8) #/ math.sqrt(self.dim_model)
        # view (batch * num_heads, dim_head, length)
        h_q = h_q.view(batch_size * self.num_heads, self.dim_head, -1)
        h_k = h_k.view(batch_size * self.num_heads, self.dim_head, -1)
        h_v = h_v.view(batch_size * self.num_heads, self.dim_head, -1)
        # Split each head into the rotary part (first rotary_dim channels) and
        # the pass-through remainder, as in GPT-J.
        k_rot = h_k[:, : self.rotary_dim, :]
        k_pass = h_k[:, self.rotary_dim :, :]
        q_rot = h_q[:, : self.rotary_dim, :]
        q_pass = h_q[:, self.rotary_dim :, :]
        seq_len = h_k.shape[-1]
        # NOTE(review): queries reuse the key-length sin/cos tables with
        # offset=0, which assumes len_q == len_k (self-attention, no KV cache)
        # -- confirm against callers.
        sincos = fixed_pos_embedding(k_rot, -1, seq_len=seq_len)
        k_rot = apply_rotary_pos_emb(k_rot, sincos, offset=0)
        q_rot = apply_rotary_pos_emb(q_rot, sincos, offset=0)
        h_k = torch.cat([k_rot, k_pass], dim=-2)
        h_q = torch.cat([q_rot, q_pass], dim=-2)
        # (batch * num_heads, dim_head, len_k)T @ (batch * num_heads, dim_head, len_q) = (batch * num_heads, len_k, len_q)
        score = ct.bmm( h_k, True, h_q, False, int8=False) # use FP 16 here
        score = score / math.sqrt(self.dim_head)
        # (batch, num_heads, len_k, len_q)
        score = score.view(batch_size, self.num_heads, len_k, len_q)
        # Additive position bias intentionally disabled: rotary embeddings
        # above carry the positional signal instead.
        # if position_bias is not None:
        #     score = ct.batched_add(
        #         score,
        #         position_bias
        #     )
        # (batch, num_heads, len_k * len_q)
        masked_score = ct.mask(
            score.view(batch_size, self.num_heads, -1),
            mask.view(batch_size, -1),
            float("-inf")
        )
        # (batch * num_heads, len_k, len_q)
        masked_score = masked_score.view(batch_size * self.num_heads, len_k, len_q)
        self.masked_score = masked_score # Intermediary values for KD (pre-softmax logits kept for distillation)
        # (batch * num_heads, len_k, len_q)
        masked_score = ct.softmax(masked_score) # softmax along len_k
        # (batch * num_heads, dim_head, len_k) @ (batch * num_heads, len_k, len_q) = (batch * num_heads, dim_head, len_q)
        attention_result = ct.bmm(h_v, False, masked_score, False, int8=False) # use FP 16 here
        attention_result = attention_result.view(batch_size, self.num_heads * self.dim_head, len_q)
        # (1#batch, dim_model, num_heads * dim_head) @ (batch, num_heads * dim_head, len_q) = (batch, dim_model, len_q)
        attention_out = ct.bmm(attention_out.unsqueeze(0), False, attention_result, False, int8=self.int8) #/ math.sqrt(self.dim_head * self.num_heads)
        return attention_out
| 41.744
| 151
| 0.597355
| 1,508
| 10,436
| 3.831565
| 0.090849
| 0.088612
| 0.062998
| 0.067497
| 0.834545
| 0.809969
| 0.809969
| 0.809969
| 0.809969
| 0.809969
| 0
| 0.015638
| 0.283059
| 10,436
| 249
| 152
| 41.911647
| 0.756616
| 0.258432
| 0
| 0.636943
| 0
| 0
| 0.002652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044586
| false
| 0.025478
| 0.031847
| 0
| 0.121019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c5e4c89e80185f3850abfabe2724a9cd076c82b2
| 15,976
|
py
|
Python
|
lib/pytracking/ltr/data/processing.py
|
ngunnar/tracking_reg
|
71a1d22e53e277f36f961040f03e56efb163ded5
|
[
"MIT"
] | 11
|
2020-11-25T16:19:23.000Z
|
2022-01-12T08:08:47.000Z
|
ltr/data/processing.py
|
tsingqguo/AttackTracker
|
054268d5afa0044675c7acf1ac13e621f1c9549e
|
[
"Apache-2.0"
] | null | null | null |
ltr/data/processing.py
|
tsingqguo/AttackTracker
|
054268d5afa0044675c7acf1ac13e621f1c9549e
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torchvision.transforms as transforms
from pytracking import TensorDict
import ltr.data.processing_utils as prutils
class BaseProcessing:
    """ Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it
    through the network. For example, it can be used to crop a search region around the object, apply various data
    augmentations, etc."""
    def __init__(self, transform=transforms.ToTensor(), train_transform=None, test_transform=None, joint_transform=None):
        """
        args:
            transform       - The set of transformations to be applied on the images. Used only if train_transform or
                              test_transform is None.
            train_transform - The set of transformations to be applied on the train images. If None, the 'transform'
                              argument is used instead.
            test_transform  - The set of transformations to be applied on the test images. If None, the 'transform'
                              argument is used instead.
            joint_transform - The set of transformations to be applied 'jointly' on the train and test images.  For
                              example, it can be used to convert both test and train images to grayscale.
        """
        if train_transform is None:
            train_transform = transform
        if test_transform is None:
            test_transform = transform
        self.transform = {'train': train_transform,
                          'test': test_transform,
                          'joint': joint_transform}

    def __call__(self, data: TensorDict):
        """Process one data block; implemented by subclasses."""
        raise NotImplementedError
class ATOMProcessing(BaseProcessing):
    """ The processing class used for training ATOM. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,
                 mode='pair', *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
        self.mode = mode

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Log-normal size jitter: width/height scaled by exp(N(0, factor)).
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        # Maximum centre displacement scales with sqrt(area) of the jittered box.
        max_offset = (jittered_size.prod().sqrt() * self.center_jitter_factor[mode]).item()
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        # Back to (x, y, w, h) format with (x, y) the top-left corner.
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The
                        IoU is mapped to [-1, 1]
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposals = torch.zeros((num_proposals, 4))
        gt_iou = torch.zeros(num_proposals)
        for i in range(num_proposals):
            proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                             sigma_factor=self.proposal_params['sigma_factor']
                                                             )
        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou

    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno',
                'test_proposals', 'proposal_iou'
        """
        # Apply joint transforms: train and test frames are transformed in a
        # single call so both receive the same augmentation parameters.
        if self.transform['joint'] is not None:
            num_train_images = len(data['train_images'])
            all_images = data['train_images'] + data['test_images']
            all_images_trans = self.transform['joint'](*all_images)
            data['train_images'] = all_images_trans[:num_train_images]
            data['test_images'] = all_images_trans[num_train_images:]
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop image region centered at jittered_anno box
            crops, boxes = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                        self.search_area_factor, self.output_sz)
            # Apply transforms (data is mutated in place, key by key)
            data[s + '_images'] = [self.transform[s](x) for x in crops]
            data[s + '_anno'] = boxes
        # Generate proposals (for the test frames only)
        frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])
        data['test_proposals'] = list(frame2_proposals)
        data['proposal_iou'] = list(gt_iou)
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(prutils.stack_tensors)
        else:
            # 'pair' mode: unwrap the single-element per-frame lists.
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        return data
class DiMPProcessing(BaseProcessing):
    """ The processing class used for training DiMP. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A Gaussian label centered at the target is generated for each image. These label functions are
    used for computing the loss of the predicted classification model on the test images. A set of proposals are
    also generated for the test images by jittering the ground truth box. These proposals are used to train the
    bounding box estimating branch.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 mode='pair',
                 proposal_params=None, label_function_params=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'nopad', the search region crop is shifted/shrunk to fit completely inside the image.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.proposal_params = proposal_params
        self.label_function_params = label_function_params

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Log-normal size jitter: width/height scaled by exp(N(0, factor)).
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        # Maximum centre displacement scales with sqrt(area) of the jittered box.
        max_offset = (jittered_size.prod().sqrt() * self.center_jitter_factor[mode]).item()
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        # Back to (x, y, w, h) format with (x, y) the top-left corner.
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The
                        IoU is mapped to [-1, 1]
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposals = torch.zeros((num_proposals, 4))
        gt_iou = torch.zeros(num_proposals)
        for i in range(num_proposals):
            proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                             sigma_factor=self.proposal_params['sigma_factor'])
        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou

    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))
        return gauss_label

    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno',
                'test_proposals' (optional), 'proposal_iou' (optional),
                'test_label' (optional), 'train_label' (optional)
        """
        # Joint transforms first, so train and test frames share the same
        # augmentation parameters.
        if self.transform['joint'] is not None:
            num_train_images = len(data['train_images'])
            all_images = data['train_images'] + data['test_images']
            all_images_trans = self.transform['joint'](*all_images)
            data['train_images'] = all_images_trans[:num_train_images]
            data['test_images'] = all_images_trans[num_train_images:]
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop strategy selected by configuration (see __init__ docstring).
            if self.crop_type == 'replicate':
                crops, boxes = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                            self.search_area_factor, self.output_sz)
            elif self.crop_type == 'nopad':
                crops, boxes = prutils.jittered_center_crop_nopad(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                                  self.search_area_factor, self.output_sz)
            else:
                raise ValueError('Unknown crop type {}'.format(self.crop_type))
            data[s + '_images'] = [self.transform[s](x) for x in crops]
            data[s + '_anno'] = boxes
        # Generate proposals (only if configured)
        if self.proposal_params:
            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])
            data['test_proposals'] = list(frame2_proposals)
            data['proposal_iou'] = list(gt_iou)
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(prutils.stack_tensors)
        else:
            # 'pair' mode: unwrap the single-element per-frame lists.
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        # Generate label functions AFTER unwrapping/stacking, on the final annos.
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])
        return data
| 49.4613
| 126
| 0.611417
| 1,987
| 15,976
| 4.717665
| 0.130347
| 0.025603
| 0.025603
| 0.014935
| 0.765628
| 0.759228
| 0.740986
| 0.726051
| 0.721464
| 0.711863
| 0
| 0.005547
| 0.311718
| 15,976
| 322
| 127
| 49.614907
| 0.846944
| 0.437782
| 0
| 0.712
| 0
| 0
| 0.080903
| 0
| 0
| 0
| 0
| 0
| 0.016
| 1
| 0.088
| false
| 0
| 0.032
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c5fd3ba9c1239d1be32b09988fbbc83d00c6584c
| 139
|
py
|
Python
|
autoflow/feature_engineer/impute/__init__.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | 49
|
2020-04-16T11:17:28.000Z
|
2020-05-06T01:32:44.000Z
|
autoflow/feature_engineer/impute/__init__.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | null | null | null |
autoflow/feature_engineer/impute/__init__.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | 3
|
2020-04-17T00:53:24.000Z
|
2020-04-23T03:04:26.000Z
|
from .knn_impute import KNNImputer
from .miss_forest import MissForest
from .simple import SimpleImputer
from .gbt_impute import GBTImputer
| 34.75
| 35
| 0.863309
| 19
| 139
| 6.157895
| 0.631579
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107914
| 139
| 4
| 36
| 34.75
| 0.943548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
681a3c3478bca9f1176e4b1fd3da716ab0eda4ad
| 35
|
py
|
Python
|
tdirstat/__init__.py
|
apockill/tdirstat
|
2d2196432e3f2a861a24db86064cf36f093585c3
|
[
"MIT"
] | 2
|
2020-02-03T18:11:55.000Z
|
2020-12-19T21:31:12.000Z
|
tdirstat/__init__.py
|
apockill/tdirstat
|
2d2196432e3f2a861a24db86064cf36f093585c3
|
[
"MIT"
] | 1
|
2021-08-25T01:50:37.000Z
|
2021-08-30T04:44:35.000Z
|
tdirstat/__init__.py
|
apockill/tdirstat
|
2d2196432e3f2a861a24db86064cf36f093585c3
|
[
"MIT"
] | null | null | null |
from .main import main as tdirstat
| 17.5
| 34
| 0.8
| 6
| 35
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 35
| 1
| 35
| 35
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a89c10ebebee62503ebcc5f14e8d005100b55d3e
| 146
|
py
|
Python
|
m16_mlutils/pipeline/__init__.py
|
messier16/m16_mlutils
|
868775f48106f2e3a2090e98b8508349ca278158
|
[
"MIT"
] | null | null | null |
m16_mlutils/pipeline/__init__.py
|
messier16/m16_mlutils
|
868775f48106f2e3a2090e98b8508349ca278158
|
[
"MIT"
] | 9
|
2018-10-13T06:50:05.000Z
|
2021-06-01T23:07:42.000Z
|
m16_mlutils/pipeline/__init__.py
|
messier16/m16_mlutils
|
868775f48106f2e3a2090e98b8508349ca278158
|
[
"MIT"
] | null | null | null |
from .DataFrameSelector import DataFrameSelector
from .MostFrequentImputer import MostFrequentImputer
from .CategoryEncoder import CategoryEncoder
| 48.666667
| 52
| 0.90411
| 12
| 146
| 11
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075342
| 146
| 3
| 53
| 48.666667
| 0.977778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a89fb7b3f701258a61b86789bfb6848889a90154
| 1,762
|
py
|
Python
|
shamanai/common/wrapper2.py
|
adaptationio/Shaman-RL
|
548fa847e6ba2105cc0a876b02db3f3d7c179c54
|
[
"MIT"
] | 2
|
2020-06-13T04:38:08.000Z
|
2022-03-22T08:38:10.000Z
|
shamanai/common/wrapper2.py
|
adaptationio/Shaman-RL
|
548fa847e6ba2105cc0a876b02db3f3d7c179c54
|
[
"MIT"
] | 1
|
2020-11-13T17:46:38.000Z
|
2020-11-13T17:46:38.000Z
|
shamanai/common/wrapper2.py
|
adaptationio/Shaman-AI
|
548fa847e6ba2105cc0a876b02db3f3d7c179c54
|
[
"MIT"
] | null | null | null |
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
import tensorflow as tf
import json
class WarpFrame(gym.ObservationWrapper):
    """Downscale observations to 84x84 grayscale (Nature-DQN preprocessing)."""

    def __init__(self, env):
        """Warp frames to 84x84 as done in the Nature paper and later work."""
        super().__init__(env)
        self.height = 84
        self.width = 84
        self.observation_space = spaces.Box(
            low=0, high=255,
            shape=(self.height, self.width, 1), dtype=np.uint8)

    def observation(self, frame):
        # Grayscale first, then area-interpolated resize; restore the channel axis.
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        resized = cv2.resize(gray, (self.width, self.height), interpolation=cv2.INTER_AREA)
        return resized[:, :, None]
class WarpFrameRGB(gym.ObservationWrapper):
    """Resize RGB frames to 84x84, keeping all three color channels."""

    def __init__(self, env):
        """Warp frames to 84x84 as done in the Nature paper and later work."""
        gym.ObservationWrapper.__init__(self, env)
        self.width = 84
        self.height = 84
        self.observation_space = spaces.Box(low=0, high=255,
            shape=(self.height, self.width, 3), dtype=np.uint8)

    def observation(self, frame):
        # cv2.resize preserves the channel axis for a 3-channel input, so the
        # result is already (height, width, 3).
        # Bug fix: the previous `return frame[:, :, None]` appended a spurious
        # fourth axis, producing (84, 84, 3, 1) and contradicting the declared
        # observation_space above.
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        return frame
class WarpFrameRGBYolo(gym.ObservationWrapper):
    def __init__(self, env):
        """Resize observations to 224x224 for a YOLO-style backbone."""
        gym.ObservationWrapper.__init__(self, env)
        # NOTE(review): the declared space deliberately disagrees with the real
        # output -- observation() returns 224x224 frames (channels preserved by
        # cv2.resize), while the space advertises (84, 84, 24) so that the
        # downstream graph is built with the expected tensor size; see the
        # original "hack" note on the next line. Confirm before changing.
        self.observation_space = spaces.Box(low=0, high=255,
            shape=(84, 84, 24), dtype=np.uint8) # hack this part so that the graph is correctly built
    def observation(self, frame):
        # Rescale to the fixed YOLO input resolution.
        frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)
        return frame
| 37.489362
| 102
| 0.665721
| 235
| 1,762
| 4.859574
| 0.297872
| 0.110333
| 0.057793
| 0.073555
| 0.783713
| 0.783713
| 0.752189
| 0.752189
| 0.715412
| 0.64711
| 0
| 0.043255
| 0.22588
| 1,762
| 46
| 103
| 38.304348
| 0.793988
| 0.140182
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.194444
| 0
| 0.527778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
765e536ba56c55528d56c015566fe130f4999b8d
| 122
|
py
|
Python
|
zmyy/zmyy.py
|
ZehnMilliarden/PublicPyTools
|
4671cac07de00ecbdf50b0ef577d847c4ee6f892
|
[
"Apache-2.0"
] | 4
|
2021-11-14T07:50:42.000Z
|
2021-11-29T01:38:00.000Z
|
zmyy/zmyy.py
|
ZehnMilliarden/PublicPyTools
|
4671cac07de00ecbdf50b0ef577d847c4ee6f892
|
[
"Apache-2.0"
] | null | null | null |
zmyy/zmyy.py
|
ZehnMilliarden/PublicPyTools
|
4671cac07de00ecbdf50b0ef577d847c4ee6f892
|
[
"Apache-2.0"
] | null | null | null |
import zmyy_a
import zmyy_b
import zmyy_s
if __name__ == '__main__':
    # Entry point: run the variant-B flow; variant A is kept for quick switching.
    # NOTE(review): "excute_main" looks like a typo for "execute_main", but the
    # name must match what the zmyy_b module actually exports -- confirm there
    # before renaming.
    # zmyy_a.excute_main()
    zmyy_b.excute_main()
| 15.25
| 26
| 0.721311
| 20
| 122
| 3.65
| 0.45
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180328
| 122
| 8
| 27
| 15.25
| 0.73
| 0.163934
| 0
| 0
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
767ab757b6c644bb0b13ae27db0c4c707931f56d
| 10,742
|
py
|
Python
|
ddq_1/lang/fol_inference.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | null | null | null |
ddq_1/lang/fol_inference.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | 6
|
2021-03-19T12:06:56.000Z
|
2022-03-12T00:23:09.000Z
|
ddq_1/lang/fol_inference.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | null | null | null |
'''
References:
- Symbolic Logic, Copi, p.33, 396
'''
from typing import List
from .fol_lang import Wff, PropVarWff, BinaryWff, PropositionalVariable, NegWff
class Inference:
    """Abstract base class for a rule of inference."""
    def short_name(self):
        # Conventional abbreviation of the rule (e.g. 'MP'); subclasses override.
        pass
    def possible_inferences(self, permisses: List[Wff]):
        # NOTE(review): 'permisses' is presumably a typo for 'premisses'; kept
        # as-is since renaming would change the keyword-argument interface.
        pass
class PropInference(Inference):
    """A propositional rule of inference: premiss patterns plus conclusion patterns."""
    def __init__(self, premisses: List[Wff], conclusion: List[Wff]):
        self.premisses = premisses    # premiss wff patterns
        self.conclusion = conclusion  # conclusion wff patterns
    def short_name(self):
        # Abbreviation of the rule; subclasses override.
        pass
class ModusPonens(PropInference):
    """Modus ponens: from p -> q and p, conclude q."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__([BinaryWff.new_impl(p, q), p], [q])

    def short_name(self):
        return 'MP'

    def possible_inferences(self, permisses: List[Wff]):
        # Enumeration of applicable instances is not implemented yet.
        pass
class ModusTollens(PropInference):
    """Modus tollens: from p -> q and ~q, conclude ~p."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__([BinaryWff.new_impl(p, q), NegWff(q)], [NegWff(p)])

    def short_name(self):
        return 'MT'
class HypotheticalSyllogism(PropInference):
    """Hypothetical syllogism: from p -> q and q -> r, conclude p -> r."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        r = PropVarWff(PropositionalVariable('r'))
        super().__init__(
            [BinaryWff.new_impl(p, q), BinaryWff.new_impl(q, r)],
            [BinaryWff.new_impl(p, r)])

    def short_name(self):
        return 'HS'
class DisjunctiveSyllogism(PropInference):
    """Disjunctive syllogism: from p v q and ~q, conclude p.

    Bug fix: the conclusion used to be q -- exactly the proposition that the
    second premiss denies, making the rule unsound (from p v q and ~q, q is
    false). The surviving disjunct is p (Copi, Symbolic Logic).
    """
    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        prem1 = BinaryWff.new_disj(p, q)
        prem2 = NegWff(q)
        concl = p  # was q, which contradicts prem2
        super().__init__([prem1, prem2], [concl])
    def short_name(self):
        return 'DS'
class ConstructiveDilemma(PropInference):
    """Constructive dilemma: from (p -> q) v (r -> s) and p v r, conclude q v s.

    Bug fix: short_name() returned 'DS', colliding with DisjunctiveSyllogism;
    the conventional abbreviation is 'CD'.
    NOTE(review): Copi states the first premiss as a *conjunction*
    (p -> q) . (r -> s); this class builds a disjunction -- confirm intent.
    """
    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        r = PropVarWff(PropositionalVariable('r'))
        s = PropVarWff(PropositionalVariable('s'))
        prem1 = BinaryWff.new_disj(
            BinaryWff.new_impl(p, q),
            BinaryWff.new_impl(r, s))
        prem2 = BinaryWff.new_disj(p, r)
        concl = BinaryWff.new_disj(q, s)
        super().__init__([prem1, prem2], [concl])
    def short_name(self):
        return 'CD'  # was 'DS' (copy-paste error)
class DistructuveDilemma(PropInference):
    """Destructive dilemma: from (p -> q) v (r -> s) and ~q v ~s, conclude ~p v ~r.

    NOTE(review): the class name misspells 'Destructive'; renaming would break
    callers, so it is kept. As with ConstructiveDilemma, Copi states the first
    premiss as a conjunction of the two conditionals -- confirm intent.
    """

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        r = PropVarWff(PropositionalVariable('r'))
        s = PropVarWff(PropositionalVariable('s'))
        super().__init__(
            [BinaryWff.new_disj(BinaryWff.new_impl(p, q),
                                BinaryWff.new_impl(r, s)),
             BinaryWff.new_disj(NegWff(q), NegWff(s))],
            [BinaryWff.new_disj(NegWff(p), NegWff(r))])

    def short_name(self):
        return 'DD'
class Simplification(PropInference):
    """Simplification: from p . q, conclude q.

    NOTE(review): Copi's simplification concludes the *left* conjunct (p);
    this implementation keeps the right conjunct -- still a sound inference,
    but confirm which convention is intended.
    """

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__([BinaryWff.new_conj(p, q)], [q])

    def short_name(self):
        return 'SIMP'
class Conjunction(PropInference):
    """Conjunction: from p and q, conclude p . q."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__([p, q], [BinaryWff.new_conj(p, q)])

    def short_name(self):
        return 'CONJ'
class Addition(PropInference):
    """Addition: from p, conclude p v q."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__([p], [BinaryWff.new_disj(p, q)])

    def short_name(self):
        return 'ADD'
class Replacement:
    """A rule of replacement: two logically equivalent wff patterns that may
    be substituted for one another in any context."""

    def __init__(self, pattern1: Wff, pattern2: Wff):
        self.pattern1, self.pattern2 = pattern1, pattern2

    def short_name(self):
        # Conventional abbreviation of the rule; subclasses override.
        pass
class DeMorganConj(Replacement):
    """De Morgan: ~(p . q) is equivalent to ~p v ~q."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__(
            NegWff(BinaryWff.new_conj(p, q)),
            BinaryWff.new_disj(NegWff(p), NegWff(q)))

    def short_name(self):
        return 'DMc'
class DeMorganDisj(Replacement):
    """De Morgan: ~(p v q) is equivalent to ~p . ~q."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__(
            NegWff(BinaryWff.new_disj(p, q)),
            BinaryWff.new_conj(NegWff(p), NegWff(q)))

    def short_name(self):
        return 'DMd'
class CommutationConj(Replacement):
    """Commutation: p . q is equivalent to q . p."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__(BinaryWff.new_conj(p, q), BinaryWff.new_conj(q, p))

    def short_name(self):
        return 'COMc'
class CommutationDisj(Replacement):
    """Commutation: p v q is equivalent to q v p."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        super().__init__(BinaryWff.new_disj(p, q), BinaryWff.new_disj(q, p))

    def short_name(self):
        return 'COMd'
class AssociationConj(Replacement):
    """Association: (p . q) . r is equivalent to p . (q . r)."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        r = PropVarWff(PropositionalVariable('r'))
        super().__init__(
            BinaryWff.new_conj(BinaryWff.new_conj(p, q), r),
            BinaryWff.new_conj(p, BinaryWff.new_conj(q, r)))

    def short_name(self):
        return 'ASCc'
class AssociationDisj(Replacement):
    """Association: (p v q) v r is equivalent to p v (q v r)."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        r = PropVarWff(PropositionalVariable('r'))
        super().__init__(
            BinaryWff.new_disj(BinaryWff.new_disj(p, q), r),
            BinaryWff.new_disj(p, BinaryWff.new_disj(q, r)))

    def short_name(self):
        return 'ASCd'
class DistributionConj(Replacement):
    """Distribution: p . (q v r) is equivalent to (p . q) v (p . r)."""

    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        r = PropVarWff(PropositionalVariable('r'))
        super().__init__(
            BinaryWff.new_conj(p, BinaryWff.new_disj(q, r)),
            BinaryWff.new_disj(BinaryWff.new_conj(p, q),
                               BinaryWff.new_conj(p, r)))

    def short_name(self):
        return 'DISTc'
class DistributionDisj(Replacement):
    """Distribution of disjunction: p v (q . r)  <=>  (p v q) . (p v r)."""
    def __init__(self):
        def pv(name):
            return PropVarWff(PropositionalVariable(name))
        p, q, r = pv('p'), pv('q'), pv('r')
        super().__init__(
            BinaryWff.new_disj(p, BinaryWff.new_conj(q, r)),
            BinaryWff.new_conj(BinaryWff.new_disj(p, q),
                               BinaryWff.new_disj(p, r)))
    def short_name(self):
        return 'DISTd'
class DoubleNegation(Replacement):
    """Double negation: ~~p  <=>  p."""
    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        super().__init__(NegWff(NegWff(p)), p)
    def short_name(self):
        return 'DN'
class Transposition(Replacement):
    """Transposition (contraposition): p -> q  <=>  ~q -> ~p."""
    def __init__(self):
        def pv(name):
            return PropVarWff(PropositionalVariable(name))
        p, q = pv('p'), pv('q')
        super().__init__(
            BinaryWff.new_impl(p, q),
            BinaryWff.new_impl(NegWff(q), NegWff(p)))
    def short_name(self):
        return 'TRANS'
class MaterialImplication(Replacement):
    """Material implication: p -> q  <=>  ~p v q."""
    def __init__(self):
        def pv(name):
            return PropVarWff(PropositionalVariable(name))
        p, q = pv('p'), pv('q')
        super().__init__(
            BinaryWff.new_impl(p, q),
            BinaryWff.new_disj(NegWff(p), q))
    def short_name(self):
        return 'IMPL'
class MaterialEquivalenceConj(Replacement):
    """Material equivalence (conjunctive form): p <-> q  <=>  (p -> q) . (q -> p)."""
    def __init__(self):
        def pv(name):
            return PropVarWff(PropositionalVariable(name))
        p, q = pv('p'), pv('q')
        super().__init__(
            BinaryWff.new_equiv(p, q),
            BinaryWff.new_conj(BinaryWff.new_impl(p, q),
                               BinaryWff.new_impl(q, p)))
    def short_name(self):
        return 'EQUIVc'
class MaterialEquivalenceDisj(Replacement):
    """Material equivalence (disjunctive form): p <-> q  <=>  (p . q) v (~p . ~q).

    BUG FIX: the original built the second disjunct as (~q . ~q); the first
    negation must be of p for the replacement to be truth-preserving.
    """
    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        q = PropVarWff(PropositionalVariable('q'))
        pat1 = BinaryWff.new_equiv(p, q)
        pat2 = BinaryWff.new_disj(
            BinaryWff.new_conj(p, q),
            BinaryWff.new_conj(NegWff(p), NegWff(q))
        )
        super().__init__(pat1, pat2)
    def short_name(self):
        return 'EQUIVd'
class Exportation(Replacement):
    """Exportation: (p . q) -> r  <=>  p -> (q -> r)."""
    def __init__(self):
        def pv(name):
            return PropVarWff(PropositionalVariable(name))
        p, q, r = pv('p'), pv('q'), pv('r')
        super().__init__(
            BinaryWff.new_impl(BinaryWff.new_conj(p, q), r),
            BinaryWff.new_impl(p, BinaryWff.new_impl(q, r)))
    def short_name(self):
        return 'EXP'
class TautologyConj(Replacement):
    """Tautology (conjunctive form): p  <=>  p . p."""
    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        super().__init__(p, BinaryWff.new_conj(p, p))
    def short_name(self):
        return 'TAUTc'
class TautologyDisj(Replacement):
    """Tautology (disjunctive form): p  <=>  p v p.

    BUG FIX: the original built pat2 with ``BinaryWff.new_conj`` — the
    conjunctive form already provided by TautologyConj — so the 'TAUTd'
    rule never matched a disjunction. The disjunctive rule needs new_disj.
    """
    def __init__(self):
        p = PropVarWff(PropositionalVariable('p'))
        pat1 = p
        pat2 = BinaryWff.new_disj(p, p)
        super().__init__(pat1, pat2)
    def short_name(self):
        return 'TAUTd'
| 27.685567
| 79
| 0.592255
| 1,107
| 10,742
| 5.474255
| 0.088528
| 0.124752
| 0.055446
| 0.073927
| 0.828383
| 0.802805
| 0.761221
| 0.756601
| 0.733993
| 0.696205
| 0
| 0.013466
| 0.287935
| 10,742
| 387
| 80
| 27.757106
| 0.778795
| 0.004282
| 0
| 0.677852
| 0
| 0
| 0.013847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.191275
| false
| 0.016779
| 0.006711
| 0.083893
| 0.375839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
767c80a0b89e81a36d63833781ae88999338bf2b
| 19
|
py
|
Python
|
python/testData/resolve/multiFile/resolveInPkg/pkg/submodule.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/resolve/multiFile/resolveInPkg/pkg/submodule.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/resolve/multiFile/resolveInPkg/pkg/submodule.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def token():
    """Do nothing and return None (empty stub)."""
    return None
| 9.5
| 12
| 0.631579
| 3
| 19
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 2
| 13
| 9.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
767c957bb26309d3466ca37f3047270b07f8f60f
| 35
|
py
|
Python
|
reqlog/api/requests/view/__init__.py
|
JFF-Bohdan/reqlog
|
a7ba7b6e12609d736b3cd8cd8bc2913d511848ee
|
[
"MIT"
] | null | null | null |
reqlog/api/requests/view/__init__.py
|
JFF-Bohdan/reqlog
|
a7ba7b6e12609d736b3cd8cd8bc2913d511848ee
|
[
"MIT"
] | null | null | null |
reqlog/api/requests/view/__init__.py
|
JFF-Bohdan/reqlog
|
a7ba7b6e12609d736b3cd8cd8bc2913d511848ee
|
[
"MIT"
] | null | null | null |
from .api_view_all import * # noqa
| 35
| 35
| 0.742857
| 6
| 35
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 35
| 1
| 35
| 35
| 0.827586
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
76bcfa0ec30e0c395ca989eb1be41b2aa4dff255
| 3,312
|
py
|
Python
|
RNNs.py
|
ishine/RPN_KWS
|
b54d4010a701a6ec0a9ddf3ab6177a4be6dd6af5
|
[
"MIT"
] | 53
|
2019-08-13T08:05:26.000Z
|
2022-02-27T15:44:59.000Z
|
RNNs.py
|
ishine/RPN_KWS
|
b54d4010a701a6ec0a9ddf3ab6177a4be6dd6af5
|
[
"MIT"
] | 3
|
2019-10-31T09:25:38.000Z
|
2021-04-16T06:26:39.000Z
|
RNNs.py
|
ishine/RPN_KWS
|
b54d4010a701a6ec0a9ddf3ab6177a4be6dd6af5
|
[
"MIT"
] | 19
|
2019-08-14T03:47:58.000Z
|
2022-02-14T08:49:38.000Z
|
#!/usr/bin/env python
# Copyrigh 2018 houjingyong@gmail.com
# MIT Licence
import numpy as np
import sys
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from basic_nodes import *
class GRU(nn.Module):
    """Multi-layer GRU over padded, length-annotated batches.

    Wraps ``nn.GRU`` with optional Xavier/orthogonal weight initialization
    and an optional ReLU-activated linear output layer (``LinearBlock``).

    Fixes vs. original: ``== None`` comparisons replaced with ``is None``;
    the initial hidden state is sized ``num_layers * num_directions`` so a
    bidirectional configuration no longer gets a mis-shaped h0.
    """
    def __init__(self, input_size, output_size, hidden_size, num_layers,
                 bias=True, batch_first=True, dropout=0.0001,
                 bidirectional=False, output_layer=False, init_weight=True):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # number of directions, needed for the h0 shape in forward()
        self.num_directions = 2 if bidirectional else 1
        self.rnn = nn.GRU(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, bias=bias,
                          batch_first=batch_first, dropout=dropout,
                          bidirectional=bidirectional)
        if init_weight:
            self.init_weights()
        if output_layer:
            self.linear = LinearBlock(hidden_size, output_size, 'relu')
        else:
            self.linear = None

    def init_weights(self):
        """Xavier for input-hidden weights, orthogonal for hidden-hidden, zero biases."""
        for name, param in self.named_parameters():
            if 'weight_ih' in name:
                torch.nn.init.xavier_uniform_(param.data)
            elif 'weight_hh' in name:
                torch.nn.init.orthogonal_(param.data)
            elif 'bias' in name:
                param.data.fill_(0)

    def forward(self, x, length):
        """Run the GRU over a padded batch.

        x: (batch, total_length, input_size) since batch_first defaults True;
        length: per-sample valid lengths (must be sorted descending, as
        pack_padded_sequence's default ``enforce_sorted`` requires).
        Returns (batch, total_length, hidden) features, passed through the
        linear output layer when one was configured.
        """
        batch_size = x.shape[0]
        total_length = x.shape[1]
        # NOTE(review): a freshly random h0 per call makes forward passes
        # non-deterministic; zeros are the conventional choice. Kept as in
        # the original to preserve training behavior.
        h0 = torch.randn(self.num_layers * self.num_directions,
                         batch_size, self.hidden_size).type_as(x)
        x = pack_padded_sequence(x, length, batch_first=True)
        output, hn = self.rnn(x, h0)
        output, _ = pad_packed_sequence(output, batch_first=True,
                                        total_length=total_length)
        if self.linear is None:
            return output
        output = self.linear(output)
        return output
class LSTM(nn.Module):
    """Multi-layer LSTM over padded, length-annotated batches (mirrors GRU).

    BUG FIXES vs. original forward():
    * referenced ``self.hidden_dim``, which is never set (the attribute is
      ``hidden_size``), raising AttributeError on every call;
    * passed a bare tensor as the initial state, but ``nn.LSTM`` requires an
      ``(h0, c0)`` tuple;
    * state is sized ``num_layers * num_directions`` for bidirectional use.
    """
    def __init__(self, input_size, output_size, hidden_size, num_layers,
                 bias=True, batch_first=True, dropout=0.0001,
                 bidirectional=False, output_layer=False, init_weight=False):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # number of directions, needed for the state shape in forward()
        self.num_directions = 2 if bidirectional else 1
        self.rnn = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                           num_layers=num_layers, bias=bias,
                           batch_first=batch_first, dropout=dropout,
                           bidirectional=bidirectional)
        if init_weight:
            print("Xavier init")
            for i in range(num_layers):
                nn.init.xavier_uniform_(self.rnn.all_weights[i][0])
                nn.init.xavier_uniform_(self.rnn.all_weights[i][1])
        if output_layer:
            self.linear = LinearBlock(hidden_size, output_size, 'relu')
        else:
            self.linear = None

    def forward(self, x, h0, length):
        """Run the LSTM over a padded batch.

        The ``h0`` argument is kept for interface compatibility but, exactly
        as in the original, it is regenerated internally (randomly) per call.
        length must be sorted descending (pack_padded_sequence default).
        """
        batch_size = x.shape[0]
        total_length = x.shape[1]
        h0 = torch.randn(self.num_layers * self.num_directions,
                         batch_size, self.hidden_size).type_as(x)
        # LSTM state is a tuple (hidden, cell); zero-init the cell state
        c0 = torch.zeros_like(h0)
        x = pack_padded_sequence(x, length, batch_first=True)
        output, hn = self.rnn(x, (h0, c0))
        output, _ = pad_packed_sequence(output, batch_first=True,
                                        total_length=total_length)
        if self.linear is None:
            return output
        output = self.linear(output)
        return output
| 38.964706
| 178
| 0.632246
| 437
| 3,312
| 4.533181
| 0.210526
| 0.065623
| 0.056537
| 0.034326
| 0.750126
| 0.732963
| 0.732963
| 0.732963
| 0.732963
| 0.695608
| 0
| 0.010744
| 0.269324
| 3,312
| 84
| 179
| 39.428571
| 0.807851
| 0.033213
| 0
| 0.537313
| 0
| 0
| 0.012825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.089552
| null | null | 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f12370126b2ab7c00276ce1e1cc4e5de67cd411
| 26
|
py
|
Python
|
undict/__init__.py
|
firstprayer/undict
|
94edc66c8243ee35746d830b80e8bd4a9f046f20
|
[
"MIT"
] | null | null | null |
undict/__init__.py
|
firstprayer/undict
|
94edc66c8243ee35746d830b80e8bd4a9f046f20
|
[
"MIT"
] | null | null | null |
undict/__init__.py
|
firstprayer/undict
|
94edc66c8243ee35746d830b80e8bd4a9f046f20
|
[
"MIT"
] | null | null | null |
from undict import undict
| 13
| 25
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f2646ef476417fa46e6a5536e7103e237123ff4
| 25,643
|
py
|
Python
|
projects/08/code_writer.py
|
Youngermaster/Nand2Tetris-Solutions
|
9fb4ac31a0558bcc2324696bfb451aac11232088
|
[
"MIT"
] | null | null | null |
projects/08/code_writer.py
|
Youngermaster/Nand2Tetris-Solutions
|
9fb4ac31a0558bcc2324696bfb451aac11232088
|
[
"MIT"
] | null | null | null |
projects/08/code_writer.py
|
Youngermaster/Nand2Tetris-Solutions
|
9fb4ac31a0558bcc2324696bfb451aac11232088
|
[
"MIT"
] | null | null | null |
import os
from parser import Parser
class CodeWriter:
    """Translate parsed VM commands into Hack assembly (Nand2Tetris project 8).

    One instance wraps a Parser and an open output ``.asm`` file.  ``isfile``
    selects single-file mode; directory mode (``isfile=False``) expects the
    caller to populate ``static_var_dict`` (command index -> source file name)
    so static variables can be namespaced per source file.

    BUG FIX vs. original: ``__init__`` sliced a global ``path`` variable
    (which only exists when the module runs as a script) instead of the
    ``filepath`` parameter, so the class raised NameError when imported.
    The emitted assembly is unchanged.
    """

    # Segment tables: the symbol to load, plus the address-computation
    # instruction that follows it (A=... for push, D=... for pop).
    _PUSH_SEG = {
        'temp': ('@5', 'A=D+A'),
        'pointer': ('@3', 'A=D+A'),
        'local': ('@LCL', 'A=D+M'),
        'argument': ('@ARG', 'A=D+M'),
        'this': ('@THIS', 'A=D+M'),
        'that': ('@THAT', 'A=D+M'),
    }
    _POP_SEG = {
        'temp': ('@5', 'D=D+A'),
        'pointer': ('@3', 'D=D+A'),
        'local': ('@LCL', 'D=D+M'),
        'argument': ('@ARG', 'D=D+M'),
        'this': ('@THIS', 'D=D+M'),
        'that': ('@THAT', 'D=D+M'),
    }
    # Binary ALU ops: instructions emitted after D = top, A = second-from-top.
    _BINARY_OPS = {
        'add': ('D=D+M', 'M=D'),
        'sub': ('D=M-D', 'M=D'),
        'and': ('M=D&M',),
        'or': ('M=D|M',),
    }
    # Comparison commands and their Hack jump mnemonics.
    _COMPARE_JUMPS = {'eq': 'JEQ', 'gt': 'JGT', 'lt': 'JLT'}

    def __init__(self, filepath, isfile=True):
        self.parser = Parser(filepath)
        self.isfile = isfile
        # * Performs the logic of the recommended setFileName constructor here
        if self.isfile:
            # Derive "<dir>/<basename>.asm" from the input .vm path.
            ind1 = filepath.find('/')
            ind2 = filepath.find('.')
            self.writefile = filepath[:ind1] + "/" + filepath[ind1 + 1:ind2]
            self.filename = self.writefile + '.asm'
            self.file = open(self.filename, 'w')
            self.writefile_ind = self.writefile.rfind('/')
            # useful in declaring static variables
            self.static_var = self.writefile[self.writefile_ind + 1:]
            self.function_list = []
        else:
            # Directory mode: the output file is named after the folder.
            inds = [i for i, x in enumerate(filepath) if x == '/']
            self.writefolder = filepath[inds[-2] + 1:inds[-1]]
            self.filename = self.writefolder + '.asm'
            writefile_ind = filepath.rfind('/')
            filepath_ = filepath[:writefile_ind]
            self.file = open(filepath_ + '/' + self.filename, 'w')
            self.static_var_dict = {}  # useful in declaring static variables
            self.function_list = []

    def _w(self, *lines):
        """Write each assembly line followed by a newline."""
        for line in lines:
            self.file.write(line + '\n')

    def _push_d(self):
        """Emit assembly that pushes D onto the stack and advances SP."""
        self._w('@SP', 'A=M', 'M=D', '@SP', 'M=M+1')

    def _static_symbol(self, index):
        """Return the namespaced @-symbol for 'static <index>'."""
        if self.isfile:
            return '@%s.%s' % (self.static_var, index)
        # directory mode: look up which source file the current command
        # (parser.i) came from
        return '@%s.%s' % (self.static_var_dict[self.parser.i], index)

    def writePushPop(self):  # no need to pass in command as an argument
        """Emit assembly for the current C_PUSH or C_POP command."""
        assert self.parser.commandType() in ['C_PUSH', 'C_POP']
        arg1 = self.parser.arg1()
        arg2 = self.parser.arg2()
        if self.parser.commandType() == 'C_PUSH':
            self._w('// push %s %s' % (arg1, arg2))
            if arg1 == 'constant':
                # e.g. push constant 7: D = 7, then *SP = D
                self._w('@%s' % arg2, 'D=A', '@SP', 'A=M', 'M=D')
            elif arg1 in self._PUSH_SEG:
                # A = segment base + index, then push *A
                base, compute = self._PUSH_SEG[arg1]
                self._w('@%s' % arg2, 'D=A', base, compute,
                        'D=M', '@SP', 'A=M', 'M=D')
            elif arg1 == 'static':
                # declare a new symbol file.j in "push static j"
                self._w(self._static_symbol(arg2),
                        'D=M', '@SP', 'A=M', 'M=D')
            else:
                # TODO: unsupported push segment
                pass
            # increase address of stack top
            self._w('@SP', 'M=M+1')
        elif self.parser.commandType() == 'C_POP':
            # pop the stack value and store it in segment[index]; the
            # general-purpose register RAM[13] holds 'base_address + index'
            self._w('// pop %s %s' % (arg1, arg2))
            self._w('@%s' % arg2, 'D=A')
            if arg1 in self._POP_SEG:
                base, compute = self._POP_SEG[arg1]
                self._w(base, compute)
                self._w('@13',       # general purpose register
                        'M=D',
                        '@SP', 'A=M-1',
                        'D=M',       # pop command
                        '@13', 'A=M',
                        'M=D',       # write to appropriate address
                        '@SP',
                        'M=M-1')     # adjust address of stack top
            elif arg1 == 'static':
                self._w('@SP', 'A=M-1', 'D=M')  # pop command
                self._w(self._static_symbol(arg2),
                        'M=D',       # write to appropriate address
                        '@SP',
                        'M=M-1')     # adjust address of stack top
            else:
                # TODO: unsupported pop segment
                pass

    def writeArithmetic(self):  # no need to pass in command as an argument
        """Emit assembly for the current C_ARITHMETIC command."""
        assert self.parser.commandType() == 'C_ARITHMETIC'
        command = self.parser.arg1()
        self._w('// %s' % command)
        if command in self._BINARY_OPS:
            # binary op: D = top value, A -> second-from-top, then operate
            self._w('@SP', 'A=M-1', 'D=M', 'A=A-1')
            self._w(*self._BINARY_OPS[command])
            self._w('@SP', 'M=M-1')
        elif command in self._COMPARE_JUMPS:
            # comparison: D = (second - top); labels carry the command index
            # since there can be more than one comparison in the program
            i = self.parser.i
            self._w('@SP', 'A=M-1', 'D=M', 'A=A-1', 'D=M-D')
            self._w('@IF_TRUE_%s' % i, 'D;%s' % self._COMPARE_JUMPS[command])
            self._w('@SP', 'A=M-1', 'A=A-1', 'M=0')      # false -> 0
            self._w('@END_%s' % i, '0;JMP')
            self._w('(IF_TRUE_%s)' % i)
            self._w('@SP', 'A=M-1', 'A=A-1', 'M=-1')     # true -> -1
            self._w('(END_%s)' % i)
            self._w('@SP', 'M=M-1')
        elif command == 'neg':
            self._w('@SP', 'A=M-1', 'M=-M')
        elif command == 'not':
            self._w('@SP', 'A=M-1', 'M=!M')
        else:
            raise ValueError(
                "Unrecognized command for C_ARITHMETIC command type")

    def writeInit(self):
        """Set the standard segment base pointers to their test defaults."""
        self._w('// init')
        for value, symbol in (('256', 'SP'), ('300', 'LCL'), ('400', 'ARG'),
                              ('3000', 'THIS'), ('3010', 'THAT')):
            self._w('@%s' % value, 'D=A', '@%s' % symbol, 'M=D')

    def _scoped_label(self):
        """Return arg1's label, prefixed 'function$' when inside a function."""
        prefix = self.function_list[-1] + "$" if self.function_list else ''
        return prefix + self.parser.arg1()

    def writeLabel(self):
        """Emit assembly for the current C_LABEL command."""
        self._w('// label')
        self._w('(%s)' % self._scoped_label())

    def writeGoto(self):
        """Emit assembly for the current C_GOTO command."""
        self._w('// goto')
        self._w('@%s' % self._scoped_label(), '0;JMP')

    def writeIf(self):
        """Emit assembly for the current C_IF (if-goto) command."""
        self._w('// if-goto')
        label_name = self._scoped_label()
        # pop the top stack value into D, then jump when it is non-zero
        self._w('@SP', 'A=M-1', 'D=M',
                '@SP',      # adjust stack top
                'M=M-1',
                '@%s' % label_name, 'D;JNE')

    def writeFunction(self):
        """Emit assembly for the current C_FUNCTION command."""
        func_name = self.parser.arg1()
        self.function_list.append(func_name)
        num_locals = self.parser.arg2()
        self._w('// function %s %s' % (func_name, num_locals))
        self._w('(%s)' % func_name)
        # RAM[13] counts down from num_locals; push a 0 for each local
        self._w('@%s' % num_locals, 'D=A', '@13', 'M=D')
        self._w('(LOOP_%s)' % func_name)
        self._w('@13', 'D=M')
        self._w('@END_%s' % func_name, 'D;JEQ')
        # loop body: push constant 0 and decrement the counter
        self._w('@SP', 'A=M', 'M=0', '@SP', 'M=M+1', '@13', 'M=M-1')
        self._w('@LOOP_%s' % func_name, '0;JMP')
        self._w('(END_%s)' % func_name)

    def writeReturn(self):
        """Emit assembly for the current C_RETURN command."""
        self._w('// return')
        # FRAME = LCL : stored in temp register RAM[13]
        self._w('@LCL', 'D=M', '@13', 'M=D')
        # RET = *(FRAME - 5) : return address stored in temp register RAM[14]
        self._w('@13', 'D=M', '@5', 'D=D-A', 'A=D', 'D=M', '@14', 'M=D')
        # *ARG = pop()
        self._w('@SP', 'A=M-1', 'D=M', '@ARG', 'A=M', 'M=D', '@SP', 'M=M-1')
        # SP = ARG + 1
        self._w('@ARG', 'D=M+1', '@SP', 'M=D')
        # THAT = *(FRAME - 1)
        self._w('@13', 'A=M-1', 'D=M', '@THAT', 'M=D')
        # THIS = *(FRAME - 2)
        self._w('@13', 'D=M', '@2', 'A=D-A', 'D=M', '@THIS', 'M=D')
        # ARG = *(FRAME - 3)
        self._w('@13', 'D=M', '@3', 'A=D-A', 'D=M', '@ARG', 'M=D')
        # LCL = *(FRAME - 4)
        self._w('@13', 'D=M', '@4', 'A=D-A', 'D=M', '@LCL', 'M=D')
        # goto RET (stored in RAM[14])
        self._w('@14', 'A=M', '0;JMP')

    def writeCall(self):
        """Emit assembly for the current C_CALL command."""
        func_name = self.parser.arg1()
        num_args = self.parser.arg2()
        self._w('// call %s %s' % (func_name, num_args))
        # push return-address (label declared at the end of the sequence);
        # the command index keeps return labels unique across the program
        self._w('// call : push return-address')
        s = 'RETURN_ADDRESS_' + str(self.parser.i)
        self._w('@%s' % s, 'D=A')
        self._push_d()
        # save the caller's segment pointers
        for seg in ('LCL', 'ARG', 'THIS', 'THAT'):
            self._w('// call : push %s' % seg)
            self._w('@%s' % seg, 'D=M')
            self._push_d()
        # ARG = SP - n - 5
        self._w('// call : ARG = SP - n - 5')
        self._w('@SP', 'D=M', '@%s' % num_args, 'D=D-A', '@5', 'D=D-A',
                '@ARG', 'M=D')
        # LCL = SP
        self._w('// call : LCL = SP')
        self._w('@SP', 'D=M', '@LCL', 'M=D')
        # goto f
        self._w('// call : goto f')
        self._w('@%s' % func_name, '0;JMP')
        # declare a label for the return-address
        self._w('// call : declare label for return-address')
        self._w('(%s)' % s)

    def writeBootstrap(self):
        """Emit the VM bootstrap: SP = 256 then an inlined 'call Sys.init 0'."""
        # (the 'boostrap' typo is preserved: it is emitted output, and fixing
        # it would change the generated .asm text)
        self._w('// boostrap')
        # SP = 256
        self._w('@256', 'D=A', '@SP', 'M=D')
        # call Sys.init 0 : push return-address
        sys_init_ret_add = 'return-address-sysinit'
        self._w('@%s' % sys_init_ret_add, 'D=A')
        self._push_d()
        # push the (dummy) caller segment pointers
        for seg in ('LCL', 'ARG', 'THIS', 'THAT'):
            self._w('@%s' % seg, 'D=M')
            self._push_d()
        # ARG = SP - n - 5 (with n == 0 arguments)
        self._w('@SP', 'D=M', '@5', 'D=D-A', '@ARG', 'M=D')
        # LCL = SP
        self._w('@SP', 'D=M', '@LCL', 'M=D')
        # goto Sys.init
        func_name = 'Sys.init'
        self._w('@%s' % func_name, '0;JMP')
        # declare a label for the return-address
        self._w('(%s)' % sys_init_ret_add)

    def createOutput(self):
        """Translate every command from the parser and close the output file."""
        if not self.isfile:
            # directory mode starts with the VM bootstrap code
            self.writeBootstrap()
        dispatch = {
            'C_ARITHMETIC': self.writeArithmetic,
            'C_FUNCTION': self.writeFunction,
            'C_LABEL': self.writeLabel,
            'C_GOTO': self.writeGoto,
            'C_IF': self.writeIf,
            'C_RETURN': self.writeReturn,
            'C_CALL': self.writeCall,
        }
        self.parser.i = -1
        while self.parser.hasMoreCommands():
            self.parser.advance()
            c_type = self.parser.commandType()
            if c_type in ['C_PUSH', 'C_POP']:
                self.writePushPop()
            elif c_type in dispatch:
                dispatch[c_type]()
        # close file
        self.file.close()
if __name__ == "__main__":
    # Translate a fixed set of project-8 test programs; each path is either a
    # single .vm file or a directory containing several .vm files.
    for path in ["ProgramFlow/BasicLoop/BasicLoop.vm", "ProgramFlow/FibonacciSeries/FibonacciSeries.vm",
                 "FunctionCalls/SimpleFunction/SimpleFunction.vm",
                 "FunctionCalls/FibonacciElement", "FunctionCalls/StaticsTest"]:
        # handle the case where input path is a folder
        if os.path.isdir(path):
            files = [file_ for file_ in os.listdir(
                path) if file_.endswith(".vm")]
            # one CodeWriter per .vm file, all in directory mode (isfile=False)
            d_file_codewriter = {}
            for f in files:
                f_input = path + '/%s' % f
                d_file_codewriter[f] = CodeWriter(f_input, False)
            # Sys.vm's writer becomes the master writer; remember how many
            # commands came from Sys.vm so they can be rotated to the end below
            codewriter = d_file_codewriter['Sys.vm']
            tot_lines_sys = d_file_codewriter['Sys.vm'].parser.total_commands
            count_f = 0
            # Append every non-Sys file's commands onto the master parser and
            # record, per global command index, which source file it came from
            # (static_var_dict is what namespaces static variables per file).
            for f in files:
                if f != 'Sys.vm':
                    if count_f == 0:
                        # first non-Sys file: its commands occupy indices
                        # 0 .. total_commands-1 after the rotation below
                        codewriter.static_var_dict = {i: f for i in range(
                            d_file_codewriter[f].parser.total_commands)}
                        prev_counts = d_file_codewriter[f].parser.total_commands
                    else:
                        # subsequent files start at the running offset
                        new_dict = {
                            i + prev_counts: f for i in range(d_file_codewriter[f].parser.total_commands)}
                        codewriter.static_var_dict.update(new_dict)
                        prev_counts += d_file_codewriter[f].parser.total_commands
                    codewriter.parser.clean_lines = codewriter.parser.clean_lines + \
                        d_file_codewriter[f].parser.clean_lines
                    codewriter.parser.total_commands = len(
                        codewriter.parser.clean_lines)
                    count_f += 1
            # post-processing of clean_lines: rotate Sys.vm's commands to the
            # end so command indices line up with static_var_dict
            codewriter.parser.clean_lines = codewriter.parser.clean_lines[
                tot_lines_sys:] + codewriter.parser.clean_lines[:tot_lines_sys]
        # handle the case where input path is a file
        elif os.path.isfile(path):
            codewriter = CodeWriter(path)
        codewriter.createOutput()
| 38.618976
| 106
| 0.491167
| 3,591
| 25,643
| 3.451128
| 0.064884
| 0.252401
| 0.407004
| 0.289196
| 0.745017
| 0.717986
| 0.687081
| 0.669168
| 0.647946
| 0.626644
| 0
| 0.013359
| 0.337363
| 25,643
| 663
| 107
| 38.677225
| 0.715984
| 0.105721
| 0
| 0.706093
| 0
| 0
| 0.137646
| 0.00889
| 0
| 0
| 0
| 0.001508
| 0.003584
| 1
| 0.021505
| false
| 0.008961
| 0.003584
| 0
| 0.026882
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f5d1809808d697e40d6c3b20491f75cf98a311a
| 214
|
py
|
Python
|
services/__init__.py
|
dev-11/mars-rover-challenge
|
67569fcc4b93e5ec4cbe466d7a2fd5b3e9a316b0
|
[
"MIT"
] | null | null | null |
services/__init__.py
|
dev-11/mars-rover-challenge
|
67569fcc4b93e5ec4cbe466d7a2fd5b3e9a316b0
|
[
"MIT"
] | null | null | null |
services/__init__.py
|
dev-11/mars-rover-challenge
|
67569fcc4b93e5ec4cbe466d7a2fd5b3e9a316b0
|
[
"MIT"
] | null | null | null |
from .rover_runner_service import RoverRunnerService
from .move_commands import get_move_commands, MoveCommandSelector
from .turn_commands import get_turn_commands, TurnCommandSelector
from .command import Command
| 42.8
| 65
| 0.88785
| 26
| 214
| 7
| 0.5
| 0.131868
| 0.186813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084112
| 214
| 4
| 66
| 53.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96e11fbf2414390c277f0a9f47b09043ce2bb218
| 35
|
py
|
Python
|
linux_plex_updater/api/__init__.py
|
amickael/Linux-Plex-Updater
|
a74dc480d374daf52ee4cc09b40ea34b9e6ffcd4
|
[
"MIT"
] | null | null | null |
linux_plex_updater/api/__init__.py
|
amickael/Linux-Plex-Updater
|
a74dc480d374daf52ee4cc09b40ea34b9e6ffcd4
|
[
"MIT"
] | null | null | null |
linux_plex_updater/api/__init__.py
|
amickael/Linux-Plex-Updater
|
a74dc480d374daf52ee4cc09b40ea34b9e6ffcd4
|
[
"MIT"
] | null | null | null |
from .PlexClient import PlexClient
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8c04cad192ddc2276a9700252dfd9cb17cf0b09e
| 135
|
py
|
Python
|
tests/__init__.py
|
g-simmons2/py2cytoscape
|
e2cd1c5d598e1da02f50273e958ddf574c523eb8
|
[
"MIT"
] | 97
|
2018-01-23T00:20:51.000Z
|
2022-03-11T05:01:01.000Z
|
tests/__init__.py
|
g-simmons2/py2cytoscape
|
e2cd1c5d598e1da02f50273e958ddf574c523eb8
|
[
"MIT"
] | 64
|
2018-01-24T14:51:20.000Z
|
2022-02-21T01:05:02.000Z
|
tests/__init__.py
|
g-simmons2/py2cytoscape
|
e2cd1c5d598e1da02f50273e958ddf574c523eb8
|
[
"MIT"
] | 25
|
2018-01-20T20:29:39.000Z
|
2021-04-09T17:28:58.000Z
|
# -*- coding: utf-8 -*-
"""
Tests for py2cytoscape
-------------------
"""
import json
print('============ Test Init =============')
| 13.5
| 45
| 0.392593
| 11
| 135
| 4.818182
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.140741
| 135
| 9
| 46
| 15
| 0.439655
| 0.481481
| 0
| 0
| 0
| 0
| 0.590164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
8c2179e387711c77d6977a6fa547da1822d10e82
| 198
|
py
|
Python
|
function/python/brightics/common/json/__init__.py
|
nohkwangsun/studio
|
b2dd7da1d73d83bef6c046d73fb85639d3006fc2
|
[
"Apache-2.0"
] | null | null | null |
function/python/brightics/common/json/__init__.py
|
nohkwangsun/studio
|
b2dd7da1d73d83bef6c046d73fb85639d3006fc2
|
[
"Apache-2.0"
] | null | null | null |
function/python/brightics/common/json/__init__.py
|
nohkwangsun/studio
|
b2dd7da1d73d83bef6c046d73fb85639d3006fc2
|
[
"Apache-2.0"
] | 1
|
2020-11-19T06:44:15.000Z
|
2020-11-19T06:44:15.000Z
|
def to_json(data, for_redis=False):
from .encoder import encode
return encode(data, for_redis)
def from_json(json_str):
from .decoder import decode
return decode(json_str)
| 22
| 36
| 0.70202
| 29
| 198
| 4.586207
| 0.517241
| 0.105263
| 0.180451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 198
| 8
| 37
| 24.75
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8c3f7564cb15fff935229ac0ac2f5e217d40d599
| 126
|
py
|
Python
|
mlpipe/data_reader/mongodb/__init__.py
|
j-o-d-o/MLPipe-Trainer
|
b686dc4d28e3d4cd2c6581487f8a2491a6d7cb60
|
[
"MIT"
] | null | null | null |
mlpipe/data_reader/mongodb/__init__.py
|
j-o-d-o/MLPipe-Trainer
|
b686dc4d28e3d4cd2c6581487f8a2491a6d7cb60
|
[
"MIT"
] | null | null | null |
mlpipe/data_reader/mongodb/__init__.py
|
j-o-d-o/MLPipe-Trainer
|
b686dc4d28e3d4cd2c6581487f8a2491a6d7cb60
|
[
"MIT"
] | null | null | null |
from .mongodb_connect import MongoDBConnect
from .mongodb_generator import MongoDBGenerator
from .data_loader import load_ids
| 31.5
| 47
| 0.880952
| 16
| 126
| 6.6875
| 0.6875
| 0.205607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 126
| 3
| 48
| 42
| 0.938596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8c5cb547cfc5e18d429cee44a2b29b3240bf6d91
| 195
|
py
|
Python
|
NewPee/tests.py
|
CMPUT404W19T3/NewPee
|
ba34341e0407746c12aec72689e50fbc2054ae77
|
[
"MIT"
] | 2
|
2019-02-19T17:11:58.000Z
|
2019-02-19T17:19:28.000Z
|
NewPee/tests.py
|
CMPUT404W19T3/NewPee
|
ba34341e0407746c12aec72689e50fbc2054ae77
|
[
"MIT"
] | 74
|
2019-02-01T17:15:02.000Z
|
2022-03-08T21:09:44.000Z
|
NewPee/tests.py
|
CMPUT404W19T3/NewPee
|
ba34341e0407746c12aec72689e50fbc2054ae77
|
[
"MIT"
] | 1
|
2019-03-15T16:09:51.000Z
|
2019-03-15T16:09:51.000Z
|
from django.test import TestCase
from Tests.test_author import AuthorModelTests
from Tests.test_frontend import FrontEndTests
from Tests.test_post import PostModelTests
# Create your tests here.
| 32.5
| 46
| 0.861538
| 27
| 195
| 6.111111
| 0.555556
| 0.163636
| 0.236364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107692
| 195
| 6
| 47
| 32.5
| 0.948276
| 0.117949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4fcc9b858948fe4ebbe885fd09e1441ed28c834c
| 3,776
|
py
|
Python
|
vnet_manager/settings/test.py
|
ppartarr/vnet-manager
|
e7e8dfc9014c98f34bce639f48e0baa603d83b67
|
[
"MIT"
] | null | null | null |
vnet_manager/settings/test.py
|
ppartarr/vnet-manager
|
e7e8dfc9014c98f34bce639f48e0baa603d83b67
|
[
"MIT"
] | null | null | null |
vnet_manager/settings/test.py
|
ppartarr/vnet-manager
|
e7e8dfc9014c98f34bce639f48e0baa603d83b67
|
[
"MIT"
] | null | null | null |
from .base import *
# /dev/log doesn't exist everywhere
del LOGGING["handlers"]["syslog"]["address"]
# Fixture config
CONFIG = {
"providers": {
"lxc": {
"supported_operating_systems": ["bionic", "focal"],
"dns-nameserver": "1.1.1.1",
"required_host_packages": ["lxd", "lxc", "bridge-utils", "tcpdump", "net-tools", "curl"],
"guest_packages": ["man", "net-tools", "traceroute", "nano", "vim", "bridge-utils", "radvd", "frr", "frr-pythontools"],
"base_image": {"os": "18.04", "server": "https://cloud-images.ubuntu.com/daily", "protocol": "simplestreams"},
}
},
"switches": 2,
"machines": {
"router100": {
"type": "router",
"interfaces": {"eth12": {"ipv4": "192.168.0.2/24", "ipv6": "fd00:12::2/64", "mac": "00:00:00:00:01:11", "bridge": 0}},
"vlans": {"vlan.100": {"id": 100, "link": "eth12", "addresses": ["10.0.100.1/24"]},},
"files": {"router100": "/etc/frr/"},
},
"router101": {
"type": "router",
"interfaces": {
"eth12": {"ipv4": "192.168.0.1/24", "ipv6": "fd00:12::1/64", "mac": "00:00:00:00:02:12", "bridge": 0},
"eth23": {"ipv4": "10.0.0.1/8", "ipv6": "fd00:23::1/64", "mac": "00:00:00:00:02:22", "bridge": 1},
},
"files": {"router101": "/etc/frr/"},
},
"host102": {
"type": "host",
"interfaces": {"eth23": {"ipv4": "10.0.0.2/8", "ipv6": "fd00:23::2/64", "mac": "00:00:00:00:03:23", "bridge": 1}},
"files": {"host102": "/etc/frr/"},
},
},
"veths": {"vnet-veth1": {"bridge": "vnet-br1", "stp": True}, "vnet-veth0": {"peer": "vnet-veth1", "bridge": "vnet-br0", "stp": False},},
}
VALIDATED_CONFIG = {
"providers": {
"lxc": {
"supported_operating_systems": ["bionic", "focal"],
"dns-nameserver": "8.8.8.8",
"required_host_packages": ["lxd", "lxc", "bridge-utils", "tcpdump", "net-tools", "curl"],
"guest_packages": ["man", "net-tools", "traceroute", "nano", "vim", "bridge-utils", "radvd", "frr", "frr-pythontools"],
"base_image": {"os": "18.04", "server": "https://cloud-images.ubuntu.com/daily", "protocol": "simplestreams"},
}
},
"switches": 2,
"machines": {
"router100": {
"type": "router",
"interfaces": {"eth12": {"ipv4": "192.168.0.2/24", "ipv6": "fd00:12::2/64", "mac": "00:00:00:00:01:11", "bridge": 0}},
"files": {"/root/vnet-manager/config/ripng/router100": "/etc/frr/"},
},
"router101": {
"type": "router",
"interfaces": {
"eth12": {"ipv4": "192.168.0.1/24", "ipv6": "fd00:12::1/64", "mac": "00:00:00:00:02:12", "bridge": 0},
"eth23": {"ipv4": "10.0.0.1/8", "ipv6": "fd00:23::1/64", "mac": "00:00:00:00:02:22", "bridge": 1},
},
"files": {"/root/vnet-manager/config/ripng/router101": "/etc/frr/"},
},
"host102": {
"type": "host",
"interfaces": {"eth23": {"ipv4": "10.0.0.2/8", "ipv6": "fd00:23::2/64", "mac": "00:00:00:00:03:23", "bridge": 1}},
},
},
"veths": {
"vnet-veth3": {"bridge": "vnet-br2", "stp": True},
"vnet-veth2": {"peer": "vnet-veth3", "bridge": "vnet-br0"},
"vnet-veth1": {"bridge": "vnet-br1", "stp": True},
"vnet-veth0": {"peer": "vnet-veth1", "bridge": "vnet-br0", "stp": True},
"vnet-veth5": {"bridge": "vnet-br2"},
"vnet-veth4": {"peer": "vnet-veth5", "bridge": "vnet-br1"},
},
"config_dir": "/root/vnet-manager/config/ripng",
}
# Speed up testing
LXC_MAX_STATUS_WAIT_ATTEMPTS = 2
| 44.423529
| 140
| 0.480932
| 444
| 3,776
| 4.04955
| 0.274775
| 0.053393
| 0.053393
| 0.040044
| 0.801446
| 0.786986
| 0.755284
| 0.755284
| 0.755284
| 0.755284
| 0
| 0.122167
| 0.252119
| 3,776
| 84
| 141
| 44.952381
| 0.514518
| 0.017214
| 0
| 0.493506
| 0
| 0
| 0.498516
| 0.056919
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012987
| 0
| 0.012987
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8b0602dde28463d89f06e3aeec4ac0d327387ee9
| 16,775
|
py
|
Python
|
huaweicloud-sdk-elb/huaweicloudsdkelb/v2/__init__.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-elb/huaweicloudsdkelb/v2/__init__.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-elb/huaweicloudsdkelb/v2/__init__.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
from __future__ import absolute_import
# import ElbClient
from huaweicloudsdkelb.v2.elb_client import ElbClient
from huaweicloudsdkelb.v2.elb_async_client import ElbAsyncClient
# import models into sdk package
from huaweicloudsdkelb.v2.model.action_match import ActionMatch
from huaweicloudsdkelb.v2.model.action_tag import ActionTag
from huaweicloudsdkelb.v2.model.batch_create_listener_tags_request import BatchCreateListenerTagsRequest
from huaweicloudsdkelb.v2.model.batch_create_listener_tags_request_body import BatchCreateListenerTagsRequestBody
from huaweicloudsdkelb.v2.model.batch_create_listener_tags_response import BatchCreateListenerTagsResponse
from huaweicloudsdkelb.v2.model.batch_create_loadbalancer_tags_request import BatchCreateLoadbalancerTagsRequest
from huaweicloudsdkelb.v2.model.batch_create_loadbalancer_tags_request_body import BatchCreateLoadbalancerTagsRequestBody
from huaweicloudsdkelb.v2.model.batch_create_loadbalancer_tags_response import BatchCreateLoadbalancerTagsResponse
from huaweicloudsdkelb.v2.model.batch_delete_listener_tags_request import BatchDeleteListenerTagsRequest
from huaweicloudsdkelb.v2.model.batch_delete_listener_tags_request_body import BatchDeleteListenerTagsRequestBody
from huaweicloudsdkelb.v2.model.batch_delete_listener_tags_response import BatchDeleteListenerTagsResponse
from huaweicloudsdkelb.v2.model.batch_delete_loadbalancer_tags_request import BatchDeleteLoadbalancerTagsRequest
from huaweicloudsdkelb.v2.model.batch_delete_loadbalancer_tags_request_body import BatchDeleteLoadbalancerTagsRequestBody
from huaweicloudsdkelb.v2.model.batch_delete_loadbalancer_tags_response import BatchDeleteLoadbalancerTagsResponse
from huaweicloudsdkelb.v2.model.certificate_resp import CertificateResp
from huaweicloudsdkelb.v2.model.create_certificate_request import CreateCertificateRequest
from huaweicloudsdkelb.v2.model.create_certificate_request_body import CreateCertificateRequestBody
from huaweicloudsdkelb.v2.model.create_certificate_response import CreateCertificateResponse
from huaweicloudsdkelb.v2.model.create_healthmonitor_req import CreateHealthmonitorReq
from huaweicloudsdkelb.v2.model.create_healthmonitor_request import CreateHealthmonitorRequest
from huaweicloudsdkelb.v2.model.create_healthmonitor_request_body import CreateHealthmonitorRequestBody
from huaweicloudsdkelb.v2.model.create_healthmonitor_response import CreateHealthmonitorResponse
from huaweicloudsdkelb.v2.model.create_l7policy_req import CreateL7policyReq
from huaweicloudsdkelb.v2.model.create_l7policy_request import CreateL7policyRequest
from huaweicloudsdkelb.v2.model.create_l7policy_request_body import CreateL7policyRequestBody
from huaweicloudsdkelb.v2.model.create_l7policy_response import CreateL7policyResponse
from huaweicloudsdkelb.v2.model.create_l7rule_req import CreateL7ruleReq
from huaweicloudsdkelb.v2.model.create_l7rule_req_in_policy import CreateL7ruleReqInPolicy
from huaweicloudsdkelb.v2.model.create_l7rule_request import CreateL7ruleRequest
from huaweicloudsdkelb.v2.model.create_l7rule_request_body import CreateL7ruleRequestBody
from huaweicloudsdkelb.v2.model.create_l7rule_response import CreateL7ruleResponse
from huaweicloudsdkelb.v2.model.create_listener_req import CreateListenerReq
from huaweicloudsdkelb.v2.model.create_listener_request import CreateListenerRequest
from huaweicloudsdkelb.v2.model.create_listener_request_body import CreateListenerRequestBody
from huaweicloudsdkelb.v2.model.create_listener_response import CreateListenerResponse
from huaweicloudsdkelb.v2.model.create_listener_tags_request import CreateListenerTagsRequest
from huaweicloudsdkelb.v2.model.create_listener_tags_request_body import CreateListenerTagsRequestBody
from huaweicloudsdkelb.v2.model.create_listener_tags_response import CreateListenerTagsResponse
from huaweicloudsdkelb.v2.model.create_loadbalancer_req import CreateLoadbalancerReq
from huaweicloudsdkelb.v2.model.create_loadbalancer_request import CreateLoadbalancerRequest
from huaweicloudsdkelb.v2.model.create_loadbalancer_request_body import CreateLoadbalancerRequestBody
from huaweicloudsdkelb.v2.model.create_loadbalancer_response import CreateLoadbalancerResponse
from huaweicloudsdkelb.v2.model.create_loadbalancer_tags_request import CreateLoadbalancerTagsRequest
from huaweicloudsdkelb.v2.model.create_loadbalancer_tags_request_body import CreateLoadbalancerTagsRequestBody
from huaweicloudsdkelb.v2.model.create_loadbalancer_tags_response import CreateLoadbalancerTagsResponse
from huaweicloudsdkelb.v2.model.create_member_req import CreateMemberReq
from huaweicloudsdkelb.v2.model.create_member_request import CreateMemberRequest
from huaweicloudsdkelb.v2.model.create_member_request_body import CreateMemberRequestBody
from huaweicloudsdkelb.v2.model.create_member_response import CreateMemberResponse
from huaweicloudsdkelb.v2.model.create_pool_req import CreatePoolReq
from huaweicloudsdkelb.v2.model.create_pool_request import CreatePoolRequest
from huaweicloudsdkelb.v2.model.create_pool_request_body import CreatePoolRequestBody
from huaweicloudsdkelb.v2.model.create_pool_response import CreatePoolResponse
from huaweicloudsdkelb.v2.model.create_whitelist_req import CreateWhitelistReq
from huaweicloudsdkelb.v2.model.create_whitelist_request import CreateWhitelistRequest
from huaweicloudsdkelb.v2.model.create_whitelist_request_body import CreateWhitelistRequestBody
from huaweicloudsdkelb.v2.model.create_whitelist_response import CreateWhitelistResponse
from huaweicloudsdkelb.v2.model.delete_certificate_request import DeleteCertificateRequest
from huaweicloudsdkelb.v2.model.delete_certificate_response import DeleteCertificateResponse
from huaweicloudsdkelb.v2.model.delete_healthmonitor_request import DeleteHealthmonitorRequest
from huaweicloudsdkelb.v2.model.delete_healthmonitor_response import DeleteHealthmonitorResponse
from huaweicloudsdkelb.v2.model.delete_l7policy_request import DeleteL7policyRequest
from huaweicloudsdkelb.v2.model.delete_l7policy_response import DeleteL7policyResponse
from huaweicloudsdkelb.v2.model.delete_l7rule_request import DeleteL7ruleRequest
from huaweicloudsdkelb.v2.model.delete_l7rule_response import DeleteL7ruleResponse
from huaweicloudsdkelb.v2.model.delete_listener_request import DeleteListenerRequest
from huaweicloudsdkelb.v2.model.delete_listener_response import DeleteListenerResponse
from huaweicloudsdkelb.v2.model.delete_listener_tags_request import DeleteListenerTagsRequest
from huaweicloudsdkelb.v2.model.delete_listener_tags_response import DeleteListenerTagsResponse
from huaweicloudsdkelb.v2.model.delete_loadbalancer_request import DeleteLoadbalancerRequest
from huaweicloudsdkelb.v2.model.delete_loadbalancer_response import DeleteLoadbalancerResponse
from huaweicloudsdkelb.v2.model.delete_loadbalancer_tags_request import DeleteLoadbalancerTagsRequest
from huaweicloudsdkelb.v2.model.delete_loadbalancer_tags_response import DeleteLoadbalancerTagsResponse
from huaweicloudsdkelb.v2.model.delete_member_request import DeleteMemberRequest
from huaweicloudsdkelb.v2.model.delete_member_response import DeleteMemberResponse
from huaweicloudsdkelb.v2.model.delete_pool_request import DeletePoolRequest
from huaweicloudsdkelb.v2.model.delete_pool_response import DeletePoolResponse
from huaweicloudsdkelb.v2.model.delete_whitelist_request import DeleteWhitelistRequest
from huaweicloudsdkelb.v2.model.delete_whitelist_response import DeleteWhitelistResponse
from huaweicloudsdkelb.v2.model.healthmonitor_resp import HealthmonitorResp
from huaweicloudsdkelb.v2.model.healthmonitors_in_status_resp import HealthmonitorsInStatusResp
from huaweicloudsdkelb.v2.model.insert_header import InsertHeader
from huaweicloudsdkelb.v2.model.l7policies_in_status_resp import L7policiesInStatusResp
from huaweicloudsdkelb.v2.model.l7policy_resp import L7policyResp
from huaweicloudsdkelb.v2.model.l7rule_resp import L7ruleResp
from huaweicloudsdkelb.v2.model.l7rules_in_status_resp import L7rulesInStatusResp
from huaweicloudsdkelb.v2.model.list_certificates_request import ListCertificatesRequest
from huaweicloudsdkelb.v2.model.list_certificates_response import ListCertificatesResponse
from huaweicloudsdkelb.v2.model.list_healthmonitors_request import ListHealthmonitorsRequest
from huaweicloudsdkelb.v2.model.list_healthmonitors_response import ListHealthmonitorsResponse
from huaweicloudsdkelb.v2.model.list_l7policies_request import ListL7policiesRequest
from huaweicloudsdkelb.v2.model.list_l7policies_response import ListL7policiesResponse
from huaweicloudsdkelb.v2.model.list_l7rules_request import ListL7rulesRequest
from huaweicloudsdkelb.v2.model.list_l7rules_response import ListL7rulesResponse
from huaweicloudsdkelb.v2.model.list_listener_tags_request import ListListenerTagsRequest
from huaweicloudsdkelb.v2.model.list_listener_tags_response import ListListenerTagsResponse
from huaweicloudsdkelb.v2.model.list_listeners_by_tags_request import ListListenersByTagsRequest
from huaweicloudsdkelb.v2.model.list_listeners_by_tags_request_body import ListListenersByTagsRequestBody
from huaweicloudsdkelb.v2.model.list_listeners_by_tags_response import ListListenersByTagsResponse
from huaweicloudsdkelb.v2.model.list_listeners_request import ListListenersRequest
from huaweicloudsdkelb.v2.model.list_listeners_response import ListListenersResponse
from huaweicloudsdkelb.v2.model.list_loadbalancer_tags_request import ListLoadbalancerTagsRequest
from huaweicloudsdkelb.v2.model.list_loadbalancer_tags_response import ListLoadbalancerTagsResponse
from huaweicloudsdkelb.v2.model.list_loadbalancers_by_tags_request import ListLoadbalancersByTagsRequest
from huaweicloudsdkelb.v2.model.list_loadbalancers_by_tags_request_body import ListLoadbalancersByTagsRequestBody
from huaweicloudsdkelb.v2.model.list_loadbalancers_by_tags_response import ListLoadbalancersByTagsResponse
from huaweicloudsdkelb.v2.model.list_loadbalancers_request import ListLoadbalancersRequest
from huaweicloudsdkelb.v2.model.list_loadbalancers_response import ListLoadbalancersResponse
from huaweicloudsdkelb.v2.model.list_members_request import ListMembersRequest
from huaweicloudsdkelb.v2.model.list_members_response import ListMembersResponse
from huaweicloudsdkelb.v2.model.list_pools_request import ListPoolsRequest
from huaweicloudsdkelb.v2.model.list_pools_response import ListPoolsResponse
from huaweicloudsdkelb.v2.model.list_tag import ListTag
from huaweicloudsdkelb.v2.model.list_whitelists_request import ListWhitelistsRequest
from huaweicloudsdkelb.v2.model.list_whitelists_response import ListWhitelistsResponse
from huaweicloudsdkelb.v2.model.listener_resp import ListenerResp
from huaweicloudsdkelb.v2.model.listeners_in_status_resp import ListenersInStatusResp
from huaweicloudsdkelb.v2.model.loadbalancer_in_status_resp import LoadbalancerInStatusResp
from huaweicloudsdkelb.v2.model.loadbalancer_resp import LoadbalancerResp
from huaweicloudsdkelb.v2.model.member_resp import MemberResp
from huaweicloudsdkelb.v2.model.members_in_status_resp import MembersInStatusResp
from huaweicloudsdkelb.v2.model.pool_resp import PoolResp
from huaweicloudsdkelb.v2.model.pools_in_status_resp import PoolsInStatusResp
from huaweicloudsdkelb.v2.model.resource_list import ResourceList
from huaweicloudsdkelb.v2.model.resource_tag import ResourceTag
from huaweicloudsdkelb.v2.model.resources_by_tag import ResourcesByTag
from huaweicloudsdkelb.v2.model.session_persistence import SessionPersistence
from huaweicloudsdkelb.v2.model.show_certificate_request import ShowCertificateRequest
from huaweicloudsdkelb.v2.model.show_certificate_response import ShowCertificateResponse
from huaweicloudsdkelb.v2.model.show_healthmonitors_request import ShowHealthmonitorsRequest
from huaweicloudsdkelb.v2.model.show_healthmonitors_response import ShowHealthmonitorsResponse
from huaweicloudsdkelb.v2.model.show_l7policy_request import ShowL7policyRequest
from huaweicloudsdkelb.v2.model.show_l7policy_response import ShowL7policyResponse
from huaweicloudsdkelb.v2.model.show_l7rule_request import ShowL7ruleRequest
from huaweicloudsdkelb.v2.model.show_l7rule_response import ShowL7ruleResponse
from huaweicloudsdkelb.v2.model.show_listener_request import ShowListenerRequest
from huaweicloudsdkelb.v2.model.show_listener_response import ShowListenerResponse
from huaweicloudsdkelb.v2.model.show_listener_tags_request import ShowListenerTagsRequest
from huaweicloudsdkelb.v2.model.show_listener_tags_response import ShowListenerTagsResponse
from huaweicloudsdkelb.v2.model.show_loadbalancer_request import ShowLoadbalancerRequest
from huaweicloudsdkelb.v2.model.show_loadbalancer_response import ShowLoadbalancerResponse
from huaweicloudsdkelb.v2.model.show_loadbalancer_tags_request import ShowLoadbalancerTagsRequest
from huaweicloudsdkelb.v2.model.show_loadbalancer_tags_response import ShowLoadbalancerTagsResponse
from huaweicloudsdkelb.v2.model.show_loadbalancers_status_request import ShowLoadbalancersStatusRequest
from huaweicloudsdkelb.v2.model.show_loadbalancers_status_response import ShowLoadbalancersStatusResponse
from huaweicloudsdkelb.v2.model.show_member_request import ShowMemberRequest
from huaweicloudsdkelb.v2.model.show_member_response import ShowMemberResponse
from huaweicloudsdkelb.v2.model.show_pool_request import ShowPoolRequest
from huaweicloudsdkelb.v2.model.show_pool_response import ShowPoolResponse
from huaweicloudsdkelb.v2.model.show_whitelist_request import ShowWhitelistRequest
from huaweicloudsdkelb.v2.model.show_whitelist_response import ShowWhitelistResponse
from huaweicloudsdkelb.v2.model.status_resp import StatusResp
from huaweicloudsdkelb.v2.model.update_certificate_request import UpdateCertificateRequest
from huaweicloudsdkelb.v2.model.update_certificate_request_body import UpdateCertificateRequestBody
from huaweicloudsdkelb.v2.model.update_certificate_response import UpdateCertificateResponse
from huaweicloudsdkelb.v2.model.update_healthmonitor_req import UpdateHealthmonitorReq
from huaweicloudsdkelb.v2.model.update_healthmonitor_request import UpdateHealthmonitorRequest
from huaweicloudsdkelb.v2.model.update_healthmonitor_request_body import UpdateHealthmonitorRequestBody
from huaweicloudsdkelb.v2.model.update_healthmonitor_response import UpdateHealthmonitorResponse
from huaweicloudsdkelb.v2.model.update_l7policies_request import UpdateL7policiesRequest
from huaweicloudsdkelb.v2.model.update_l7policies_request_body import UpdateL7policiesRequestBody
from huaweicloudsdkelb.v2.model.update_l7policies_response import UpdateL7policiesResponse
from huaweicloudsdkelb.v2.model.update_l7policy_req import UpdateL7policyReq
from huaweicloudsdkelb.v2.model.update_l7rule_req import UpdateL7ruleReq
from huaweicloudsdkelb.v2.model.update_l7rule_request import UpdateL7ruleRequest
from huaweicloudsdkelb.v2.model.update_l7rule_request_body import UpdateL7ruleRequestBody
from huaweicloudsdkelb.v2.model.update_l7rule_response import UpdateL7ruleResponse
from huaweicloudsdkelb.v2.model.update_listener_req import UpdateListenerReq
from huaweicloudsdkelb.v2.model.update_listener_request import UpdateListenerRequest
from huaweicloudsdkelb.v2.model.update_listener_request_body import UpdateListenerRequestBody
from huaweicloudsdkelb.v2.model.update_listener_response import UpdateListenerResponse
from huaweicloudsdkelb.v2.model.update_loadbalancer_req import UpdateLoadbalancerReq
from huaweicloudsdkelb.v2.model.update_loadbalancer_request import UpdateLoadbalancerRequest
from huaweicloudsdkelb.v2.model.update_loadbalancer_request_body import UpdateLoadbalancerRequestBody
from huaweicloudsdkelb.v2.model.update_loadbalancer_response import UpdateLoadbalancerResponse
from huaweicloudsdkelb.v2.model.update_member_req import UpdateMemberReq
from huaweicloudsdkelb.v2.model.update_member_request import UpdateMemberRequest
from huaweicloudsdkelb.v2.model.update_member_request_body import UpdateMemberRequestBody
from huaweicloudsdkelb.v2.model.update_member_response import UpdateMemberResponse
from huaweicloudsdkelb.v2.model.update_pool_req import UpdatePoolReq
from huaweicloudsdkelb.v2.model.update_pool_request import UpdatePoolRequest
from huaweicloudsdkelb.v2.model.update_pool_request_body import UpdatePoolRequestBody
from huaweicloudsdkelb.v2.model.update_pool_response import UpdatePoolResponse
from huaweicloudsdkelb.v2.model.update_whitelist_req import UpdateWhitelistReq
from huaweicloudsdkelb.v2.model.update_whitelist_request import UpdateWhitelistRequest
from huaweicloudsdkelb.v2.model.update_whitelist_request_body import UpdateWhitelistRequestBody
from huaweicloudsdkelb.v2.model.update_whitelist_response import UpdateWhitelistResponse
from huaweicloudsdkelb.v2.model.whitelist_resp import WhitelistResp
| 84.722222
| 121
| 0.919404
| 1,796
| 16,775
| 8.332405
| 0.139198
| 0.266622
| 0.292015
| 0.351754
| 0.541129
| 0.507718
| 0.258336
| 0.082459
| 0.047043
| 0
| 0
| 0.016067
| 0.046438
| 16,775
| 197
| 122
| 85.152284
| 0.91948
| 0.003636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8cf0e53859e40d148324cd066673c6a4a76c4bdf
| 22,829
|
py
|
Python
|
from_config/dev/model_dev.py
|
astrockragh/IceCube
|
eba09e9f9a3c351dbf05496821bcd7d29ac0261c
|
[
"MIT"
] | null | null | null |
from_config/dev/model_dev.py
|
astrockragh/IceCube
|
eba09e9f9a3c351dbf05496821bcd7d29ac0261c
|
[
"MIT"
] | null | null | null |
from_config/dev/model_dev.py
|
astrockragh/IceCube
|
eba09e9f9a3c351dbf05496821bcd7d29ac0261c
|
[
"MIT"
] | 2
|
2021-03-03T20:39:38.000Z
|
2021-06-09T11:58:00.000Z
|
import os
import numpy as np
from spektral.layers.convolutional.gcn_conv import GCNConv
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
import tensorflow as tf
from spektral.layers import ECCConv, GraphSageConv, MessagePassing
from spektral.layers.pooling.global_pool import GlobalMaxPool, GlobalAvgPool, GlobalSumPool
from tensorflow.keras import Model, Input, Sequential
from tensorflow.keras.layers import Dense, LeakyReLU, BatchNormalization, Dropout, multiply
from tensorflow.keras.activations import tanh, sigmoid
from tensorflow.sparse import SparseTensor
eps=1e-5
print('loading model')
d_act=LeakyReLU(alpha=0.15)
def no_norm(x, training):
return x
#copy over from other model
class GAT(Model):
def __init__(self, n_out = 4, hidden_states=64, gat_layers=2, gat_activation='relu', decode_layers=3, decode_activation='relu', regularization=None, dropout=0.2, batch_norm=True, forward=True):
super().__init__()
self.n_out=n_out
self.hidden_states=hidden_states
self.gat_activation=conv_activation
self.forward=forward
self.dropout=dropout
self.gat_layers=gat_layers
self.regularize=regularization
if type(decode_activation)==str:
self.decode_activation=tf.keras.activations.get(decode_activation)
else:
self.decode_activation=decode_activation
self.batch_norm=batch_norm
# Define layers of the model
if self.edgeconv:
self.ECC1 = ECCConv(hidden_states, [hidden_states, hidden_states, hidden_states], n_out = hidden_states, activation = "relu", kernel_regularizer=self.regularize)
self.GCNs = [GCNConv(hidden_states*int(i), activation=self.conv_activation, kernel_regularizer=self.regularize) for i in 2**np.arange(self.conv_layers)]
self.Pool1 = GlobalMaxPool()
self.Pool2 = GlobalAvgPool()
self.Pool3 = GlobalSumPool()
self.decode = [Dense(i * hidden_states, activation=self.decode_activation) for i in 2**np.arange(decode_layers)]
self.dropout_layers = [Dropout(dropout) for i in range(len(self.decode))]
if self.batch_norm:
self.norm_layers = [BatchNormalization() for i in range(len(self.decode))]
else:
self.norm_layers = [no_norm for i in range(len(self.decode))]
self.final = Dense(n_out)
def call(self, inputs, training = False):
x, a, i = inputs
if self.edgeconv:
a, e = self.generate_edge_features(x, a)
x = self.ECC1([x, a, e])
for GCN_layer in self.GCNs:
x=GCN_layer([x,a])
x1 = self.Pool1([x, i])
x2 = self.Pool2([x, i])
x3 = self.Pool3([x, i])
x = tf.concat([x1, x2, x3], axis = 1)
for decode_layer, dropout_layer, norm_layer in zip(self.decode, self.dropout_layers, self.norm_layers):
x = dropout_layer(x, training = training)
x = self.decode_activation(decode_layer(x))
x = norm_layer(x, training = training)
x = self.final(x)
# tf.print(tf.shape(x))
return x
def generate_edge_features(self, x, a):
send = a.indices[:, 0]
receive = a.indices[:, 1]
if self.forward == True:
forwards = tf.gather(x[:, 3], send) <= tf.gather(x[:, 3], receive)
send = tf.cast(send[forwards], tf.int64)
receive = tf.cast(receive[forwards], tf.int64)
a = SparseTensor(indices = tf.stack([send, receive], axis = 1), values = tf.ones(tf.shape(send), dtype = tf.float32), dense_shape = tf.cast(tf.shape(a), tf.int64))
diff_x = tf.subtract(tf.gather(x, receive), tf.gather(x, send))
dists = tf.sqrt(
tf.reduce_sum(
tf.square(
diff_x[:, :3]
), axis = 1
))
vects = tf.math.divide_no_nan(diff_x[:, :3], tf.expand_dims(dists, axis = -1))
e = tf.concat([diff_x[:, 3:], tf.expand_dims(dists, -1), vects], axis = 1)
return a, e
class DevEdge(Model):
    """Development variant of the KHop graph network below.

    Adds, on top of the KHop pipeline: optional batch-normalisation of edge
    features (``edgenorm``), an optional ECCConv stage (``edgeconv``) and a
    selectable edge-feature set (``edgetype`` 0-3).  Per graph it predicts
    [log-energy, zenith, azimuth] plus ``n_sigs`` uncertainty columns.
    """
    def __init__(self, edgeconv, edgenorm, hidden_states=64, edgetype=0, forward=True, K=[1,2], agg_method='min',regularization=None, dropout=0.025):
        super().__init__()
        self.n_out=3
        self.n_sigs=2
        self.hidden_states=hidden_states
        self.conv_activation='relu'
        self.forward=forward
        self.dropout=dropout
        self.Ks=K
        self.agg_method=agg_method
        self.conv_layers=2
        self.decode_layers=2
        self.edgeconv=edgeconv
        self.edgenorm=edgenorm
        self.edgetype=edgetype
        self.regularize=regularization
        # d_act is a module-level activation defined elsewhere in this file.
        self.decode_activation=d_act
        self.batch_norm=True
        # Define layers of the model
        if self.edgenorm:
            self.norm_edge = BatchNormalization()
        # One SGConv message-passing block per entry of K.
        self.MPs = [SGConv(self.hidden_states, self.hidden_states, K=K, agg_method=self.agg_method, dropout = self.dropout) for K in self.Ks]
        if self.edgeconv:
            self.ECC1 = ECCConv(self.hidden_states, [self.hidden_states, self.hidden_states, self.hidden_states], n_out = self.hidden_states, activation = "relu", kernel_regularizer=self.regularize)
        # GraphSage widths grow as hidden_states * (4, 8) for conv_layers == 2.
        self.GCNs = [GraphSageConv(self.hidden_states*int(i), activation=self.conv_activation, kernel_regularizer=self.regularize) for i in 4*2**np.arange(self.conv_layers)]
        self.Pool1 = GlobalMaxPool()
        self.Pool2 = GlobalAvgPool()
        self.Pool3 = GlobalSumPool()
        # Decoder widths shrink by powers of two (largest first).
        self.decode = [Dense(i * self.hidden_states) for i in 2*2**np.arange(self.decode_layers+1,1,-1)]
        self.dropout_layers = [Dropout(self.dropout) for i in range(len(self.decode))]
        if self.batch_norm:
            self.norm_layers = [BatchNormalization() for i in range(len(self.decode))]
        else:
            # no_norm is a module-level identity defined elsewhere in the file.
            self.norm_layers = [no_norm for i in range(len(self.decode))]
        # Output heads: two hidden Dense layers each, then a linear readout.
        self.loge = [Dense(self.hidden_states) for _ in range(2)]
        self.loge_out = Dense(1)
        self.angles = [Dense(self.hidden_states) for _ in range(2)]
        self.angles_out = Dense(2)
        self.angle_scale= Dense(2)
        if self.n_sigs > 0:
            self.sigs = [Dense(self.hidden_states) for _ in range(2)]
            self.sigs_out = Dense(self.n_sigs)
    def call(self, inputs, training = False):
        # inputs: node features x, sparse adjacency a, graph-id vector i.
        x, a, i = inputs
        # Per-graph statistics of the raw node features; abs() guards against
        # tiny negative variances from floating-point cancellation.
        glob_avg=tf.math.segment_mean(x,i)
        glob_var=abs(tf.math.subtract(tf.math.segment_mean(multiply([x,x]),i),multiply([glob_avg, glob_avg])))
        glob_max=tf.math.segment_max(x,i)
        glob_min=tf.math.segment_min(x,i)
        xglob=tf.concat([glob_avg, glob_var, glob_max, glob_min], axis=1)
        a, e = self.generate_edge_features(x, a)
        if self.edgenorm:
            e=self.norm_edge(e)
        for MP in self.MPs:
            x = MP([x, a, e])
        if self.edgeconv:
            x = self.ECC1([x, a, e])
        for conv in self.GCNs:
            x=conv([x,a])
        # Three pooling flavours concatenated, then the global statistics.
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        x = tf.concat([x1, x2, x3], axis = 1)
        x=tf.concat([x, xglob], axis=1)
        for decode_layer, dropout_layer, norm_layer in zip(self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training = training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training = training)
        x_loge = self.loge[0](x)
        x_loge = self.loge[1](x_loge)
        x_loge = self.loge_out(x_loge)
        x_angles = self.angles[0](x)
        x_angles = self.angles[1](x_angles)
        x_angles = self.angles_out(x_angles)
        # Sigmoid squashes to (0,1); scaled to [0,pi] / [0,2pi] below.
        zeniazi=sigmoid(self.angle_scale(x_angles))
        if self.n_sigs > 0:
            x_sigs = self.sigs[0](x)
            x_sigs = self.sigs[1](x_sigs)
            # abs + eps (module-level constant) keeps uncertainties positive.
            x_sigs = tf.abs(self.sigs_out(x_sigs)) + eps
        #could add correlation here
        xs=tf.stack([x_loge[:,0], zeniazi[:,0]*np.pi, zeniazi[:,1]*2*np.pi], axis = 1)
        if self.n_sigs > 0:
            return tf.concat([xs, x_sigs], axis=1)
        else:
            return xs
    def generate_edge_features(self, x, a):
        """Build per-edge features and optionally keep only "forward" edges.

        edgetype selects the feature set:
          0: [non-spatial diffs, distance, unit direction vector]
          1: type 0 + SRT term (product of the endpoints' column 5)
          2: type 0 + slower-than-light flag (1 when apparent speed < c)
          3: type 0 + both extras from types 1 and 2
        """
        send = a.indices[:, 0]
        receive = a.indices[:, 1]
        if self.forward == True: #could maybe be improved
            # Keep edges whose sender column-3 value does not exceed the
            # receiver's (presumably the hit time — TODO confirm layout).
            forwards = tf.gather(x[:, 3], send) <= tf.gather(x[:, 3], receive)
            send = tf.cast(send[forwards], tf.int64)
            receive = tf.cast(receive[forwards], tf.int64)
            a = SparseTensor(indices = tf.stack([send, receive], axis = 1), values = tf.ones(tf.shape(send), dtype = tf.float32), dense_shape = tf.cast(tf.shape(a), tf.int64))
        ##distance vectors
        diff_x = tf.subtract(tf.gather(x, receive), tf.gather(x, send))
        # Euclidean distance over the first three (spatial) columns.
        dists = tf.sqrt(
            tf.reduce_sum(
                tf.square(
                    diff_x[:, :3]
                ), axis = 1
            ))
        # divide_no_nan yields a zero direction for coincident endpoints.
        vects = tf.math.divide_no_nan(diff_x[:, :3], tf.expand_dims(dists, axis = -1))
        if self.edgetype==0:
            e = tf.concat([diff_x[:, 3:], tf.expand_dims(dists, -1), vects], axis = 1)
        if self.edgetype==1:
            ## SRT, could make this is a mask
            prod_x = tf.math.multiply(tf.gather(x, receive), tf.gather(x, send))
            srt = prod_x[:,5]
            e = tf.concat([diff_x[:, 3:], tf.expand_dims(dists, -1), vects, tf.expand_dims(srt, -1)], axis = 1)
        if self.edgetype==2:
            st=2699 # time scale, database specific
            c=tf.constant(0.000299792458) #speed of light in km pr nanosec
            speed = tf.math.divide_no_nan(dists, st*diff_x[:,3]) #could add fudge factor to account for ice c lower than vacuum c
            speed = tf.math.greater_equal(speed, c)
            # Flag is 0 when apparent speed >= c, 1 otherwise.
            speed = tf.cast(tf.where(speed, 0,1), tf.float32)
            e = tf.concat([diff_x[:, 3:], tf.expand_dims(dists, -1), vects, tf.expand_dims(speed,-1)], axis = 1)
        if self.edgetype==3:
            #srt comm?
            prod_x = tf.math.multiply(tf.gather(x, receive), tf.gather(x, send))
            srt = prod_x[:,5]
            #higher than c?
            st=2699 # time scale, database specific
            c=tf.constant(0.000299792458) #speed of light in km pr nanosec
            speed = tf.math.divide_no_nan(dists, st*diff_x[:,3]) #could add fudge factor to account for ice c lower than vacuum c
            speed = tf.math.greater_equal(speed, c)
            speed = tf.cast(tf.where(speed, 0,1), tf.float32)
            e = tf.concat([diff_x[:, 3:], tf.expand_dims(dists, -1), vects, tf.expand_dims(srt, -1), tf.expand_dims(speed,-1)], axis = 1)
        # NOTE(review): an edgetype outside 0-3 leaves `e` unbound and this
        # return raises NameError — confirm only 0-3 are ever passed.
        return a, e
class MLP(Model):
    """Plain feed-forward stack: `layers` Dense layers, the last one of
    width `output`, each followed by Dropout when `dropout` > 0.

    NOTE(review): `batch_norm` and `final_activation` are accepted and
    (partly) stored but never used below — every layer, including the
    final one, is built with `activation`, and no BatchNormalization is
    added.  Confirm whether that is intentional before relying on them.
    """
    def __init__(self, output, hidden=256, layers=2, batch_norm=True,
                 dropout=0.0, activation='relu', final_activation=None):
        super().__init__()
        self.batch_norm = batch_norm
        self.dropout_rate = dropout
        self.mlp = Sequential()
        last = layers - 1
        for idx in range(layers):
            # Final layer projects to `output`; all earlier ones to `hidden`.
            width = output if idx == last else hidden
            self.mlp.add(Dense(width, activation=activation))
            if dropout > 0:
                self.mlp.add(Dropout(dropout))
    def call(self, inputs, training=False):
        """Apply the stack; `training` toggles the Dropout layers."""
        return self.mlp(inputs, training=training)
class SGConv(MessagePassing):
    # note that the D^-1/2 norm is not implemented since it is irrelevant for us
    def __init__(self, n_out, hidden_states, K=2, agg_method='sum', dropout = 0):
        """K-hop message-passing layer with a separate message MLP per hop.

        Agg_method supports "sum": scatter_sum,
        "mean": scatter_mean,
        "max": scatter_max,
        "min": scatter_min,
        "prod": scatter_prod"""
        super().__init__()
        # NOTE(review): n_out is stored but the output width is fixed by
        # update_mlp (hidden_states) — confirm n_out is intentionally unused.
        self.n_out = n_out
        self.agg_method=agg_method
        self.K=K
        self.hidden_states = hidden_states
        # One message MLP per hop; each consumes [x_i, x_j, e] (see message()).
        self.message_mlps = [MLP(hidden_states * 2, hidden = hidden_states * 4, layers = 2, dropout = dropout) for _ in range(self.K)]
        self.update_mlp = MLP(hidden_states * 1, hidden = hidden_states * 2, layers = 2, dropout = dropout)
    ##inverted structure since tf requires output func to be propagate
    def prop_khop(self, x, a, k, e=None, training = False, **kwargs):
        """Run the k-th hop: build messages and aggregate them per node."""
        self.n_nodes = tf.shape(x)[0]
        # index_i / index_j feed the inherited get_i/get_j and aggregate()
        # from the MessagePassing base class — presumably Spektral; verify.
        self.index_i = a.indices[:, 1]
        self.index_j = a.indices[:, 0]
        # Message
        # print(x, a, e)
        # msg_kwargs = self.get_kwargs(x, a, e, self.msg_signature, kwargs)
        messages = self.message(x, a, k, e, training = training)
        # Aggregate
        # agg_kwargs = self.get_kwargs(x, a, e, self.agg_signature, kwargs)
        ## make own aggregate
        embeddings = self.aggregate(messages, training = training)
        return embeddings
    def propagate(self, x, a, e, training=False):
        """Chain K hops of prop_khop, then apply the update MLP."""
        for hop in range(self.K):
            x=self.prop_khop(x,a, hop, e, training = training)
        return self.update(x, training = training)
    def message(self, x, a, k, e, training = False):
        """Per-edge message: MLP over [receiver feats, sender feats, edge feats]."""
        # print([self.get_i(x), self.get_j(x), e])
        out = tf.concat([self.get_i(x), self.get_j(x), e], axis = 1)
        out = self.message_mlps[k](out, training = training)
        return out
    def update(self, embeddings, training = False):
        """Final node-wise transformation after all hops."""
        out = self.update_mlp(embeddings, training = training)
        return out
class KHop(Model):
    """Graph network predicting [log-energy, zenith, azimuth] per graph,
    plus ``n_sigs`` uncertainty columns when ``n_sigs`` > 0.

    Pipeline: edge-feature construction -> SGConv message passing (one block
    per entry in ``K``) -> GraphSageConv stack -> three global poolings +
    per-graph input statistics -> dense decoder -> separate output heads.
    """
    def __init__(self, n_out = 3, n_sigs=2, K=[1,2,3], agg_method='sum', hidden_states=64, glob=True, conv_layers=1, conv_activation='relu', decode_layers=2, decode_activation=1, regularization=None, dropout=0.2, batch_norm=True, forward=True):
        super().__init__()
        self.n_out=n_out
        self.n_sigs=n_sigs
        self.hidden_states=hidden_states
        self.conv_activation=conv_activation
        self.forward=forward
        self.dropout=dropout
        self.glob=glob
        self.Ks=K
        self.agg_method=agg_method
        self.conv_layers=conv_layers
        self.regularize=regularization
        # A non-string decode_activation (default 1) falls back to the
        # module-level d_act defined elsewhere in this file.
        if type(decode_activation)==str:
            self.decode_activation=tf.keras.activations.get(decode_activation)
        else:
            self.decode_activation=d_act
        self.batch_norm=batch_norm
        # Define layers of the model
        self.MPs = [SGConv(hidden_states, hidden_states, K=K, agg_method=self.agg_method, dropout = dropout) for K in self.Ks]
        # GraphSage widths grow as hidden_states * (2, 4, ...) over conv_layers.
        self.GCNs = [GraphSageConv(hidden_states*int(i), activation=self.conv_activation, kernel_regularizer=self.regularize) for i in 2*2**np.arange(self.conv_layers)]
        self.Pool1 = GlobalMaxPool()
        self.Pool2 = GlobalAvgPool()
        self.Pool3 = GlobalSumPool()
        # Decoder widths shrink by powers of two (largest first).
        self.decode = [Dense(i * hidden_states) for i in 2*2**np.arange(decode_layers+1,1,-1)]
        self.dropout_layers = [Dropout(dropout) for i in range(len(self.decode))]
        if self.batch_norm:
            self.norm_layers = [BatchNormalization() for i in range(len(self.decode))]
        else:
            # no_norm is a module-level identity defined elsewhere in the file.
            self.norm_layers = [no_norm for i in range(len(self.decode))]
        # Output heads: two hidden Dense layers each, then a linear readout.
        self.loge = [Dense(hidden_states) for _ in range(2)]
        self.loge_out = Dense(1)
        self.angles = [Dense(hidden_states) for _ in range(2)]
        self.angles_out = Dense(2)
        self.angle_scale= Dense(2)
        if n_sigs > 0:
            self.sigs = [Dense(hidden_states) for i in range(2)]
            self.sigs_out = Dense(n_sigs)
    def call(self, inputs, training = False):
        # inputs: node features x, sparse adjacency a, graph-id vector i.
        x, a, i = inputs
        # Per-graph statistics of the raw node features; abs() guards against
        # tiny negative variances from floating-point cancellation.
        glob_avg=tf.math.segment_mean(x,i)
        glob_var=abs(tf.math.subtract(tf.math.segment_mean(multiply([x,x]),i),multiply([glob_avg, glob_avg])))
        glob_max=tf.math.segment_max(x,i)
        glob_min=tf.math.segment_min(x,i)
        xglob=tf.concat([glob_avg, glob_var, glob_max, glob_min], axis=1)
        a, e = self.generate_edge_features(x, a)
        for MP in self.MPs:
            x = MP([x, a, e])
        for conv in self.GCNs:
            x=conv([x,a])
        # Three pooling flavours concatenated, then the global statistics.
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        x = tf.concat([x1, x2, x3], axis = 1)
        x=tf.concat([x, xglob], axis=1)
        for decode_layer, dropout_layer, norm_layer in zip(self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training = training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training = training)
        x_loge = self.loge[0](x)
        x_loge = self.loge[1](x_loge)
        x_loge = self.loge_out(x_loge)
        x_angles = self.angles[0](x)
        x_angles = self.angles[1](x_angles)
        x_angles = self.angles_out(x_angles)
        # Sigmoid squashes to (0,1); scaled to [0,pi] / [0,2pi] below.
        zeniazi=sigmoid(self.angle_scale(x_angles))
        if self.n_sigs > 0:
            x_sigs = self.sigs[0](x)
            x_sigs = self.sigs[1](x_sigs)
            # abs + eps (module-level constant) keeps uncertainties positive.
            x_sigs = tf.abs(self.sigs_out(x_sigs)) + eps
        #could add correlation here
        xs=tf.stack([x_loge[:,0], zeniazi[:,0]*np.pi, zeniazi[:,1]*2*np.pi], axis = 1)
        if self.n_sigs > 0:
            return tf.concat([xs, x_sigs], axis=1)
        else:
            return xs
    def generate_edge_features(self, x, a):
        """Build edge features; optionally drop "backward" edges.

        Returns the (possibly filtered) adjacency and per-edge features
        [non-spatial diffs (cols 3+), Euclidean distance, unit direction].
        """
        send = a.indices[:, 0]
        receive = a.indices[:, 1]
        if self.forward == True:
            # Keep edges whose sender column-3 value does not exceed the
            # receiver's (presumably the hit time — TODO confirm layout).
            forwards = tf.gather(x[:, 3], send) <= tf.gather(x[:, 3], receive)
            send = tf.cast(send[forwards], tf.int64)
            receive = tf.cast(receive[forwards], tf.int64)
            a = SparseTensor(indices = tf.stack([send, receive], axis = 1), values = tf.ones(tf.shape(send), dtype = tf.float32), dense_shape = tf.cast(tf.shape(a), tf.int64))
        diff_x = tf.subtract(tf.gather(x, receive), tf.gather(x, send))
        # Euclidean distance over the first three (spatial) columns.
        dists = tf.sqrt(
            tf.reduce_sum(
                tf.square(
                    diff_x[:, :3]
                ), axis = 1
            ))
        # divide_no_nan yields a zero direction for coincident endpoints.
        vects = tf.math.divide_no_nan(diff_x[:, :3], tf.expand_dims(dists, axis = -1))
        e = tf.concat([diff_x[:, 3:], tf.expand_dims(dists, -1), vects], axis = 1)
        return a, e
class KHopSplit(Model):
    """Variant of KHop above with separate heads per output.

    Instead of joint angle/uncertainty heads it uses one head each for
    zenith, azimuth and their uncertainties, and always returns the
    5-column stack [log-energy, zenith, azimuth, sigma_zenith, sigma_azi].
    """
    def __init__(self, n_out = 3, n_sigs=2, K=[1,2,3], agg_method='sum', hidden_states=64, glob=True, conv_layers=1, conv_activation='relu', decode_layers=2, decode_activation=1, regularization=None, dropout=0.2, batch_norm=True, forward=True):
        super().__init__()
        self.n_out=n_out
        # NOTE(review): n_sigs is stored but the sigma heads below are built
        # unconditionally, unlike KHop — confirm this asymmetry is intended.
        self.n_sigs=n_sigs
        self.hidden_states=hidden_states
        self.conv_activation=conv_activation
        self.forward=forward
        self.dropout=dropout
        self.glob=glob
        self.Ks=K
        self.agg_method=agg_method
        self.conv_layers=conv_layers
        self.regularize=regularization
        # A non-string decode_activation (default 1) falls back to the
        # module-level d_act defined elsewhere in this file.
        if type(decode_activation)==str:
            self.decode_activation=tf.keras.activations.get(decode_activation)
        else:
            self.decode_activation=d_act
        self.batch_norm=batch_norm
        # Define layers of the model
        self.MPs = [SGConv(hidden_states, hidden_states, K=K, agg_method=self.agg_method, dropout = dropout) for K in self.Ks]
        self.GCNs = [GraphSageConv(hidden_states*int(i), activation=self.conv_activation, kernel_regularizer=self.regularize) for i in 2*2**np.arange(self.conv_layers)]
        self.Pool1 = GlobalMaxPool()
        self.Pool2 = GlobalAvgPool()
        self.Pool3 = GlobalSumPool()
        # Decoder widths shrink by powers of two (largest first).
        self.decode = [Dense(i * hidden_states) for i in 2*2**np.arange(decode_layers+1,1,-1)]
        self.dropout_layers = [Dropout(dropout) for i in range(len(self.decode))]
        if self.batch_norm:
            self.norm_layers = [BatchNormalization() for i in range(len(self.decode))]
        else:
            # no_norm is a module-level identity defined elsewhere in the file.
            self.norm_layers = [no_norm for i in range(len(self.decode))]
        # One independent head (two hidden Dense + readout) per output.
        self.loge = [Dense(hidden_states) for _ in range(2)]
        self.loge_out = Dense(1)
        self.zeni = [Dense(hidden_states) for _ in range(2)]
        self.zeni_out = Dense(1)
        self.azi = [Dense(hidden_states) for _ in range(2)]
        self.azi_out = Dense(1)
        self.zeni_scale= Dense(1)
        self.azi_scale= Dense(1)
        self.sig_zeni = [Dense(hidden_states) for i in range(2)]
        self.sig_zeni_out = Dense(1)
        self.sig_azi = [Dense(hidden_states) for i in range(2)]
        self.sig_azi_out = Dense(1)
    def call(self, inputs, training = False):
        # inputs: node features x, sparse adjacency a, graph-id vector i.
        x, a, i = inputs
        # Per-graph statistics of the raw node features; abs() guards against
        # tiny negative variances from floating-point cancellation.
        glob_avg=tf.math.segment_mean(x,i)
        glob_var=abs(tf.math.subtract(tf.math.segment_mean(multiply([x,x]),i),multiply([glob_avg, glob_avg])))
        glob_max=tf.math.segment_max(x,i)
        glob_min=tf.math.segment_min(x,i)
        xglob=tf.concat([glob_avg, glob_var, glob_max, glob_min], axis=1)
        a, e = self.generate_edge_features(x, a)
        for MP in self.MPs:
            x = MP([x, a, e])
        for conv in self.GCNs:
            x=conv([x,a])
        # Three pooling flavours concatenated, then the global statistics.
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        x = tf.concat([x1, x2, x3], axis = 1)
        x=tf.concat([x, xglob], axis=1)
        for decode_layer, dropout_layer, norm_layer in zip(self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training = training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training = training)
        x_loge = self.loge[0](x)
        x_loge = self.loge[1](x_loge)
        x_loge = self.loge_out(x_loge)
        x_zeni = self.zeni[0](x)
        x_zeni = self.zeni[1](x_zeni)
        x_zeni = self.zeni_out(x_zeni)
        # Sigmoid squashes to (0,1); scaled to [0,pi] / [0,2pi] below.
        zeni=sigmoid(self.zeni_scale(x_zeni))
        x_azi = self.azi[0](x)
        x_azi = self.azi[1](x_azi)
        x_azi = self.azi_out(x_azi)
        azi=sigmoid(self.azi_scale(x_azi))
        sig_z = self.sig_zeni[0](x)
        sig_z = self.sig_zeni[1](sig_z)
        # abs + eps (module-level constant) keeps uncertainties positive.
        sig_z = tf.abs(self.sig_zeni_out(sig_z)) + eps
        sig_az = self.sig_azi[0](x)
        sig_az = self.sig_azi[1](sig_az)
        sig_az = tf.abs(self.sig_azi_out(sig_az)) + eps
        #could add correlation here
        x=tf.stack([x_loge[:,0], zeni[:,0]*np.pi, azi[:,0]*2*np.pi, sig_z[:,0], sig_az[:,0]], axis = 1)
        return x
    def generate_edge_features(self, x, a):
        """Build edge features; optionally drop "backward" edges.

        Identical to KHop.generate_edge_features — candidate for a shared
        helper.  Returns the (possibly filtered) adjacency and per-edge
        features [non-spatial diffs, Euclidean distance, unit direction].
        """
        send = a.indices[:, 0]
        receive = a.indices[:, 1]
        if self.forward == True:
            # Keep edges whose sender column-3 value does not exceed the
            # receiver's (presumably the hit time — TODO confirm layout).
            forwards = tf.gather(x[:, 3], send) <= tf.gather(x[:, 3], receive)
            send = tf.cast(send[forwards], tf.int64)
            receive = tf.cast(receive[forwards], tf.int64)
            a = SparseTensor(indices = tf.stack([send, receive], axis = 1), values = tf.ones(tf.shape(send), dtype = tf.float32), dense_shape = tf.cast(tf.shape(a), tf.int64))
        diff_x = tf.subtract(tf.gather(x, receive), tf.gather(x, send))
        # Euclidean distance over the first three (spatial) columns.
        dists = tf.sqrt(
            tf.reduce_sum(
                tf.square(
                    diff_x[:, :3]
                ), axis = 1
            ))
        # divide_no_nan yields a zero direction for coincident endpoints.
        vects = tf.math.divide_no_nan(diff_x[:, :3], tf.expand_dims(dists, axis = -1))
        e = tf.concat([diff_x[:, 3:], tf.expand_dims(dists, -1), vects], axis = 1)
        return a, e
| 40.766071
| 244
| 0.60686
| 3,329
| 22,829
| 3.994293
| 0.077801
| 0.048733
| 0.01083
| 0.013236
| 0.787095
| 0.764007
| 0.736632
| 0.725201
| 0.705949
| 0.687448
| 0
| 0.021962
| 0.26002
| 22,829
| 560
| 245
| 40.766071
| 0.765169
| 0.047045
| 0
| 0.728111
| 0
| 0
| 0.003598
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046083
| false
| 0.004608
| 0.023041
| 0.004608
| 0.119816
| 0.002304
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8cf957b985cbf2f565da3a545f37b6007d69179b
| 10,247
|
py
|
Python
|
ore/tests/tests_permissions.py
|
lukegb/Ore-python
|
1d1c73795406fa52ae969726feb89f7aedbc4afc
|
[
"MIT"
] | 1
|
2016-05-24T14:49:42.000Z
|
2016-05-24T14:49:42.000Z
|
ore/tests/tests_permissions.py
|
gratimax/ore-old
|
1d1c73795406fa52ae969726feb89f7aedbc4afc
|
[
"MIT"
] | null | null | null |
ore/tests/tests_permissions.py
|
gratimax/ore-old
|
1d1c73795406fa52ae969726feb89f7aedbc4afc
|
[
"MIT"
] | null | null | null |
from django.contrib.contenttypes.models import ContentType
from ore.accounts.models import OreUser
from ore.core.models import Permission, Organization
from django.test import TestCase
from ore.projects.models import Project
from ore.teams.models import OrganizationTeam
class PermissionsTestCase(TestCase):
    """Exercises permission resolution on Organizations and Projects for
    owner teams, all-project teams and limited (per-project) teams."""

    def make_john(self):
        """Create and return a regular (non-superuser) user."""
        user_john = OreUser.objects.create_user(
            'john', 'password', 'john@ore.spongepowered.org')
        user_john.is_superuser = False
        user_john.save()
        return user_john

    def setUp(self):
        # Register three organization-level and three project-level
        # permissions used by the tests below.
        org_content_type = ContentType.objects.get_for_model(Organization)
        self.org_permission_foo = Permission.objects.create(
            slug='org.foo.do', name='Do Foo', description='Performs foo', applies_to_model=org_content_type)
        self.org_permission_bar = Permission.objects.create(
            slug='org.foo.bar', name='Bar', description='Bars foo', applies_to_model=org_content_type)
        self.org_permission_baz = Permission.objects.create(
            slug='org.baz.do', name='Do Baz', description='Bazzes the widget', applies_to_model=org_content_type)
        # Fixed: was get_for_model(Organization) — a copy-paste slip; the
        # proj.* permissions must apply to the Project model.
        proj_content_type = ContentType.objects.get_for_model(Project)
        self.proj_permission_foo = Permission.objects.create(
            slug='proj.foo.do', name='Do Foo', description='Performs foo', applies_to_model=proj_content_type)
        self.proj_permission_bar = Permission.objects.create(
            slug='proj.foo.bar', name='Bar', description='Bars foo', applies_to_model=proj_content_type)
        self.proj_permission_baz = Permission.objects.create(
            slug='proj.baz.do', name='Do Baz', description='Bazzes the widget', applies_to_model=proj_content_type)

    def test_unrelated_people_cant_do_anything_on_organization(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        user_john = self.make_john()
        self.assertFalse(organization_sponge.user_has_permission(
            user_john, 'org.foo.do'), 'John can\'t foo.do')
        self.assertFalse(organization_sponge.user_has_permission(
            user_john, 'org.foo.bar'), 'John can\'t foo.bar')
        self.assertFalse(organization_sponge.user_has_permission(
            user_john, 'org.baz.do'), 'John can\'t baz.do')

    def test_unrelated_people_cant_do_anything_on_organization_project(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        project_sponge = Project.objects.create(
            name='Sponge', namespace=organization_sponge, description='Sponge',
        )
        user_john = self.make_john()
        self.assertFalse(project_sponge.user_has_permission(
            user_john, 'proj.foo.do'), 'John can\'t foo.do')
        self.assertFalse(project_sponge.user_has_permission(
            user_john, 'proj.foo.bar'), 'John can\'t foo.bar')
        self.assertFalse(project_sponge.user_has_permission(
            user_john, 'proj.baz.do'), 'John can\'t baz.do')

    def test_organization_owner_can_do_everything_on_organization(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        team = organization_sponge.teams.get(is_owner_team=True)
        user_john = self.make_john()
        # NOTE(review): direct M2M assignment only works on Django < 1.10;
        # newer versions require team.users.set([...]).
        team.users = [user_john]
        self.assertTrue(organization_sponge.user_has_permission(
            user_john, 'org.foo.do'), 'John can foo.do')
        self.assertTrue(organization_sponge.user_has_permission(
            user_john, 'org.foo.bar'), 'John can foo.bar')
        self.assertTrue(organization_sponge.user_has_permission(
            user_john, 'org.baz.do'), 'John can baz.do')

    def test_organization_owner_can_do_everything_on_project(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        project_sponge = Project.objects.create(
            name='Sponge', namespace=organization_sponge, description='Sponge'
        )
        team = organization_sponge.teams.get(is_owner_team=True)
        user_john = self.make_john()
        team.users = [user_john]
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.foo.do'), 'John can foo.do')
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.foo.bar'), 'John can foo.bar')
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.baz.do'), 'John can baz.do')

    def test_project_owner_can_do_everything_on_project(self):
        user_john = self.make_john()
        project_sponge = Project.objects.create(
            name='Sponge', namespace=user_john, description='Sponge'
        )
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.foo.do'), 'John can foo.do')
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.foo.bar'), 'John can foo.bar')
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.baz.do'), 'John can baz.do')

    def test_organization_all_project_teams_grant_permissions_on_projects(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        project_sponge = Project.objects.create(
            name='Sponge', namespace=organization_sponge, description='Sponge'
        )
        user_john = self.make_john()
        team = OrganizationTeam.objects.create(
            name='People',
            organization=organization_sponge,
            is_all_projects=True,
            is_owner_team=False,
        )
        team.users = [user_john]
        team.permissions = [self.proj_permission_foo, self.proj_permission_bar]
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.foo.do'), 'John can foo.do')
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.foo.bar'), 'John can foo.bar')
        self.assertFalse(project_sponge.user_has_permission(
            user_john, 'proj.baz.do'), 'John can\'t baz.do')

    def test_organization_all_project_teams_grant_permissions_on_organisations(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        user_john = self.make_john()
        team = OrganizationTeam.objects.create(
            name='People',
            organization=organization_sponge,
            is_all_projects=True,
            is_owner_team=False,
        )
        team.users = [user_john]
        team.permissions = [self.org_permission_foo, self.org_permission_bar]
        self.assertTrue(organization_sponge.user_has_permission(
            user_john, 'org.foo.do'), 'John can foo.do')
        self.assertTrue(organization_sponge.user_has_permission(
            user_john, 'org.foo.bar'), 'John can foo.bar')
        self.assertFalse(organization_sponge.user_has_permission(
            user_john, 'org.baz.do'), 'John can\'t baz.do')

    def test_organization_limited_project_teams_grant_permissions_on_selected_projects(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        project_sponge = Project.objects.create(
            name='Sponge', namespace=organization_sponge, description='Sponge'
        )
        user_john = self.make_john()
        team = OrganizationTeam.objects.create(
            name='People',
            organization=organization_sponge,
            is_all_projects=False,
            is_owner_team=False,
        )
        team.users = [user_john]
        team.permissions = [self.proj_permission_foo, self.proj_permission_bar]
        team.projects = [project_sponge]
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.foo.do'), 'John can foo.do')
        self.assertTrue(project_sponge.user_has_permission(
            user_john, 'proj.foo.bar'), 'John can foo.bar')
        self.assertFalse(project_sponge.user_has_permission(
            user_john, 'proj.baz.do'), 'John can\'t baz.do')

    def test_organization_limited_project_teams_dont_grant_permissions_on_unselected_projects(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        project_sponge = Project.objects.create(
            name='Sponge', namespace=organization_sponge, description='Sponge'
        )
        project_spongeapi = Project.objects.create(
            name='SpongeAPI', namespace=organization_sponge, description='Sponge'
        )
        user_john = self.make_john()
        team = OrganizationTeam.objects.create(
            name='People',
            organization=organization_sponge,
            is_all_projects=False,
            is_owner_team=False,
        )
        team.users = [user_john]
        team.permissions = [self.proj_permission_foo, self.proj_permission_bar]
        team.projects = [project_spongeapi]
        self.assertFalse(project_sponge.user_has_permission(
            user_john, 'proj.foo.do'), 'John can\'t foo.do')
        self.assertFalse(project_sponge.user_has_permission(
            user_john, 'proj.foo.bar'), 'John can\'t foo.bar')
        self.assertFalse(project_sponge.user_has_permission(
            user_john, 'proj.baz.do'), 'John can\'t baz.do')

    def test_organization_limited_project_teams_dont_grant_permissions_on_organisations(self):
        organization_sponge = Organization.objects.create(name='Sponge')
        project_sponge = Project.objects.create(
            name='Sponge', namespace=organization_sponge, description='Sponge'
        )
        user_john = self.make_john()
        team = OrganizationTeam.objects.create(
            name='People',
            organization=organization_sponge,
            is_all_projects=False,
            is_owner_team=False,
        )
        team.users = [user_john]
        team.permissions = [self.org_permission_foo, self.org_permission_bar]
        team.projects = [project_sponge]
        self.assertFalse(organization_sponge.user_has_permission(
            user_john, 'org.foo.do'), 'John can\'t foo.do')
        self.assertFalse(organization_sponge.user_has_permission(
            user_john, 'org.foo.bar'), 'John can\'t foo.bar')
        # Fixed typo: was 'orgl.baz.do', a nonexistent slug that made this
        # assertion vacuously true regardless of the permission logic.
        self.assertFalse(organization_sponge.user_has_permission(
            user_john, 'org.baz.do'), 'John can\'t baz.do')
| 46.157658
| 115
| 0.679809
| 1,233
| 10,247
| 5.360097
| 0.068127
| 0.064155
| 0.05901
| 0.104403
| 0.919958
| 0.913149
| 0.876381
| 0.864881
| 0.857013
| 0.828113
| 0
| 0
| 0.215185
| 10,247
| 221
| 116
| 46.366516
| 0.82181
| 0
| 0
| 0.691489
| 0
| 0
| 0.105885
| 0.002537
| 0
| 0
| 0
| 0
| 0.159574
| 1
| 0.06383
| false
| 0.005319
| 0.031915
| 0
| 0.106383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5069f67c5c7bc019b3bb913925c7782b29dba34f
| 56
|
py
|
Python
|
test.py
|
anhlt59/firebase-tutorial
|
0044873cbbdb4c75769941af6df2b0f2de473cbc
|
[
"MIT"
] | null | null | null |
test.py
|
anhlt59/firebase-tutorial
|
0044873cbbdb4c75769941af6df2b0f2de473cbc
|
[
"MIT"
] | null | null | null |
test.py
|
anhlt59/firebase-tutorial
|
0044873cbbdb4c75769941af6df2b0f2de473cbc
|
[
"MIT"
] | null | null | null |
import firebase_admin
# from firebase_admin import db
| 11.2
| 31
| 0.821429
| 8
| 56
| 5.5
| 0.625
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160714
| 56
| 4
| 32
| 14
| 0.93617
| 0.517857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
506fcb5f72f8f72d20284d842495b20339901545
| 353
|
py
|
Python
|
scripts/whitelist.py
|
marcofavorito/google-hashcode-2021
|
d12ea986343d27bf531247e7e70e6bea030116fd
|
[
"MIT"
] | null | null | null |
scripts/whitelist.py
|
marcofavorito/google-hashcode-2021
|
d12ea986343d27bf531247e7e70e6bea030116fd
|
[
"MIT"
] | null | null | null |
scripts/whitelist.py
|
marcofavorito/google-hashcode-2021
|
d12ea986343d27bf531247e7e70e6bea030116fd
|
[
"MIT"
] | null | null | null |
# type: ignore
# Vulture dead-code whitelist: this file is parsed (not executed) by the
# vulture checker; referencing the names here prevents them from being
# reported as unused at the locations noted in each trailing comment.
o # unused variable (hashcode/common/base.py:38)
write_input # unused function (hashcode/common/base.py:59)
obj # unused variable (hashcode/common/base.py:59)
read_output # unused function (hashcode/common/base.py:70)
obj # unused variable (hashcode/common/base.py:80)
_._module_name # unused attribute (hashcode/common/core.py:22)
| 44.125
| 63
| 0.76204
| 53
| 353
| 4.981132
| 0.45283
| 0.318182
| 0.340909
| 0.378788
| 0.681818
| 0.666667
| 0.280303
| 0
| 0
| 0
| 0
| 0.038217
| 0.110482
| 353
| 7
| 64
| 50.428571
| 0.802548
| 0.8017
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
507aa04f87a5c2f1fcc7be8f05e51279ca73612f
| 30,476
|
py
|
Python
|
scripts/DR_comparison_stats_SG41_52.py
|
hhuang2018/HLAWholeGeneAnalysis
|
9cdd2e062a6cc2eed2ebfa84e1888687b2b98cf3
|
[
"MIT"
] | 2
|
2018-03-28T19:06:40.000Z
|
2020-08-06T08:32:09.000Z
|
scripts/DR_comparison_stats_SG41_52.py
|
hhuang2018/HLAWholeGeneAnalysis
|
9cdd2e062a6cc2eed2ebfa84e1888687b2b98cf3
|
[
"MIT"
] | null | null | null |
scripts/DR_comparison_stats_SG41_52.py
|
hhuang2018/HLAWholeGeneAnalysis
|
9cdd2e062a6cc2eed2ebfa84e1888687b2b98cf3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 3 14:42:06 2017
@author: hhuang2
"""
import glob
import sqlite3 as sql
# from utils import phase_block_check as ps
from utils import IMGTdbIO, CompareSeq
import os
import re
fname = '../Output/SG41_52/2018/IMGTv3310/SG41_52_DRpair_Stats/SG41_52_pairedCases_Stats.pkl'
Matching_cases_stats = IMGTdbIO.load_pickle2dict(fname)
## 'All_paired'
groupType = 'fiveLoci_paired' # groupType = 'ClassI_paired' # groupType = 'All_paired'
group_caseIDs = Matching_cases_stats[groupType]
All_loci = ['A', 'B', 'C', 'DRB1', 'DQB1']#, 'DPB1']
ClassI_loci = ['A', 'B', 'C']
ClassII_loci = ['DRB1', 'DQB1']
CaseStats = {}
LocusStats = {}
#MatchStats = {}
for caseID in group_caseIDs:
#
for locus in ClassI_loci:
ARSregion = ['Exon2', 'Exon3']
bothMM_output = "../Output/SG41_52/2018/IMGTv3310/SG41_52_bothMisMatched_locus_" + locus + "_0125_TargetedAlignment/" # "_1218_TargetedAlignment/"
singleMM_output = "../Output/SG41_52/2018/IMGTv3310/SG41_52_singleMisMatched_" + locus + "_0125_TargetedAlignment/"
### Cases where both sequences don't match
if caseID in Matching_cases_stats[locus+'_both_Seqmm']:
mm_file_PS1 = bothMM_output+ 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation_PS1.pkl'
mm_file_PS2 = bothMM_output+ 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation_PS2.pkl'
mm_locus_stats_PS1 = IMGTdbIO.load_pickle2dict(mm_file_PS1)
mm_locus_stats_PS1 = CompareSeq.rmRefAln(mm_locus_stats_PS1)
mm_locus_stats_PS2 = IMGTdbIO.load_pickle2dict(mm_file_PS2)
mm_locus_stats_PS2 = CompareSeq.rmRefAln(mm_locus_stats_PS2)
#if len(mm_locus_stats_PS1['MMpos']) > 20 or len(mm_locus_stats_PS2['MMpos']) > 20:
if CompareSeq.isARSmm(mm_locus_stats_PS1['MMannotation'].values(), ARSregion) and CompareSeq.isARSmm(mm_locus_stats_PS2['MMannotation'].values(), ARSregion):
# probably phase set swap.
seq_ps1 = mm_locus_stats_PS1['seq']
seq_ps2 = mm_locus_stats_PS2['seq']
params_ps1 = mm_locus_stats_PS1['params']
params_ps2 = mm_locus_stats_PS2['params']
#tp = params_ps2['HLAtyping']
#tp = [tp[1], tp[0]]
#params_ps2['HLAtyping'] = tp
swapped_alignment = CompareSeq.swapPS_comparison(seq_ps1, params_ps1, seq_ps2, params_ps2, caseID)
#if max([len(swapped_alignment['PS1']['MMpos']), len(swapped_alignment['PS2']['MMpos'])]) < max([len(mm_locus_stats_PS1['MMpos']), len(mm_locus_stats_PS2['MMpos'])]):
if not CompareSeq.isARSmm(swapped_alignment['PS1']['MMannotation'].values(), ARSregion) or not CompareSeq.isARSmm(swapped_alignment['PS2']['MMannotation'].values(), ARSregion):
# if swapped case is better, then use the swapped case
mm_locus_stats_PS1 = swapped_alignment['PS1']
mm_locus_stats_PS2 = swapped_alignment['PS2']
params_ps1 = mm_locus_stats_PS1['params']
params_ps2 = mm_locus_stats_PS2['params']
# caseStats
if caseID in CaseStats.keys():
CaseStats[caseID][locus] = {'PS1': CompareSeq.RegionCount(mm_locus_stats_PS1['MMannotation'], locus, True), 'PS2': CompareSeq.RegionCount(mm_locus_stats_PS2['MMannotation'], locus, True)}
CaseStats[caseID][locus]['HLAtyping'] = params_ps1['HLAtyping'] + params_ps2['HLAtyping']
else:
CaseStats[caseID] = {locus:{'PS1': CompareSeq.RegionCount(mm_locus_stats_PS1['MMannotation'], locus, True), 'PS2': CompareSeq.RegionCount(mm_locus_stats_PS2['MMannotation'], locus, True),
'HLAtyping':params_ps1['HLAtyping'] + params_ps2['HLAtyping']}}
if len(mm_locus_stats_PS1['params']['HLAtyping']) == 1:
typing = mm_locus_stats_PS1['params']['HLAtyping'][0]
if typing in LocusStats.keys():
if CaseStats[caseID][locus]['PS1']['ARS'] > 0:
if 'ARS' in LocusStats[typing].keys():
LocusStats[typing]['ARS'].append(caseID)
else:
LocusStats[typing] = {'ARS': [caseID]}
#LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Non_ARS_exon'] >0:
if 'Non_ARS_exon' in LocusStats[typing].keys():
LocusStats[typing]['Non_ARS_exon'].append(caseID)
else:
LocusStats[typing] = {'Non_ARS_exon': [caseID]}
#LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Intron'] > 0:
if 'Intron' in LocusStats[typing].keys():
LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'Intron': [caseID]}
#LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'ARS': [], 'Non_ARS_exon': [], 'Intron': []}
if CaseStats[caseID][locus]['PS1']['ARS'] > 0:
LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Non_ARS_exon'] >0:
LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Intron'] > 0:
LocusStats[typing]['Intron'].append(caseID)
for key, item in mm_locus_stats_PS1['MMannotation'].items():
if key.isdigit():
if item in LocusStats[typing].keys():
LocusStats[typing][item].append(caseID)
else:
LocusStats[typing] = {item: [caseID]}
if len(mm_locus_stats_PS2['params']['HLAtyping']) == 1:
typing = mm_locus_stats_PS2['params']['HLAtyping'][0]
if typing in LocusStats.keys():
if CaseStats[caseID][locus]['PS2']['ARS'] > 0:
if 'ARS' in LocusStats[typing].keys():
LocusStats[typing]['ARS'].append(caseID)
else:
LocusStats[typing] = {'ARS': [caseID]}
#LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS2']['Non_ARS_exon'] >0:
if 'Non_ARS_exon' in LocusStats[typing].keys():
LocusStats[typing]['Non_ARS_exon'].append(caseID)
else:
LocusStats[typing] = {'Non_ARS_exon': [caseID]}
#LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS2']['Intron'] > 0:
if 'Intron' in LocusStats[typing].keys():
LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'Intron': [caseID]}
#LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'ARS': [], 'Non_ARS_exon': [], 'Intron': []}
if CaseStats[caseID][locus]['PS2']['ARS'] > 0:
LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS2']['Non_ARS_exon'] >0:
LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS2']['Intron'] > 0:
LocusStats[typing]['Intron'].append(caseID)
for key, item in mm_locus_stats_PS2['MMannotation'].items():
if key.isdigit():
if item in LocusStats[typing].keys():
LocusStats[typing][item].append(caseID)
else:
LocusStats[typing] = {item: [caseID]}
elif caseID in Matching_cases_stats[locus+'_one_Seqmm']:
### Cases where only one sequence doesn't match
mm_file = singleMM_output+ 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation.pkl'
mm_locus_stats = IMGTdbIO.load_pickle2dict(mm_file)
mm_locus_stats = CompareSeq.rmRefAln(mm_locus_stats)
params_singmm = mm_locus_stats['params']
# caseStats
if caseID in CaseStats.keys():
CaseStats[caseID][locus] = {'PS1': CompareSeq.RegionCount(mm_locus_stats['MMannotation'], locus, True)}
CaseStats[caseID][locus]['HLAtyping'] = params_singmm['HLAtyping']
else:
CaseStats[caseID] = {locus:{'PS1': CompareSeq.RegionCount(mm_locus_stats['MMannotation'], locus, True),
'HLAtyping':params_singmm['HLAtyping']}}
if len(params_singmm['HLAtyping']) == 1:
typing = params_singmm['HLAtyping'][0]
if typing in LocusStats.keys():
if CaseStats[caseID][locus]['PS1']['ARS'] > 0:
if 'ARS' in LocusStats[typing].keys():
LocusStats[typing]['ARS'].append(caseID)
else:
LocusStats[typing] = {'ARS': [caseID]}
if CaseStats[caseID][locus]['PS1']['Non_ARS_exon'] >0:
if 'Non_ARS_exon' in LocusStats[typing].keys():
LocusStats[typing]['Non_ARS_exon'].append(caseID)
else:
LocusStats[typing] = {'Non_ARS_exon': [caseID]}
if CaseStats[caseID][locus]['PS1']['Intron'] > 0:
if 'Intron' in LocusStats[typing].keys():
LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'Intron': [caseID]}
else:
LocusStats[typing] = {'ARS': [], 'Non_ARS_exon': [], 'Intron': []}
if CaseStats[caseID][locus]['PS1']['ARS'] > 0:
LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Non_ARS_exon'] >0:
LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Intron'] > 0:
LocusStats[typing]['Intron'].append(caseID)
for key, item in mm_locus_stats['MMannotation'].items():
if key.isdigit():
if item in LocusStats[typing].keys():
LocusStats[typing][item].append(caseID)
else:
LocusStats[typing] = {item: [caseID]}
ClassI_stats = {'CaseStats': CaseStats, 'LocusStats': LocusStats}
IMGTdbIO.save_dict2pickle(ClassI_stats, '../Output/SG41_52/2018/IMGTv3310/SG41_52_DRpair_Stats/ClassI_Stats_0125_'+groupType) #1220_'+groupType)
# Class II
#Group_fname = '../Output/SG41_52/2018/IMGTv3310/SG41_52_DRpair_Stats/ClassI_Stats_0125_' + groupType + '.pkl'
#Stats_Dict = IMGTdbIO.load_pickle2dict(Group_fname)
#CaseStats = Stats_Dict['CaseStats']
#LocusStats = Stats_Dict['LocusStats']
for caseID in group_caseIDs:
#
for locus in ClassII_loci:
bothMM_output = "../Output/SG41_52/2018/IMGTv3310/SG41_52_bothMisMatched_locus_" + locus + "_0125_TargetedAlignment/"
singleMM_output = "../Output/SG41_52/2018/IMGTv3310/SG41_52_singleMisMatched_" + locus + "_0125_TargetedAlignment/"
### Cases where both sequences don't match
if caseID in Matching_cases_stats[locus+'_both_Seqmm']:
mm_file_PS1 = glob.glob(bothMM_output+ 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation_PS1*.pkl')
for file_id in mm_file_PS1:
mm_locus_stats_PS1 = IMGTdbIO.load_pickle2dict(file_id)
mm_locus_stats_PS1 = CompareSeq.rmRefAln(mm_locus_stats_PS1)
#if mm_locus_stats_PS1['SameSeqs']: # if the exons are the same
if 'Exon' in file_id:
for key, item in mm_locus_stats_PS1['MMannotation'].items():
if key.isdigit():
tempItem = item.split('.')
if int(tempItem[1]) <0 and int(tempItem[0][-1])== int(file_id.split('_')[-2][-1]):
tempItem[0] = 'Intron'+ str(int(file_id.split('_')[-2][-1])-1)
mm_locus_stats_PS1['MMannotation'][key] = '.'.join(tempItem)
if caseID in CaseStats.keys():
if locus not in CaseStats[caseID].keys():
CaseStats[caseID][locus] = {}
if 'PS1' not in CaseStats[caseID][locus].keys():
CaseStats[caseID][locus]['PS1'] = CompareSeq.RegionCount(mm_locus_stats_PS1['MMannotation'], locus)
else:
tempStats = CompareSeq.RegionCount(mm_locus_stats_PS1['MMannotation'], locus)
for key, item in tempStats.items():
CaseStats[caseID][locus]['PS1'][key] += item
if 'HLAtyping' not in CaseStats[caseID][locus].keys():
CaseStats[caseID][locus]['HLAtyping'] = mm_locus_stats_PS1['params']['HLAtyping']
else:
CaseStats[caseID] = {locus:{'PS1': CompareSeq.RegionCount(mm_locus_stats_PS1['MMannotation'], locus),
'HLAtyping':mm_locus_stats_PS1['params']['HLAtyping']}}
if len(mm_locus_stats_PS1['params']['HLAtyping']) == 1:
typing = mm_locus_stats_PS1['params']['HLAtyping'][0]
if typing in LocusStats.keys():
if CaseStats[caseID][locus]['PS1']['ARS'] > 0:
if 'ARS' in LocusStats[typing].keys():
LocusStats[typing]['ARS'].append(caseID)
else:
LocusStats[typing] = {'ARS': [caseID]}
#LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Non_ARS_exon'] >0:
if 'Non_ARS_exon' in LocusStats[typing].keys():
LocusStats[typing]['Non_ARS_exon'].append(caseID)
else:
LocusStats[typing] = {'Non_ARS_exon': [caseID]}
#LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Intron'] > 0:
if 'Intron' in LocusStats[typing].keys():
LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'Intron': [caseID]}
#LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'ARS': [], 'Non_ARS_exon': [], 'Intron': []}
if CaseStats[caseID][locus]['PS1']['ARS'] > 0:
LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Non_ARS_exon'] >0:
LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Intron'] > 0:
LocusStats[typing]['Intron'].append(caseID)
for key, item in mm_locus_stats_PS1['MMannotation'].items():
if key.isdigit():
if item in LocusStats[typing].keys():
LocusStats[typing][item].append(caseID)
else:
LocusStats[typing] = {item: [caseID]}
mm_file_PS2 = glob.glob(bothMM_output+ 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation_PS2*.pkl')
for file_id in mm_file_PS2:
mm_locus_stats_PS2 = IMGTdbIO.load_pickle2dict(file_id)
mm_locus_stats_PS2 = CompareSeq.rmRefAln(mm_locus_stats_PS2)
if 'Exon' in file_id:
for key, item in mm_locus_stats_PS2['MMannotation'].items():
if key.isdigit():
tempItem = item.split('.')
if int(tempItem[1]) <0 and int(tempItem[0][-1])== int(file_id.split('_')[-2][-1]):
tempItem[0] = 'Intron'+ str(int(file_id.split('_')[-2][-1])-1)
mm_locus_stats_PS2['MMannotation'][key] = '.'.join(tempItem)
if caseID in CaseStats.keys():
if locus not in CaseStats[caseID].keys():
CaseStats[caseID][locus] = {}
if 'PS2' not in CaseStats[caseID][locus].keys():
CaseStats[caseID][locus]['PS2'] = CompareSeq.RegionCount(mm_locus_stats_PS2['MMannotation'], locus)
else:
tempStats = CompareSeq.RegionCount(mm_locus_stats_PS2['MMannotation'], locus)
for key, item in tempStats.items():
CaseStats[caseID][locus]['PS2'][key] += item
CaseStats[caseID][locus]['HLAtyping'] += mm_locus_stats_PS2['params']['HLAtyping']
else:
CaseStats[caseID] = {locus:{'PS2': CompareSeq.RegionCount(mm_locus_stats_PS2['MMannotation'], locus),
'HLAtyping':mm_locus_stats_PS2['params']['HLAtyping']}}
if len(mm_locus_stats_PS2['params']['HLAtyping']) == 1:
typing = mm_locus_stats_PS2['params']['HLAtyping'][0]
if typing in LocusStats.keys():
if CaseStats[caseID][locus]['PS2']['ARS'] > 0:
if 'ARS' in LocusStats[typing].keys():
LocusStats[typing]['ARS'].append(caseID)
else:
LocusStats[typing] = {'ARS': [caseID]}
#LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS2']['Non_ARS_exon'] >0:
if 'Non_ARS_exon' in LocusStats[typing].keys():
LocusStats[typing]['Non_ARS_exon'].append(caseID)
else:
LocusStats[typing] = {'Non_ARS_exon': [caseID]}
#LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS2']['Intron'] > 0:
if 'Intron' in LocusStats[typing].keys():
LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'Intron': [caseID]}
#LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'ARS': [], 'Non_ARS_exon': [], 'Intron': []}
if CaseStats[caseID][locus]['PS2']['ARS'] > 0:
LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS2']['Non_ARS_exon'] >0:
LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS2']['Intron'] > 0:
LocusStats[typing]['Intron'].append(caseID)
for key, item in mm_locus_stats_PS2['MMannotation'].items():
if key.isdigit():
if item in LocusStats[typing].keys():
LocusStats[typing][item].append(caseID)
else:
LocusStats[typing] = {item: [caseID]}
# Single mismatch cases
elif caseID in Matching_cases_stats[locus+'_one_Seqmm']:
### Cases where only one sequence doesn't match
mm_file_PS = glob.glob(singleMM_output+ 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation*.pkl')
for file_id in mm_file_PS:
mm_locus_stats_PS = IMGTdbIO.load_pickle2dict(file_id)
mm_locus_stats_PS = CompareSeq.rmRefAln(mm_locus_stats_PS)
if 'Exon' in file_id:
for key, item in mm_locus_stats_PS['MMannotation'].items():
if key.isdigit():
tempItem = item.split('.')
if int(tempItem[1]) <0 and int(tempItem[0][-1])== int(file_id.split('_')[-2][-1]):
tempItem[0] = 'Intron'+ str(int(file_id.split('_')[-2][-1])-1)
mm_locus_stats_PS['MMannotation'][key] = '.'.join(tempItem)
if caseID in CaseStats.keys():
if locus not in CaseStats[caseID].keys():
CaseStats[caseID][locus] = {}
if 'PS1' not in CaseStats[caseID][locus].keys():
CaseStats[caseID][locus]['PS1'] = CompareSeq.RegionCount(mm_locus_stats_PS['MMannotation'], locus)
else:
tempStats = CompareSeq.RegionCount(mm_locus_stats_PS['MMannotation'], locus)
for key, item in tempStats.items():
CaseStats[caseID][locus]['PS1'][key] += item
if 'HLAtyping' not in CaseStats[caseID][locus].keys():
CaseStats[caseID][locus]['HLAtyping'] = mm_locus_stats_PS['params']['HLAtyping']
else:
CaseStats[caseID] = {locus:{'PS1': CompareSeq.RegionCount(mm_locus_stats_PS['MMannotation'], locus),
'HLAtyping':mm_locus_stats_PS['params']['HLAtyping']}}
if len(mm_locus_stats_PS['params']['HLAtyping']) == 1:
typing = mm_locus_stats_PS['params']['HLAtyping'][0]
if typing in LocusStats.keys():
if CaseStats[caseID][locus]['PS1']['ARS'] > 0:
if 'ARS' in LocusStats[typing].keys():
LocusStats[typing]['ARS'].append(caseID)
else:
LocusStats[typing] = {'ARS': [caseID]}
#LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Non_ARS_exon'] >0:
if 'Non_ARS_exon' in LocusStats[typing].keys():
LocusStats[typing]['Non_ARS_exon'].append(caseID)
else:
LocusStats[typing] = {'Non_ARS_exon': [caseID]}
#LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Intron'] > 0:
if 'Intron' in LocusStats[typing].keys():
LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'Intron': [caseID]}
#LocusStats[typing]['Intron'].append(caseID)
else:
LocusStats[typing] = {'ARS': [], 'Non_ARS_exon': [], 'Intron': []}
if CaseStats[caseID][locus]['PS1']['ARS'] > 0:
LocusStats[typing]['ARS'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Non_ARS_exon'] >0:
LocusStats[typing]['Non_ARS_exon'].append(caseID)
if CaseStats[caseID][locus]['PS1']['Intron'] > 0:
LocusStats[typing]['Intron'].append(caseID)
for key, item in mm_locus_stats_PS['MMannotation'].items():
if key.isdigit():
if item in LocusStats[typing].keys():
LocusStats[typing][item].append(caseID)
else:
LocusStats[typing] = {item: [caseID]}
ClassII_stats = {'CaseStats': CaseStats, 'LocusStats': LocusStats}
IMGTdbIO.save_dict2pickle(ClassII_stats, '../Output/SG41_52/2018/IMGTv3310/SG41_52_DRpair_Stats/ClassII_Stats_0125_'+groupType)
#ClassI_stats = IMGTdbIO.load_pickle2dict('../Output/SG41_52/2018/IMGTv3310/SG41_52_DRpair_Stats/ClassI_Stats_0125_fiveLoci_paired.pkl')
#ClassII_stats = IMGTdbIO.load_pickle2dict('../Output/SG41_52/2018/IMGTv3310/SG41_52_DRpair_Stats/ClassII_Stats_0125_fiveLoci_paired.pkl')
fiveLociPaired_stats = {'CaseStats': CaseStats, 'LocusStats': LocusStats}
IMGTdbIO.save_dict2pickle(fiveLociPaired_stats, '../Output/SG41_52/2018/IMGTv3310/SG41_52_DRpair_Stats/fiveLoci_paired_Stats_0125_'+groupType)
####### Swapped cases for DQB1
## Swapped case check
if 'PS1' in CaseStats[caseID][locus].keys() and 'PS2' in CaseStats[caseID][locus].keys():
if CaseStats[caseID][locus]['PS1']['ARS'] > 5 and CaseStats[caseID][locus]['PS2']['ARS'] > 5:
DB_fp = '../Output/SG39_DRpairs/SG39_HLA_'+ locus +'_paired.db'
con = sql.connect(DB_fp)
con.row_factory = sql.Row
cur = con.cursor()
t = (caseID,)
cur.execute('SELECT * FROM OriginalSeqs WHERE BMT_caseID = ?', t)
case_records = cur.fetchall()
Sequence = {}
Params = {}
for ind in range(2):
seq1_ID = 'Recipient-PS'+str(ind+1)
seq2_ID = 'Donor-PS'+str(ind+1)
seq1 = case_records[ind][seq1_ID.split('-')[0]]
seq2 = case_records[ind][seq2_ID.split('-')[0]]
HLAtyping_list = case_records[ind]['HLATyping']
tplist = HLAtyping_list.split("+")
HLAtyping = []
for tp in tplist:
if tp.find('[') == -1:
if tp.find('/') != -1:
ambTPlist = tp.split('/')
HLAtyping.extend(ambTPlist)
else:
HLAtyping.append(tp)
else:
possTPlist = re.sub('[\[\'\]]', '',tp) # remove possible characters
possTPlist = possTPlist.split(",")
for item in possTPlist:
if item.find('/') != -1:
item_pos = item.replace(" ", "")
ambTPlist = item_pos.split('/')
HLAtyping.extend(ambTPlist)
else:
#HLAtyping.extend(possTPlist)
HLAtyping.append(item.replace(" ", ""))
# HLAtyping.append(tp)
Sequence[str(ind)]= {seq1_ID: seq1, seq2_ID:seq2}
if ind == 0:
algn_file = mm_locus_stats_PS1['params']['algn_file']
else:
algn_file = mm_locus_stats_PS2['params']['algn_file']
Params[str(ind)] = {'algn_file': algn_file, 'saveFile': True, 'HLAtyping': HLAtyping}
swapped_alignment = CompareSeq.swapPS_comparison(Sequence['0'], Params['0'], Sequence['1'], Params['1'], caseID)
if any("Exon" in s for s in alignment.keys()):
## save results # for multiple Exons
for itemID, itemDict in alignment.items():
saveOBJ = {'seq': Sequence, 'params': params, 'alignment':itemDict, 'MMannotation': annotation[itemID], 'SameSeqs': annotation[itemID]['SameSeqs']}
if annotation[itemID]['SameSeqs']: # same seqs
Output_fname = bothMM_output + 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation_PS'+str(ind+1)+'_'+itemID+'_SameSeqs'
else:
Output_fname = bothMM_output + 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation_PS'+str(ind+1)+'_'+itemID+'_MisMatchSeqs'
IMGTdbIO.save_dict2pickle(saveOBJ, Output_fname)
else:
## save results -- for one single sequence
saveOBJ = {'seq': Sequence, 'params': params, 'alignment':alignment, 'MMannotation': annotation}
Output_fname = bothMM_output + 'CaseID_'+ caseID + '_Locus_' + locus + '_annotation_PS'+str(ind+1)
IMGTdbIO.save_dict2pickle(saveOBJ, Output_fname)
| 59.992126
| 204
| 0.493077
| 2,845
| 30,476
| 5.059051
| 0.073814
| 0.123393
| 0.065865
| 0.056555
| 0.831098
| 0.791079
| 0.767178
| 0.743834
| 0.695546
| 0.668936
| 0
| 0.028235
| 0.380595
| 30,476
| 508
| 205
| 59.992126
| 0.734227
| 0.074846
| 0
| 0.674419
| 0
| 0
| 0.118291
| 0.024129
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.01292
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5080d273a610511207399cb356f74e6810f43f4f
| 2,313
|
py
|
Python
|
models.py
|
water-vapor/how-can-we-be-so-dense-pytorch
|
76132ee80dfc29c9c60e7ca614f8f9415d133b5e
|
[
"MIT"
] | null | null | null |
models.py
|
water-vapor/how-can-we-be-so-dense-pytorch
|
76132ee80dfc29c9c60e7ca614f8f9415d133b5e
|
[
"MIT"
] | null | null | null |
models.py
|
water-vapor/how-can-we-be-so-dense-pytorch
|
76132ee80dfc29c9c60e7ca614f8f9415d133b5e
|
[
"MIT"
] | null | null | null |
import torch
from layers import SparseLinear, KWinner, SparseConv2D
class DenseMLP(torch.nn.Module):
    """Fully-connected baseline: 784 -> 128 -> 64 -> 10 with ReLU activations.

    Assumes flattened inputs of 784 features (presumably 28x28 single-channel
    images — TODO confirm against the training pipeline).
    """

    def __init__(self):
        super().__init__()
        stages = [torch.nn.Flatten()]
        # Two hidden layers, each followed by a ReLU non-linearity.
        for fan_in, fan_out in ((784, 128), (128, 64)):
            stages.append(torch.nn.Linear(fan_in, fan_out))
            stages.append(torch.nn.ReLU())
        # Final projection to the 10 output logits (no activation).
        stages.append(torch.nn.Linear(64, 10))
        self.layers_stack = torch.nn.Sequential(*stages)

    def forward(self, inputs):
        """Run a batch through the layer stack and return the logits."""
        return self.layers_stack(inputs)
class SparseMLP(torch.nn.Module):
    """Sparse counterpart of DenseMLP.

    Same 784 -> 128 -> 64 -> 10 topology, but the hidden layers use
    SparseLinear weights and KWinner activations instead of dense
    Linear + ReLU.
    """

    def __init__(self):
        super().__init__()
        modules = [
            torch.nn.Flatten(),
            SparseLinear(784, 128),
            KWinner(k=40),   # keep the 40 most active units
            SparseLinear(128, 64),
            KWinner(k=20),   # keep the 20 most active units
            torch.nn.Linear(64, 10),
        ]
        self.layers_stack = torch.nn.Sequential(*modules)

    def forward(self, inputs):
        """Run a batch through the layer stack and return the logits."""
        return self.layers_stack(inputs)
class DenseCNN(torch.nn.Module):
    """Dense CNN baseline: one conv + pool stage followed by a two-layer head.

    The flatten size of 5070 equals 30 channels * 13 * 13, which assumes
    28x28 single-channel inputs — TODO confirm against the training pipeline.
    """

    def __init__(self):
        super().__init__()
        feature_extractor = [
            torch.nn.Conv2d(1, 30, (3, 3)),
            torch.nn.MaxPool2d(2, stride=2),
            torch.nn.ReLU(),
        ]
        classifier_head = [
            torch.nn.Flatten(),
            torch.nn.Linear(5070, 150),
            torch.nn.ReLU(),
            torch.nn.Linear(150, 10),
        ]
        self.layers_stack = torch.nn.Sequential(*feature_extractor,
                                                *classifier_head)

    def forward(self, inputs):
        """Run a batch through the layer stack and return the logits."""
        return self.layers_stack(inputs)
class HybridCNN(torch.nn.Module):
    """CNN mixing dense and sparse components.

    Dense convolution with a KWinner activation, a SparseLinear hidden
    layer, and a dense output projection. Same topology as DenseCNN.
    """

    def __init__(self):
        super().__init__()
        modules = [
            torch.nn.Conv2d(1, 30, (3, 3)),
            torch.nn.MaxPool2d(2, stride=2),
            KWinner(k=400),  # keep the 400 most active units
            torch.nn.Flatten(),
            SparseLinear(5070, 150),
            torch.nn.ReLU(),
            torch.nn.Linear(150, 10),
        ]
        self.layers_stack = torch.nn.Sequential(*modules)

    def forward(self, inputs):
        """Run a batch through the layer stack and return the logits."""
        return self.layers_stack(inputs)
class SparseCNN(torch.nn.Module):
    """Fully sparse CNN: sparse conv and linear layers with KWinner units.

    Same topology as DenseCNN/HybridCNN but every learned layer except the
    output projection is a sparse variant.
    """

    def __init__(self):
        super().__init__()
        modules = [
            SparseConv2D(1, 30, (3, 3)),
            torch.nn.MaxPool2d(2, stride=2),
            KWinner(k=400),  # keep the 400 most active units
            torch.nn.Flatten(),
            SparseLinear(5070, 150),
            KWinner(k=50),   # keep the 50 most active units
            torch.nn.Linear(150, 10),
        ]
        self.layers_stack = torch.nn.Sequential(*modules)

    def forward(self, inputs):
        """Run a batch through the layer stack and return the logits."""
        return self.layers_stack(inputs)
| 26.586207
| 54
| 0.543018
| 267
| 2,313
| 4.516854
| 0.168539
| 0.191542
| 0.124378
| 0.066335
| 0.839138
| 0.830017
| 0.767828
| 0.767828
| 0.767828
| 0.767828
| 0
| 0.067688
| 0.322957
| 2,313
| 86
| 55
| 26.895349
| 0.702427
| 0
| 0
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.028571
| 0.071429
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
50a2011cfd3821bb6f5b4c71ac548be24eaacfec
| 100
|
py
|
Python
|
django/mysite/tourit/context_processors.py
|
rishiraj-rpg/MPR--Tour-It
|
923dc55f49848583898b6402824c7bcf6d8ebe7b
|
[
"MIT"
] | null | null | null |
django/mysite/tourit/context_processors.py
|
rishiraj-rpg/MPR--Tour-It
|
923dc55f49848583898b6402824c7bcf6d8ebe7b
|
[
"MIT"
] | null | null | null |
django/mysite/tourit/context_processors.py
|
rishiraj-rpg/MPR--Tour-It
|
923dc55f49848583898b6402824c7bcf6d8ebe7b
|
[
"MIT"
] | 1
|
2022-03-22T17:43:33.000Z
|
2022-03-22T17:43:33.000Z
|
from tourit.models import PlaceType
def sidenav(request):
    """Context processor exposing all PlaceType rows as 'sn'.

    The queryset presumably feeds the side-navigation template —
    verify against the templates that consume 'sn'.
    """
    place_types = PlaceType.objects.all()
    return {'sn': place_types}
| 20
| 40
| 0.75
| 13
| 100
| 5.769231
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 100
| 4
| 41
| 25
| 0.852273
| 0
| 0
| 0
| 0
| 0
| 0.02
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
0f9778ac1d5327b60845e52065892fab09f1e3a8
| 25
|
py
|
Python
|
sniffpy/__init__.py
|
asifmallik/sniffpy
|
0214a2b899e9bd169f782e363a836dfb4dd94bf2
|
[
"MIT"
] | null | null | null |
sniffpy/__init__.py
|
asifmallik/sniffpy
|
0214a2b899e9bd169f782e363a836dfb4dd94bf2
|
[
"MIT"
] | null | null | null |
sniffpy/__init__.py
|
asifmallik/sniffpy
|
0214a2b899e9bd169f782e363a836dfb4dd94bf2
|
[
"MIT"
] | null | null | null |
from .sniff import sniff
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0fa7cbab5735418c9ff13c86764cde19cc52444d
| 8,702
|
py
|
Python
|
pulse2percept/implants/tests/test_prima.py
|
tanyabhatia/pulse2percept
|
b322c7daf22154d60f7abd8adb039c5982824a7c
|
[
"BSD-3-Clause"
] | null | null | null |
pulse2percept/implants/tests/test_prima.py
|
tanyabhatia/pulse2percept
|
b322c7daf22154d60f7abd8adb039c5982824a7c
|
[
"BSD-3-Clause"
] | null | null | null |
pulse2percept/implants/tests/test_prima.py
|
tanyabhatia/pulse2percept
|
b322c7daf22154d60f7abd8adb039c5982824a7c
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
import numpy.testing as npt
from matplotlib.patches import Circle, RegularPolygon
from pulse2percept.implants import (PhotovoltaicPixel, PRIMA, PRIMA75, PRIMA55,
PRIMA40)
def test_PhotovoltaicPixel():
    """Constructor, __slots__, and plot output of a single PhotovoltaicPixel."""
    electrode = PhotovoltaicPixel(0, 1, 2, 3, 4)
    # Positional arguments map onto the x, y, z, r, a attributes in order:
    for attr, expected in zip('xyzra', (0, 1, 2, 3, 4)):
        npt.assert_almost_equal(getattr(electrode, attr), expected)
    # Electrodes rely on __slots__, so there is no per-instance __dict__:
    npt.assert_equal(hasattr(electrode, '__slots__'), True)
    npt.assert_equal(hasattr(electrode, '__dict__'), False)
    # Plotting draws exactly two patches (a RegularPolygon and a Circle),
    # and no text labels:
    ax = electrode.plot()
    npt.assert_equal(len(ax.texts), 0)
    npt.assert_equal(len(ax.patches), 2)
    npt.assert_equal(isinstance(ax.patches[0], RegularPolygon), True)
    npt.assert_equal(isinstance(ax.patches[1], Circle), True)
    # Building a second pixel with the same arguments must not raise:
    PhotovoltaicPixel(0, 1, 2, 3, 4)
@pytest.mark.parametrize('ztype', ('float', 'list'))
@pytest.mark.parametrize('x', (-100, 200))
@pytest.mark.parametrize('y', (-200, 400))
@pytest.mark.parametrize('r', (-45, 60))
def test_PRIMA(ztype, x, y, r):
    """Geometry checks for PRIMA: count, placement, radius, and pitch."""
    spacing = 100  # 85 um pixels with 15 um trenches
    n_elec = 378   # roughly a 12x15 grid, edges trimmed off
    # `z` may be a single height or one height per electrode:
    if ztype == 'float':
        z = -100
    else:
        z = -np.ones(378) * 20
    rot = r * np.pi / 180  # degrees -> radians
    prima = PRIMA(x, y, z=z, rot=rot)
    # Implants use __slots__, so no per-instance __dict__:
    npt.assert_equal(hasattr(prima, '__slots__'), True)
    npt.assert_equal(hasattr(prima, '__dict__'), False)
    # Electrode count:
    npt.assert_equal(prima.n_electrodes, n_elec)
    npt.assert_equal(len(prima.earray.electrodes), n_elec)
    # A6 location in the unrotated device, rotated then offset by (x, y):
    xy = np.array([-616.99, -925.0]).T
    R = np.array([np.cos(rot), -np.sin(rot),
                  np.sin(rot), np.cos(rot)]).reshape((2, 2))
    xy = np.matmul(R, xy)
    npt.assert_almost_equal(prima['A6'].x, xy[0] + x, decimal=2)
    npt.assert_almost_equal(prima['A6'].y, xy[1] + y, decimal=2)
    # Sampled electrodes all report radius 14:
    for name in ['A7', 'B3', 'C5', 'D7', 'E9', 'F11', 'G13', 'H14']:
        npt.assert_almost_equal(prima[name].r, 14)

    def pitch(e1, e2):
        # Euclidean distance between two electrode centers.
        dx = prima[e1].x - prima[e2].x
        dy = prima[e1].y - prima[e2].y
        return np.sqrt(dx ** 2 + dy ** 2)

    # Neighboring electrodes sit exactly one pitch apart:
    npt.assert_almost_equal(pitch('E6', 'F6'), spacing)
    npt.assert_almost_equal(pitch('E7', 'F6'), spacing)
    # A z-list of the wrong length must be rejected:
    with pytest.raises(ValueError):
        PRIMA(0, 0, z=np.ones(16))
@pytest.mark.parametrize('ztype', ('float', 'list'))
@pytest.mark.parametrize('x', (-100, 200))
@pytest.mark.parametrize('y', (-200, 400))
@pytest.mark.parametrize('r', (-45, 60))
def test_PRIMA75(ztype, x, y, r):
    """Geometry checks for PRIMA75: count, placement, radius, and pitch."""
    spacing = 75   # 70 um pixels with 5 um trenches
    n_elec = 142   # roughly a 12x15 grid, edges trimmed off
    # `z` may be a single height or one height per electrode:
    if ztype == 'float':
        z = -100
    else:
        z = -np.ones(142) * 20
    rot = r * np.pi / 180  # degrees -> radians
    prima = PRIMA75(x, y, z=z, rot=rot)
    # Implants use __slots__, so no per-instance __dict__:
    npt.assert_equal(hasattr(prima, '__slots__'), True)
    npt.assert_equal(hasattr(prima, '__dict__'), False)
    # Electrode count:
    npt.assert_equal(len(prima.earray.electrodes), n_elec)
    npt.assert_equal(prima.n_electrodes, n_elec)
    # A6 location in the unrotated device, rotated then offset by (x, y):
    xy = np.array([-200.24, -431.25]).T
    R = np.array([np.cos(rot), -np.sin(rot),
                  np.sin(rot), np.cos(rot)]).reshape((2, 2))
    xy = np.matmul(R, xy)
    npt.assert_almost_equal(prima['A6'].x, xy[0] + x, decimal=2)
    npt.assert_almost_equal(prima['A6'].y, xy[1] + y, decimal=2)
    # Sampled electrodes all report radius 10:
    for name in ['A6', 'B4', 'C5', 'D7', 'E9', 'F11', 'G13', 'H14']:
        npt.assert_almost_equal(prima[name].r, 10)

    def pitch(e1, e2):
        # Euclidean distance between two electrode centers.
        dx = prima[e1].x - prima[e2].x
        dy = prima[e1].y - prima[e2].y
        return np.sqrt(dx ** 2 + dy ** 2)

    # Neighboring electrodes sit exactly one pitch apart:
    npt.assert_almost_equal(pitch('E6', 'F6'), spacing)
    npt.assert_almost_equal(pitch('E7', 'F6'), spacing)
    # A z-list of the wrong length must be rejected:
    with pytest.raises(ValueError):
        PRIMA75(0, 0, z=np.ones(16))
@pytest.mark.parametrize('ztype', ('float', 'list'))
@pytest.mark.parametrize('x', (-100, 200))
@pytest.mark.parametrize('y', (-200, 400))
@pytest.mark.parametrize('r', (-45, 60))
def test_PRIMA55(ztype, x, y, r):
    """Geometry checks for PRIMA55: count, placement, radius, and pitch."""
    spacing = 55   # 50 um pixels with 5 um trenches
    n_elec = 273   # roughly an 18x21 grid, edges trimmed off
    # `z` may be a single height or one height per electrode:
    if ztype == 'float':
        z = -100
    else:
        z = -np.ones(273) * 20
    rot = r * np.pi / 180  # degrees -> radians
    prima = PRIMA55(x, y, z=z, rot=rot)
    # Implants use __slots__, so no per-instance __dict__:
    npt.assert_equal(hasattr(prima, '__slots__'), True)
    npt.assert_equal(hasattr(prima, '__dict__'), False)
    # Electrode count:
    npt.assert_equal(len(prima.earray.electrodes), n_elec)
    npt.assert_equal(prima.n_electrodes, n_elec)
    # C8 location in the unrotated device, rotated then offset by (x, y):
    xy = np.array([-216.58, -371.25]).T
    R = np.array([np.cos(rot), -np.sin(rot),
                  np.sin(rot), np.cos(rot)]).reshape((2, 2))
    xy = np.matmul(R, xy)
    npt.assert_almost_equal(prima['C8'].x, xy[0] + x, decimal=2)
    npt.assert_almost_equal(prima['C8'].y, xy[1] + y, decimal=2)
    # Sampled electrodes all report radius 8:
    for name in ['B12', 'C15', 'D17', 'E19', 'F11', 'G13', 'H14']:
        npt.assert_almost_equal(prima[name].r, 8)

    def pitch(e1, e2):
        # Euclidean distance between two electrode centers.
        dx = prima[e1].x - prima[e2].x
        dy = prima[e1].y - prima[e2].y
        return np.sqrt(dx ** 2 + dy ** 2)

    # Neighboring electrodes sit exactly one pitch apart:
    npt.assert_almost_equal(pitch('E6', 'F6'), spacing)
    npt.assert_almost_equal(pitch('E7', 'F6'), spacing)
    # A z-list of the wrong length must be rejected:
    with pytest.raises(ValueError):
        PRIMA55(0, 0, z=np.ones(16))
@pytest.mark.parametrize('ztype', ('float', 'list'))
@pytest.mark.parametrize('x', (-100, 200))
@pytest.mark.parametrize('y', (-200, 400))
@pytest.mark.parametrize('r', (-45, 60))
def test_PRIMA40(ztype, x, y, r):
    """Geometry checks for PRIMA40: count, placement, radius, and pitch."""
    spacing = 40   # 35 um pixels with 5 um trenches
    n_elec = 532   # roughly a 25x28 grid, edges trimmed off
    # `z` may be a single height or one height per electrode:
    if ztype == 'float':
        z = -100
    else:
        z = -np.ones(532) * 20
    rot = r * np.pi / 180  # degrees -> radians
    prima = PRIMA40(x, y, z=z, rot=rot)
    # Implants use __slots__, so no per-instance __dict__:
    npt.assert_equal(hasattr(prima, '__slots__'), True)
    npt.assert_equal(hasattr(prima, '__dict__'), False)
    # Electrode count:
    npt.assert_equal(len(prima.earray.electrodes), n_elec)
    npt.assert_equal(prima.n_electrodes, n_elec)
    # D16 location in the unrotated device, rotated then offset by (x, y):
    xy = np.array([-20.38, -370.0]).T
    R = np.array([np.cos(rot), -np.sin(rot),
                  np.sin(rot), np.cos(rot)]).reshape((2, 2))
    xy = np.matmul(R, xy)
    npt.assert_almost_equal(prima['D16'].x, xy[0] + x, decimal=2)
    npt.assert_almost_equal(prima['D16'].y, xy[1] + y, decimal=2)
    # Sampled electrodes all report radius 8:
    for name in ['B14', 'C15', 'D17', 'E19', 'F11', 'G13', 'H14']:
        npt.assert_almost_equal(prima[name].r, 8)

    def pitch(e1, e2):
        # Euclidean distance between two electrode centers.
        dx = prima[e1].x - prima[e2].x
        dy = prima[e1].y - prima[e2].y
        return np.sqrt(dx ** 2 + dy ** 2)

    # Neighboring electrodes sit exactly one pitch apart:
    npt.assert_almost_equal(pitch('E6', 'F6'), spacing)
    npt.assert_almost_equal(pitch('E7', 'F6'), spacing)
    # A z-list of the wrong length must be rejected:
    with pytest.raises(ValueError):
        PRIMA40(0, 0, z=np.ones(16))
| 37.029787
| 79
| 0.612503
| 1,326
| 8,702
| 3.919306
| 0.136501
| 0.081393
| 0.072157
| 0.096209
| 0.893208
| 0.845103
| 0.814124
| 0.802963
| 0.775447
| 0.77006
| 0
| 0.063581
| 0.222822
| 8,702
| 234
| 80
| 37.188034
| 0.704865
| 0.199494
| 0
| 0.58156
| 0
| 0
| 0.048473
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.035461
| false
| 0
| 0.035461
| 0
| 0.070922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0fa991bee0b5cc0c29e9f2e3c50d359bb1b104ac
| 106
|
py
|
Python
|
python/pytraph/core/__init__.py
|
toyteam/traph
|
ae80d4e205e447fd8688dc95b76a43507b7fe568
|
[
"MIT"
] | 1
|
2019-07-05T05:41:00.000Z
|
2019-07-05T05:41:00.000Z
|
python/pytraph/core/__init__.py
|
jstzwj/traph
|
ae80d4e205e447fd8688dc95b76a43507b7fe568
|
[
"MIT"
] | null | null | null |
python/pytraph/core/__init__.py
|
jstzwj/traph
|
ae80d4e205e447fd8688dc95b76a43507b7fe568
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytraph.core.dtype
import pytraph.core.tensor
__all__ = ["dtype", "tensor"]
| 26.5
| 29
| 0.688679
| 14
| 106
| 4.928571
| 0.642857
| 0.376812
| 0.492754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.113208
| 106
| 4
| 29
| 26.5
| 0.723404
| 0.198113
| 0
| 0
| 0
| 0
| 0.130952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ba06d6e941fd37fb51bb507ff0294bf1e5632c1b
| 157
|
py
|
Python
|
chrome_dino/carregar_sprites.py
|
jjpaulo2/chrome-dino-pygame
|
f9fd40de343cd6d0e075e302d120f4ba9e09874d
|
[
"MIT"
] | null | null | null |
chrome_dino/carregar_sprites.py
|
jjpaulo2/chrome-dino-pygame
|
f9fd40de343cd6d0e075e302d120f4ba9e09874d
|
[
"MIT"
] | null | null | null |
chrome_dino/carregar_sprites.py
|
jjpaulo2/chrome-dino-pygame
|
f9fd40de343cd6d0e075e302d120f4ba9e09874d
|
[
"MIT"
] | null | null | null |
import pygame, pathlib
def carregar_imagem(imagem: str):
    """Load a sprite image that ships alongside this module.

    Parameters:
        imagem: file name of the sprite inside the ``sprites/`` directory
            next to this file.

    Returns:
        The surface object produced by ``pygame.image.load``.
    """
    # Build the path with pathlib's `/` operator instead of string
    # concatenation, so separators are handled by the library.
    sprite_path = pathlib.Path(__file__).parent.absolute() / "sprites" / imagem
    return pygame.image.load(str(sprite_path))
| 31.4
| 98
| 0.738854
| 20
| 157
| 5.55
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10828
| 157
| 4
| 99
| 39.25
| 0.792857
| 0
| 0
| 0
| 0
| 0
| 0.057325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
e83d12b3b8bcff33e2ae3042aee9f0972e109ad4
| 28,423
|
py
|
Python
|
sesame/jacobian2.py
|
haney411/sesame
|
866aefb048143c5df131310253ce67b4a24283fc
|
[
"BSD-3-Clause"
] | 2
|
2018-04-06T14:50:20.000Z
|
2021-01-19T16:16:15.000Z
|
sesame/jacobian2.py
|
haney411/sesame
|
866aefb048143c5df131310253ce67b4a24283fc
|
[
"BSD-3-Clause"
] | null | null | null |
sesame/jacobian2.py
|
haney411/sesame
|
866aefb048143c5df131310253ce67b4a24283fc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 University of Maryland.
#
# This file is part of Sesame. It is subject to the license terms in the file
# LICENSE.rst found in the top-level directory of this distribution.
import numpy as np
from itertools import chain
from .observables import *
from .defects import defectsJ
def getJ(sys, v, efn, efp):
    """Assemble the sparse Jacobian of the drift-diffusion-Poisson system.

    Parameters
    ----------
    sys:
        System object providing the grid (``xpts``, ``ypts``, ``dx``, ``dy``),
        material parameters (``Nc``, ``Nv``, ``Eg``, ``bl``, ``epsilon``),
        contact recombination velocities (``Scn``, ``Scp``) and
        ``defects_list``.
    v, efn, efp:
        Electrostatic potential and electron/hole quasi-Fermi levels on the
        flattened grid.

    Returns
    -------
    rows, columns, data:
        Lists describing the non-zero Jacobian entries in triplet (COO) form.

    NOTE(review): the index arithmetic (neighbours wrapped by ``Nx*(Ny-1)``)
    shows the system is treated as periodic in the y direction.
    """
    ###########################################################################
    #                  organization of the Jacobian matrix                    #
    ###########################################################################
    # A site with coordinates (i,j) corresponds to a site number s as follows:
    # j = s//Nx
    # i = s - j*Nx
    #
    # Rows for (efn_s, efp_s, v_s)
    # ----------------------------
    # fn_row = 3*s
    # fp_row = 3*s+1
    # fv_row = 3*s+2
    #
    # Columns for (efn_s, efp_s, v_s)
    # -------------------------------
    # efn_smN_col = 3*(s-Nx)
    # efn_sm1_col = 3*(s-1)
    # efn_s_col = 3*s
    # efn_sp1_col = 3*(s+1)
    # efn_spN_col = 3*(s+Nx)
    #
    # efp_smN_col = 3*(s-Nx)+1
    # efp_sm1_col = 3*(s-1)+1
    # efp_s_col = 3*s+1
    # efp_sp1_col = 3*(s+1)+1
    # efp_spN_col = 3*(s+Nx)+1
    #
    # v_smN_col = 3*(s-Nx)+2
    # v_sm1_col = 3*(s-1)+2
    # v_s_col = 3*s+2
    # v_sp1_col = 3*(s+1)+2
    # v_spN_col = 3*(s+Nx)+2

    Nx, Ny = sys.xpts.shape[0], sys.ypts.shape[0]

    # Lists of rows, columns and data that will create the sparse Jacobian.
    # They are plain locals shared with the nested helpers through closure
    # (the previous implementation kept them as module-level globals, which
    # polluted the module namespace and was not thread-safe).
    rows = []
    columns = []
    data = []

    ###########################################################################
    #                       For all sites in the system                       #
    ###########################################################################
    # carrier densities
    n = sys.Nc * np.exp(+sys.bl + efn + v)
    # np.exp (not bare exp) for consistency with the electron density above
    p = sys.Nv * np.exp(-sys.Eg - sys.bl - efp - v)

    # bulk charges
    drho_defn_s = - n
    drho_defp_s = - p
    drho_dv_s = - n - p

    # derivatives of the bulk recombination rates
    dr_defn_s, dr_defp_s, dr_dv_s = get_bulk_rr_derivs(sys, n, p)

    # charge defects: defectsJ mutates the derivative arrays in place
    if len(sys.defects_list) != 0:
        defectsJ(sys, sys.defects_list, n, p, drho_dv_s, drho_defn_s,
                 drho_defp_s, dr_defn_s, dr_defp_s, dr_dv_s)

    # reshape the array as array[y-indices, x-indices]
    _sites = np.arange(Nx*Ny, dtype=int).reshape(Ny, Nx)

    def update(r, c, d):
        # Flatten one level of nesting and append the triplets.
        rows.extend(chain.from_iterable(r))
        columns.extend(chain.from_iterable(c))
        data.extend(chain.from_iterable(d))

    def f_derivatives(carriers, djx_s, djx_sm1, djy_s, djy_smN, dxbar, dybar, sites):
        # The function is written with p indices but is valid for both n and p
        # currents derivatives.
        djx_s_def_s, djx_s_def_sp1, djx_s_dv_s, djx_s_dv_sp1 = djx_s
        djx_sm1_def_sm1, djx_sm1_def_s, djx_sm1_dv_sm1, djx_sm1_dv_s = djx_sm1
        djy_s_def_s, djy_s_def_spN, djy_s_dv_s, djy_s_dv_spN = djy_s
        djy_smN_def_smN, djy_smN_def_s, djy_smN_dv_smN, djy_smN_dv_s = djy_smN

        # compute the derivatives of fp
        def_smN = - djy_smN_def_smN / dybar
        dv_smN = - djy_smN_dv_smN / dybar
        def_sm1 = - djx_sm1_def_sm1 / dxbar
        dv_sm1 = - djx_sm1_dv_sm1 / dxbar
        dv_s = (djx_s_dv_s - djx_sm1_dv_s) / dxbar + \
               (djy_s_dv_s - djy_smN_dv_s) / dybar
        # recombination terms enter with opposite signs for the two carriers
        if carriers == 'holes':
            defn_s = dr_defn_s[sites]
            defp_s = (djx_s_def_s - djx_sm1_def_s) / dxbar + \
                     (djy_s_def_s - djy_smN_def_s) / dybar + dr_defp_s[sites]
            dv_s = dv_s + dr_dv_s[sites]
        if carriers == 'electrons':
            defn_s = (djx_s_def_s - djx_sm1_def_s) / dxbar + \
                     (djy_s_def_s - djy_smN_def_s) / dybar - dr_defn_s[sites]
            defp_s = - dr_defp_s[sites]
            dv_s = dv_s - dr_dv_s[sites]
        def_sp1 = djx_s_def_sp1 / dxbar
        dv_sp1 = djx_s_dv_sp1 / dxbar
        def_spN = djy_s_def_spN / dybar
        dv_spN = djy_s_dv_spN / dybar

        return def_smN, dv_smN, def_sm1, dv_sm1, defn_s, defp_s, dv_s,\
               def_sp1, dv_sp1, def_spN, dv_spN

    def fv_derivatives(dx, dy, dxm1, dym1, epsilon, sites):
        # Derivatives of the Poisson equation residual on the given sites.
        dxbar = (dx + dxm1) / 2
        dybar = (dy + dym1) / 2

        # permittivity averaged on the four half-links around each site
        eps_m1x = .5 * (epsilon[sites - 1] + epsilon[sites])
        eps_p1x = .5 * (epsilon[sites + 1] + epsilon[sites])
        eps_m1y = .5 * (epsilon[sites - Nx] + epsilon[sites])
        eps_p1y = .5 * (epsilon[sites + Nx] + epsilon[sites])

        # compute the derivatives
        dvmN = -eps_m1y * 1. / (dym1 * dybar)
        dvm1 = -eps_m1x * 1. / (dxm1 * dxbar)
        dv = eps_m1x/(dxm1 * dxbar) + eps_p1x/(dx * dxbar) + eps_m1y/(dym1 * dybar) + eps_p1y/(dy * dybar) - drho_dv_s[sites]
        dvp1 = -eps_p1x * 1. / (dx * dxbar)
        dvpN = -eps_p1y * 1. / (dy * dybar)
        defn = - drho_defn_s[sites]
        defp = - drho_defp_s[sites]

        return dvmN, dvm1, dv, defn, defp, dvp1, dvpN

    def bn_derivatives(carriers, djx_sm1, djy_s, djy_smN, dxbar, dybar, sites):
        # Derivatives of the right-contact boundary residual (Robin condition
        # with surface recombination velocities Scn/Scp).
        djx_sm1_def_sm1, djx_sm1_def_s, djx_sm1_dv_sm1, djx_sm1_dv_s = djx_sm1
        djy_s_def_s, djy_s_def_spN, djy_s_dv_s, djy_s_dv_spN = djy_s
        djy_smN_def_smN, djy_smN_def_s, djy_smN_dv_smN, djy_smN_dv_s = djy_smN

        # compute bn derivatives
        def_smN = dxbar/dybar * djy_smN_def_smN
        dv_smN = dxbar/dybar * djy_smN_dv_smN
        def_sm1 = djx_sm1_def_sm1
        dv_sm1 = djx_sm1_dv_sm1
        if carriers == 'electrons':
            defn_s = djx_sm1_def_s + dxbar * (dr_defn_s[sites]
                     - (djy_s_def_s - djy_smN_def_s) / dybar) + sys.Scn[1] * n[sites]
            defp_s = dxbar * dr_defp_s[sites]
            dv_s = djx_sm1_dv_s + dxbar * (dr_dv_s[sites]
                   - (djy_s_dv_s - djy_smN_dv_s) / dybar) + sys.Scn[1] * n[sites]
        if carriers == 'holes':
            defn_s = - dxbar * dr_defn_s[sites]
            defp_s = djx_sm1_def_s + dxbar * (-dr_defp_s[sites]
                     - (djy_s_def_s - djy_smN_def_s) / dybar) + sys.Scp[1] * p[sites]
            dv_s = djx_sm1_dv_s + dxbar * (-dr_dv_s[sites]
                   - (djy_s_dv_s - djy_smN_dv_s) / dybar) + sys.Scp[1] * p[sites]
        def_spN = - dxbar/dybar * djy_s_def_spN
        dv_spN = - dxbar/dybar * djy_s_dv_spN

        return def_smN, dv_smN, def_sm1, dv_sm1, defn_s, defp_s, dv_s,\
               def_spN, dv_spN

    ###########################################################################
    #             inside the system: 0 < i < Nx-1 and 0 < j < Ny-1            #
    ###########################################################################
    # We compute fn, fp, fv derivatives. Those functions are only defined on
    # the inner part of the system. All the edges contain boundary conditions.

    # list of the sites inside the system
    sites = _sites[1:Ny-1, 1:Nx-1].flatten()

    # lattice distances
    dx = np.tile(sys.dx[1:], Ny-2)
    dxm1 = np.tile(sys.dx[:-1], Ny-2)
    dy = np.repeat(sys.dy[1:], Nx-2)
    dym1 = np.repeat(sys.dy[:-1], Nx-2)
    dxbar = (dxm1 + dx) / 2.
    dybar = (dym1 + dy) / 2.

    #------------------------ fn derivatives ----------------------------------
    # get the derivatives of jx_s, jx_sm1, jy_s, jy_smN
    djx_s = get_jn_derivs(sys, efn, v, sites, sites + 1, dx)
    djx_sm1 = get_jn_derivs(sys, efn, v, sites - 1, sites, dxm1)
    djy_s = get_jn_derivs(sys, efn, v, sites, sites + Nx, dy)
    djy_smN = get_jn_derivs(sys, efn, v, sites - Nx, sites, dym1)

    (defn_smN, dv_smN, defn_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defn_sp1, dv_sp1, defn_spN, dv_spN) = \
        f_derivatives('electrons', djx_s, djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns for the inner part of the system
    dfn_rows = np.reshape(np.repeat(3*sites, 11), (len(sites), 11)).tolist()
    dfn_cols = zip(3*(sites-Nx), 3*(sites-Nx)+2, 3*(sites-1), 3*(sites-1)+2,
                   3*sites, 3*sites+1, 3*sites+2, 3*(sites+1), 3*(sites+1)+2,
                   3*(sites+Nx), 3*(sites+Nx)+2)
    dfn_data = zip(defn_smN, dv_smN, defn_sm1, dv_sm1, defn_s, defp_s, dv_s,
                   defn_sp1, dv_sp1, defn_spN, dv_spN)
    update(dfn_rows, dfn_cols, dfn_data)

    #------------------------ fp derivatives ----------------------------------
    # get the derivatives of jx_s, jx_sm1, jy_s, jy_smN
    djx_s = get_jp_derivs(sys, efp, v, sites, sites + 1, dx)
    djx_sm1 = get_jp_derivs(sys, efp, v, sites - 1, sites, dxm1)
    djy_s = get_jp_derivs(sys, efp, v, sites, sites + Nx, dy)
    djy_smN = get_jp_derivs(sys, efp, v, sites - Nx, sites, dym1)

    (defp_smN, dv_smN, defp_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defp_sp1, dv_sp1, defp_spN, dv_spN) = \
        f_derivatives('holes', djx_s, djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns for the inner part of the system
    dfp_rows = np.reshape(np.repeat(3*sites+1, 11), (len(sites), 11)).tolist()
    dfp_cols = zip(3*(sites-Nx)+1, 3*(sites-Nx)+2, 3*(sites-1)+1, 3*(sites-1)+2,
                   3*sites, 3*sites+1, 3*sites+2, 3*(sites+1)+1, 3*(sites+1)+2,
                   3*(sites+Nx)+1, 3*(sites+Nx)+2)
    dfp_data = zip(defp_smN, dv_smN, defp_sm1, dv_sm1, defn_s, defp_s, dv_s,
                   defp_sp1, dv_sp1, defp_spN, dv_spN)
    update(dfp_rows, dfp_cols, dfp_data)

    #---------------- fv derivatives inside the system ------------------------
    dvmN, dvm1, dv, defn, defp, dvp1, dvpN = fv_derivatives(dx, dy, dxm1, dym1, sys.epsilon, sites)

    # update the sparse matrix row and columns for the inner part of the system
    dfv_rows = np.reshape(np.repeat(3*sites+2, 7), (len(sites), 7)).tolist()
    dfv_cols = zip(3*(sites-Nx)+2, 3*(sites-1)+2, 3*sites, 3*sites+1, 3*sites+2,
                   3*(sites+1)+2, 3*(sites+Nx)+2)
    dfv_data = zip(dvmN, dvm1, defn, defp, dv, dvp1, dvpN)
    update(dfv_rows, dfv_cols, dfv_data)

    ###########################################################################
    #                  left boundary: i = 0 and 0 <= j <= Ny-1                #
    ###########################################################################
    # We compute an, ap, av derivatives. Those functions are only defined on
    # the left boundary of the system.

    # list of the sites on the left side
    sites = _sites[:, 0].flatten()

    #-------------------------- an derivatives --------------------------------
    defn_s, defn_sp1, dv_s, dv_sp1 = get_jn_derivs(sys, efn, v, sites, sites+1, sys.dx[0])
    defn_s -= sys.Scn[0] * n[sites]
    dv_s -= sys.Scn[0] * n[sites]

    # update the sparse matrix row and columns
    dan_rows = zip(3*sites, 3*sites, 3*sites, 3*sites)
    dan_cols = zip(3*sites, 3*sites+2, 3*(sites+1), 3*(sites+1)+2)
    dan_data = zip(defn_s, dv_s, defn_sp1, dv_sp1)
    update(dan_rows, dan_cols, dan_data)

    #-------------------------- ap derivatives --------------------------------
    defp_s, defp_sp1, dv_s, dv_sp1 = get_jp_derivs(sys, efp, v, sites, sites+1, sys.dx[0])
    defp_s -= sys.Scp[0] * p[sites]
    dv_s -= sys.Scp[0] * p[sites]

    # update the sparse matrix row and columns
    dap_rows = zip(3*sites+1, 3*sites+1, 3*sites+1, 3*sites+1)
    dap_cols = zip(3*sites+1, 3*sites+2, 3*(sites+1)+1, 3*(sites+1)+2)
    dap_data = zip(defp_s, dv_s, defp_sp1, dv_sp1)
    update(dap_rows, dap_cols, dap_data)

    #-------------------------- av derivatives --------------------------------
    # Dirichlet condition on the potential: identity rows (dv_s = 1)
    dav_rows = (3*sites+2).tolist()
    dav_cols = (3*sites+2).tolist()
    dav_data = np.ones(len(sites)).tolist()

    rows += dav_rows
    columns += dav_cols
    data += dav_data

    ###########################################################################
    #                right boundary: i = Nx-1 and 0 < j < Ny-1                #
    ###########################################################################
    # We compute bn, bp, bv derivatives. Those functions are only defined on
    # the right boundary of the system.

    # list of the sites on the right side
    sites = _sites[1:Ny-1, Nx-1].flatten()

    # dxbar and dybar
    dxm1 = sys.dx[-1]
    dy = sys.dy[1:]
    dym1 = sys.dy[:-1]
    dxbar = np.tile(sys.dx[-1], Ny-2)
    dybar = (dy + dym1) / 2.

    #-------------------------- bn derivatives --------------------------------
    # compute the currents derivatives
    djx_sm1 = get_jn_derivs(sys, efn, v, sites - 1, sites, dxm1)
    djy_s = get_jn_derivs(sys, efn, v, sites, sites + Nx, dy)
    djy_smN = get_jn_derivs(sys, efn, v, sites - Nx, sites, dym1)

    (defn_smN, dv_smN, defn_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defn_spN, dv_spN) = \
        bn_derivatives('electrons', djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dbn_rows = np.reshape(np.repeat(3*sites, 9), (len(sites), 9)).tolist()
    dbn_cols = zip(3*(sites-Nx), 3*(sites-Nx)+2, 3*(sites-1), 3*(sites-1)+2,
                   3*sites, 3*sites+1, 3*sites+2, 3*(sites+Nx), 3*(sites+Nx)+2)
    dbn_data = zip(defn_smN, dv_smN, defn_sm1, dv_sm1, defn_s, defp_s,
                   dv_s, defn_spN, dv_spN)
    update(dbn_rows, dbn_cols, dbn_data)

    #-------------------------- bp derivatives --------------------------------
    # compute the currents derivatives
    djx_sm1 = get_jp_derivs(sys, efp, v, sites - 1, sites, dxm1)
    djy_s = get_jp_derivs(sys, efp, v, sites, sites + Nx, dy)
    djy_smN = get_jp_derivs(sys, efp, v, sites - Nx, sites, dym1)

    (defp_smN, dv_smN, defp_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defp_spN, dv_spN) = \
        bn_derivatives('holes', djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dbp_rows = np.reshape(np.repeat(3*sites+1, 9), (len(sites), 9)).tolist()
    dbp_cols = zip(3*(sites-Nx)+1, 3*(sites-Nx)+2, 3*(sites-1)+1, 3*(sites-1)+2,
                   3*sites, 3*sites+1, 3*sites+2, 3*(sites+Nx)+1, 3*(sites+Nx)+2)
    dbp_data = zip(defp_smN, dv_smN, defp_sm1, dv_sm1, defn_s, defp_s,
                   dv_s, defp_spN, dv_spN)
    update(dbp_rows, dbp_cols, dbp_data)

    #-------------------------- bv derivatives --------------------------------
    dbv_rows = (3*sites+2).tolist()
    dbv_cols = (3*sites+2).tolist()
    dbv_data = np.ones(len(sites)).tolist()  # dv_s = 0

    rows += dbv_rows
    columns += dbv_cols
    data += dbv_data

    ###########################################################################
    #                     right boundary: i = Nx-1 and j = 0                  #
    ###########################################################################
    # list of the sites
    sites = np.array([Nx-1])

    # dxbar and dybar
    dxm1 = sys.dx[-1]
    dy = sys.dy[0]
    dym1 = (sys.dy[0] + sys.dy[-1]) / 2.
    dxbar = sys.dx[-1]
    dybar = (dy + dym1) / 2.

    #-------------------------- bn derivatives --------------------------------
    # compute the currents derivatives.
    # NOTE: the x-neighbour of corner site Nx-1 is Nx-2 (its derivative is
    # written to column 3*(sites-1) below); the previous code passed the
    # periodic y-neighbour pair (Nx*Ny-1, Nx-1) here, an apparent copy-paste
    # of the djy_smN line. Only djy_smN wraps around in y.
    djx_sm1 = get_jn_derivs(sys, efn, v, Nx-2, Nx-1, dxm1)
    djy_s = get_jn_derivs(sys, efn, v, Nx-1, 2*Nx-1, dy)
    djy_smN = get_jn_derivs(sys, efn, v, Nx*Ny-1, Nx-1, dym1)

    (defn_smN, dv_smN, defn_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defn_spN, dv_spN) = \
        bn_derivatives('electrons', djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dbn_rows = np.reshape(np.repeat(3*sites, 9), (len(sites), 9)).tolist()
    dbn_cols = [3*(sites-1), 3*(sites-1)+2, 3*sites, 3*sites+1, 3*sites+2,
                3*(sites+Nx), 3*(sites+Nx)+2, 3*(sites+Nx*(Ny-1)),
                3*(sites+Nx*(Ny-1))+2]
    dbn_data = [defn_sm1, dv_sm1, defn_s[0], defp_s[0], dv_s[0], defn_spN, dv_spN,
                defn_smN, dv_smN]
    update(dbn_rows, dbn_cols, [dbn_data])

    #-------------------------- bp derivatives --------------------------------
    # compute the currents derivatives (same corner-neighbour fix as above)
    djx_sm1 = get_jp_derivs(sys, efp, v, Nx-2, Nx-1, dxm1)
    djy_s = get_jp_derivs(sys, efp, v, Nx-1, 2*Nx-1, dy)
    djy_smN = get_jp_derivs(sys, efp, v, Nx*Ny-1, Nx-1, dym1)

    (defp_smN, dv_smN, defp_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defp_spN, dv_spN) = \
        bn_derivatives('holes', djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dbp_rows = np.reshape(np.repeat(3*sites+1, 9), (len(sites), 9)).tolist()
    dbp_cols = [3*(sites-1)+1, 3*(sites-1)+2, 3*sites, 3*sites+1, 3*sites+2,
                3*(sites+Nx)+1, 3*(sites+Nx)+2, 3*(sites+Nx*(Ny-1))+1,
                3*(sites+Nx*(Ny-1))+2]
    dbp_data = [defp_sm1, dv_sm1, defn_s[0], defp_s[0], dv_s[0], defp_spN, dv_spN,
                defp_smN, dv_smN]
    update(dbp_rows, dbp_cols, [dbp_data])

    #-------------------------- bv derivatives --------------------------------
    dbv_rows = (3*sites+2).tolist()
    dbv_cols = (3*sites+2).tolist()
    dbv_data = np.ones(len(sites)).tolist()  # dv_s = 0

    rows += dbv_rows
    columns += dbv_cols
    data += dbv_data

    ###########################################################################
    #                   right boundary: i = Nx-1 and j = Ny-1                 #
    ###########################################################################
    # list of the sites
    sites = np.array([Nx*Ny-1])

    # dxbar and dybar
    dxm1 = sys.dx[-1]
    dy = (sys.dy[0] + sys.dy[-1]) / 2.
    dym1 = sys.dy[-1]
    dxbar = sys.dx[-1]
    dybar = (dy + dym1) / 2.

    #-------------------------- bn derivatives --------------------------------
    # compute the currents derivatives (djy_s wraps periodically to row j=0)
    djx_sm1 = get_jn_derivs(sys, efn, v, Nx*Ny-2, Nx*Ny-1, dxm1)
    djy_s = get_jn_derivs(sys, efn, v, Nx*Ny-1, Nx-1, dy)
    djy_smN = get_jn_derivs(sys, efn, v, Nx*(Ny-1)-1, Nx*Ny-1, dym1)

    (defn_smN, dv_smN, defn_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defn_spN, dv_spN) = \
        bn_derivatives('electrons', djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dbn_rows = np.reshape(np.repeat(3*sites, 9), (len(sites), 9)).tolist()
    dbn_cols = [3*(sites-Nx*(Ny-1)), 3*(sites-Nx*(Ny-1))+2, 3*(sites-Nx),
                3*(sites-Nx)+2, 3*(sites-1), 3*(sites-1)+2, 3*sites, 3*sites+1, 3*sites+2]
    dbn_data = [defn_spN, dv_spN, defn_smN, dv_smN, defn_sm1, dv_sm1,
                defn_s[0], defp_s[0], dv_s[0]]
    update(dbn_rows, dbn_cols, [dbn_data])

    #-------------------------- bp derivatives --------------------------------
    # compute the currents derivatives
    djx_sm1 = get_jp_derivs(sys, efp, v, Nx*Ny-2, Nx*Ny-1, dxm1)
    djy_s = get_jp_derivs(sys, efp, v, Nx*Ny-1, Nx-1, dy)
    djy_smN = get_jp_derivs(sys, efp, v, Nx*(Ny-1)-1, Nx*Ny-1, dym1)

    (defp_smN, dv_smN, defp_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defp_spN, dv_spN) = \
        bn_derivatives('holes', djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dbp_rows = np.reshape(np.repeat(3*sites+1, 9), (len(sites), 9)).tolist()
    dbp_cols = [3*(sites-Nx*(Ny-1))+1, 3*(sites-Nx*(Ny-1))+2, 3*(sites-Nx)+1,
                3*(sites-Nx)+2, 3*(sites-1)+1, 3*(sites-1)+2, 3*sites, 3*sites+1, 3*sites+2]
    dbp_data = [defp_spN, dv_spN, defp_smN, dv_smN, defp_sm1, dv_sm1,
                defn_s[0], defp_s[0], dv_s[0]]
    update(dbp_rows, dbp_cols, [dbp_data])

    #-------------------------- bv derivatives --------------------------------
    dbv_rows = (3*sites+2).tolist()
    dbv_cols = (3*sites+2).tolist()
    dbv_data = np.ones(len(sites)).tolist()  # dv_s = 0

    rows += dbv_rows
    columns += dbv_cols
    data += dbv_data

    ###########################################################################
    #                   boundary: 0 < i < Nx-1 and j = 0                      #
    ###########################################################################
    # We apply drift diffusion equations with the periodic boundary conditions.

    # list of the sites inside the system
    sites = _sites[0, 1:Nx-1].flatten()

    # lattice distances
    dx = sys.dx[1:]
    dxm1 = sys.dx[:-1]
    dy = np.repeat(sys.dy[0], Nx-2)
    dym1 = np.repeat((sys.dy[0] + sys.dy[-1])/2., Nx-2)
    dxbar = (dxm1 + dx) / 2.
    dybar = (dym1 + dy) / 2.

    #------------------------ fn derivatives ----------------------------------
    # get the derivatives of jx_s, jx_sm1, jy_s, jy_smN
    djx_s = get_jn_derivs(sys, efn, v, sites, sites + 1, dx)
    djx_sm1 = get_jn_derivs(sys, efn, v, sites - 1, sites, dxm1)
    djy_s = get_jn_derivs(sys, efn, v, sites, sites + Nx, dy)
    djy_smN = get_jn_derivs(sys, efn, v, sites + Nx*(Ny-1), sites, dym1)

    (defn_smN, dv_smN, defn_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defn_sp1, dv_sp1, defn_spN, dv_spN) = \
        f_derivatives('electrons', djx_s, djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dfn_rows = np.reshape(np.repeat(3*sites, 11), (len(sites), 11)).tolist()
    dfn_cols = zip(3*(sites-1), 3*(sites-1)+2, 3*sites, 3*sites+1, 3*sites+2,
                   3*(sites+1), 3*(sites+1)+2, 3*(sites+Nx), 3*(sites+Nx)+2,
                   3*(sites+Nx*(Ny-1)), 3*(sites+Nx*(Ny-1))+2)
    dfn_data = zip(defn_sm1, dv_sm1, defn_s, defp_s, dv_s, defn_sp1, dv_sp1,
                   defn_spN, dv_spN, defn_smN, dv_smN)
    update(dfn_rows, dfn_cols, dfn_data)

    #------------------------ fp derivatives ----------------------------------
    # get the derivatives of jx_s, jx_sm1, jy_s, jy_smN
    djx_s = get_jp_derivs(sys, efp, v, sites, sites+1, dx)
    djx_sm1 = get_jp_derivs(sys, efp, v, sites - 1, sites, dxm1)
    djy_s = get_jp_derivs(sys, efp, v, sites, sites+Nx, dy)
    djy_smN = get_jp_derivs(sys, efp, v, sites + Nx*(Ny-1), sites, dym1)

    (defp_smN, dv_smN, defp_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defp_sp1, dv_sp1, defp_spN, dv_spN) = \
        f_derivatives('holes', djx_s, djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dfp_rows = np.reshape(np.repeat(3*sites+1, 11), (len(sites), 11)).tolist()
    dfp_cols = zip(3*(sites-1)+1, 3*(sites-1)+2, 3*sites, 3*sites+1, 3*sites+2,
                   3*(sites+1)+1, 3*(sites+1)+2, 3*(sites+Nx)+1,
                   3*(sites+Nx)+2, 3*(sites+Nx*(Ny-1))+1, 3*(sites+Nx*(Ny-1))+2)
    dfp_data = zip(defp_sm1, dv_sm1, defn_s, defp_s, dv_s, defp_sp1, dv_sp1,
                   defp_spN, dv_spN, defp_smN, dv_smN)
    update(dfp_rows, dfp_cols, dfp_data)

    #---------------- fv derivatives on the j = 0 boundary --------------------
    # fv_derivatives() cannot be reused here: the y-neighbour below this row
    # is the periodic image at sites + Nx*(Ny-1), so eps_m1y must be
    # averaged with that wrapped site.
    eps_m1x = .5 * (sys.epsilon[sites - 1] + sys.epsilon[sites])
    eps_p1x = .5 * (sys.epsilon[sites + 1] + sys.epsilon[sites])
    eps_m1y = .5 * (sys.epsilon[sites + Nx * (Ny - 1)] + sys.epsilon[sites])
    eps_p1y = .5 * (sys.epsilon[sites + Nx] + sys.epsilon[sites])

    dvmN = -eps_m1y * 1. / (dym1 * dybar)
    dvm1 = -eps_m1x * 1. / (dxm1 * dxbar)
    dv = eps_m1x / (dxm1 * dxbar) + eps_p1x / (dx * dxbar) + eps_m1y / (dym1 * dybar) + eps_p1y / (dy * dybar) - \
         drho_dv_s[sites]
    dvp1 = -eps_p1x * 1. / (dx * dxbar)
    dvpN = -eps_p1y * 1. / (dy * dybar)
    defn = - drho_defn_s[sites]
    defp = - drho_defp_s[sites]

    # update the sparse matrix row and columns
    dfv_rows = np.reshape(np.repeat(3*sites+2, 7), (len(sites), 7)).tolist()
    dfv_cols = zip(3*(sites-1)+2, 3*sites, 3*sites+1, 3*sites+2, 3*(sites+1)+2,
                   3*(sites+Nx)+2, 3*(sites+Nx*(Ny-1))+2)
    dfv_data = zip(dvm1, defn, defp, dv, dvp1, dvpN, dvmN)
    update(dfv_rows, dfv_cols, dfv_data)

    ###########################################################################
    #                  boundary: 0 < i < Nx-1 and j = Ny-1                    #
    ###########################################################################
    # We apply drift diffusion equations with the periodic boundary conditions.

    # list of the sites inside the system
    sites = _sites[Ny-1, 1:Nx-1].flatten()

    # lattice distances
    dx = sys.dx[1:]
    dxm1 = sys.dx[:-1]
    dy = np.repeat((sys.dy[0] + sys.dy[-1])/2., Nx-2)
    dym1 = np.repeat(sys.dy[-1], Nx-2)
    dxbar = (dxm1 + dx) / 2.
    dybar = (dym1 + dy) / 2.

    #------------------------ fn derivatives ----------------------------------
    # get the derivatives of jx_s, jx_sm1, jy_s, jy_smN
    djx_s = get_jn_derivs(sys, efn, v, sites, sites+1, dx)
    djx_sm1 = get_jn_derivs(sys, efn, v, sites-1, sites, dxm1)
    djy_s = get_jn_derivs(sys, efn, v, sites, sites - Nx*(Ny-1), dy)
    djy_smN = get_jn_derivs(sys, efn, v, sites-Nx, sites, dym1)

    (defn_smN, dv_smN, defn_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defn_sp1, dv_sp1, defn_spN, dv_spN) = \
        f_derivatives('electrons', djx_s, djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dfn_rows = np.reshape(np.repeat(3*sites, 11), (len(sites), 11)).tolist()
    dfn_cols = zip(3*(sites-Nx*(Ny-1)), 3*(sites-Nx*(Ny-1))+2, 3*(sites-Nx),
                   3*(sites-Nx)+2, 3*(sites-1), 3*(sites-1)+2, 3*sites,
                   3*sites+1, 3*sites+2, 3*(sites+1), 3*(sites+1)+2)
    dfn_data = zip(defn_spN, dv_spN, defn_smN, dv_smN, defn_sm1, dv_sm1,
                   defn_s, defp_s, dv_s, defn_sp1, dv_sp1)
    update(dfn_rows, dfn_cols, dfn_data)

    #------------------------ fp derivatives ----------------------------------
    # get the derivatives of jx_s, jx_sm1, jy_s, jy_smN
    djx_s = get_jp_derivs(sys, efp, v, sites, sites+1, dx)
    djx_sm1 = get_jp_derivs(sys, efp, v, sites-1, sites, dxm1)
    djy_s = get_jp_derivs(sys, efp, v, sites, sites - Nx*(Ny-1), dy)
    djy_smN = get_jp_derivs(sys, efp, v, sites-Nx, sites, dym1)

    (defp_smN, dv_smN, defp_sm1, dv_sm1, defn_s, defp_s, dv_s,
     defp_sp1, dv_sp1, defp_spN, dv_spN) = \
        f_derivatives('holes', djx_s, djx_sm1, djy_s, djy_smN, dxbar, dybar, sites)

    # update the sparse matrix row and columns
    dfp_rows = np.reshape(np.repeat(3*sites+1, 11), (len(sites), 11)).tolist()
    dfp_cols = zip(3*(sites-Nx*(Ny-1))+1, 3*(sites-Nx*(Ny-1))+2,
                   3*(sites-Nx)+1, 3*(sites-Nx)+2, 3*(sites-1)+1, 3*(sites-1)+2,
                   3*sites, 3*sites+1, 3*sites+2, 3*(sites+1)+1, 3*(sites+1)+2)
    dfp_data = zip(defp_spN, dv_spN, defp_smN, dv_smN, defp_sm1, dv_sm1,
                   defn_s, defp_s, dv_s, defp_sp1, dv_sp1)
    update(dfp_rows, dfp_cols, dfp_data)

    #---------------- fv derivatives on the j = Ny-1 boundary -----------------
    # As on the j = 0 edge, the y-neighbour above this row wraps around
    # periodically (sites - Nx*(Ny-1)), so eps_p1y uses the wrapped site.
    eps_m1x = .5 * (sys.epsilon[sites - 1] + sys.epsilon[sites])
    eps_p1x = .5 * (sys.epsilon[sites + 1] + sys.epsilon[sites])
    eps_m1y = .5 * (sys.epsilon[sites - Nx] + sys.epsilon[sites])
    eps_p1y = .5 * (sys.epsilon[sites - Nx * (Ny - 1)] + sys.epsilon[sites])

    dvmN = -eps_m1y * 1. / (dym1 * dybar)
    dvm1 = -eps_m1x * 1. / (dxm1 * dxbar)
    dv = eps_m1x / (dxm1 * dxbar) + eps_p1x / (dx * dxbar) + eps_m1y / (dym1 * dybar) + eps_p1y / (dy * dybar) - \
         drho_dv_s[sites]
    dvp1 = -eps_p1x * 1. / (dx * dxbar)
    dvpN = -eps_p1y * 1. / (dy * dybar)
    defn = - drho_defn_s[sites]
    defp = - drho_defp_s[sites]

    # update the sparse matrix row and columns
    dfv_rows = np.reshape(np.repeat(3*sites+2, 7), (len(sites), 7)).tolist()
    dfv_cols = zip(3*(sites-Nx*(Ny-1))+2, 3*(sites-Nx)+2, 3*(sites-1)+2,
                   3*sites, 3*sites+1, 3*sites+2, 3*(sites+1)+2)
    dfv_data = zip(dvpN, dvmN, dvm1, defn, defp, dv, dvp1)
    update(dfv_rows, dfv_cols, dfv_data)

    return rows, columns, data
| 40.896403
| 125
| 0.535517
| 4,504
| 28,423
| 3.152087
| 0.046847
| 0.076072
| 0.0355
| 0.016341
| 0.871945
| 0.836515
| 0.799887
| 0.764739
| 0.741776
| 0.71825
| 0
| 0.04419
| 0.238856
| 28,423
| 694
| 126
| 40.955331
| 0.612046
| 0.213911
| 0
| 0.459459
| 0
| 0
| 0.005372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013514
| false
| 0
| 0.010811
| 0
| 0.035135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e84eabbd460f2f829dad15e2a6f8d4a4f2a0739f
| 14,581
|
py
|
Python
|
gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py
|
NeCTAR-RC/gnocchi
|
aa2e5d1ce03291d492808b60c674537733d3f1a9
|
[
"Apache-2.0"
] | null | null | null |
gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py
|
NeCTAR-RC/gnocchi
|
aa2e5d1ce03291d492808b60c674537733d3f1a9
|
[
"Apache-2.0"
] | null | null | null |
gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py
|
NeCTAR-RC/gnocchi
|
aa2e5d1ce03291d492808b60c674537733d3f1a9
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial base for Gnocchi 1.0.0
Revision ID: 1c98ac614015
Revises:
Create Date: 2015-04-27 16:05:13.530625
"""
# revision identifiers, used by Alembic.
revision = '1c98ac614015'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
import gnocchi.indexer.sqlalchemy_base
def upgrade():
    """Create the initial Gnocchi 1.0.0 schema.

    Builds the base ``resource`` table, its per-type child tables (volume,
    instance, stack, ...), the mirrored ``*_history`` tables keyed by
    ``resource_history.revision``, and the archive-policy / metric tables.
    Creation order matters: parent tables must exist before the tables that
    declare foreign keys onto them.
    """
    # Root table: every tracked resource, regardless of concrete type.
    op.create_table(
        'resource',
        sa.Column('type', sa.Enum('generic', 'instance', 'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False),
        sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('started_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
        sa.Column('revision_start', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
        sa.Column('ended_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=True),
        sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_resource_id', 'resource', ['id'], unique=False)
    # Archive policies and the rules that map metric names to a policy.
    op.create_table(
        'archive_policy',
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('back_window', sa.Integer(), nullable=False),
        sa.Column('definition', gnocchi.indexer.sqlalchemy_base.ArchivePolicyDefinitionType(), nullable=False),
        sa.Column('aggregation_methods', gnocchi.indexer.sqlalchemy_base.SetType(), nullable=False),
        sa.PrimaryKeyConstraint('name'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_archive_policy_name', 'archive_policy', ['name'], unique=False)
    # Per-type child tables: each shares its primary key with resource.id
    # (joined-table inheritance) and cascades on resource deletion.
    op.create_table(
        'volume',
        sa.Column('display_name', sa.String(length=255), nullable=False),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_volume_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_volume_id', 'volume', ['id'], unique=False)
    op.create_table(
        'instance',
        sa.Column('flavor_id', sa.Integer(), nullable=False),
        sa.Column('image_ref', sa.String(length=255), nullable=False),
        sa.Column('host', sa.String(length=255), nullable=False),
        sa.Column('display_name', sa.String(length=255), nullable=False),
        sa.Column('server_group', sa.String(length=255), nullable=True),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_instance_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_instance_id', 'instance', ['id'], unique=False)
    op.create_table(
        'stack',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_stack_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_stack_id', 'stack', ['id'], unique=False)
    # RESTRICT: a policy referenced by a rule cannot be dropped.
    op.create_table(
        'archive_policy_rule',
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('archive_policy_name', sa.String(length=255), nullable=False),
        sa.Column('metric_pattern', sa.String(length=255), nullable=False),
        sa.ForeignKeyConstraint(['archive_policy_name'], ['archive_policy.name'], name="fk_archive_policy_rule_archive_policy_name_archive_policy_name", ondelete='RESTRICT'),
        sa.PrimaryKeyConstraint('name'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_archive_policy_rule_name', 'archive_policy_rule', ['name'], unique=False)
    op.create_table(
        'swift_account',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_swift_account_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_swift_account_id', 'swift_account', ['id'], unique=False)
    op.create_table(
        'ceph_account',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_ceph_account_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_ceph_account_id', 'ceph_account', ['id'], unique=False)
    op.create_table(
        'ipmi',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_ipmi_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_ipmi_id', 'ipmi', ['id'], unique=False)
    op.create_table(
        'image',
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('container_format', sa.String(length=255), nullable=False),
        sa.Column('disk_format', sa.String(length=255), nullable=False),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_image_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_image_id', 'image', ['id'], unique=False)
    # History mirror of resource: one row per revision of a resource; the
    # integer 'revision' is the primary key, 'id' points back at the live row.
    op.create_table(
        'resource_history',
        sa.Column('type', sa.Enum('generic', 'instance', 'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False),
        sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('started_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
        sa.Column('revision_start', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
        sa.Column('ended_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=True),
        sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.Column('revision_end', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_resource_history_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_resource_history_id', 'resource_history', ['id'], unique=False)
    op.create_table(
        'identity',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_identity_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_identity_id', 'identity', ['id'], unique=False)
    op.create_table(
        'network',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_network_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_network_id', 'network', ['id'], unique=False)
    # Metrics: bound to an archive policy (RESTRICT) and optionally to a
    # resource (CASCADE); (resource_id, name) must be unique.
    op.create_table(
        'metric',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.Column('archive_policy_name', sa.String(length=255), nullable=False),
        sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('resource_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.ForeignKeyConstraint(['archive_policy_name'], ['archive_policy.name'], name="fk_metric_archive_policy_name_archive_policy_name", ondelete='RESTRICT'),
        sa.ForeignKeyConstraint(['resource_id'], ['resource.id'], name="fk_metric_resource_id_resource_id", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('resource_id', 'name', name='uniq_metric0resource_id0name'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_metric_id', 'metric', ['id'], unique=False)
    # Per-type history tables: keyed on resource_history.revision, cascading
    # when the history row disappears.
    op.create_table(
        'identity_history',
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_identity_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_identity_history_revision', 'identity_history', ['revision'], unique=False)
    op.create_table(
        'instance_history',
        sa.Column('flavor_id', sa.Integer(), nullable=False),
        sa.Column('image_ref', sa.String(length=255), nullable=False),
        sa.Column('host', sa.String(length=255), nullable=False),
        sa.Column('display_name', sa.String(length=255), nullable=False),
        sa.Column('server_group', sa.String(length=255), nullable=True),
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_instance_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_instance_history_revision', 'instance_history', ['revision'], unique=False)
    op.create_table(
        'network_history',
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_network_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_network_history_revision', 'network_history', ['revision'], unique=False)
    op.create_table(
        'swift_account_history',
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_swift_account_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_swift_account_history_revision', 'swift_account_history', ['revision'], unique=False)
    op.create_table(
        'ceph_account_history',
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_ceph_account_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_ceph_account_history_revision', 'ceph_account_history', ['revision'], unique=False)
    op.create_table(
        'ipmi_history',
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_ipmi_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_ipmi_history_revision', 'ipmi_history', ['revision'], unique=False)
    op.create_table(
        'image_history',
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('container_format', sa.String(length=255), nullable=False),
        sa.Column('disk_format', sa.String(length=255), nullable=False),
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_image_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_image_history_revision', 'image_history', ['revision'], unique=False)
    op.create_table(
        'stack_history',
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_stack_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_stack_history_revision', 'stack_history', ['revision'], unique=False)
    op.create_table(
        'volume_history',
        sa.Column('display_name', sa.String(length=255), nullable=False),
        sa.Column('revision', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_volume_history_resource_history_revision", ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('revision'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_volume_history_revision', 'volume_history', ['revision'], unique=False)
| 54.406716
| 189
| 0.729031
| 1,810
| 14,581
| 5.643646
| 0.095028
| 0.054038
| 0.077827
| 0.06373
| 0.84464
| 0.842095
| 0.812237
| 0.767597
| 0.738815
| 0.723642
| 0
| 0.010786
| 0.1098
| 14,581
| 267
| 190
| 54.610487
| 0.776194
| 0.048419
| 0
| 0.548117
| 0
| 0
| 0.275464
| 0.107269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004184
| false
| 0
| 0.016736
| 0
| 0.020921
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8591f3ade3921c59c209e435e1a1cd0a5035ea4
| 2,247
|
py
|
Python
|
transliteration.py
|
HALLOWe3n/UA-to-EN-Transliteration
|
dfdf2baa2396a4d329e79e65000a26afe7818088
|
[
"MIT"
] | null | null | null |
transliteration.py
|
HALLOWe3n/UA-to-EN-Transliteration
|
dfdf2baa2396a4d329e79e65000a26afe7818088
|
[
"MIT"
] | null | null | null |
transliteration.py
|
HALLOWe3n/UA-to-EN-Transliteration
|
dfdf2baa2396a4d329e79e65000a26afe7818088
|
[
"MIT"
] | null | null | null |
# Multi-character sequences must be substituted before the single-character
# map runs, exactly as the original chained .replace() calls did: e.g.
# "ія" -> "ia" (otherwise "і"+"я" would yield "iia") and the special
# combination "зг" -> "zgh". All outputs are ASCII, so the passes cannot
# interfere with each other.
_MULTI_CHAR = (
    ("іє", 'ie'), ("Іє", 'Ie'),
    ("ія", 'ia'), ("Ія", 'Ia'),
    ("зг", 'zgh'), ("Зг", 'Zgh'),
    ("ьо", 'io'),
)

# Single-character mapping, applied in one C-level pass via str.translate
# instead of ~64 chained .replace() scans over the whole string.
_SINGLE_CHAR = str.maketrans({
    "а": 'a', "б": 'b', "в": 'v', "г": 'h', "ґ": 'g', "д": 'd', "е": 'e',
    "є": 'ie', "ж": 'zh', "з": 'z', "и": 'y', "і": 'i', "ї": 'i', "й": 'i',
    "к": 'k', "л": 'l', "м": 'm', "н": 'n', "о": 'o', "п": 'p', "р": 'r',
    "с": 's', "т": 't', "у": 'u', "ф": 'f', "х": 'kh', "ц": 'ts', "ч": 'ch',
    "ш": 'sh', "щ": 'sch', "ь": '', "ю": 'iu', "я": 'ia',
    "А": 'A', "Б": 'B', "В": 'V', "Г": 'H', "Ґ": 'G', "Д": 'D', "Е": 'E',
    "Є": 'Ie', "Ж": 'Zh', "З": 'Z', "И": 'Y', "І": 'I', "Ї": 'I', "Й": 'I',
    "К": 'K', "Л": 'L', "М": 'M', "Н": 'N', "О": 'O', "П": 'P', "Р": 'R',
    "С": 'S', "Т": 'T', "У": 'U', "Ф": 'F', "Х": 'Kh', "Ц": 'Ts', "Ч": 'Ch',
    "Ш": 'Sh', "Щ": 'Sch', "Ь": '', "Ю": 'Iu', "Я": 'Ia',
})


def transliteration(text: str) -> str:
    """Transliterate Ukrainian Cyrillic text into Latin script.

    Characters outside the mapping (Latin letters, digits, punctuation,
    whitespace) pass through unchanged; the soft sign "ь"/"Ь" is dropped.

    :param text: input string, possibly containing Ukrainian Cyrillic.
    :return: the transliterated string.
    """
    for src, dst in _MULTI_CHAR:
        text = text.replace(src, dst)
    return text.translate(_SINGLE_CHAR)
if __name__ == '__main__':
    # Bug fix: the original called `translit(...)`, which is undefined and
    # raised NameError — the function defined above is `transliteration`.
    print(transliteration("Ірпінь"))
| 28.443038
| 37
| 0.339564
| 229
| 2,247
| 3.296943
| 0.353712
| 0.063576
| 0.029139
| 0.047682
| 0.810596
| 0.810596
| 0.810596
| 0.810596
| 0.810596
| 0.810596
| 0
| 0
| 0.361816
| 2,247
| 78
| 38
| 28.807692
| 0.526499
| 0
| 0
| 0
| 0
| 0
| 0.086337
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013158
| false
| 0
| 0
| 0.013158
| 0.026316
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e85cda2ddfb268ae2c5888ae2565ff5f50640c7b
| 233
|
py
|
Python
|
juliany_pizza/orders/admin.py
|
kzborisov/Juliany-Pizza
|
4ebc0b21e314b244048df79e4858f30447b43f8b
|
[
"MIT"
] | null | null | null |
juliany_pizza/orders/admin.py
|
kzborisov/Juliany-Pizza
|
4ebc0b21e314b244048df79e4858f30447b43f8b
|
[
"MIT"
] | 9
|
2022-03-23T13:13:23.000Z
|
2022-03-28T13:40:20.000Z
|
juliany_pizza/orders/admin.py
|
kzborisov/Juliany-Pizza
|
4ebc0b21e314b244048df79e4858f30447b43f8b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from juliany_pizza.orders.models import Order
class OrderAdmin(admin.ModelAdmin):
    """Admin for the Order model; default ModelAdmin behaviour, no overrides."""


# Explicit registration on the default admin site — equivalent to the
# @admin.register(Order) decorator form.
admin.site.register(Order, OrderAdmin)
# @admin.register(OrderItem)
# class OrderItemAdmin(admin.ModelAdmin):
# pass
| 17.923077
| 45
| 0.772532
| 28
| 233
| 6.392857
| 0.607143
| 0.145251
| 0.212291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133047
| 233
| 12
| 46
| 19.416667
| 0.886139
| 0.321888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e87a31a79153ad251d1817df0aca5b1ed38eb0a2
| 15,599
|
py
|
Python
|
test/test_objects.py
|
vercity/czsc
|
7a372baa3a550b18ff319008ac3fcab0f3faa684
|
[
"MIT"
] | 1
|
2022-02-22T06:31:40.000Z
|
2022-02-22T06:31:40.000Z
|
test/test_objects.py
|
vercity/czsc
|
7a372baa3a550b18ff319008ac3fcab0f3faa684
|
[
"MIT"
] | 1
|
2021-09-25T02:32:39.000Z
|
2021-09-25T02:32:39.000Z
|
test/test_objects.py
|
vercity/czsc
|
7a372baa3a550b18ff319008ac3fcab0f3faa684
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from collections import OrderedDict
import pandas as pd
from czsc.objects import Signal, Factor, Event, Freq, Operate, PositionLong, PositionShort
def test_signal():
    """Exercise Signal construction, string form, key, equality and is_match."""
    # k2 omitted -> rendered as the wildcard "任意" in str(s); key drops wildcards.
    s = Signal(k1="1分钟", k3="倒1形态", v1="类一买", v2="七笔", v3="基础型", score=3)
    assert str(s) == "Signal('1分钟_任意_倒1形态_类一买_七笔_基础型_3')"
    assert s.key == "1分钟_倒1形态"

    # Round-trip: a Signal parsed from its own string form compares equal.
    s1 = Signal(signal='1分钟_任意_倒1形态_类一买_七笔_基础型_3')
    assert s == s1

    # is_match succeeds only when the stored value matches on every part.
    assert s.is_match({"1分钟_倒1形态": "类一买_七笔_基础型_3"})
    assert not s.is_match({"1分钟_倒1形态": "类一买_七笔_特例一_3"})
    assert not s.is_match({"1分钟_倒1形态": "类一买_九笔_基础型_3"})

    # With k1..k3 given, the v-parts default to the wildcard "任意".
    s = Signal(k1="1分钟", k2="倒1形态", k3="类一买", score=3)
    assert str(s) == "Signal('1分钟_倒1形态_类一买_任意_任意_任意_3')"
    assert s.key == "1分钟_倒1形态_类一买"

    # score outside 0~100 raises ValueError with a fixed (Chinese) message.
    try:
        s = Signal(k1="1分钟", k2="倒1形态", k3="类一买", score=101)
    except ValueError as e:
        assert str(e) == 'score 必须在0~100之间'
def test_factor():
    """Exercise Factor.is_match against signals_all / signals_any / signals_not."""
    freq = Freq.F15
    s = OrderedDict()
    default_signals = [
        Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
        Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他'),
        Signal(k1=str(freq.value), k2="倒0笔", k3="三K形态", v1="顶分型", v2='其他', v3='其他'),
        Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="其他", v2='其他', v3='其他'),
        Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.2", v2='其他', v3='其他'),
    ]
    for signal in default_signals:
        s[signal.key] = signal.value

    # Both signals_all entries are present in s -> match.
    factor = Factor(
        name="单测",
        signals_all=[
            Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
            Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')
        ]
    )
    assert factor.is_match(s)

    # signals_any contains one signal that is present -> still a match.
    factor = Factor(
        name="单测",
        signals_all=[
            Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
            Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')
        ],
        signals_any=[
            Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.2", v2='其他', v3='其他')
        ]
    )
    assert factor.is_match(s)

    # signals_any's only entry ("小于0.8") is absent from s -> no match.
    factor = Factor(
        name="单测",
        signals_all=[
            Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
            Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')
        ],
        signals_any=[
            Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.8", v2='其他', v3='其他')
        ]
    )
    assert not factor.is_match(s)

    # signals_not contains a signal that IS present ("顶分型") -> vetoes the match.
    factor = Factor(
        name="单测",
        signals_all=[
            Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
            Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')
        ],
        signals_any=[
            Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.2", v2='其他', v3='其他')
        ],
        signals_not=[
            Signal(k1=str(freq.value), k2="倒0笔", k3="三K形态", v1="顶分型", v2='其他', v3='其他'),
        ]
    )
    assert not factor.is_match(s)
def test_event():
    """Exercise Event.is_match: (matched, factor) against a signal dict."""
    freq = Freq.F15
    s = OrderedDict()
    default_signals = [
        Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
        Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他'),
        Signal(k1=str(freq.value), k2="倒0笔", k3="三K形态", v1="顶分型", v2='其他', v3='其他'),
        Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="其他", v2='其他', v3='其他'),
        Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.2", v2='其他', v3='其他'),
    ]
    for signal in default_signals:
        s[signal.key] = signal.value

    # Event whose single factor matches the stored signals.
    event = Event(name="单测", operate=Operate.LO, factors=[
        Factor(
            name="测试",
            signals_all=[
                Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
                Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')]
        )
    ])
    m, f = event.is_match(s)
    assert m and f

    # Signals given in string form; "任意" wildcard and score 0 still match.
    event = Event(name="单测", operate=Operate.LO, factors=[
        Factor(
            name="测试",
            signals_all=[
                Signal('15分钟_倒0笔_方向_向上_其他_其他_0'), Signal('15分钟_倒0笔_长度_任意_其他_其他_0')
            ]
        )
    ])
    m, f = event.is_match(s)
    assert m and f

    # Required score 20 exceeds the stored signal's score -> no match.
    event = Event(name="单测", operate=Operate.LO, factors=[
        Factor(
            name="测试",
            signals_all=[
                Signal('15分钟_倒0笔_方向_向上_其他_其他_20'), Signal('15分钟_倒0笔_长度_任意_其他_其他_0')
            ]
        )
    ])
    m, f = event.is_match(s)
    assert not m and not f

    # Direction "向下" contradicts the stored "向上" -> no match.
    event = Event(name="单测", operate=Operate.LO, factors=[
        Factor(
            name="测试",
            signals_all=[
                Signal('15分钟_倒0笔_方向_向下_其他_其他_0'), Signal('15分钟_倒0笔_长度_任意_其他_其他_0')
            ]
        )
    ])
    m, f = event.is_match(s)
    assert not m and not f
def test_position_long():
    """Walk a PositionLong through a full open/add/reduce/exit cycle.

    Expected position ladder (from the asserts): LO -> 0.5, LA1 -> 0.8,
    LA2 -> 1, LR1 -> 0.8, LR2 -> 0.5, LE -> 0; repeating an operate at the
    same level is a no-op (pos_changed is False).
    """
    pos_long = PositionLong(symbol="000001.XSHG")
    # HO (hold) on a flat position changes nothing.
    pos_long.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
    assert not pos_long.pos_changed and pos_long.pos == 0

    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LO, price=100, bid=1, op_desc="首次开仓测试")
    assert pos_long.pos_changed and pos_long.pos == 0.5
    # Second LO while already opened: ignored.
    pos_long.update(dt=pd.to_datetime('2021-01-03'), op=Operate.LO, price=100, bid=2, op_desc="首次开仓测试")
    assert not pos_long.pos_changed and pos_long.pos == 0.5
    pos_long.update(dt=pd.to_datetime('2021-01-04'), op=Operate.LA1, price=100, bid=3)
    assert pos_long.pos_changed and pos_long.pos == 0.8
    pos_long.update(dt=pd.to_datetime('2021-01-05'), op=Operate.LA1, price=100, bid=4)
    assert not pos_long.pos_changed and pos_long.pos == 0.8
    pos_long.update(dt=pd.to_datetime('2021-01-06'), op=Operate.LA2, price=100, bid=5)
    assert pos_long.pos_changed and pos_long.pos == 1
    pos_long.update(dt=pd.to_datetime('2021-01-07'), op=Operate.LR1, price=100, bid=6)
    assert pos_long.pos_changed and pos_long.pos == 0.8
    pos_long.update(dt=pd.to_datetime('2021-01-08'), op=Operate.LR2, price=100, bid=7)
    assert pos_long.pos_changed and pos_long.pos == 0.5
    pos_long.update(dt=pd.to_datetime('2021-01-08'), op=Operate.LR2, price=100, bid=7)
    assert not pos_long.pos_changed and pos_long.pos == 0.5
    # After reducing, a re-add via LA2 is ignored; LA1 is accepted.
    pos_long.update(dt=pd.to_datetime('2021-01-09'), op=Operate.LA2, price=100, bid=8)
    assert not pos_long.pos_changed and pos_long.pos == 0.5
    pos_long.update(dt=pd.to_datetime('2021-01-10'), op=Operate.LA1, price=100, bid=9)
    assert pos_long.pos_changed and pos_long.pos == 0.8
    pos_long.update(dt=pd.to_datetime('2021-01-11'), op=Operate.LE, price=100, bid=10)
    assert pos_long.pos_changed and pos_long.pos == 0

    # One completed open/close pair held for 9 days ('持仓天数').
    assert len(pos_long.pairs) == 1
    assert pos_long.pairs[0]['持仓天数'] == 9
    pos_long.evaluate_operates()
def test_position_long_t0():
    """Test PositionLong with T0=False: no intraday close, and SO is rejected."""
    pos_long = PositionLong(symbol="000001.XSHG", T0=False)
    pos_long.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
    assert not pos_long.pos_changed and pos_long.pos == 0

    # Open and add twice on the same day is allowed.
    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LO, price=100, bid=1, op_desc="首次开仓测试")
    assert pos_long.pos_changed and pos_long.pos == 0.5
    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LA1, price=100, bid=3)
    assert pos_long.pos_changed and pos_long.pos == 0.8
    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LA2, price=100, bid=5)
    assert pos_long.pos_changed and pos_long.pos == 1

    # With T0=False an exit signal on the same day as the open is ignored.
    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LE, price=100, bid=8)
    assert not pos_long.pos_changed and pos_long.pos == 1
    # The next day the exit takes effect.
    pos_long.update(dt=pd.to_datetime('2021-01-03'), op=Operate.LE, price=100, bid=10)
    assert pos_long.pos_changed and pos_long.pos == 0

    # A short operate (SO) on a long position raises AssertionError.
    try:
        pos_long.update(dt=pd.to_datetime('2021-01-03'), op=Operate.SO, price=100, bid=11)
    except AssertionError as e:
        print(e)

    assert len(pos_long.pairs) == 1
    pos_long.evaluate_operates()
def test_position_long_min_interval():
    """Test long_min_interval: re-opening is blocked until 72h after the last open.

    (The original docstring said "测试T0逻辑" — copy-pasted from the T0 test;
    this test actually exercises long_min_interval=3600*72.)
    """
    pos_long = PositionLong(symbol="000001.XSHG", T0=False, long_min_interval=3600*72)
    pos_long.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
    assert not pos_long.pos_changed and pos_long.pos == 0

    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LO, price=100, bid=1, op_desc="首次开仓测试")
    assert pos_long.pos_changed and pos_long.pos == 0.5
    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LA1, price=100, bid=3)
    assert pos_long.pos_changed and pos_long.pos == 0.8
    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LA2, price=100, bid=5)
    assert pos_long.pos_changed and pos_long.pos == 1

    # With T0=False an exit on the open day is ignored; it works the next day.
    pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LE, price=100, bid=8)
    assert not pos_long.pos_changed and pos_long.pos == 1
    pos_long.update(dt=pd.to_datetime('2021-01-03'), op=Operate.LE, price=100, bid=10)
    assert pos_long.pos_changed and pos_long.pos == 0
    assert len(pos_long.pairs) == 1
    pos_long.update(dt=pd.to_datetime('2021-01-04'), op=Operate.LE, price=100, bid=11)
    assert not pos_long.pos_changed and pos_long.pos == 0

    # Minimum re-open interval: LO on 01-04 and 01-05 falls inside the 72h
    # window since the 01-02 open and is ignored; 01-06 succeeds.
    pos_long.update(dt=pd.to_datetime('2021-01-04'), op=Operate.LO, price=100, bid=12, op_desc="第二次开仓测试")
    assert not pos_long.pos_changed and pos_long.pos == 0
    pos_long.update(dt=pd.to_datetime('2021-01-05'), op=Operate.LO, price=100, bid=13, op_desc="第二次开仓测试")
    assert not pos_long.pos_changed and pos_long.pos == 0
    pos_long.update(dt=pd.to_datetime('2021-01-06'), op=Operate.LO, price=100, bid=14, op_desc="第二次开仓测试")
    assert pos_long.pos_changed and pos_long.pos == 0.5
    pos_long.update(dt=pd.to_datetime('2021-01-09'), op=Operate.LA1, price=100, bid=15)
    assert pos_long.pos_changed and pos_long.pos == 0.8
    pos_long.update(dt=pd.to_datetime('2021-01-10'), op=Operate.LA2, price=100, bid=16)
    assert pos_long.pos_changed and pos_long.pos == 1
    # The second trade is still open, so pairs still holds only the first one.
    assert len(pos_long.pairs) == 1
    print(pos_long.evaluate_operates())
def test_position_short():
    """Mirror of test_position_long for PositionShort (SO/SA1/SA2/SR1/SR2/SE).

    Same position ladder: SO -> 0.5, SA1 -> 0.8, SA2 -> 1, SR1 -> 0.8,
    SR2 -> 0.5, SE -> 0; duplicate operates at the same level are no-ops.
    """
    pos_short = PositionShort(symbol="000001.XSHG")
    pos_short.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
    assert not pos_short.pos_changed and pos_short.pos == 0

    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SO, price=100, bid=1, op_desc="首次开仓测试")
    assert pos_short.pos_changed and pos_short.pos == 0.5
    pos_short.update(dt=pd.to_datetime('2021-01-03'), op=Operate.SO, price=100, bid=2, op_desc="首次开仓测试")
    assert not pos_short.pos_changed and pos_short.pos == 0.5
    pos_short.update(dt=pd.to_datetime('2021-01-04'), op=Operate.SA1, price=100, bid=3)
    assert pos_short.pos_changed and pos_short.pos == 0.8
    pos_short.update(dt=pd.to_datetime('2021-01-05'), op=Operate.SA1, price=100, bid=4)
    assert not pos_short.pos_changed and pos_short.pos == 0.8
    pos_short.update(dt=pd.to_datetime('2021-01-06'), op=Operate.SA2, price=100, bid=5)
    assert pos_short.pos_changed and pos_short.pos == 1
    pos_short.update(dt=pd.to_datetime('2021-01-07'), op=Operate.SR1, price=100, bid=6)
    assert pos_short.pos_changed and pos_short.pos == 0.8
    pos_short.update(dt=pd.to_datetime('2021-01-08'), op=Operate.SR2, price=100, bid=7)
    assert pos_short.pos_changed and pos_short.pos == 0.5
    pos_short.update(dt=pd.to_datetime('2021-01-08'), op=Operate.SR2, price=100, bid=7)
    assert not pos_short.pos_changed and pos_short.pos == 0.5
    # After reducing, SA2 is ignored; SA1 is accepted again.
    pos_short.update(dt=pd.to_datetime('2021-01-09'), op=Operate.SA2, price=100, bid=8)
    assert not pos_short.pos_changed and pos_short.pos == 0.5
    pos_short.update(dt=pd.to_datetime('2021-01-10'), op=Operate.SA1, price=100, bid=9)
    assert pos_short.pos_changed and pos_short.pos == 0.8
    pos_short.update(dt=pd.to_datetime('2021-01-11'), op=Operate.SE, price=100, bid=10)
    assert pos_short.pos_changed and pos_short.pos == 0

    # One completed pair held for 9 days ('持仓天数').
    assert len(pos_short.pairs) == 1
    assert pos_short.pairs[0]['持仓天数'] == 9
    pos_short.evaluate_operates()
def test_position_short_t0():
    """Test PositionShort with T0=False: no intraday close, and LO is rejected."""
    pos_short = PositionShort(symbol="000001.XSHG", T0=False)
    pos_short.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
    assert not pos_short.pos_changed and pos_short.pos == 0

    # Open and add twice on the same day is allowed.
    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SO, price=100, bid=1, op_desc="首次开仓测试")
    assert pos_short.pos_changed and pos_short.pos == 0.5
    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SA1, price=100, bid=3)
    assert pos_short.pos_changed and pos_short.pos == 0.8
    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SA2, price=100, bid=5)
    assert pos_short.pos_changed and pos_short.pos == 1

    # With T0=False an exit signal on the same day as the open is ignored.
    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SE, price=100, bid=8)
    assert not pos_short.pos_changed and pos_short.pos == 1
    # The next day the exit takes effect.
    pos_short.update(dt=pd.to_datetime('2021-01-03'), op=Operate.SE, price=100, bid=10)
    assert pos_short.pos_changed and pos_short.pos == 0

    # A long operate (LO) on a short position raises AssertionError.
    try:
        pos_short.update(dt=pd.to_datetime('2021-01-03'), op=Operate.LO, price=100, bid=11)
    except AssertionError as e:
        print(e)

    assert len(pos_short.pairs) == 1
    pos_short.evaluate_operates()
def test_position_short_min_interval():
    """Test short_min_interval: re-opening is blocked until 72h after the last open.

    (The original docstring said "测试T0逻辑" — copy-pasted from the T0 test;
    this test actually exercises short_min_interval=3600*72.)
    """
    pos_short = PositionShort(symbol="000001.XSHG", T0=False, short_min_interval=3600*72)
    pos_short.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
    assert not pos_short.pos_changed and pos_short.pos == 0

    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SO, price=100, bid=1, op_desc="首次开仓测试")
    assert pos_short.pos_changed and pos_short.pos == 0.5
    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SA1, price=100, bid=3)
    assert pos_short.pos_changed and pos_short.pos == 0.8
    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SA2, price=100, bid=5)
    assert pos_short.pos_changed and pos_short.pos == 1

    # With T0=False an exit on the open day is ignored; it works the next day.
    pos_short.update(dt=pd.to_datetime('2021-01-02'), op=Operate.SE, price=100, bid=8)
    assert not pos_short.pos_changed and pos_short.pos == 1
    pos_short.update(dt=pd.to_datetime('2021-01-03'), op=Operate.SE, price=100, bid=10)
    assert pos_short.pos_changed and pos_short.pos == 0
    assert len(pos_short.pairs) == 1
    pos_short.update(dt=pd.to_datetime('2021-01-04'), op=Operate.SE, price=100, bid=11)
    assert not pos_short.pos_changed and pos_short.pos == 0

    # Minimum re-open interval: SO on 01-04 and 01-05 falls inside the 72h
    # window since the 01-02 open and is ignored; 01-06 succeeds.
    pos_short.update(dt=pd.to_datetime('2021-01-04'), op=Operate.SO, price=100, bid=12, op_desc="第二次开仓测试")
    assert not pos_short.pos_changed and pos_short.pos == 0
    pos_short.update(dt=pd.to_datetime('2021-01-05'), op=Operate.SO, price=100, bid=13, op_desc="第二次开仓测试")
    assert not pos_short.pos_changed and pos_short.pos == 0
    pos_short.update(dt=pd.to_datetime('2021-01-06'), op=Operate.SO, price=100, bid=14, op_desc="第二次开仓测试")
    assert pos_short.pos_changed and pos_short.pos == 0.5
    pos_short.update(dt=pd.to_datetime('2021-01-09'), op=Operate.SA1, price=100, bid=15)
    assert pos_short.pos_changed and pos_short.pos == 0.8
    pos_short.update(dt=pd.to_datetime('2021-01-10'), op=Operate.SA2, price=100, bid=16)
    assert pos_short.pos_changed and pos_short.pos == 1
    # The second trade is still open, so pairs still holds only the first one.
    assert len(pos_short.pairs) == 1
    print(pos_short.evaluate_operates())
| 39.895141
| 106
| 0.649336
| 2,701
| 15,599
| 3.577194
| 0.057386
| 0.073898
| 0.064169
| 0.077003
| 0.95415
| 0.94763
| 0.916684
| 0.901573
| 0.881598
| 0.844339
| 0
| 0.095458
| 0.180012
| 15,599
| 390
| 107
| 39.997436
| 0.659917
| 0.006667
| 0
| 0.678445
| 0
| 0
| 0.095487
| 0.014482
| 0
| 0
| 0
| 0
| 0.314488
| 1
| 0.031802
| false
| 0
| 0.010601
| 0
| 0.042403
| 0.014134
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8886f2bf86837ee354ec37211859aa3541d896e
| 22
|
py
|
Python
|
advex_uar/__init__.py
|
nraghuraman/advex-uar
|
b2bd5c2bf3ae07d3d5c65b81e4a6c5e21284fa43
|
[
"Apache-2.0"
] | 75
|
2019-08-22T04:56:17.000Z
|
2022-03-28T02:32:55.000Z
|
advex_uar/__init__.py
|
nraghuraman/advex-uar
|
b2bd5c2bf3ae07d3d5c65b81e4a6c5e21284fa43
|
[
"Apache-2.0"
] | 7
|
2019-10-08T16:27:48.000Z
|
2022-02-18T01:36:02.000Z
|
advex_uar/__init__.py
|
nraghuraman/advex-uar
|
b2bd5c2bf3ae07d3d5c65b81e4a6c5e21284fa43
|
[
"Apache-2.0"
] | 18
|
2019-08-22T15:55:22.000Z
|
2022-02-17T19:32:10.000Z
|
from . import attacks
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e8915dda0203d11b42a8e571e29d17404d45bccb
| 32
|
py
|
Python
|
qutip/solver/ode/__init__.py
|
jakelishman/qutip
|
fbb7fad5bc205910228db622d90601c82db45e4b
|
[
"BSD-3-Clause"
] | null | null | null |
qutip/solver/ode/__init__.py
|
jakelishman/qutip
|
fbb7fad5bc205910228db622d90601c82db45e4b
|
[
"BSD-3-Clause"
] | 2
|
2020-07-13T12:11:30.000Z
|
2020-08-09T22:45:05.000Z
|
qutip/solver/ode/__init__.py
|
jakelishman/qutip
|
fbb7fad5bc205910228db622d90601c82db45e4b
|
[
"BSD-3-Clause"
] | null | null | null |
from .scipy_integrator import *
| 16
| 31
| 0.8125
| 4
| 32
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa38a9f27834f3a988a97022e44848896c2c206a
| 148
|
py
|
Python
|
examples/more/ED/28orb/get_fock.py
|
danielballan/edrixs
|
57fbd11ba9aaeaa393c3e2f06af41e4e386749e4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/more/ED/28orb/get_fock.py
|
danielballan/edrixs
|
57fbd11ba9aaeaa393c3e2f06af41e4e386749e4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/more/ED/28orb/get_fock.py
|
danielballan/edrixs
|
57fbd11ba9aaeaa393c3e2f06af41e4e386749e4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Write a Fock basis file for a 28-orbital, 14-electron system.
from edrixs.fock_basis import write_fock_dec_by_N

if __name__ == "__main__":
    # Output "fock_i.in" -- presumably consumed by the ED solver as the
    # initial-state basis; confirm against the edrixs documentation.
    write_fock_dec_by_N(28, 14, "fock_i.in")
| 21.142857
| 51
| 0.736486
| 27
| 148
| 3.37037
| 0.740741
| 0.197802
| 0.263736
| 0.307692
| 0.32967
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031496
| 0.141892
| 148
| 6
| 52
| 24.666667
| 0.685039
| 0.135135
| 0
| 0
| 0
| 0
| 0.133858
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
fa3aa658b62b752c19ebe536c74ad96d2358d2c1
| 31,544
|
py
|
Python
|
datasets.py
|
vauxgomes/ml-datasets
|
e9bb187bb049eccd176d25cf215836770bd0352b
|
[
"MIT"
] | null | null | null |
datasets.py
|
vauxgomes/ml-datasets
|
e9bb187bb049eccd176d25cf215836770bd0352b
|
[
"MIT"
] | null | null | null |
datasets.py
|
vauxgomes/ml-datasets
|
e9bb187bb049eccd176d25cf215836770bd0352b
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
# Constants
PATH = '~/Projects/datasets/data/'
#
def load_bcw(dropna=True, verbosity=False):
    '''
    Breast Cancer Wisconsin data set.

    COLUMNS: Clump Thickness, Uniformity of Cell Size, Uniformity of
    Cell Shape, Marginal Adhesion, Single Epithelial Cell Size,
    Bare Nuclei, Bland Chromatin, Normal Nucleoli, Mitoses, Class.
    (The raw 'ID' column is dropped.)

    Parameters
    ----------
    dropna : bool
        Drop rows with missing values ('?' in the raw file, found in
        'Bare Nuclei'). Fix: the original ignored this flag and always
        dropped.
    verbosity : bool
        Print the file name, row count and column names.

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data
    '''
    NAME = 'bcw.data'
    COLUMNS = [
        'ID',
        'Clump Thickness',
        'Uniformity of Cell Size',
        'Uniformity of Cell Shape',
        'Marginal Adhesion',
        'Single Epithelial Cell Size',
        'Bare Nuclei',
        'Bland Chromatin',
        'Normal Nucleoli',
        'Mitoses',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    df.drop(['ID'], axis=1, inplace=True)
    # '?' marks missing entries; np.nan (np.NaN was removed in NumPy 2.0).
    df.replace('?', np.nan, inplace=True)
    if dropna:
        df.dropna(inplace=True)
        df['Bare Nuclei'] = df['Bare Nuclei'].astype('int')
    else:
        # With NaNs retained an int cast would raise; keep floats instead.
        df['Bare Nuclei'] = df['Bare Nuclei'].astype('float')
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_car(dropna=True, verbosity=False):
    '''
    Car Evaluation Database (Marko Bohanec).

    All attributes are categorical in the raw file; every column is
    converted to integer category codes.

    Columns: Buying, Maint, Doors, Persons, Luggage Boot, Safety, Class

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data
    '''
    NAME = 'car.data'
    COLUMNS = [
        'Buying',
        'Maint',
        'Doors',
        'Persons',
        'Luggage Boot',
        'Safety',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    # Encode each categorical column as integer codes.
    for col in COLUMNS:
        df[col] = pd.Categorical(df[col]).codes
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_cortex_nuclear(dropna=True, verbosity=False):
'''
Data Cortex Nuclear
COLUMNS
-------------------------------------------
MouseID*,
DYRK1A_N,
ITSN1_N,
BDNF_N,
NR1_N,
NR2A_N,
pAKT_N,
pBRAF_N,
pCAMKII_N,
pCREB_N,
pELK_N,
pERK_N,
pJNK_N,
PKCA_N,
pMEK_N,
pNR1_N,
pNR2A_N,
pNR2B_N,
pPKCAB_N,
pRSK_N,
AKT_N,
BRAF_N,
CAMKII_N,
CREB_N,
ELK_N,
ERK_N,
GSK3B_N,
JNK_N,
MEK_N,
TRKA_N,
RSK_N,
APP_N,
Bcatenin_N,
SOD1_N,
MTOR_N,
P38_N,
pMTOR_N,
DSCR1_N,
AMPKA_N,
NR2B_N,
pNUMB_N,
RAPTOR_N,
TIAM1_N,
pP70S6_N,
NUMB_N,
P70S6_N,
pGSK3B_N,
pPKCG_N,
CDK5_N,
S6_N,
ADARB1_N,
AcetylH3K9_N,
RRP1_N,
BAX_N,
ARC_N,
ERBB4_N,
nNOS_N,
Tau_N,
GFAP_N,
GluR3_N,
GluR4_N,
IL1B_N,
P3525_N,
pCASP9_N,
PSD95_N,
SNCA_N,
Ubiquitin_N,
pGSK3B_Tyr216_N,
SHH_N,
BAD_N,
BCL2_N,
pS6_N,
pCFOS_N,
SYP_N,
H3AcK18_N,
EGR1_N,
H3MeK4_N,
CaNA_N,
Genotype,
Treatment,
Behavior,
Class
*Delete
--
https://archive.ics.uci.edu/ml/machine-learning-databases/00342/Data_Cortex_Nuclear.xls
'''
NAME = 'cortex_nuclear.data'
COLUMNS = [
'MouseID',
'DYRK1A_N',
'ITSN1_N',
'BDNF_N',
'NR1_N',
'NR2A_N',
'pAKT_N',
'pBRAF_N',
'pCAMKII_N',
'pCREB_N',
'pELK_N',
'pERK_N',
'pJNK_N',
'PKCA_N',
'pMEK_N',
'pNR1_N',
'pNR2A_N',
'pNR2B_N',
'pPKCAB_N',
'pRSK_N',
'AKT_N',
'BRAF_N',
'CAMKII_N',
'CREB_N',
'ELK_N',
'ERK_N',
'GSK3B_N',
'JNK_N',
'MEK_N',
'TRKA_N',
'RSK_N',
'APP_N',
'Bcatenin_N',
'SOD1_N',
'MTOR_N',
'P38_N',
'pMTOR_N',
'DSCR1_N',
'AMPKA_N',
'NR2B_N',
'pNUMB_N',
'RAPTOR_N',
'TIAM1_N',
'pP70S6_N',
'NUMB_N',
'P70S6_N',
'pGSK3B_N',
'pPKCG_N',
'CDK5_N',
'S6_N',
'ADARB1_N',
'AcetylH3K9_N',
'RRP1_N',
'BAX_N',
'ARC_N',
'ERBB4_N',
'nNOS_N',
'Tau_N',
'GFAP_N',
'GluR3_N',
'GluR4_N',
'IL1B_N',
'P3525_N',
'pCASP9_N',
'PSD95_N',
'SNCA_N',
'Ubiquitin_N',
'pGSK3B_Tyr216_N',
'SHH_N',
'BAD_N',
'BCL2_N',
'pS6_N',
'pCFOS_N',
'SYP_N',
'H3AcK18_N',
'EGR1_N',
'H3MeK4_N',
'CaNA_N',
'Genotype',
'Treatment',
'Behavior',
'Class'
]
df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
df.replace('?', np.NaN, inplace=True)
df.drop('MouseID', axis=1, inplace=True)
df['Genotype'] = df['Genotype'].astype('category').cat.codes
df['Treatment'] = df['Treatment'].astype('category').cat.codes
df['Behavior'] = df['Behavior'].astype('category').cat.codes
df['Class'] = df['Class'].astype('category').cat.codes
if dropna: df.dropna(inplace=True)
if verbosity:
aux = '\n '
print(f'Data: {NAME}')
print(f'Lines: {df.shape[0]}')
print(f'Columns:\n {aux.join(df.columns)}')
return df
#
def load_credit_card_clients(dropna=True, verbosity=False):
'''
Credit Card Clients
COLUMNS
-------------------------------------------
ID*,
Limit_bal,
Sex,
Education,
Marriage,
Age,
Pay_0,
Pay_2,
Pay_3,
Pay_4,
Pay_5,
Pay_6,
Bill_amt1,
Bill_amt2,
Bill_amt3,
Bill_amt4,
Bill_amt5,
Bill_amt6,
Pay_amt1,
Pay_amt2,
Pay_amt3,
Pay_amt4,
Pay_amt5,
Pay_amt6,
Class
* Delete
--
https://archive.ics.uci.edu/ml/machine-learning-databases/00350/default%20of%20credit%20card%20clients.xls
'''
NAME = 'credit_card_clients.data'
COLUMNS = [
'ID',
'Limit_bal',
'Sex',
'Education',
'Marriage',
'Age',
'Pay_0',
'Pay_2',
'Pay_3',
'Pay_4',
'Pay_5',
'Pay_6',
'Bill_amt1',
'Bill_amt2',
'Bill_amt3',
'Bill_amt4',
'Bill_amt5',
'Bill_amt6',
'Pay_amt1',
'Pay_amt2',
'Pay_amt3',
'Pay_amt4',
'Pay_amt5',
'Pay_amt6',
'Class'
]
df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
df.drop(['ID'], axis=1, inplace=True)
if verbosity:
aux = '\n '
print(f'Data: {NAME}')
print(f'Lines: {df.shape[0]}')
print(f'Columns:\n {aux.join(df.columns)}')
return df
#
def load_dermatology(dropna=True, verbosity=False):
'''
Dermatology Dataset
COLUMNS
-------------------------------------------
Erythema,
Scaling,
Definite Borders,
Itching,
Koebner Phenomenon,
Polygonal Papules,
Follicular Papules,
Oral Mucosal Involvement,
Knee And Elbow Involvement,
Scalp Involvement,
Family History,
Melanin Incontinence,
Eosinophils In The Infiltrate,
Pnl Infiltrate,
Fibrosis Of The Papillary Dermis,
Exocytosis,
Acanthosis,
Hyperkeratosis,
Parakeratosis,
Clubbing Of The Rete Ridges,
Elongation Of The Rete Ridges,
Thinning Of The Suprapapillary Epidermis,
Spongiform Pustule,
Munro Microabcess,
Focal Hypergranulosis,
Disappearance Of The Granular Layer,
Vacuolisation And Damage Of Basal Layer,
Spongiosis,
Saw-Tooth Appearance Of Retes,
Follicular Horn Plug,
Perifollicular Parakeratosis,
Inflammatory Monoluclear Inflitrate,
Band-Like Infiltrate,
Age**,
Class
**Missing Values
--
https://archive.ics.uci.edu/ml/machine-learning-databases/dermatology/dermatology.data
'''
NAME = 'dermatology.data'
COLUMNS = [
'Erythema',
'Scaling',
'Definite Borders',
'Itching',
'Koebner Phenomenon',
'Polygonal Papules',
'Follicular Papules',
'Oral Mucosal Involvement',
'Knee And Elbow Involvement',
'Scalp Involvement',
'Family History',
'Melanin Incontinence',
'Eosinophils In The Infiltrate',
'Pnl Infiltrate',
'Fibrosis Of The Papillary Dermis',
'Exocytosis',
'Acanthosis',
'Hyperkeratosis',
'Parakeratosis',
'Clubbing Of The Rete Ridges',
'Elongation Of The Rete Ridges',
'Thinning Of The Suprapapillary Epidermis',
'Spongiform Pustule',
'Munro Microabcess',
'Focal Hypergranulosis',
'Disappearance Of The Granular Layer',
'Vacuolisation And Damage Of Basal Layer',
'Spongiosis',
'Saw-Tooth Appearance Of Retes',
'Follicular Horn Plug',
'Perifollicular Parakeratosis',
'Inflammatory Monoluclear Inflitrate',
'Band-Like Infiltrate',
'Age',
'Class'
]
df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
df.replace('?', np.NaN, inplace=True)
if dropna:
df.dropna(inplace=True)
if verbosity:
aux = '\n '
print(f'Data: {NAME}')
print(f'Lines: {df.shape[0]}')
print(f'Columns:\n {aux.join(df.columns)}')
return df
#
def load_ecoli(dropna=True, verbosity=False):
    '''
    Load the UCI Ecoli data set.

    Columns: MCG, GVH, LIP, CHG, AAC, ALM1, ALM2, Class
    (the raw 'Sequence Name' identifier column is removed).

    Parameters
    ----------
    dropna : bool
        When True, rows containing missing values ('?') are removed.
    verbosity : bool
        When True, print the file name, row count and column names.

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/ecoli/ecoli.data
    '''
    NAME = 'ecoli.data'
    COLUMNS = [
        'Sequence Name',
        'MCG',
        'GVH',
        'LIP',
        'CHG',
        'AAC',
        'ALM1',
        'ALM2',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    # '?' marks missing entries in the raw file.
    df = df.replace('?', np.NaN)
    df = df.drop('Sequence Name', axis=1)
    if dropna:
        df = df.dropna()
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_eeg_eye_state(dropna=True, verbosity=False):
    '''
    EEG Eye State Data Set: 14 EEG channel readings plus Class.

    Columns: AF3, F7, F3, FC5, T7, P7, O1, O2, P8, T8, FC6, F4, F8,
    AF4, Class.

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/00264/EEG%20Eye%20State.arff
    '''
    NAME = 'eeg_eye_state.data'
    COLUMNS = [
        'AF3',
        'F7',
        'F3',
        'FC5',
        'T7',
        'P7',
        'O1',
        'O2',
        'P8',
        'T8',
        'FC6',
        'F4',
        'F8',
        'AF4',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    if dropna:
        df = df.dropna()
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_glass(dropna=True, verbosity=False):
    '''
    Glass Type Dataset
    COLUMNS
    -------------------------------------------
    ID*,
    Refractive Index,
    Na, Mg, Al, Si, K, Ca, Ba, Fe,
    Class
    * Delete
    NOTE(review): the column list marks 'ID' for deletion and `dropna`
    is accepted, but the code neither drops the ID column nor applies
    dropna -- confirm whether callers rely on ID being present.
    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data
    '''
    NAME = 'glass.data'
    COLUMNS = [
        'ID',
        'Refractive Index',
        'Na',
        'Mg',
        'Al',
        'Si',
        'K',
        'Ca',
        'Ba',
        'Fe',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_haberman(dropna=True, verbosity=False):
    '''
    Haberman's Survival Data Set
    COLUMNS
    -------------------------------------------
    Age,
    Years of Operation,
    Positive Axillary Nodes,
    Class
    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data
    '''
    # (docstring URL fixed: it previously pointed at the EEG Eye State file)
    NAME = 'haberman.data'
    COLUMNS = [
        'Age',
        'Years of Operation',
        'Positive Axillary Nodes',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    if dropna: df.dropna(inplace=True)
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_ionosphere(dropna=True, verbosity=False):
'''
Ionosphere Data Set
COLUMNS
-------------------------------------------
ATT 1,
ATT 2,
ATT 3,
ATT 4,
ATT 5,
ATT 6,
ATT 7,
ATT 8,
ATT 9,
ATT 10,
ATT 11,
ATT 12,
ATT 13,
ATT 14,
ATT 15,
ATT 16,
ATT 17,
ATT 18,
ATT 19,
ATT 20,
ATT 21,
ATT 22,
ATT 23,
ATT 24,
ATT 25,
ATT 26,
ATT 27,
ATT 28,
ATT 29,
ATT 30,
ATT 31,
ATT 32,
ATT 33,
ATT 34,
Class
--
https://archive.ics.uci.edu/ml/machine-learning-databases/ionosphere/ionosphere.data
'''
NAME = 'ionosphere.data'
COLUMNS = [
'ATT 1',
'ATT 2',
'ATT 3',
'ATT 4',
'ATT 5',
'ATT 6',
'ATT 7',
'ATT 8',
'ATT 9',
'ATT 10',
'ATT 11',
'ATT 12',
'ATT 13',
'ATT 14',
'ATT 15',
'ATT 16',
'ATT 17',
'ATT 18',
'ATT 19',
'ATT 20',
'ATT 21',
'ATT 22',
'ATT 23',
'ATT 24',
'ATT 25',
'ATT 26',
'ATT 27',
'ATT 28',
'ATT 29',
'ATT 30',
'ATT 31',
'ATT 32',
'ATT 33',
'ATT 34',
'Class'
]
df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
df['Class'] = df['Class'].astype('category').cat.codes
if dropna: df.dropna(inplace=True)
if verbosity:
aux = '\n '
print(f'Data: {NAME}')
print(f'Lines: {df.shape[0]}')
print(f'Columns:\n {aux.join(df.columns)}')
return df
#
def load_iris(dropna=True, verbosity=False):
    '''
    Iris Plants Database (R.A. Fisher).

    Columns: Sepal Length, Sepal Width, Petal Length, Petal Width,
    Class (0 Iris-setosa, 1 Iris-versicolor, 2 Iris-virginica; 50 each).

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data
    '''
    NAME = 'iris.data'
    COLUMNS = [
        'Sepal Length',
        'Sepal Width',
        'Petal Length',
        'Petal Width',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    # Encode the species label as integer category codes.
    df['Class'] = df['Class'].astype('category').cat.codes
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_messidor(dropna=True, verbosity=False):
'''
Diabetic Retinopathy Debrecen
COLUMNS
-------------------------------------------
Quality assessment,
Pre-screening,
MA Detection 0.5,
MA Detection 0.6,
MA Detection 0.7,
MA Detection 0.8,
MA Detection 0.9,
MA Detection 1.0,
MA detection Exut 1,
MA detection Exut 2,
MA detection Exut 3,
MA detection Exut 4,
MA detection Exut 5,
MA detection Exut 6,
MA detection Exut 7,
MA detection Exut 8,
Distance,
Diameter,
AmFm Classification,
Class,
--
https://archive.ics.uci.edu/ml/machine-learning-databases/00329/messidor_features.arff
'''
NAME = 'messidor_features.data'
COLUMNS = [
'Quality Assessment',
'Pre-screening',
'MA Detection 0.5',
'MA Detection 0.6',
'MA Detection 0.7',
'MA Detection 0.8',
'MA Detection 0.9',
'MA Detection 1.0',
'MA detection Exut 1',
'MA detection Exut 2',
'MA detection Exut 3',
'MA detection Exut 4',
'MA detection Exut 5',
'MA detection Exut 6',
'MA detection Exut 7',
'MA detection Exut 8',
'Distance',
'Diameter',
'AmFm Classification',
'Class'
]
df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
df.replace('?', np.NaN, inplace=True)
if dropna: df.dropna(inplace=True)
if verbosity:
aux = '\n '
print(f'Data: {NAME}')
print(f'Lines: {df.shape[0]}')
print(f'Columns:\n {aux.join(df.columns)}')
return df
#
def load_nursery(dropna=True, verbosity=False):
    '''
    Nursery Data Set (all attributes categorical).

    Columns: Parents, Has Nurs, Form, Children, Housing, Finance,
    Social, Health, Class -- each encoded as integer category codes.

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/nursery/nursery.data
    '''
    NAME = 'nursery.data'
    COLUMNS = [
        'Parents',
        'Has Nurs',
        'Form',
        'Children',
        'Housing',
        'Finance',
        'Social',
        'Health',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    # Encode every column as integer category codes.
    for col in COLUMNS:
        df[col] = df[col].astype('category').cat.codes
    if dropna:
        df = df.dropna()
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_phishing_websites(dropna=True, verbosity=False):
    '''
    Phishing Websites Data Set.

    Columns: Sfh, Popupwidnow, Sslfinal_state, Request_url,
    Url_of_anchor, Web_traffic, Url_length, Age_of_domain,
    Having_ip_address, Class.

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/00379/PhishingData.arff
    '''
    NAME = 'phishing.data'
    COLUMNS = [
        'Sfh',
        'Popupwidnow',
        'Sslfinal_state',
        'Request_url',
        'Url_of_anchor',
        'Web_traffic',
        'Url_length',
        'Age_of_domain',
        'Having_ip_address',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    # '?' marks missing entries in the raw file.
    df = df.replace('?', np.NaN)
    if dropna:
        df = df.dropna()
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_seeds(dropna=True, verbosity=False):
    '''
    Seeds Data Set
    COLUMNS
    -------------------------------------------
    Area,
    Perimeter,
    Compactness,
    Length of kernel,
    Width of kernel,
    Asymmetry coefficient,
    Length of kernel groove,
    Class
    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/00236/seeds_dataset.txt
    '''
    NAME = 'seeds.data'
    COLUMNS = [
        'Area',
        'Perimeter',
        'Compactness',
        'Length of Kernel',
        'Width of Kernel',
        'Asymmetry Coefficient',
        'Length of Kernel Groove',
        'Class'
    ]
    # The raw file is tab-separated.
    df = pd.read_csv(f'{PATH}{NAME}', sep='\t', names=COLUMNS)
    # NOTE(review): every column -- including the continuous measurements
    # (Area, Perimeter, ...) -- is replaced by its categorical code, which
    # discards the numeric magnitudes. Confirm this is intentional.
    for c in df.columns:
        df[c] = df[c].astype('category').cat.codes
    if dropna: df.dropna(inplace=True)
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_seismic_bumps(dropna=True, verbosity=False):
    '''
    Seismic Bumps Data Set.

    Columns: Seismic, Seismoacoustic, Shift, Genergy, Gpuls, Gdenergy,
    Gdpuls, Ghazard, Nbumps, Nbumps2..Nbumps7, Nbumps89, Energy,
    Maxenergy, Class. The textual columns (Seismic, Seismoacoustic,
    Shift, Ghazard, Class) are encoded as integer category codes.

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/00266/seismic-bumps.arff
    '''
    NAME = 'seismic_bumps.data'
    COLUMNS = [
        'Seismic',
        'Seismoacoustic',
        'Shift',
        'Genergy',
        'Gpuls',
        'Gdenergy',
        'Gdpuls',
        'Ghazard',
        'Nbumps',
        'Nbumps2',
        'Nbumps3',
        'Nbumps4',
        'Nbumps5',
        'Nbumps6',
        'Nbumps7',
        'Nbumps89',
        'Energy',
        'Maxenergy',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    # Encode the categorical/textual columns as integer codes.
    for col in ('Seismic', 'Seismoacoustic', 'Shift', 'Ghazard', 'Class'):
        df[col] = df[col].astype('category').cat.codes
    if dropna:
        df = df.dropna()
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_soybean(dropna=True, verbosity=False):
'''
Soybean Large Data Set
COLUMNS
-------------------------------------------
Date,
Plant-Stand,
Precip,
Temp,
Hail,
Crop-Hist,
Area-Damaged,
Severity,
Seed-Tmt,
Germination,
Plant-Growth,
Leaves,
Leafspots-Halo,
Leafspots-Marg,
Leafspot-Size,
Leaf-Shread,
Leaf-Malf,
Leaf-Mild,
Stem,
Lodging,
Stem-Cankers,
Canker-Lesion,
Fruiting-Bodies,
External Decay,
Mycelium,
Int-Discolor,
Sclerotia,
Fruit-Pods,
Fruit Spots,
Seed,
Mold-Growth,
Seed-Discolor,
Seed-Size,
Shriveling,
Roots,
Class
--
https://archive.ics.uci.edu/ml/machine-learning-databases/soybean/soybean-large.data
'''
NAME = 'soybean.data'
COLUMNS = [
'Class',
'Date',
'Plant-Stand',
'Precip',
'Temp',
'Hail',
'Crop-Hist',
'Area-Damaged',
'Severity',
'Seed-Tmt',
'Germination',
'Plant-Growth',
'Leaves',
'Leafspots-Halo',
'Leafspots-Marg',
'Leafspot-Size',
'Leaf-Shread',
'Leaf-Malf',
'Leaf-Mild',
'Stem',
'Lodging',
'Stem-Cankers',
'Canker-Lesion',
'Fruiting-Bodies',
'External Decay',
'Mycelium',
'Int-Discolor',
'Sclerotia',
'Fruit-Pods',
'Fruit Spots',
'Seed',
'Mold-Growth',
'Seed-Discolor',
'Seed-Size',
'Shriveling',
'Roots'
]
df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
df.replace('?', np.NaN, inplace=True)
if dropna: df.dropna(inplace=True)
for c in df.columns:
df[c] = df[c].astype('category').cat.codes
class_col = df.pop('Class')
df['Class'] = class_col
if verbosity:
aux = '\n '
print(f'Data: {NAME}')
print(f'Lines: {df.shape[0]}')
print(f'Columns:\n {aux.join(df.columns)}')
return df
#
def load_tae(dropna=True, verbosity=False):
    '''
    Teaching Assistant Evaluation Data Set.

    Columns: Native English Speaker, Instructor, Course,
    Summer/Regular, Class Size, Class.

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/tae/tae.data
    '''
    NAME = 'tae.data'
    COLUMNS = [
        'Native English Speaker',
        'Instructor',
        'Course',
        'Summer/Regular',
        'Class Size',
        'Class'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    # '?' marks missing entries in the raw file.
    df = df.replace('?', np.NaN)
    if dropna:
        df = df.dropna()
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_wholesale(dropna=True, verbosity=False):
    '''
    Wholesale customers Data Set.

    'Channel' is exposed as the 'Class' column (moved to the end);
    'Region' is kept as a feature.

    COLUMNS: Channel (-> Class), Region, Fresh, Milk, Grocery, Frozen,
    Detergents_Paper, Delicassen

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/00292/Wholesale%20customers%20data.csv
    '''
    NAME = 'wholesale.data'
    # Fix: the original list read "'Fresh,' 'Milk'," -- the missing comma
    # made Python concatenate the two literals into one bogus column name
    # ('Fresh,Milk'), leaving only 7 names for 8 columns.
    COLUMNS = [
        'Channel',
        'Region',
        'Fresh',
        'Milk',
        'Grocery',
        'Frozen',
        'Detergents_Paper',
        'Delicassen'
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    # '?' marks missing entries; np.nan (np.NaN was removed in NumPy 2.0).
    df.replace('?', np.nan, inplace=True)
    if dropna: df.dropna(inplace=True)
    # Move the chosen target column to the end under the name 'Class'.
    df['Class'] = df.pop('Channel')
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_wine(dropna=True, verbosity=False):
    '''
    Wine Quality (red) -- P. Cortez, A. Cerdeira, F. Almeida,
    T. Matos and J. Reis.

    Columns: Fixed Acidity, Volatile Acidity, Citric Acid, Residual
    Sugar, Chlorides, Free Sulfur Dioxide, Total Sulfur Dioxide,
    Density, Ph, Sulphates, Alcohol, Class (quality score).

    --
    https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv
    '''
    NAME = 'wine.data'
    COLUMNS = [
        'Fixed Acidity',
        'Volatile Acidity',
        'Citric Acid',
        'Residual Sugar',
        'Chlorides',
        'Free Sulfur Dioxide',
        'Total Sulfur Dioxide',
        'Density',
        'Ph',
        'Sulphates',
        'Alcohol',
        'Class'
    ]
    # Semicolon-separated file. NOTE(review): no skiprows -- assumes the
    # local copy has no header row; confirm (the upstream CSV has one).
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS, sep=';')
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    return df
#
def load_titanic(dropna=True, verbosity=False):
    '''
    Titanic survival data (Kaggle train split).

    Columns: Survived, Pclass, Name, Sex, Age, SS Aboard (siblings/
    spouses), PC Aboard (parents/children), Fare. 'Sex' is encoded as
    integer category codes and the returned frame moves 'Survived' to
    the last position.

    --
    https://www.kaggle.com/c/titanic/data?select=train.csv
    '''
    NAME = 'titanic.data'
    COLUMNS = [
        'Survived',
        'Pclass',
        'Name',
        'Sex',
        'Age',
        'SS Aboard',
        'PC Aboard',
        'Fare',
    ]
    df = pd.read_csv(f'{PATH}{NAME}', names=COLUMNS)
    df['Sex'] = df['Sex'].astype('category').cat.codes
    # (A 'Title' feature derived from 'Name' was considered but is disabled.)
    cols = list(df.columns)
    if verbosity:
        aux = '\n '
        print(f'Data: {NAME}')
        print(f'Lines: {df.shape[0]}')
        print(f'Columns:\n {aux.join(df.columns)}')
    # Reorder: feature columns first, the target ('Survived') last.
    return df[cols[1:] + cols[:1]]
| 22.339943
| 118
| 0.446773
| 3,103
| 31,544
| 4.452143
| 0.183693
| 0.027362
| 0.028882
| 0.036482
| 0.821281
| 0.807673
| 0.803185
| 0.801231
| 0.787043
| 0.763228
| 0
| 0.023745
| 0.404546
| 31,544
| 1,412
| 119
| 22.339943
| 0.711761
| 0.302213
| 0
| 0.3454
| 0
| 0
| 0.32775
| 0.030161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031674
| false
| 0
| 0.003017
| 0
| 0.066365
| 0.095023
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d72773cb3f6e14f15d6ff0d24a1a9437a90f37c5
| 136
|
py
|
Python
|
OpenCart/pages/__init__.py
|
turovod/Otus
|
57433c6944bca155177b07ff361139ff30f7f692
|
[
"MIT"
] | null | null | null |
OpenCart/pages/__init__.py
|
turovod/Otus
|
57433c6944bca155177b07ff361139ff30f7f692
|
[
"MIT"
] | null | null | null |
OpenCart/pages/__init__.py
|
turovod/Otus
|
57433c6944bca155177b07ff361139ff30f7f692
|
[
"MIT"
] | null | null | null |
from .base_page import BasePage
from .common_page import CommonPage
from .login_page import LoginLogout
from .main_page import MainPage
| 27.2
| 35
| 0.852941
| 20
| 136
| 5.6
| 0.55
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 136
| 4
| 36
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d773e429daa9526ecb1f71d424d914614fd4e568
| 174
|
py
|
Python
|
tests/unit/test_random_port.py
|
danni-m/RLTest
|
85c09592e96e26edab94a22077a582fa425b62fa
|
[
"BSD-3-Clause"
] | 15
|
2018-09-06T12:07:47.000Z
|
2022-03-02T05:27:31.000Z
|
tests/unit/test_random_port.py
|
danni-m/RLTest
|
85c09592e96e26edab94a22077a582fa425b62fa
|
[
"BSD-3-Clause"
] | 79
|
2018-09-04T13:25:56.000Z
|
2022-03-31T22:48:26.000Z
|
tests/unit/test_random_port.py
|
danni-m/RLTest
|
85c09592e96e26edab94a22077a582fa425b62fa
|
[
"BSD-3-Clause"
] | 12
|
2018-09-04T23:17:04.000Z
|
2021-07-18T12:33:54.000Z
|
from unittest import TestCase
class Test(TestCase):
    """Unit tests for port registration/allocation.

    Fix: the file previously defined two classes both named ``Test``;
    the second definition shadowed the first, so ``test_register_port``
    was silently never collected. Both tests now live in one class.
    """

    def test_register_port(self):
        pass

    def test_get_random_port(self):
        pass
| 14.5
| 35
| 0.689655
| 23
| 174
| 5
| 0.565217
| 0.156522
| 0.295652
| 0.347826
| 0.417391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 174
| 11
| 36
| 15.818182
| 0.871212
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.285714
| 0.142857
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
d792ea9172cd0ad6ba515cb3f4de8f42b7942d67
| 32
|
py
|
Python
|
src/filters/__init__.py
|
raboakye/python-packages-intro
|
ef17ad08ad7b822d900762bc1c320028096b0523
|
[
"MIT"
] | null | null | null |
src/filters/__init__.py
|
raboakye/python-packages-intro
|
ef17ad08ad7b822d900762bc1c320028096b0523
|
[
"MIT"
] | null | null | null |
src/filters/__init__.py
|
raboakye/python-packages-intro
|
ef17ad08ad7b822d900762bc1c320028096b0523
|
[
"MIT"
] | null | null | null |
from app.src.filters import main
| 32
| 32
| 0.84375
| 6
| 32
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad06034c82d1f4d7dee1a88657a9094a3b3c96c5
| 59
|
py
|
Python
|
packages/pyright-scip/snapshots/input/aliased_import/actual.py
|
sourcegraph/pyright
|
f6a94a47f7e61172fd108ee9a4c62f748e1d24af
|
[
"MIT"
] | null | null | null |
packages/pyright-scip/snapshots/input/aliased_import/actual.py
|
sourcegraph/pyright
|
f6a94a47f7e61172fd108ee9a4c62f748e1d24af
|
[
"MIT"
] | 19
|
2022-03-17T03:20:34.000Z
|
2022-03-31T02:53:12.000Z
|
packages/pyright-scip/snapshots/input/aliased_import/actual.py
|
sourcegraph/pyright
|
f6a94a47f7e61172fd108ee9a4c62f748e1d24af
|
[
"MIT"
] | null | null | null |
import aliased
import aliased as A  # same module bound again under a short alias

# Read a module-level constant through the alias.
print(A.SOME_CONSTANT)
| 11.8
| 22
| 0.813559
| 10
| 59
| 4.7
| 0.7
| 0.553191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 59
| 4
| 23
| 14.75
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad4f085a7e8fc8d52f1dae9b983c384c82286474
| 44
|
py
|
Python
|
scraper/__init__.py
|
u-aaa/Vilnius-Apartment-Predictions
|
de9ab9433aa71891b6d19cc4deecef33b0453ac3
|
[
"MIT"
] | null | null | null |
scraper/__init__.py
|
u-aaa/Vilnius-Apartment-Predictions
|
de9ab9433aa71891b6d19cc4deecef33b0453ac3
|
[
"MIT"
] | null | null | null |
scraper/__init__.py
|
u-aaa/Vilnius-Apartment-Predictions
|
de9ab9433aa71891b6d19cc4deecef33b0453ac3
|
[
"MIT"
] | null | null | null |
from .aruodas_scraper import AruodasScraper
| 22
| 43
| 0.886364
| 5
| 44
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad903555804ce744773c13a81923e8bf04dbb36f
| 10,039
|
py
|
Python
|
ChineseReverseDictionary/code/result_analysis_Ch.py
|
thunlp/MultiRD
|
fe72148c00a72eaebcd22e58104e9588dfb72fa4
|
[
"MIT"
] | 82
|
2019-12-08T05:01:45.000Z
|
2022-03-09T06:32:44.000Z
|
ChineseReverseDictionary/code/result_analysis_Ch.py
|
thunlp/MultiRD
|
fe72148c00a72eaebcd22e58104e9588dfb72fa4
|
[
"MIT"
] | 3
|
2021-03-28T15:02:07.000Z
|
2022-03-21T01:29:48.000Z
|
ChineseReverseDictionary/code/result_analysis_Ch.py
|
thunlp/MultiRD
|
fe72148c00a72eaebcd22e58104e9588dfb72fa4
|
[
"MIT"
] | 20
|
2019-12-19T08:18:17.000Z
|
2022-03-21T01:37:15.000Z
|
import argparse
import json, os
import numpy as np
def evaluate_test(ground_truth, prediction):
    """Rank-based evaluation against a single gold word per example.

    Returns (ranks, acc@1, acc@10, acc@100, median rank, rank std).
    A gold word missing from its prediction list gets rank 1000;
    accuracies are percentages.
    """
    n = len(ground_truth)
    hit1 = hit10 = hit100 = 0.
    ranks = []
    for idx in range(n):
        gold = ground_truth[idx]
        cands = prediction[idx]
        try:
            ranks.append(cands.index(gold))
        except:
            ranks.append(1000)
        if gold in cands[:100]:
            hit100 += 1
        if gold in cands[:10]:
            hit10 += 1
        if gold == cands[0]:
            hit1 += 1
    return ranks, hit1/n*100, hit10/n*100, hit100/n*100, np.median(ranks), np.sqrt(np.var(ranks))
def evaluate_synset(ground_truth, prediction):  # one batch
    """Accuracy@{1,10,100} when each gold entry is a synonym set.

    A prediction counts as a hit at cutoff k when any word in its top-k
    appears in the example's synonym set. Returns percentages.
    """
    n = len(ground_truth)  # batch size
    hit1 = hit10 = hit100 = 0.
    for idx, gold in enumerate(ground_truth):
        ranked = prediction[idx]
        if ranked[0] in gold:
            hit1 += 1
            hit10 += 1
            hit100 += 1
        elif set(ranked[:10]) & set(gold):
            hit10 += 1
            hit100 += 1
        elif set(ranked[:100]) & set(gold):
            hit100 += 1
    return hit1/n*100, hit10/n*100, hit100/n*100
def evaluate_1stChar(ground_truth, prediction):
    """Score predictions after filtering candidates by the gold word's
    first character.

    Returns (acc@1, acc@10, acc@100, median rank, rank std); accuracies
    are percentages and absent gold words get rank 1000.
    """
    accu_1 = 0.
    accu_10 = 0.
    accu_100 = 0.
    length = len(ground_truth)
    prediction_char = [[]]*length  # every slot is reassigned below, so the shared [] is harmless
    i = 0
    for gt in ground_truth:
        # For Chinese, single-character words must skip the filter: when the
        # word has only one character, fall back to the unfiltered predictions
        # instead of filtering by the known character.
        if len(gt)==1:
            prediction_char[i] = prediction[i]
            i += 1
            continue
        char1st = gt[0]
        prediction_char[i] = []
        for wd in prediction[i]:
            if wd[0] == char1st:
                prediction_char[i].append(wd)
        i += 1
    pred_rank = []
    for i in range(length):
        try:
            pred_rank.append(prediction_char[i][:].index(ground_truth[i]))
        except:
            pred_rank.append(1000)
        if ground_truth[i] in prediction_char[i][:100]:
            accu_100 += 1
        if ground_truth[i] in prediction_char[i][:10]:
            accu_10 += 1
        # NOTE(review): raises IndexError when the filtered list is empty.
        if ground_truth[i] == prediction_char[i][0]:
            accu_1 += 1
    return accu_1/length*100, accu_10/length*100, accu_100/length*100, np.median(pred_rank), np.sqrt(np.var(pred_rank))
def evaluate_len(ground_truth, prediction):
    """Accuracy@1/10/100 after keeping only candidates whose length matches
    the gold word's length.

    Args:
        ground_truth: list of gold words, one per sample.
        prediction: list of ranked candidate lists (best first), one per sample.

    Returns:
        (acc@1, acc@10, acc@100, median rank, std of ranks); accuracies are
        percentages, missing gold words get the sentinel rank 1000.
    """
    accu_1 = 0.
    accu_10 = 0.
    accu_100 = 0.
    length = len(ground_truth)
    prediction_len = []
    for i, gt in enumerate(ground_truth):
        leng = len(gt)
        prediction_len.append([wd for wd in prediction[i] if len(wd) == leng])
    pred_rank = []
    for i in range(length):
        try:
            pred_rank.append(prediction_len[i].index(ground_truth[i]))
        except ValueError:  # gold word not among same-length candidates
            pred_rank.append(1000)
        if ground_truth[i] in prediction_len[i][:100]:
            accu_100 += 1
        if ground_truth[i] in prediction_len[i][:10]:
            accu_10 += 1
        if ground_truth[i] in prediction_len[i][:1]:  # [:1]: filtered list may be empty
            accu_1 += 1
    return accu_1/length*100, accu_10/length*100, accu_100/length*100, np.median(pred_rank), np.sqrt(np.var(pred_rank))
def evaluate_POS(ground_truth, prediction, word_pos):
    """Accuracy@1/10/100 after filtering candidates by part-of-speech overlap
    with the gold word.

    A candidate is kept when its POS tags intersect the gold word's tags, or
    when it has no entry in `word_pos` at all (original behavior: unknown
    words cannot be ruled out, so they pass the filter).

    Args:
        ground_truth: list of gold words; each must have an entry in word_pos.
        prediction: list of ranked candidate lists (best first), one per sample.
        word_pos: dict mapping word -> list of POS tags.

    Returns:
        (acc@1, acc@10, acc@100, median rank, std of ranks); accuracies are
        percentages, missing gold words get the sentinel rank 1000.
    """
    accu_1 = 0.
    accu_10 = 0.
    accu_100 = 0.
    length = len(ground_truth)
    prediction_pos = []
    for i, gt in enumerate(ground_truth):
        pos = set(word_pos[gt])
        kept = []
        for wd in prediction[i]:
            tags = word_pos.get(wd)
            # None => no POS entry: keep (was a bare except around word_pos[wd]).
            if tags is None or set(tags) & pos:
                kept.append(wd)
        prediction_pos.append(kept)
    pred_rank = []
    for i in range(length):
        try:
            pred_rank.append(prediction_pos[i].index(ground_truth[i]))
        except ValueError:  # gold word not among POS-compatible candidates
            pred_rank.append(1000)
        if ground_truth[i] in prediction_pos[i][:100]:
            accu_100 += 1
        if ground_truth[i] in prediction_pos[i][:10]:
            accu_10 += 1
        if ground_truth[i] in prediction_pos[i][:1]:  # [:1]: filtered list may be empty
            accu_1 += 1
    return accu_1/length*100, accu_10/length*100, accu_100/length*100, np.median(pred_rank), np.sqrt(np.var(pred_rank))
def _report_slice(name, labels, preds, synsets, word_pos):
    """Run all five evaluations on one data slice and print the scores."""
    fmt5 = 'test_accu(1/10/100): %.2f %.2f %.2f %.1f %.2f'  # was '%.2F' in places
    print('Test on ' + name + ': ')
    pred_rank_list, a1, a10, a100, med, std = evaluate_test(labels, preds)
    print(fmt5 % (a1, a10, a100, med, std))
    print('Test on ' + name + ' synset: ')
    a1, a10, a100 = evaluate_synset(synsets, preds)
    print('test_accu(1/10/100): %.2f %.2f %.2f' % (a1, a10, a100))
    print('Test on ' + name + ' char1st: ')
    print(fmt5 % evaluate_1stChar(labels, preds))
    print('Test on ' + name + ' wordLen: ')
    print(fmt5 % evaluate_len(labels, preds))
    print('Test on ' + name + ' POS: ')
    print(fmt5 % evaluate_POS(labels, preds, word_pos))


def main(mode):
    """Evaluate saved predictions against labels for the given *mode* prefix.

    Reads <mode>_label_list.json / <mode>_pred_list.json plus the synset and
    sense-dictionary resources, then reports accuracy@1/10/100 (and rank
    median/std) on the 2000 / 200 / 272 test splits.
    """
    with open(mode + '_label_list.json') as f:
        label_list_wd = json.load(f)
    print('load file : '+mode+'_label_list.json'+' [OK]')
    with open(mode + '_pred_list.json') as f:
        pred_list_wd = json.load(f)
    print('load file : '+mode+'_pred_list.json'+' [OK]')
    synset_all = dict()
    with open('../data/word2synset_synset.txt') as f:
        for line in f:
            wd_l = line.split()
            synset_all[wd_l[0]] = wd_l  # the synset line starts with the word itself
    # Bug fix: words without a synset entry used to be appended as a bare
    # string, which made evaluate_synset's `prediction[i][0] in ground_truth[i]`
    # a *substring* test.  Wrap them in a singleton list instead.
    synset = [synset_all.get(wd, [wd]) for wd in label_list_wd]
    with open('../data/dictionary_sense.json') as f:
        diction = json.load(f)
    # Words with an empty POS list are treated as compatible with every tag.
    all_pos = ['介', '副', '数', '连', '助', '动', '形', '代', '拟声', '量', '名', '叹']
    word_pos = {'<OOV>': []}
    for wd in diction:
        if diction[wd]['POS'] == []:
            word_pos[wd] = all_pos
        else:
            word_pos[wd] = diction[wd]['POS']
    _report_slice('2000', label_list_wd[:2000], pred_list_wd[:2000], synset[:2000], word_pos)
    _report_slice('200', label_list_wd[2000:2200], pred_list_wd[2000:2200], synset[2000:2200], word_pos)
    _report_slice('272', label_list_wd[2200:], pred_list_wd[2200:], synset[2200:], word_pos)
# Command-line entry point.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # -m/--mode: filename prefix of the <mode>_label_list.json and
    # <mode>_pred_list.json files to evaluate ('[mode]' is only a placeholder
    # default — a real prefix must normally be supplied).
    parser.add_argument('-m', '--mode', type=str, default='[mode]')
    args = parser.parse_args()
    main(args.mode)
| 43.647826
| 146
| 0.602949
| 1,496
| 10,039
| 3.750668
| 0.078877
| 0.149706
| 0.07218
| 0.069506
| 0.823026
| 0.812155
| 0.779184
| 0.771698
| 0.749243
| 0.749243
| 0
| 0.105897
| 0.255005
| 10,039
| 230
| 147
| 43.647826
| 0.644337
| 0.011156
| 0
| 0.482412
| 0
| 0
| 0.116281
| 0.006087
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030151
| false
| 0
| 0.015075
| 0
| 0.070352
| 0.160804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d10957b0da9d2b12cbe5cddb20608a916b170a7e
| 33
|
py
|
Python
|
app/Watch/__init__.py
|
LonglyCode/flask-blog
|
b7f36e8798c61aa1669ede59452f3ca446f5b9ce
|
[
"MIT"
] | 2
|
2016-10-04T14:53:27.000Z
|
2019-01-11T02:08:47.000Z
|
app/Watch/__init__.py
|
LonglyCode/flask-blog
|
b7f36e8798c61aa1669ede59452f3ca446f5b9ce
|
[
"MIT"
] | null | null | null |
app/Watch/__init__.py
|
LonglyCode/flask-blog
|
b7f36e8798c61aa1669ede59452f3ca446f5b9ce
|
[
"MIT"
] | null | null | null |
from .file_watch import init_app
| 16.5
| 32
| 0.848485
| 6
| 33
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d10b3ece23ed9f0d45baa9b433a739d7832b9008
| 74
|
py
|
Python
|
algo/__init__.py
|
rwbfd/rl_lab
|
a402b4c595c8abf5659e2493614d9890e62ff7b6
|
[
"MIT"
] | 43
|
2018-09-18T02:36:30.000Z
|
2022-03-09T09:41:11.000Z
|
algo/__init__.py
|
rwbfd/rl_lab
|
a402b4c595c8abf5659e2493614d9890e62ff7b6
|
[
"MIT"
] | 1
|
2019-05-30T06:46:22.000Z
|
2019-05-30T06:46:22.000Z
|
algo/__init__.py
|
rwbfd/rl_lab
|
a402b4c595c8abf5659e2493614d9890e62ff7b6
|
[
"MIT"
] | 8
|
2018-09-21T16:01:50.000Z
|
2020-11-30T11:42:09.000Z
|
from .a2c_acktr import A2C_ACKTR
from .ppo import PPO
from .sil import SIL
| 24.666667
| 32
| 0.810811
| 14
| 74
| 4.142857
| 0.428571
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031746
| 0.148649
| 74
| 3
| 33
| 24.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d110a58ff31c9ea059663346de63755ad5a3a3dd
| 25,907
|
py
|
Python
|
models/model_builder.py
|
norton-chris/MARS-Net
|
6f671837d0629422680c78adf9b643894debae70
|
[
"MIT"
] | null | null | null |
models/model_builder.py
|
norton-chris/MARS-Net
|
6f671837d0629422680c78adf9b643894debae70
|
[
"MIT"
] | null | null | null |
models/model_builder.py
|
norton-chris/MARS-Net
|
6f671837d0629422680c78adf9b643894debae70
|
[
"MIT"
] | null | null | null |
'''
Author Junbong Jang
Date 6/2/2021
To build model for train.py and predict.py
'''
from deeplabv3 import Deeplabv3
from deep_neural_net_classifier import *
from deep_neural_net_MTL import *
from deep_neural_net import *
from deep_neural_net_3D import *
from deep_neural_net_attn import *
from deep_neural_net_layer import *
from model_utils import get_MTL_weights, get_MTL_auto_remove_task
import loss
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam, SGD
if tf.__version__.split('.')[0] == '2':
import tensorflow_addons as tfa
from sam import SAMModel
def build_model_predict(constants, frame, repeat_index, model_name, image_rows, image_cols, orig_rows, orig_cols):
    """Instantiate the network selected by constants.strategy_type and load
    its trained weights for prediction.

    The elif chain does *substring* matching on strategy_type, so branch
    order matters: specific names (e.g. "VGG19_dropout_gelu") must be tested
    before their prefixes (e.g. "VGG19_dropout", "VGG19").

    Args:
        constants: project config object; supplies strategy_type and
            get_trained_weights_path() (declared elsewhere in the project).
        frame: frame identifier used to locate the weights checkpoint.
        repeat_index: repetition index of the training run.
        model_name: checkpoint name passed to get_trained_weights_path().
        image_rows, image_cols: (padded) network input size.
        orig_rows, orig_cols: original image size; the differences are passed
            to the constructors as bottom/right crop amounts.

    Returns:
        A model instance with trained weights loaded.
        NOTE(review): if no branch matches, `model` is unbound and the final
        `return model` raises NameError — confirm strategy_type is always
        one of the covered names.
    """
    weights_path = constants.get_trained_weights_path(str(frame), model_name, str(repeat_index))
    # --- classification / multi-task heads ---
    if "VGG19_MTL" in str(constants.strategy_type):
        model = VGG19_MTL(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19_classifier_regressor" in str(constants.strategy_type):
        model = VGG19_classifier_regressor(image_rows, image_cols, weights_path=weights_path)
    elif "VGG19_classifier" in str(constants.strategy_type):
        model = VGG19_classifier(image_rows, image_cols, weights_path=weights_path)
    elif "VGG19D_classifier" in str(constants.strategy_type):
        model = VGG19D_classifier(image_rows, image_cols, weights_path=weights_path)
    elif "EFF_B7_classifier" in str(constants.strategy_type):
        model = EFF_B7_classifier(image_rows, image_cols, weights_path=weights_path)
    elif "vit_classifier" in str(constants.strategy_type):
        model = vit_classifier(image_rows, image_cols, 1, weights_path=weights_path)
    # --------------------------------------------------------------------------------
    # --- segmentation backbones ---
    elif "Res50V2" in str(constants.strategy_type):
        model = ResNet50V2Keras(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "Dense201" in str(constants.strategy_type):
        model = DenseNet201Keras(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "InceptionResV2" in str(constants.strategy_type):
        model = InceptionResV2(image_rows, image_cols, 0, image_cols - orig_cols, image_rows - orig_rows, weights_path=weights_path)
    elif "deeplabv3" in str(constants.strategy_type):
        # Deeplabv3 takes shapes, not crop amounts; weights loaded separately.
        model = Deeplabv3(input_shape=(image_rows, image_cols, 3), output_shape=(orig_rows, orig_cols))
        model.load_weights(weights_path, by_name=True)
    elif "VGG16_dropout" in str(constants.strategy_type):
        model = VGG16_dropout(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG16_batchnorm" in str(constants.strategy_type):
        model = VGG16_batchnorm(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG16_instancenorm" in str(constants.strategy_type):
        model = VGG16_instancenorm(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "movie3" in str(constants.strategy_type):
        # NOTE(review): build_model_train matches "VGG16_movie3" here — this
        # broader "movie3" match is presumably intentional; confirm.
        model = VGG16_movie(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG16_dac_input256" in constants.strategy_type:
        model = VGG16_dac(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG16_spp_input256" in constants.strategy_type:
        model = VGG16_spp(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG16" in str(constants.strategy_type):
        model = VGG16(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "spheroid_test_VGG19" in str(constants.strategy_type):
        # model = VGG19(image_rows, image_cols, int((image_cols-orig_cols)/2), 0, 0, weights_path=weights_path, encoder_weights=None)
        # Spheroid variant uses a fixed 64-pixel crop offset on each side.
        model = VGG19(image_rows, image_cols, 64, image_cols-orig_cols-64, image_rows-orig_rows-64, weights_path=weights_path, encoder_weights=None)
    elif "VGG19D_temporal_context_residual" in str(constants.strategy_type):
        model = VGG19D_temporal_context_residual(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19D_temporal_distributed_v2" in str(constants.strategy_type):
        model = VGG19D_temporal_distributed_v2(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19D_temporal_distributed" in str(constants.strategy_type):
        model = VGG19D_temporal_distributed(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19D_temporal_attn_v3" in str(constants.strategy_type):
        model = VGG19D_temporal_attn_v3(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19D_temporal_attn_v2" in str(constants.strategy_type):
        model = VGG19D_temporal_attn_v2(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19D_temporal_attn" in str(constants.strategy_type):
        model = VGG19D_temporal_attn(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19D_crop_first" in str(constants.strategy_type):
        model = VGG19D_crop_first(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19D_se" in str(constants.strategy_type):
        model = VGG19D_se(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19_dropout_gelu" in str(constants.strategy_type):
        model = VGG19_dropout_gelu(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19_dropout_swish" in str(constants.strategy_type):
        model = VGG19_dropout_swish(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19_dropout_dac" in str(constants.strategy_type):
        model = VGG19_dropout_dac(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19_dropout_feature_extractor" in str(constants.strategy_type):
        model = VGG19_dropout_feature_extractor(image_rows, image_cols, 0, image_cols - orig_cols, image_rows - orig_rows, weights_path=weights_path)
    elif "VGG19_batchnorm_dropout" in str(constants.strategy_type):
        model = VGG19_batchnorm_dropout(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19_batchnorm" in str(constants.strategy_type):
        model = VGG19_batchnorm(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19_dropout" in str(constants.strategy_type) or "VGG19D" in str(constants.strategy_type):
        model = VGG19_dropout(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19_freeze" in str(constants.strategy_type):
        model = VGG19_freeze(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path, encoder_weights=None)
    elif "VGG19_imagenet_pretrained" in str(constants.strategy_type):
        model = VGG19_imagenet_pretrained(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "VGG19" in str(constants.strategy_type):
        model = VGG19(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path, encoder_weights=None)
    elif "EFF_B7" in str(constants.strategy_type):
        model = EFF_B7(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "unet_3D" in str(constants.strategy_type):
        # First positional arg 32 presumably is the input depth — confirm
        # against UNet_3D's signature (train uses args.input_depth here).
        model = UNet_3D(32, image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "unet_feature_extractor" in str(constants.strategy_type):
        model = UNet_feature_extractor(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "unet_imagenet_pretrained" in str(constants.strategy_type):
        model = UNet_imagenet_pretrained(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    elif "unet" in str(constants.strategy_type) or "Unet" in str(constants.strategy_type):
        model = UNet(image_rows, image_cols, 0, image_cols-orig_cols, image_rows-orig_rows, weights_path=weights_path)
    return model
def build_model_train(constants, args, frame, model_name):
    """Instantiate and compile the network selected by constants.strategy_type
    for training, initialized from pretrained weights.

    Branch order matters: the chain does *substring* matching on
    strategy_type, so specific names must precede their prefixes.
    Most segmentation branches compile with binary cross-entropy loss and a
    Dice-coefficient metric; classifier branches use focal/BCE losses.

    NOTE(review): `lr=` is the legacy Keras optimizer argument (newer TF
    expects `learning_rate=`) — confirm against the pinned TF version.

    Args:
        constants: project config; supplies strategy_type and
            get_pretrained_weights_path() (declared elsewhere in the project).
        args: parsed CLI args; this function reads args.input_size,
            args.cropped_boundary and (for unet_3D) args.input_depth.
        frame, model_name: identify the pretrained-weights checkpoint.

    Returns:
        A compiled model.  NOTE(review): if no branch matches, `model` is
        unbound and `return model` raises NameError.
    """
    pretrained_weights_path = constants.get_pretrained_weights_path(frame, model_name)
    # --- multi-task / classifier heads ---
    if "VGG19_MTL_auto" in str(constants.strategy_type):
        removed_tasks = get_MTL_auto_remove_task(constants.strategy_type)
        model = VGG19_MTL_auto(args.input_size, args.input_size, args.cropped_boundary, 0, 0, removed_tasks, weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=None)  # model defines its own loss internally
    elif "VGG19_MTL" in str(constants.strategy_type):
        model = VGG19_MTL(args.input_size, args.input_size, args.cropped_boundary, 0, 0, weights_path=pretrained_weights_path)
        cls, reg, aut, seg = get_MTL_weights(constants.strategy_type)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy', tf.keras.losses.MeanSquaredError(), tf.keras.losses.MeanAbsoluteError(), tfa.losses.sigmoid_focal_crossentropy],
                      loss_weights={"segmentation": seg, "autoencoder": aut, "regressor": reg, "classifier": cls})
    elif "VGG19_classifier_regressor" in str(constants.strategy_type):
        model = VGG19_classifier_regressor(args.input_size, args.input_size, weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=[tf.keras.losses.MeanAbsoluteError(), tfa.losses.sigmoid_focal_crossentropy],
                      loss_weights={"regressor":0.01,"classifier":1})
    elif "VGG19_classifier_custom_loss" in str(constants.strategy_type):
        model = VGG19_classifier_custom_loss(args.input_size, args.input_size, weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=None)  # loss added inside the model
    elif "VGG19_classifier_binary" in str(constants.strategy_type):
        model = VGG19_classifier(args.input_size, args.input_size, weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=[tf.keras.losses.BinaryCrossentropy()], metrics=['accuracy'])
    elif "VGG19_classifier" in str(constants.strategy_type):
        model = VGG19_classifier(args.input_size, args.input_size, weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=[tfa.losses.SigmoidFocalCrossEntropy(alpha=0.5)], metrics=['accuracy'])
    elif "VGG19D_classifier" in str(constants.strategy_type):
        model = VGG19D_classifier(args.input_size, args.input_size, weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=[tfa.losses.SigmoidFocalCrossEntropy(alpha=0.5)], metrics=['accuracy'])
    elif "EFF_B7_classifier" in str(constants.strategy_type):
        model = EFF_B7_classifier(args.input_size, args.input_size, weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=[tfa.losses.SigmoidFocalCrossEntropy(alpha=0.5)], metrics=['accuracy'])
    elif "vit_classifier" in str(constants.strategy_type):
        model = vit_classifier(args.input_size, args.input_size, 1, weights_path=pretrained_weights_path)
        # model = SAMModel(model)
        model.compile(optimizer=Adam(lr=1e-5), loss=[tfa.losses.SigmoidFocalCrossEntropy(alpha=0.5)], metrics=['accuracy'])
    # --------------------------------------------------------------------------------
    # --- segmentation backbones (binary cross-entropy + Dice metric) ---
    elif "Res50V2" in str(constants.strategy_type):
        model = ResNet50V2Keras(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "InceptionResV2" in str(constants.strategy_type):
        model = InceptionResV2(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                               weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "Dense201" in str(constants.strategy_type):
        model = DenseNet201Keras(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                 weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "deeplabv3" in str(constants.strategy_type):
        # Fixed 68x68 output shape here — presumably matched to the training
        # patch size; confirm against the data pipeline.
        model = Deeplabv3(input_shape=(args.input_size, args.input_size, 3), output_shape=(68, 68), right_crop=0,
                          bottom_crop=0)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG16_dropout" in str(constants.strategy_type):
        model = VGG16_dropout(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                              weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG16_batchnorm" in str(constants.strategy_type):
        model = VGG16_batchnorm(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG16_instancenorm" in str(constants.strategy_type):
        model = VGG16_instancenorm(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                   weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG16_movie3" in str(constants.strategy_type):
        model = VGG16_movie(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                            weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=loss.temporal_cross_entropy, metrics=[loss.dice_coef])
    elif "VGG16_dice" in str(constants.strategy_type):
        # Dice used as the loss here, BCE only as a metric (roles swapped).
        model = VGG16(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                      weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=[loss.dice_coef], metrics=['binary_crossentropy'])
    elif "VGG16_l2" in str(constants.strategy_type):
        model = VGG16_l2(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                         weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG16_dac_input256" in constants.strategy_type:
        model = VGG16_dac(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                          weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG16_spp_input256" in constants.strategy_type:
        model = VGG16_spp(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                          weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG16_no_pretrain" in str(constants.strategy_type):
        model = VGG16(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                      weights_path=pretrained_weights_path, encoder_weights=None)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG16" in str(constants.strategy_type):
        model = VGG16(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                      weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_crop_first" in str(constants.strategy_type):
        model = VGG19D_crop_first(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                  weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_se" in str(constants.strategy_type):
        model = VGG19D_se(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                          weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_temporal_se" in str(constants.strategy_type):
        # NOTE(review): no matching VGG19D_temporal_se branch exists in
        # build_model_predict — confirm prediction support for this strategy.
        model = VGG19D_temporal_se(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                   weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_temporal_distributed_v2" in str(constants.strategy_type):
        model = VGG19D_temporal_distributed_v2(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                               weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_temporal_distributed" in str(constants.strategy_type):
        model = VGG19D_temporal_distributed(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                            weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_temporal_context_residual" in str(constants.strategy_type):
        model = VGG19D_temporal_context_residual(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                                 weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_temporal_attn_v3" in str(constants.strategy_type):
        model = VGG19D_temporal_attn_v3(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                        weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_temporal_attn_v2" in str(constants.strategy_type):
        model = VGG19D_temporal_attn_v2(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                        weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19D_temporal_attn" in str(constants.strategy_type):
        model = VGG19D_temporal_attn(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                     weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_dropout_dac_input256" in str(constants.strategy_type):
        model = VGG19_dropout_dac(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                  weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_dropout_feature_extractor" in str(constants.strategy_type):
        # Two outputs: segmentation (BCE) + feature output trained with a
        # zero loss so it is emitted but not optimized.
        model = VGG19_dropout_feature_extractor(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                                weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy', loss.zero_loss],
                      metrics=[loss.dice_coef, loss.zero_loss])
    elif "VGG19_batchnorm_dropout" in str(constants.strategy_type):
        model = VGG19_batchnorm_dropout(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                        weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_dropout_gelu" in str(constants.strategy_type):
        model = VGG19_dropout_gelu(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                   weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_dropout_swish" in str(constants.strategy_type):
        model = VGG19_dropout_swish(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                    weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_dropout" in str(constants.strategy_type) or "VGG19D" in str(constants.strategy_type):
        model = VGG19_dropout(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                              weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_batchnorm" in str(constants.strategy_type):
        model = VGG19_batchnorm(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_no_pretrain_freeze" in str(constants.strategy_type):
        model = VGG19_freeze(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                             weights_path=pretrained_weights_path, encoder_weights=None)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_no_pretrain" in str(constants.strategy_type):
        model = VGG19(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                      weights_path=pretrained_weights_path, encoder_weights=None)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_imagenet_pretrained" in str(constants.strategy_type):
        model = VGG19_imagenet_pretrained(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                          weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19_freeze" in str(constants.strategy_type):
        model = VGG19_freeze(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                             weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "VGG19" in str(constants.strategy_type):
        model = VGG19(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                      weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "EFF_B7" in str(constants.strategy_type):
        model = EFF_B7(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                       weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "unet_3D" in str(constants.strategy_type):
        model = UNet_3D(args.input_depth, args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                        weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "unet_feature_extractor" in str(constants.strategy_type):
        model = UNet_feature_extractor(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                       weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy', loss.zero_loss],
                      metrics=[loss.dice_coef, loss.zero_loss])
    elif "unet_imagenet_pretrained" in str(constants.strategy_type):
        model = UNet_imagenet_pretrained(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                                         weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    elif "unet" in str(constants.strategy_type):
        model = UNet(args.input_size, args.input_size, args.cropped_boundary, 0, 0,
                     weights_path=pretrained_weights_path)
        model.compile(optimizer=Adam(lr=1e-5), loss=['binary_crossentropy'], metrics=[loss.dice_coef])
    return model
| 69.642473
| 189
| 0.714633
| 3,446
| 25,907
| 5.050203
| 0.048172
| 0.11567
| 0.073206
| 0.135954
| 0.929552
| 0.916853
| 0.907947
| 0.900592
| 0.888123
| 0.853991
| 0
| 0.028557
| 0.176825
| 25,907
| 372
| 190
| 69.642473
| 0.787489
| 0.014977
| 0
| 0.565657
| 0
| 0
| 0.094518
| 0.026772
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006734
| false
| 0
| 0.047138
| 0
| 0.060606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d13905354b9f40e14fa9941c71b7a41e4774328a
| 5,021
|
py
|
Python
|
multiflap/ms_package/integrator_2.py
|
vortexlab-uclouvain/multiflap
|
6de0a9ceabf8c42b72b2a82943fb78e105480636
|
[
"Apache-2.0"
] | 13
|
2020-12-05T15:35:57.000Z
|
2022-03-14T09:09:03.000Z
|
multiflap/ms_package/integrator_2.py
|
vortexlab-uclouvain/multiflap
|
6de0a9ceabf8c42b72b2a82943fb78e105480636
|
[
"Apache-2.0"
] | 1
|
2021-04-26T18:36:12.000Z
|
2021-04-27T14:20:43.000Z
|
multiflap/ms_package/integrator_2.py
|
vortexlab-uclouvain/multiflap
|
6de0a9ceabf8c42b72b2a82943fb78e105480636
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import collections
class Integrator:
    """Fixed-step explicit Runge-Kutta integrators of order 2, 3 and 4.

    Parameters
    ----------
    ode_system : callable
        Right-hand side f(ssp, t) of the ODE d(ssp)/dt = f(ssp, t).  Must
        accept a 1xd state-space vector and a scalar time, and return a 1xd
        NumPy array.
    x0 : numpy.ndarray
        Initial condition, 1xd array where d is the state-space dimension.
    time_array : numpy.ndarray
        1xNt array of time instants at which the solution is returned.
    """

    # Result type returned by every rk* method: .x is the Nt x d solution
    # array, .t the time grid it was computed on.  Defined once at class level
    # instead of being re-created on every call (the original rebuilt it in
    # each method).
    _RkSol = collections.namedtuple('rk_sol', ['x', 't'])

    def __init__(self, ode_system, x0, time_array):
        self.ode_system = ode_system
        self.x0 = x0
        self.time_array = time_array

    def _integrate(self, increment):
        """Shared fixed-step time loop used by rk2/rk3/rk4.

        `increment(x, t, deltat)` must return the full one-step update
        x(t + deltat) - x(t).  Returns _RkSol(solution, time_array) where
        solution is the Nt x d array with solution[0] == x0.
        """
        # Pre-allocate the Nt x d solution array and seed the first row.
        solution = np.zeros((np.size(self.time_array, 0),
                             np.size(self.x0, 0)))
        solution[0, :] = self.x0
        for i in range(np.size(self.time_array) - 1):
            # Step size is taken from the supplied grid, so it may vary.
            deltat = self.time_array[i + 1] - self.time_array[i]
            solution[i + 1] = solution[i] + increment(solution[i],
                                                      self.time_array[i],
                                                      deltat)
        return self._RkSol(solution, self.time_array)

    def rk4(self):
        """Classic 4th-order Runge-Kutta integration over `time_array`.

        Returns:
            rk_sol namedtuple:
                x: Nt x d NumPy array, numerical solution of the ODE.
                t: the time array the solution was computed on.
        """
        def increment(x, t, deltat):
            k1 = deltat * self.ode_system(x, t)
            k2 = deltat * self.ode_system(x + k1 / 2.0, t + deltat / 2.0)
            k3 = deltat * self.ode_system(x + k2 / 2.0, t + deltat / 2.0)
            k4 = deltat * self.ode_system(x + k3, t + deltat)
            return (k1 + 2 * k2 + 2 * k3 + k4) / 6.0
        return self._integrate(increment)

    def rk3(self):
        """3rd-order (Kutta) Runge-Kutta integration over `time_array`.

        Returns:
            rk_sol namedtuple:
                x: Nt x d NumPy array, numerical solution of the ODE.
                t: the time array the solution was computed on.
        """
        def increment(x, t, deltat):
            k1 = deltat * self.ode_system(x, t)
            k2 = deltat * self.ode_system(x + k1 / 2.0, t + deltat / 2.0)
            k3 = deltat * self.ode_system(x - k1 + 2 * k2, t + deltat)
            return (k1 + 4 * k2 + k3) / 6.0
        return self._integrate(increment)

    def rk2(self):
        """2nd-order Runge-Kutta (2/3-point, weights 1/4 and 3/4) over `time_array`.

        Returns:
            rk_sol namedtuple:
                x: Nt x d NumPy array, numerical solution of the ODE.
                t: the time array the solution was computed on.
        """
        def increment(x, t, deltat):
            k1 = deltat * self.ode_system(x, t)
            k2 = deltat * self.ode_system(x + k1 * (2. / 3.),
                                          t + deltat * (2. / 3.))
            return k1 / 4. + (3. / 4.) * k2
        return self._integrate(increment)
| 42.550847
| 100
| 0.560645
| 657
| 5,021
| 4.205479
| 0.14003
| 0.087948
| 0.117626
| 0.076004
| 0.92472
| 0.92291
| 0.905176
| 0.905176
| 0.90409
| 0.88165
| 0
| 0.028416
| 0.341167
| 5,021
| 117
| 101
| 42.91453
| 0.806832
| 0.403306
| 0
| 0.630435
| 0
| 0
| 0.009063
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.043478
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d143b8eb90116bd3627e8f5565cdd2a5b0f44511
| 19,466
|
py
|
Python
|
experiments/0_setup.py
|
helenacuesta/multif0-estimation-polyvocals
|
4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba
|
[
"MIT"
] | 36
|
2020-09-13T12:30:41.000Z
|
2022-02-15T08:52:58.000Z
|
experiments/0_setup.py
|
helenacuesta/multif0-estimation-polyvocals
|
4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba
|
[
"MIT"
] | 6
|
2020-09-04T11:14:14.000Z
|
2022-02-09T23:49:59.000Z
|
experiments/0_setup.py
|
helenacuesta/multif0-estimation-polyvocals
|
4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba
|
[
"MIT"
] | null | null | null |
'''
This script creates the audio mixtures for all the working datasets.
'''
import sox
import os
import pandas as pd
import librosa
import soundfile
import scipy
from experiments import config
import utils
def combine_audio_files(params):
    """Mix the four part stems listed in params into one SATB wav file.

    Builds a 22050 Hz sox mix of params['filenames'][0..3] (read from
    params['audio_folder']) into config.audio_save_folder under
    params['output_fname'].  When params['reverb'] is true, additionally
    convolves the mixture with the Isophonics great-hall impulse response and
    writes the wet version into the 'reverb' subfolder.
    """
    mixer = sox.Combiner()
    mixer.convert(samplerate=22050)
    stems = [os.path.join(params['audio_folder'], fname)
             for fname in params['filenames'][:4]]
    mix_path = os.path.join(config.audio_save_folder, params['output_fname'])
    mixer.build(stems, mix_path, 'mix')
    if params['reverb']:
        ir, _ = librosa.load('./ir/IR_greathall.wav', sr=params['sr'])
        dry, _ = librosa.load(mix_path, sr=params['sr'])
        wet = scipy.signal.convolve(dry, ir, mode="full")
        soundfile.write(
            os.path.join(config.audio_save_folder, 'reverb', params['output_fname']),
            wet, samplerate=params['sr'])
def create_dict_entry(diction, audiopath, audiofname, annot_files, annot_folder):
    """Register one mixture in the metadata dict, keyed by its audio filename.

    Mutates `diction` in place (and also returns it) so callers can either
    chain the result or rely on the side effect.
    """
    diction[audiofname] = {
        'audiopath': audiopath,
        'annot_files': annot_files,
        'annot_folder': annot_folder,
    }
    return diction
def create_full_dataset_mixes(dataset, mixes_wavpath, reverb=True, compute_audio_mix=True, compute_metadata=True):
    """Create SATB audio mixtures and annotation metadata for all datasets.

    Iterates over the CSD, ESMUC (ECS), Dagstuhl (DCS), Barbershop (BSQ) and
    Bach Chorales (BC) descriptions in `dataset`, mixes the four part stems
    of each song/combo with `combine_audio_files`, and collects one metadata
    entry per mixture into `mtracks`.

    Parameters:
        dataset: nested dict describing songs / singer combos per dataset.
        mixes_wavpath: folder where mixtures and 'mtracks_info.json' live.
        reverb: when True, shift each stem's annotations to match the mixture
            via `utils.shift_annotations`.
        compute_audio_mix: when True, build mixtures that do not exist yet.
        compute_metadata: when True, build and save the metadata dictionary.
    """
    mtracks = dict()
    # ------------ Process Choral Singing Dataset ------------ #
    print("Processing Choral Singing Dataset...")
    for song in dataset['CSD']['songs']:
        for combo in dataset['CSD']['combos']:
            params = {}
            params['audio_folder'] = config.csd_folder
            params['annot_folder'] = config.csd_folder
            params['sr'] = 44100
            # NOTE(review): hard-coded True — the `reverb` argument is ignored
            # here (it only gates annotation shifting below, in every section
            # of this function); confirm this is intended.
            params['reverb'] = True
            params['filenames'] = [
                '{}_soprano_{}.wav'.format(song, combo[0]),
                '{}_alto_{}.wav'.format(song, combo[1]),
                '{}_tenor_{}.wav'.format(song, combo[2]),
                '{}_bass_{}.wav'.format(song, combo[3]),
            ]
            params['output_fname'] = '{}_{}_{}_{}_{}.wav'.format(song, combo[0], combo[1], combo[2], combo[3])
            if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
                # create audio mixture and its reverb version if indicated
                combine_audio_files(params)
            if compute_metadata:
                print("Annotations for {}".format(song))
                # create_dict_entry(diction, audiopath, audiofname, annot_files, annot_folder)
                annotation_files = [
                    '{}_soprano_{}.jams'.format(song, combo[0]), '{}_alto_{}.jams'.format(song, combo[1]),
                    '{}_tenor_{}.jams'.format(song, combo[2]), '{}_bass_{}.jams'.format(song, combo[3])
                ]
                mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files, params['annot_folder'])
                if reverb:
                    # annotation_files[i] corresponds to params['filenames'][i]
                    idx=-1
                    for annot in annotation_files:
                        idx+=1
                        utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'], params['filenames'][idx])
        print("Mixtures for {} have been created.".format(song))
    # ------------ Process ESMUC ChoralSet ------------ #
    print("Processing ESMUC Choral Dataset...")
    # Der Greis
    for song in dataset['ECS']['DG_songs']:
        for combo in dataset['ECS']['DG_combos']:
            params = {}
            params['audio_folder'] = config.ecs_folder
            params['annot_folder'] = config.ecs_folder
            params['sr'] = 22050
            params['reverb'] = True
            params['filenames'] = [
                "{}_S{}.wav".format(song, combo[0]),
                "{}_A{}.wav".format(song, combo[1]),
                "{}_T{}.wav".format(song, combo[2]),
                "{}_B{}.wav".format(song, combo[3])
            ]
            params['output_fname'] = '{}_{}_{}_{}_{}.wav'.format(song, combo[0], combo[1], combo[2], combo[3])
            if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
                # create audio mixture and its reverb version if indicated
                combine_audio_files(params)
            if compute_metadata:
                print("Annotations for {}".format(song))
                # create_dict_entry(diction, audiopath, audiofname, annot_files, annot_folder)
                annotation_files = [
                    '{}_S{}.jams'.format(song, combo[0]), '{}_A{}.jams'.format(song, combo[1]),
                    '{}_T{}.jams'.format(song, combo[2]), '{}_B{}.jams'.format(song, combo[3])
                ]
                mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files, params['annot_folder'])
                if reverb:
                    idx=-1
                    for annot in annotation_files:
                        idx+=1
                        utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'], params['filenames'][idx])
                #print("Reverb annotations not created for the reverb versions. Working on annotation shift.")
        print('{} quartets mixed and exported'.format(song))
    # Die Himmel
    for song in dataset['ECS']['DH_songs']:
        # NOTE(review): iterates DG_combos for the DH songs — looks like a
        # possible copy-paste; confirm there is no 'DH_combos' key intended.
        for combo in dataset['ECS']['DG_combos']:
            params = {}
            params['audio_folder'] = config.ecs_folder
            params['annot_folder'] = config.ecs_folder
            params['sr'] = 22050
            params['reverb'] = True
            # DH stems are named per singer; combo indices are offset into the
            # DH_singers list (S at 0, A at +5, T at +7, B at +10).
            params['filenames'] = [
                "{}_{}.wav".format(song, dataset['ECS']['DH_singers'][combo[0]-1]),
                "{}_{}.wav".format(song, dataset['ECS']['DH_singers'][combo[1]-1+5]),
                "{}_{}.wav".format(song, dataset['ECS']['DH_singers'][combo[2]-1+7]),
                "{}_{}.wav".format(song, dataset['ECS']['DH_singers'][combo[3]-1+10])
            ]
            params['output_fname'] = '{}_{}_{}_{}_{}.wav'.format(song, combo[0], combo[1], combo[2], combo[3])
            if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
                # create audio mixture and its reverb version if indicated
                combine_audio_files(params)
            if compute_metadata:
                print("Annotations for {}".format(song))
                # create_dict_entry(diction, audiopath, audiofname, annot_files, annot_folder)
                annotation_files = [
                    '{}_{}.jams'.format(song, dataset['ECS']['DH_singers'][combo[0]-1]),
                    '{}_{}.jams'.format(song, dataset['ECS']['DH_singers'][combo[1]-1+5]),
                    '{}_{}.jams'.format(song, dataset['ECS']['DH_singers'][combo[2]-1+7]),
                    '{}_{}.jams'.format(song, dataset['ECS']['DH_singers'][combo[3]-1+10])
                ]
                mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files, params['annot_folder'])
                if reverb:
                    idx=-1
                    for annot in annotation_files:
                        idx+=1
                        utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'], params['filenames'][idx])
        print('{} quartets mixed and exported'.format(song))
    # Seele Christi
    for song in dataset['ECS']['SC_songs']:
        for combo in dataset['ECS']['SC_combos']:
            params = {}
            params['audio_folder'] = config.ecs_folder
            params['annot_folder'] = config.ecs_folder
            params['sr'] = 22050
            params['reverb'] = True
            params['filenames'] = [
                "{}_S{}.wav".format(song, combo[0]),
                "{}_A{}.wav".format(song, combo[1]),
                "{}_T{}.wav".format(song, combo[2]),
                "{}_B{}.wav".format(song, combo[3])
            ]
            params['output_fname'] = '{}_{}_{}_{}_{}.wav'.format(song, combo[0], combo[1], combo[2], combo[3])
            if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
                # create audio mixture and its reverb version if indicated
                combine_audio_files(params)
            if compute_metadata:
                print("Annotations for {}".format(song))
                # create_dict_entry(diction, audiopath, audiofname, annot_files, annot_folder)
                annotation_files = [
                    "{}_S{}.jams".format(song, combo[0]),
                    "{}_A{}.jams".format(song, combo[1]),
                    "{}_T{}.jams".format(song, combo[2]),
                    "{}_B{}.jams".format(song, combo[3])
                ]
                mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files,
                                            params['annot_folder'])
                if reverb:
                    idx = -1
                    for annot in annotation_files:
                        idx += 1
                        utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'],
                                                params['filenames'][idx])
        print('{} quartets mixed and exported'.format(song))
    # ------------ Process Dagstuhl ChoirSet ------------ #
    print("Processing Dagstuhl ChoirSet...")
    # Full Choir setting
    for song in dataset['DCS']['FC_songs']:
        params = {}
        params['audio_folder'] = config.dcs_folder_audio
        params['annot_folder'] = config.dcs_folder_annot
        params['sr'] = 22050
        params['reverb'] = True
        params['filenames'] = [
            "{}_{}.wav".format(song, dataset['DCS']['FC_singers'][0]),
            "{}_{}.wav".format(song, dataset['DCS']['FC_singers'][1]),
            "{}_{}.wav".format(song, dataset['DCS']['FC_singers'][2]),
            "{}_{}.wav".format(song, dataset['DCS']['FC_singers'][3])
        ]
        # no combos here, there are only four singers per song
        params['output_fname'] = "{}_1_2_2_2.wav".format(song)
        if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
            combine_audio_files(params)
        if compute_metadata:
            print("Annotations for {}".format(song))
            annotation_files = [
                "{}_{}.jams".format(song, dataset['DCS']['FC_singers'][0]),
                "{}_{}.jams".format(song, dataset['DCS']['FC_singers'][1]),
                "{}_{}.jams".format(song, dataset['DCS']['FC_singers'][2]),
                "{}_{}.jams".format(song, dataset['DCS']['FC_singers'][3])
            ]
            mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files, params['annot_folder'])
            if reverb:
                idx = -1
                for annot in annotation_files:
                    idx += 1
                    utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'],
                                            params['filenames'][idx])
        print('{} quartets mixed and exported'.format(song))
    # Quartet A setting
    for song in dataset['DCS']['QA_songs']:
        params = {}
        params['audio_folder'] = config.dcs_folder_audio
        params['annot_folder'] = config.dcs_folder_annot
        params['sr'] = 22050
        params['reverb'] = True
        params['filenames'] = [
            "{}_{}.wav".format(song, dataset['DCS']['QA_singers'][0]),
            "{}_{}.wav".format(song, dataset['DCS']['QA_singers'][1]),
            "{}_{}.wav".format(song, dataset['DCS']['QA_singers'][2]),
            "{}_{}.wav".format(song, dataset['DCS']['QA_singers'][3])
        ]
        # no combos here, there are only four singers per song
        params['output_fname'] = "{}_2_1_1_1.wav".format(song)
        if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
            combine_audio_files(params)
        if compute_metadata:
            print("Annotations for {}".format(song))
            annotation_files = [
                "{}_{}.jams".format(song, dataset['DCS']['QA_singers'][0]),
                "{}_{}.jams".format(song, dataset['DCS']['QA_singers'][1]),
                "{}_{}.jams".format(song, dataset['DCS']['QA_singers'][2]),
                "{}_{}.jams".format(song, dataset['DCS']['QA_singers'][3])
            ]
            mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files, params['annot_folder'])
            if reverb:
                idx = -1
                for annot in annotation_files:
                    idx += 1
                    utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'],
                                            params['filenames'][idx])
        print('{} quartets mixed and exported'.format(song))
    # Quartet B setting
    for song in dataset['DCS']['QB_songs']:
        params = {}
        params['audio_folder'] = config.dcs_folder_audio
        params['annot_folder'] = config.dcs_folder_annot
        params['sr'] = 22050
        params['reverb'] = True
        params['filenames'] = [
            "{}_{}.wav".format(song, dataset['DCS']['QB_singers'][0]),
            "{}_{}.wav".format(song, dataset['DCS']['QB_singers'][1]),
            "{}_{}.wav".format(song, dataset['DCS']['QB_singers'][2]),
            "{}_{}.wav".format(song, dataset['DCS']['QB_singers'][3])
        ]
        # no combos here, there are only four singers per song
        params['output_fname'] = "{}_1_2_2_2.wav".format(song)
        if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
            combine_audio_files(params)
        if compute_metadata:
            print("Annotations for {}".format(song))
            annotation_files = [
                "{}_{}.jams".format(song, dataset['DCS']['QB_singers'][0]),
                "{}_{}.jams".format(song, dataset['DCS']['QB_singers'][1]),
                "{}_{}.jams".format(song, dataset['DCS']['QB_singers'][2]),
                "{}_{}.jams".format(song, dataset['DCS']['QB_singers'][3])
            ]
            mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files, params['annot_folder'])
            if reverb:
                idx = -1
                for annot in annotation_files:
                    idx += 1
                    utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'],
                                            params['filenames'][idx])
        print('{} quartets mixed and exported'.format(song))
    # ------------ Process Barbershop Quartets ------------ #
    print("Processing Barbershop Quartets...")
    song_idx = -1
    for song in dataset['BSQ']['songs']:
        song_idx += 1
        # each song is split into a per-song number of parts
        parts = dataset['BSQ']['num_parts'][song_idx]
        params = {}
        params['audio_folder'] = config.bsq_folder_audio
        params['annot_folder'] = config.bsq_folder_annot
        params['sr'] = 44100
        params['reverb'] = True
        params['filenames'] = [
            "{}_part{}_s_1ch.wav".format(song, parts),
            "{}_part{}_a_1ch.wav".format(song, parts),
            "{}_part{}_t_1ch.wav".format(song, parts),
            "{}_part{}_b_1ch.wav".format(song, parts)
        ]
        params['output_fname'] = "{}_{}_satb.wav".format(song, parts)
        if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
            combine_audio_files(params)
        if compute_metadata:
            print("Annotations for {}".format(song))
            annotation_files = [
                "{}_part{}_s_1ch_pyin.jams".format(song, parts),
                "{}_part{}_a_1ch_pyin.jams".format(song, parts),
                "{}_part{}_t_1ch_pyin.jams".format(song, parts),
                "{}_part{}_b_1ch_pyin.jams".format(song, parts)
            ]
            mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files, params['annot_folder'])
            if reverb:
                idx = -1
                for annot in annotation_files:
                    idx += 1
                    utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'],
                                            params['filenames'][idx])
        print('{} quartets mixed and exported'.format(song))
    # ------------ Process Bach Chorales ------------ #
    print("Processing Bach Chorales...")
    song_idx = -1
    for song in dataset['BC']['songs']:
        song_idx += 1
        parts = dataset['BC']['num_parts'][song_idx]
        params = {}
        params['audio_folder'] = config.bc_folder_audio
        params['annot_folder'] = config.bc_folder_annot
        params['sr'] = 44100
        params['reverb'] = True
        params['filenames'] = [
            "{}_part{}_s_1ch.wav".format(song, parts),
            "{}_part{}_a_1ch.wav".format(song, parts),
            "{}_part{}_t_1ch.wav".format(song, parts),
            "{}_part{}_b_1ch.wav".format(song, parts)
        ]
        params['output_fname'] = "{}_{}_satb.wav".format(song, parts)
        if compute_audio_mix and not os.path.exists(os.path.join(mixes_wavpath, params['output_fname'])):
            combine_audio_files(params)
        if compute_metadata:
            print("Annotations for {}".format(song))
            annotation_files = [
                "{}_part{}_s_1ch_pyin.jams".format(song, parts),
                "{}_part{}_a_1ch_pyin.jams".format(song, parts),
                "{}_part{}_t_1ch_pyin.jams".format(song, parts),
                "{}_part{}_b_1ch_pyin.jams".format(song, parts)
            ]
            mtracks = create_dict_entry(mtracks, mixes_wavpath, params['output_fname'], annotation_files, params['annot_folder'])
            if reverb:
                idx = -1
                for annot in annotation_files:
                    idx += 1
                    utils.shift_annotations(params['annot_folder'], annot, params['audio_folder'],
                                            params['filenames'][idx])
        print('{} quartets mixed and exported'.format(song))
    # Store the metadata file
    if compute_metadata:
        utils.save_json_data(mtracks, os.path.join(mixes_wavpath, 'mtracks_info.json'))
def main():
    """Entry point: build every audio mixture and its annotation metadata."""
    # Dataset description (songs, singer combos, folders) lives in config.
    dataset_info = config.dataset
    print("Dataset info loaded.")
    create_full_dataset_mixes(
        dataset_info,
        config.audio_save_folder,
        reverb=True,
        compute_audio_mix=True,
        compute_metadata=True,
    )


if __name__ == '__main__':
    main()
| 40.136082
| 133
| 0.545823
| 2,122
| 19,466
| 4.773798
| 0.084354
| 0.09773
| 0.057749
| 0.047384
| 0.862685
| 0.82616
| 0.800888
| 0.728529
| 0.705133
| 0.68154
| 0
| 0.014578
| 0.295233
| 19,466
| 484
| 134
| 40.219008
| 0.723814
| 0.075465
| 0
| 0.606707
| 0
| 0
| 0.200546
| 0.012308
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012195
| false
| 0
| 0.02439
| 0
| 0.039634
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d16568ace50afca4c1410f303eb90d9ff022f87d
| 6,667
|
py
|
Python
|
ElectroWeakAnalysis/WMuNu/test/WMuNuCandidateHistogrammer.py
|
m-sedghi/cmssw
|
859df8affee372c53be79cdd2d8a5ff001eae841
|
[
"Apache-2.0"
] | 1
|
2019-12-19T13:43:44.000Z
|
2019-12-19T13:43:44.000Z
|
ElectroWeakAnalysis/WMuNu/test/WMuNuCandidateHistogrammer.py
|
m-sedghi/cmssw
|
859df8affee372c53be79cdd2d8a5ff001eae841
|
[
"Apache-2.0"
] | 7
|
2020-02-10T18:55:34.000Z
|
2022-01-16T20:08:44.000Z
|
ElectroWeakAnalysis/WMuNu/test/WMuNuCandidateHistogrammer.py
|
m-sedghi/cmssw
|
859df8affee372c53be79cdd2d8a5ff001eae841
|
[
"Apache-2.0"
] | 1
|
2020-12-17T23:09:17.000Z
|
2020-12-17T23:09:17.000Z
|
import FWCore.ParameterSet.Config as cms

# cmsRun configuration: fills basic W->munu histograms from three W-candidate
# collections (caloMET-, pfMET- and tcMET-based) in three parallel paths.
process = cms.Process("wmunuplots")

# Run over every event in the input.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)

process.source = cms.Source("PoolSource",
    debugVerbosity = cms.untracked.uint32(0),
    debugFlag = cms.untracked.bool(False),
    fileNames = cms.untracked.vstring(
        "file:EWK_WMuNu_SubSkim_31Xv3.root"
    )
)

# Debug/info printouts: everything goes to cout at DEBUG level, cerr disabled.
process.MessageLogger = cms.Service("MessageLogger",
    cerr = cms.untracked.PSet(
        enable = cms.untracked.bool(False)
    ),
    cout = cms.untracked.PSet(
        default = cms.untracked.PSet(
            limit = cms.untracked.int32(-1)
        ),
        enable = cms.untracked.bool(True),
        threshold = cms.untracked.string('DEBUG')
    ),
    debugModules = cms.untracked.vstring(
        'corMetWMuNus',
        'selcorMet'
    )
)


def _wmunu_selector(w_collection, use_tracker_pt=None):
    """Build a WMuNuSelector EDFilter with the selection shared by all paths.

    w_collection: label of the WMuNuCandidate collection to select on.
    use_tracker_pt: when not None, adds the UseTrackerPt flag with that value
        (the caloMET filter does not carry this parameter).
    """
    pars = dict(
        # Fill basic histograms.
        plotHistograms = cms.untracked.bool(True),
        # Input collections.
        MuonTag = cms.untracked.InputTag("muons"),
        TrigTag = cms.untracked.InputTag("TriggerResults::HLT8E29"),
        JetTag = cms.untracked.InputTag("antikt5CaloJets"),
        WMuNuCollectionTag = cms.untracked.InputTag(w_collection),
        # Preselection.
        MuonTrig = cms.untracked.string("HLT_Mu9"),
        PtThrForZ1 = cms.untracked.double(20.0),
        PtThrForZ2 = cms.untracked.double(10.0),
        EJetMin = cms.untracked.double(40.),
        NJetMax = cms.untracked.int32(999999),
        # Main cuts.
        PtCut = cms.untracked.double(25.0),
        EtaCut = cms.untracked.double(2.1),
        IsRelativeIso = cms.untracked.bool(True),
        IsCombinedIso = cms.untracked.bool(False),
        IsoCut03 = cms.untracked.double(0.1),
        MtMin = cms.untracked.double(50.0),
        MtMax = cms.untracked.double(200.0),
        MetMin = cms.untracked.double(-999999.),
        MetMax = cms.untracked.double(999999.),
        AcopCut = cms.untracked.double(2.),
        # Muon quality cuts.
        DxyCut = cms.untracked.double(0.2),
        NormalizedChi2Cut = cms.untracked.double(10.),
        TrackerHitsCut = cms.untracked.int32(11),
        IsAlsoTrackerMuon = cms.untracked.bool(True),
        # 0 keeps all Ws (select only W- / W+ by charge otherwise).
        SelectByCharge = cms.untracked.int32(0),
    )
    if use_tracker_pt is not None:
        pars['UseTrackerPt'] = cms.untracked.bool(use_tracker_pt)
    return cms.EDFilter("WMuNuSelector", **pars)


# Same selection applied to the three MET flavours; the pf and tc variants
# additionally use the tracker muon pt, as in the original configuration.
process.selcorMet = _wmunu_selector("corMetWMuNus")
process.selpfMet = _wmunu_selector("pfMetWMuNus", use_tracker_pt=True)
process.seltcMet = _wmunu_selector("tcMetWMuNus", use_tracker_pt=True)

process.TFileService = cms.Service("TFileService", fileName = cms.string('WMuNuBasicPlots.root') )

# Steering: one path per MET flavour.
process.path1 = cms.Path(process.selcorMet)
process.path2 = cms.Path(process.selpfMet)
process.path3 = cms.Path(process.seltcMet)
| 37.24581
| 98
| 0.662817
| 754
| 6,667
| 5.757294
| 0.185676
| 0.251555
| 0.161714
| 0.055287
| 0.790832
| 0.790832
| 0.790832
| 0.790832
| 0.790832
| 0.790832
| 0
| 0.050509
| 0.20414
| 6,667
| 178
| 99
| 37.455056
| 0.767622
| 0.217489
| 0
| 0.669565
| 0
| 0
| 0.067001
| 0.019695
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008696
| 0
| 0.008696
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
66fa8e02daaf7ea6f0e12b5a4f99e7170223ff9f
| 85
|
py
|
Python
|
rA9/synapses/__init__.py
|
junhoyeo/rA9
|
6ab5537880f842b36ae666f0ef5645acc62c236e
|
[
"MIT"
] | 2
|
2020-10-09T00:36:06.000Z
|
2020-10-20T06:20:19.000Z
|
rA9/synapses/__init__.py
|
junhoyeo/rA9
|
6ab5537880f842b36ae666f0ef5645acc62c236e
|
[
"MIT"
] | null | null | null |
rA9/synapses/__init__.py
|
junhoyeo/rA9
|
6ab5537880f842b36ae666f0ef5645acc62c236e
|
[
"MIT"
] | 1
|
2020-10-09T00:36:08.000Z
|
2020-10-09T00:36:08.000Z
|
from .Conv import *
from .pooling import *
from .Linear import *
from .loss import *
| 17
| 22
| 0.717647
| 12
| 85
| 5.083333
| 0.5
| 0.491803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188235
| 85
| 4
| 23
| 21.25
| 0.884058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
66fab7ec7906c810a48e49907a5ed0f0e65a9b43
| 108
|
py
|
Python
|
indexedconv/__init__.py
|
vuillaut/IndexedConv
|
781f6252248fc80ce80524389c51b8bb74de3052
|
[
"MIT"
] | 13
|
2018-11-05T13:17:44.000Z
|
2022-01-08T12:01:09.000Z
|
indexedconv/__init__.py
|
vuillaut/IndexedConv
|
781f6252248fc80ce80524389c51b8bb74de3052
|
[
"MIT"
] | 12
|
2018-10-20T13:31:20.000Z
|
2019-10-23T10:55:05.000Z
|
indexedconv/__init__.py
|
vuillaut/IndexedConv
|
781f6252248fc80ce80524389c51b8bb74de3052
|
[
"MIT"
] | 7
|
2018-11-26T16:49:08.000Z
|
2020-07-28T01:58:56.000Z
|
import indexedconv.utils
import indexedconv.engine
import indexedconv.nets
from .version import __version__
| 21.6
| 32
| 0.87037
| 13
| 108
| 6.923077
| 0.538462
| 0.566667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 108
| 4
| 33
| 27
| 0.918367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f19c06d28b709e4056741947f49ef6303871738
| 252
|
bzl
|
Python
|
test/proto_cross_repo_boundary/repo.bzl
|
blorente/rules_scala
|
0c1ed832f2db5fa1069c7b21d546f234d078d210
|
[
"Apache-2.0"
] | 326
|
2016-02-24T18:28:10.000Z
|
2022-03-30T08:51:08.000Z
|
test/proto_cross_repo_boundary/repo.bzl
|
blorente/rules_scala
|
0c1ed832f2db5fa1069c7b21d546f234d078d210
|
[
"Apache-2.0"
] | 1,157
|
2016-02-24T04:26:27.000Z
|
2022-03-31T05:59:14.000Z
|
test/proto_cross_repo_boundary/repo.bzl
|
ConsultingMD/rules_scala
|
75b0bef95a2ced6062229e5ea4cfce7047eead30
|
[
"Apache-2.0"
] | 262
|
2016-02-24T18:29:21.000Z
|
2022-03-24T21:39:20.000Z
|
def proto_cross_repo_boundary_repository():
    """Registers test/proto_cross_repo_boundary/repo as the external
    repository @proto_cross_repo_boundary, using its BUILD.repo file as the
    build file for the repository."""
    native.new_local_repository(
        name = "proto_cross_repo_boundary",
        path = "test/proto_cross_repo_boundary/repo",
        build_file = "test/proto_cross_repo_boundary/repo/BUILD.repo",
    )
| 36
| 70
| 0.72619
| 32
| 252
| 5.21875
| 0.4375
| 0.239521
| 0.335329
| 0.526946
| 0.419162
| 0.419162
| 0.419162
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 252
| 6
| 71
| 42
| 0.806763
| 0
| 0
| 0
| 0
| 0
| 0.420635
| 0.420635
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0f4f366719f239c45d1f49a9aa45daa0b054431d
| 76
|
py
|
Python
|
codes/deeplearning/data/__init__.py
|
sarvai/proposals
|
578c0094db52594cd85acb843df82fe3c19db46d
|
[
"Apache-2.0"
] | null | null | null |
codes/deeplearning/data/__init__.py
|
sarvai/proposals
|
578c0094db52594cd85acb843df82fe3c19db46d
|
[
"Apache-2.0"
] | null | null | null |
codes/deeplearning/data/__init__.py
|
sarvai/proposals
|
578c0094db52594cd85acb843df82fe3c19db46d
|
[
"Apache-2.0"
] | null | null | null |
from .workbench import workbench
from .pose_workbench import pose_workbench
| 25.333333
| 42
| 0.868421
| 10
| 76
| 6.4
| 0.4
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 2
| 43
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f68704d2b40022d35a3796f70e08e96b8d765b3
| 4,109
|
py
|
Python
|
tests/test_factoryboy_state.py
|
hrother/pytest-factoryboy-state
|
66661a0f608d2174e3996a0ccb6c3a27bf35284f
|
[
"MIT"
] | 1
|
2021-06-15T21:17:30.000Z
|
2021-06-15T21:17:30.000Z
|
tests/test_factoryboy_state.py
|
hrother/pytest-factoryboy-state
|
66661a0f608d2174e3996a0ccb6c3a27bf35284f
|
[
"MIT"
] | 2
|
2022-03-20T22:45:57.000Z
|
2022-03-21T00:06:07.000Z
|
tests/test_factoryboy_state.py
|
hrother/pytest-factoryboy-state
|
66661a0f608d2174e3996a0ccb6c3a27bf35284f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_help_message(testdir):
    """The plugin's command-line options are listed in ``pytest --help``."""
    res = testdir.runpytest("--help")
    expected = [
        "factoryboy-state:",
        "*--show-state*Show factoryboy state for failures.",
        "*--set-state=FACTORYBOY_STATE",
        "*Set factoryboy state.",
    ]
    # fnmatch_lines does an assertion internally
    res.stdout.fnmatch_lines(expected)
def test_does_nothing_when_not_explicitly_called(testdir):
    """Without any opt-in, no factory-boy state banner appears on failure."""
    failing_test = """
        def test_failure():
            assert False
    """
    testdir.makepyfile(failing_test)
    res = testdir.runpytest("")
    res.stdout.no_fnmatch_line("=*= factory-boy random state =*=")
def test_shows_state_on_failure(testdir):
    """``--show-state`` prints the state banner when a test fails."""
    failing_test = """
        def test_failure():
            assert False
    """
    testdir.makepyfile(failing_test)
    res = testdir.runpytest("--show-state")
    res.stdout.fnmatch_lines(["=*= factory-boy random state =*="])
def test_shows_state_on_failure_from_environment_variable(testdir, monkeypatch):
    """Setting SHOW_FACTORYBOY_STATE enables the banner without a CLI flag."""
    monkeypatch.setenv("SHOW_FACTORYBOY_STATE", "True")
    failing_test = """
        def test_failure():
            assert False
    """
    testdir.makepyfile(failing_test)
    res = testdir.runpytest()
    res.stdout.fnmatch_lines(["=*= factory-boy random state =*="])
def test_shows_state_on_error(testdir):
    """The banner is also shown for fixture errors, not only assert failures."""
    erroring_test = """
        import pytest
        @pytest.fixture
        def foo():
            raise Exception
        def test_failure(foo):
            assert True
    """
    testdir.makepyfile(erroring_test)
    res = testdir.runpytest("--show-state")
    res.stdout.fnmatch_lines(["=*= factory-boy random state =*="])
def test_shows_state_on_error_for_environment_variable(testdir, monkeypatch):
    """SHOW_FACTORYBOY_STATE also triggers the banner on fixture errors."""
    monkeypatch.setenv("SHOW_FACTORYBOY_STATE", "True")
    erroring_test = """
        import pytest
        @pytest.fixture
        def foo():
            raise Exception
        def test_failure(foo):
            assert True
    """
    testdir.makepyfile(erroring_test)
    res = testdir.runpytest()
    res.stdout.fnmatch_lines(["=*= factory-boy random state =*="])
def test_uses_set_state(testdir, state):
    """With ``--set-state=<state>`` the seeded Faker value is reproducible."""
    seeded_test = """
        import factory
        class User:
            def __init__(self, name):
                self.name = name
        class UserFactory(factory.Factory):
            class Meta:
                model = User
            name = factory.Faker("first_name")
        def test_user_name():
            user = UserFactory()
            assert user.name == "Sara"
    """
    testdir.makepyfile(seeded_test)
    res = testdir.runpytest("-v", f"--set-state={state}")
    res.stdout.fnmatch_lines(["*::test_user_name PASSED*"])
    assert res.ret == 0
def test_uses_set_state_from_environment(testdir, state, monkeypatch):
    """FACTORYBOY_STATE in the environment works like ``--set-state``."""
    monkeypatch.setenv("FACTORYBOY_STATE", state)
    seeded_test = """
        import factory
        class User:
            def __init__(self, name):
                self.name = name
        class UserFactory(factory.Factory):
            class Meta:
                model = User
            name = factory.Faker("first_name")
        def test_user_name():
            user = UserFactory()
            assert user.name == "Sara"
    """
    testdir.makepyfile(seeded_test)
    res = testdir.runpytest("-v")
    res.stdout.fnmatch_lines(["*::test_user_name PASSED*"])
    assert res.ret == 0
def test_ignores_invalid_state(testdir):
    """An unparseable ``--set-state`` value is ignored, so the test fails."""
    seeded_test = """
        import factory
        class User:
            def __init__(self, name):
                self.name = name
        class UserFactory(factory.Factory):
            class Meta:
                model = User
            name = factory.Faker("first_name")
        def test_user_name():
            user = UserFactory()
            assert user.name == "Sara"
    """
    testdir.makepyfile(seeded_test)
    res = testdir.runpytest("-v", "--set-state=x")
    res.stdout.fnmatch_lines(["*::test_user_name FAILED*"])
    assert res.ret != 0
| 21.856383
| 80
| 0.553176
| 399
| 4,109
| 5.471178
| 0.182957
| 0.054512
| 0.090701
| 0.087952
| 0.80394
| 0.790655
| 0.790655
| 0.774164
| 0.774164
| 0.774164
| 0
| 0.001451
| 0.329277
| 4,109
| 187
| 81
| 21.973262
| 0.790639
| 0.015576
| 0
| 0.371429
| 0
| 0
| 0.20541
| 0.030008
| 0
| 0
| 0
| 0
| 0.042857
| 1
| 0.128571
| false
| 0.028571
| 0
| 0
| 0.128571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7e67dae07dd26619ffb58032d4d5f68712f7e834
| 711
|
py
|
Python
|
test_utils.py
|
zakomo/GMReorganizer
|
9253e80cf89bbad84d868bd7aa191e6b6b18b5d8
|
[
"MIT"
] | null | null | null |
test_utils.py
|
zakomo/GMReorganizer
|
9253e80cf89bbad84d868bd7aa191e6b6b18b5d8
|
[
"MIT"
] | null | null | null |
test_utils.py
|
zakomo/GMReorganizer
|
9253e80cf89bbad84d868bd7aa191e6b6b18b5d8
|
[
"MIT"
] | null | null | null |
import utils
def test_sanitize_names_html_entity_with_hash():
    """An apostrophe-bearing name comes back intact from sanitize_name."""
    cleaned = utils.sanitize_name("c'erano")
    assert cleaned == "c'erano"
def test_sanitize_names_html_entity_with_name():
    """Ampersand and apostrophe characters survive sanitize_name unchanged."""
    cleaned = utils.sanitize_name("hello&g'day")
    assert cleaned == "hello&g'day"
def test_sanitize_names_with_pathsep_linux():
    """Forward slashes (POSIX path separators) become hyphens."""
    cleaned = utils.sanitize_name("hello/world")
    assert cleaned == "hello-world"
def test_sanitize_names_with_pathsep_win():
    """Backslashes (Windows path separators) become hyphens."""
    cleaned = utils.sanitize_name("hello\\world")
    assert cleaned == "hello-world"
def test_sanitize_names_with_spaces():
    """Leading/trailing whitespace (incl. tabs) is stripped; inner spaces kept."""
    cleaned = utils.sanitize_name(" hello world \t")
    assert cleaned == "hello world"
def test_sanitize_names_mixed():
    """A mixed sentence (spaces, ampersand, apostrophe) passes through trimmed."""
    cleaned = utils.sanitize_name(" The night & the day are 'rad")
    assert cleaned == "The night & the day are 'rad"
| 27.346154
| 105
| 0.739803
| 106
| 711
| 4.632075
| 0.283019
| 0.08554
| 0.183299
| 0.244399
| 0.674134
| 0.617108
| 0.411405
| 0.272912
| 0.272912
| 0.272912
| 0
| 0.006452
| 0.127989
| 711
| 25
| 106
| 28.44
| 0.785484
| 0
| 0
| 0
| 0
| 0
| 0.253165
| 0
| 0
| 0
| 0
| 0
| 0.461538
| 1
| 0.461538
| true
| 0
| 0.076923
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
7e9d404a592c5e253f882f7b8330b61e4612fd2e
| 43
|
py
|
Python
|
horsepics/__init__.py
|
Mason-McGough/HorsePics
|
deacc3bedb69147b1584bf8b159624789fbdd5c9
|
[
"MIT"
] | null | null | null |
horsepics/__init__.py
|
Mason-McGough/HorsePics
|
deacc3bedb69147b1584bf8b159624789fbdd5c9
|
[
"MIT"
] | null | null | null |
horsepics/__init__.py
|
Mason-McGough/HorsePics
|
deacc3bedb69147b1584bf8b159624789fbdd5c9
|
[
"MIT"
] | null | null | null |
from .stitch import *
from .adjust import *
| 21.5
| 21
| 0.744186
| 6
| 43
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 22
| 21.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0e2f40c94ea37960f3215f15956564b4b827f8ad
| 107
|
py
|
Python
|
src/repository/__init__.py
|
DiceNameIsMy/fastapi-registration
|
ea1d0f69bb6fcdac77adbc136a8061ca56e05e18
|
[
"MIT"
] | 1
|
2022-01-12T14:29:51.000Z
|
2022-01-12T14:29:51.000Z
|
src/repository/__init__.py
|
DiceNameIsMy/fastapi-registration
|
ea1d0f69bb6fcdac77adbc136a8061ca56e05e18
|
[
"MIT"
] | null | null | null |
src/repository/__init__.py
|
DiceNameIsMy/fastapi-registration
|
ea1d0f69bb6fcdac77adbc136a8061ca56e05e18
|
[
"MIT"
] | null | null | null |
from .repository import Repository, get_repository_class
__all__ = ["Repository", "get_repository_class"]
| 26.75
| 56
| 0.813084
| 12
| 107
| 6.583333
| 0.5
| 0.329114
| 0.582278
| 0.708861
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093458
| 107
| 3
| 57
| 35.666667
| 0.814433
| 0
| 0
| 0
| 0
| 0
| 0.280374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0e6c575a9082626a215abb10d8b10b1bb63f4eca
| 11,719
|
py
|
Python
|
cogs/logging.py
|
Joystickplays/GoMod
|
f48af46b08e095136cb048d9dbb76a5f539f4ea1
|
[
"Apache-2.0"
] | 1
|
2022-02-25T04:25:21.000Z
|
2022-02-25T04:25:21.000Z
|
cogs/logging.py
|
Joystickplays/GoMod
|
f48af46b08e095136cb048d9dbb76a5f539f4ea1
|
[
"Apache-2.0"
] | null | null | null |
cogs/logging.py
|
Joystickplays/GoMod
|
f48af46b08e095136cb048d9dbb76a5f539f4ea1
|
[
"Apache-2.0"
] | null | null | null |
import discord
import asyncio
from discord.ext import commands
from bot import GoModBot
class Logging(commands.Cog):
    """Guild event logging: member joins/leaves, message edits and deletes.

    Log-channel configuration lives in the ``logch`` DB table and is
    mirrored in ``bot.logcache``; channels excluded from edit logging live
    in ``ignoredlogs`` / ``bot.logign``.  Logging types: ``"m"`` = member,
    ``"e"`` = edit, ``"d"`` = delete.
    """

    def __init__(self, bot):
        self.bot = bot

    async def cog_check(self, ctx):
        # Commands in this cog only run in guilds that enabled the "lg" module.
        lookup = await self.bot.db.fetchrow("SELECT * FROM modules WHERE server = $1 AND module = $2", ctx.guild.id, "lg")
        return lookup is not None

    async def _find_log_channel(self, guild_id, loggingtype):
        """Return the log channel configured for (guild_id, loggingtype).

        Checks ``bot.logcache`` first; on a cache miss, re-reads ``logch``
        rows from the DB, appending each to the cache while scanning (stops
        at the first match, as the original code did).  May return ``None``
        when no row matches or the channel no longer exists.
        """
        for record in self.bot.logcache:
            if record["guildid"] == guild_id and record["loggingtype"] == loggingtype:
                return self.bot.get_channel(record["channelid"])
        logs = await self.bot.db.fetch("SELECT * FROM logch")
        for log in logs:
            tempdict = {
                "guildid": log["guildid"],
                "channelid": log["channelid"],
                "loggingtype": log["loggingtype"],
            }
            self.bot.logcache.append(tempdict)
            # BUG FIX: the original read record["channelid"] here — a stale
            # (or, with an empty cache, undefined -> NameError) variable left
            # over from the cache scan — instead of the current DB row.
            if log["guildid"] == guild_id and log["loggingtype"] == loggingtype:
                return self.bot.get_channel(log["channelid"])
        return None

    async def _is_ignored(self, message):
        """True when the message's channel is on the edit-log ignore list.

        NOTE(review): the cached check matches on channel id only, while the
        DB check also compares the guild id — behavior kept as found.
        """
        for ign in self.bot.logign:
            if ign["channel"] == message.channel.id:
                return True
        ignore = await self.bot.db.fetch("SELECT * FROM ignoredlogs")
        for ign in ignore:
            tempdict = {"server": ign["server"], "channel": ign["channel"]}
            self.bot.logign.append(tempdict)
            if ign["server"] == message.guild.id and ign["channel"] == message.channel.id:
                return True
        return False

    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Post a green 'Member joined' embed to the guild's member log."""
        channel = await self._find_log_channel(member.guild.id, "m")
        if channel is None:
            return
        embed = discord.Embed(title="Member joined", description=f"{member.name} has joined {member.guild.name}", color=discord.Color.green()).add_field(name="Member count", value=f"{member.guild.member_count}")
        await channel.send(embed=embed)

    @commands.Cog.listener()
    async def on_member_remove(self, member):
        """Post an orange 'Member left' embed to the guild's member log."""
        channel = await self._find_log_channel(member.guild.id, "m")
        if channel is None:
            return
        embed = discord.Embed(title="Member left", description=f"{member.name} has left {member.guild.name}", color=discord.Color.orange()).add_field(name="Member count", value=f"{member.guild.member_count}")
        await channel.send(embed=embed)

    @commands.Cog.listener()
    async def on_message_edit(self, messagebefore, messageafter):
        """Post a before/after embed to the edit log, unless the channel is ignored."""
        if messagebefore.author.bot:
            return
        if messagebefore.guild is None:
            return
        if await self._is_ignored(messagebefore):
            return
        channel = await self._find_log_channel(messagebefore.guild.id, "e")
        if channel is None:
            return
        embed = discord.Embed(title="Message edited", description=f"The following message was edited in `{messagebefore.channel.name}` by `{messagebefore.author.name}`:\n\nFrom:\n```\n{messagebefore.content}\n```\nTo:\n```{messageafter.content}```", color=discord.Color.orange())
        await channel.send(embed=embed)

    @commands.Cog.listener()
    async def on_message_delete(self, message):
        """Post the deleted content to the deletion log.

        NOTE(review): unlike on_message_edit, the original performs no
        ignore-list check here — kept as found.
        """
        if message.author.bot:
            return
        if message.guild is None:
            return
        channel = await self._find_log_channel(message.guild.id, "d")
        if channel is None:
            return
        embed = discord.Embed(title="Message deleted", description=f"The following message was deleted in `{message.channel.name}` by `{message.author.name}`:\n\n```\n{message.content}\n```", color=discord.Color.red())
        await channel.send(embed=embed)

    @commands.Cog.listener()
    async def on_guild_channel_delete(self, channel):
        """Drop logging configuration rows that pointed at a deleted channel."""
        for record in self.bot.logcache:
            if record["channelid"] == channel.id:
                await self.bot.db.execute("DELETE FROM logch WHERE channelid = $1", channel.id)
                self.bot.logcache.remove(record)
                return
        logs = await self.bot.db.fetch("SELECT * FROM logch")
        for log in logs:
            tempdict = {
                "guildid": log["guildid"],
                "channelid": log["channelid"],
                "loggingtype": log["loggingtype"],
            }
            self.bot.logcache.append(tempdict)
            if log["channelid"] == channel.id:
                await self.bot.db.execute("DELETE FROM logch WHERE channelid = $1", channel.id)
                self.bot.logcache.remove(log)
                return

    @commands.command()
    @commands.has_permissions(manage_messages=True)
    async def createlogging(self, ctx):
        """Interactive setup turning the current channel into a log channel."""
        embed = discord.Embed(title="Logging setup", description=f"You will setup the channel {ctx.channel.mention}. Continue?", color=0x00b2ff)
        msg = await ctx.send(embed=embed)
        await msg.add_reaction("✅")
        await msg.add_reaction("❌")
        def check(reaction, user):
            # Only the invoker's reactions on this prompt count.
            return user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in ["✅", "❌"]
        try:
            reaction, user = await self.bot.wait_for('reaction_add', check=check, timeout=60)
        except asyncio.TimeoutError:
            await msg.edit(embed=discord.Embed(title="Logging setup", description="Timed out.", color=0x00b2ff))
            return
        if str(reaction.emoji) == "❌":
            await msg.edit(embed=discord.Embed(title="Logging setup", description="Cancelled.", color=0x00b2ff))
            return
        embed = discord.Embed(title="Logging setup", description="Do you want to make the channel a deletion log, edit log or member log?", color=0x00b2ff)
        msg = await ctx.send(embed=embed)
        await msg.add_reaction("🗑")
        await msg.add_reaction("📝")
        await msg.add_reaction("👤")
        def check(reaction, user):
            return user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in ["🗑", "📝", "👤"]
        try:
            reaction, user = await self.bot.wait_for('reaction_add', check=check, timeout=60)
        except asyncio.TimeoutError:
            await msg.edit(embed=discord.Embed(title="Logging setup", description="Timed out.", color=0x00b2ff))
            return
        # Emoji -> (logging type, progress message, completion message).
        # `check` above guarantees the emoji is one of these three keys.
        choices = {
            "🗑": ("d", "Setting up deletion logging...", "Complete! To test, try deleting a message."),
            "📝": ("e", "Setting up edit logging...", "Complete! To test, try editing a message."),
            "👤": ("m", "Setting up member logging...", "Complete! To test, try adding a member."),
        }
        logtype, progress, done = choices[str(reaction.emoji)]
        embed = discord.Embed(title="Setting...", description=progress, color=0x00b2ff)
        await ctx.send(embed=embed)
        await self.bot.db.execute("INSERT INTO logch (channelid, loggingtype, guildid) VALUES ($1, $2, $3)", ctx.channel.id, logtype, ctx.guild.id)
        embed = discord.Embed(title="Logging setup", description=done, color=0x00b2ff)
        await ctx.send(embed=embed)

    @commands.command()
    @commands.has_permissions(manage_messages=True)
    async def ignorelogging(self, ctx, channel: discord.TextChannel):
        """Exclude *channel* from edit logging.

        NOTE(review): the new row is written to the DB but not appended to
        the in-memory ``bot.logign`` cache, so it may only take effect after
        the cache is next refreshed — confirm intended.
        """
        await self.bot.db.execute("INSERT INTO ignoredlogs (server, channel) VALUES ($1, $2)", ctx.guild.id, channel.id)
        embed = discord.Embed(title="Logging setup", description=f"Channel {channel.mention} has been added to the ignore list.", color=0x00b2ff)
        await ctx.send(embed=embed)
def setup(bot:GoModBot):
    """Extension entry point: register the Logging cog on the bot."""
    cog = Logging(bot)
    bot.add_cog(cog)
| 51.174672
| 287
| 0.598686
| 1,374
| 11,719
| 5.082969
| 0.111354
| 0.039089
| 0.048683
| 0.063001
| 0.850802
| 0.831472
| 0.821449
| 0.783648
| 0.744273
| 0.744273
| 0
| 0.007799
| 0.266917
| 11,719
| 229
| 288
| 51.174672
| 0.803515
| 0
| 0
| 0.671717
| 0
| 0.020202
| 0.232679
| 0.048294
| 0
| 0
| 0.008191
| 0
| 0
| 1
| 0.020202
| false
| 0
| 0.020202
| 0.010101
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0e9661a2f2bfcede9d59ddb5cec3cf02ca62cdbd
| 48
|
py
|
Python
|
waterbutler/providers/dataverse/__init__.py
|
alexschiller/waterbutler
|
24014d7705aca3e99a6565fc3b9b4075ec6ec563
|
[
"Apache-2.0"
] | 65
|
2015-01-23T03:22:04.000Z
|
2022-01-11T22:33:19.000Z
|
waterbutler/providers/dataverse/__init__.py
|
alexschiller/waterbutler
|
24014d7705aca3e99a6565fc3b9b4075ec6ec563
|
[
"Apache-2.0"
] | 300
|
2015-02-16T16:45:02.000Z
|
2022-01-31T14:49:07.000Z
|
waterbutler/providers/dataverse/__init__.py
|
Johnetordoff/waterbutler
|
b505cdbcffadaba12984dcb19c9139068e6c314d
|
[
"Apache-2.0"
] | 76
|
2015-01-20T20:45:17.000Z
|
2021-07-30T13:18:10.000Z
|
from .provider import DataverseProvider # noqa
| 24
| 47
| 0.8125
| 5
| 48
| 7.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 1
| 48
| 48
| 0.95122
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7eba36971a1b3c4583004d613ef4fdb0410f2235
| 3,127
|
py
|
Python
|
pyspark_proxy/sql/column.py
|
abronte/PysparkProxy
|
cc28bacb0d4ee6fb87ced763a73e9ea791612414
|
[
"Apache-2.0"
] | 3
|
2018-09-06T18:37:35.000Z
|
2018-09-07T17:49:44.000Z
|
pyspark_proxy/sql/column.py
|
abronte/PysparkProxy
|
cc28bacb0d4ee6fb87ced763a73e9ea791612414
|
[
"Apache-2.0"
] | 29
|
2018-09-04T23:53:42.000Z
|
2018-12-12T21:46:59.000Z
|
pyspark_proxy/sql/column.py
|
abronte/PysparkProxy
|
cc28bacb0d4ee6fb87ced763a73e9ea791612414
|
[
"Apache-2.0"
] | null | null | null |
from pyspark_proxy.proxy import Proxy
__all__ = ['Column']
class Column(Proxy):
    """Proxy for a remote pyspark ``Column``.

    Every operation is forwarded to the proxy server via ``Proxy._call``
    with the remote object's id, the method name, and an ``(args, kwargs)``
    pair.
    """

    def alias(self, *args, **kwargs):
        return self._call(self._id, 'alias', (args, kwargs))

    def cast(self, *args, **kwargs):
        return self._call(self._id, 'cast', (args, kwargs))

    def __repr__(self):
        return self._call(self._id, '__repr__', ((), {}))

    # better way to define these?
    def _op_func(self, name, *args, **kwargs):
        # BUG FIX: previously ignored `name` and always forwarded '__neg__'.
        return self._call(self._id, name, (args, kwargs))

    def __add__(self, *args, **kwargs):
        return self._call(self._id, '__add__', (args, kwargs))

    def __sub__(self, *args, **kwargs):
        return self._call(self._id, '__sub__', (args, kwargs))

    def __mul__(self, *args, **kwargs):
        return self._call(self._id, '__mul__', (args, kwargs))

    def __div__(self, *args, **kwargs):
        return self._call(self._id, '__div__', (args, kwargs))

    def __truediv__(self, *args, **kwargs):
        return self._call(self._id, '__truediv__', (args, kwargs))

    def __mod__(self, *args, **kwargs):
        return self._call(self._id, '__mod__', (args, kwargs))

    def __radd__(self, *args, **kwargs):
        return self._call(self._id, '__radd__', (args, kwargs))

    def __rsub__(self, *args, **kwargs):
        return self._call(self._id, '__rsub__', (args, kwargs))

    def __rmul__(self, *args, **kwargs):
        return self._call(self._id, '__rmul__', (args, kwargs))

    def __rdiv__(self, *args, **kwargs):
        return self._call(self._id, '__rdiv__', (args, kwargs))

    def __rtruediv__(self, *args, **kwargs):
        # BUG FIX: previously forwarded '__rdiv__' (copy-paste) instead of
        # '__rtruediv__'; every other dunder forwards its own name.
        return self._call(self._id, '__rtruediv__', (args, kwargs))

    def __rmod__(self, *args, **kwargs):
        return self._call(self._id, '__rmod__', (args, kwargs))

    def __pow__(self, *args, **kwargs):
        return self._call(self._id, '__pow__', (args, kwargs))

    def __rpow__(self, *args, **kwargs):
        return self._call(self._id, '__rpow__', (args, kwargs))

    def __eq__(self, *args, **kwargs):
        return self._call(self._id, '__eq__', (args, kwargs))

    def __ne__(self, *args, **kwargs):
        return self._call(self._id, '__ne__', (args, kwargs))

    def __lt__(self, *args, **kwargs):
        return self._call(self._id, '__lt__', (args, kwargs))

    def __le__(self, *args, **kwargs):
        return self._call(self._id, '__le__', (args, kwargs))

    def __ge__(self, *args, **kwargs):
        return self._call(self._id, '__ge__', (args, kwargs))

    def __gt__(self, *args, **kwargs):
        return self._call(self._id, '__gt__', (args, kwargs))

    def __and__(self, *args, **kwargs):
        return self._call(self._id, '__and__', (args, kwargs))

    def __or__(self, *args, **kwargs):
        return self._call(self._id, '__or__', (args, kwargs))

    def __invert__(self, *args, **kwargs):
        return self._call(self._id, '__invert__', (args, kwargs))

    def __rand__(self, *args, **kwargs):
        return self._call(self._id, '__rand__', (args, kwargs))

    def __ror__(self, *args, **kwargs):
        return self._call(self._id, '__ror__', (args, kwargs))
| 33.623656
| 66
| 0.610169
| 391
| 3,127
| 4.171356
| 0.130435
| 0.343348
| 0.248927
| 0.320049
| 0.614347
| 0.602085
| 0.602085
| 0.583691
| 0.062538
| 0.062538
| 0
| 0
| 0.210106
| 3,127
| 92
| 67
| 33.98913
| 0.660324
| 0.008634
| 0
| 0.032787
| 0
| 0
| 0.068754
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.47541
| false
| 0
| 0.016393
| 0.47541
| 0.983607
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
7ec1b8e4f45c838696db7b30d3d3dad0ea0b0e22
| 32
|
py
|
Python
|
src/core/app/app/services/__init__.py
|
exytab/FrontLineLiveUA
|
733bb0c84062e3a3d8eec3cf988add7e1470d392
|
[
"MIT"
] | null | null | null |
src/core/app/app/services/__init__.py
|
exytab/FrontLineLiveUA
|
733bb0c84062e3a3d8eec3cf988add7e1470d392
|
[
"MIT"
] | null | null | null |
src/core/app/app/services/__init__.py
|
exytab/FrontLineLiveUA
|
733bb0c84062e3a3d8eec3cf988add7e1470d392
|
[
"MIT"
] | null | null | null |
from . import map, need, supply
| 16
| 31
| 0.71875
| 5
| 32
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 32
| 1
| 32
| 32
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
addd1e083b1fc8ffd73e1ffa6c9c6b3dff9d8e53
| 38
|
py
|
Python
|
ezhost/__main__.py
|
zhexiao/ezhost1
|
4146bc0be14bb1bfe98ec19283d19fab420871b3
|
[
"MIT"
] | 4
|
2016-12-16T20:22:44.000Z
|
2018-10-31T07:12:34.000Z
|
ezhost/__main__.py
|
zhexiao/ezhost1
|
4146bc0be14bb1bfe98ec19283d19fab420871b3
|
[
"MIT"
] | null | null | null |
ezhost/__main__.py
|
zhexiao/ezhost1
|
4146bc0be14bb1bfe98ec19283d19fab420871b3
|
[
"MIT"
] | 1
|
2017-07-19T05:36:58.000Z
|
2017-07-19T05:36:58.000Z
|
import ezhost.main

# Run the CLI only when executed as the program entry point
# (``python -m ezhost``), not when this module is merely imported.
if __name__ == "__main__":
    ezhost.main.main()
| 12.666667
| 18
| 0.789474
| 6
| 38
| 5
| 0.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 2
| 19
| 19
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ade56c6eca6b0d226dc5be00b20de6149339cf64
| 105
|
py
|
Python
|
module/__init__.py
|
liurenfeng007/DSRE
|
7b3b257c68b1991b8b12c817a245af022a5fbeaa
|
[
"MIT"
] | null | null | null |
module/__init__.py
|
liurenfeng007/DSRE
|
7b3b257c68b1991b8b12c817a245af022a5fbeaa
|
[
"MIT"
] | null | null | null |
module/__init__.py
|
liurenfeng007/DSRE
|
7b3b257c68b1991b8b12c817a245af022a5fbeaa
|
[
"MIT"
] | null | null | null |
from .embedding import Embedding
from .encoder import *
from .selector import *
from .classifier import *
| 26.25
| 32
| 0.790476
| 13
| 105
| 6.384615
| 0.461538
| 0.240964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 105
| 4
| 33
| 26.25
| 0.922222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc05d5098b93ea3846c65ff6a9527cd48f7c9d95
| 42
|
py
|
Python
|
recognition/src/models/__init__.py
|
AlexeyZhuravlev/OCR-experiments
|
8493045054678a2e13cafce6d9e85c7581086c7a
|
[
"MIT"
] | 2
|
2020-05-28T18:46:37.000Z
|
2020-08-29T12:49:57.000Z
|
recognition/src/models/__init__.py
|
AlexeyZhuravlev/OCR-experiments
|
8493045054678a2e13cafce6d9e85c7581086c7a
|
[
"MIT"
] | null | null | null |
recognition/src/models/__init__.py
|
AlexeyZhuravlev/OCR-experiments
|
8493045054678a2e13cafce6d9e85c7581086c7a
|
[
"MIT"
] | null | null | null |
from .multi_head import MultiHeadOcrModel
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc37af5a16986b639b10ae157cdf53ebf4cfc204
| 21,116
|
py
|
Python
|
pyfuzzy_toolbox/features/count.py
|
matheuscas/pyfuzzy_toolbox
|
57885f3ff53d1b7ab3559c7ff6197ceb97f67c3b
|
[
"BSD-3-Clause"
] | null | null | null |
pyfuzzy_toolbox/features/count.py
|
matheuscas/pyfuzzy_toolbox
|
57885f3ff53d1b7ab3559c7ff6197ceb97f67c3b
|
[
"BSD-3-Clause"
] | null | null | null |
pyfuzzy_toolbox/features/count.py
|
matheuscas/pyfuzzy_toolbox
|
57885f3ff53d1b7ab3559c7ff6197ceb97f67c3b
|
[
"BSD-3-Clause"
] | null | null | null |
from . import pre
from . import set_pos_tags_codes
from . import set_ngram_polarity_statement
from . import ADJS, ADVS, VERBS, ALL, ADJS_AND_ADVS, ADJS_AND_VERBS, ADVS_AND_VERBS,\
ADJS_AND_BI_ADV_ADJ, ADVS_AND_BI_ADV_ADV, VERBS_AND_BI_ADV_VERB, ALL_NON_GENERAL_BIGRAMS
""" ------------------------------ Base functions ------------------------------ """
def count_of_unigrams_scores(bow_sentences, unigram=ADJS, positive=True):
    """Count unigrams of POS class ``unigram`` whose polarity matches ``positive``.

    Scans every ngram in every bag-of-words sentence and counts those that
    are unigrams, whose POS tag is in the selected tag set, and that satisfy
    the generated polarity expression.
    """
    pos_tags_codes = set_pos_tags_codes(unigram)
    # The polarity test is built as a source string and eval'd once per ngram.
    # NOTE(review): the expression presumably references the loop variable
    # ``ngram`` — do not rename locals here; eval of generated code is safe
    # only while set_ngram_polarity_statement never embeds untrusted text.
    polarity_eval_stm = set_ngram_polarity_statement(positive=positive)
    _count = 0
    for bs in bow_sentences:
        for ngram in bs:
            if pre.is_unigram(ngram) and ngram.pos_tag in pos_tags_codes and eval(polarity_eval_stm):
                _count += 1
    return _count
def count_of_bigrams_scores(bow_sentences, bigram_word_1=ADVS, bigram_word_2=ADJS, positive=True):
    """Count bigrams whose two words match the given POS classes and polarity.

    A bigram counts when word 1's tag is in ``bigram_word_1``'s tag set,
    word 2's tag is in ``bigram_word_2``'s tag set, and the generated
    polarity expression holds.
    """
    pos_tags_codes_word_1 = set_pos_tags_codes(bigram_word_1)
    pos_tags_codes_word_2 = set_pos_tags_codes(bigram_word_2)
    # NOTE(review): eval'd expression presumably references ``ngram`` —
    # locals must keep their names.
    polarity_eval_stm = set_ngram_polarity_statement(positive=positive)
    _count = 0
    for bs in bow_sentences:
        for ngram in bs:
            if pre.is_bigram(ngram) and \
                    (ngram.word_1.pos_tag in pos_tags_codes_word_1) and \
                    (ngram.word_2.pos_tag in pos_tags_codes_word_2) and \
                    eval(polarity_eval_stm):
                _count += 1
    return _count
def positive_to_negative_ratio_count_unigrams_scores(bow_sentences, unigram=ADJS):
    """Positive unigram count minus negative unigram count.

    Despite the name, this is a difference, not a ratio.
    """
    counts = [
        count_of_unigrams_scores(bow_sentences, unigram=unigram, positive=flag)
        for flag in (True, False)
    ]
    return counts[0] - counts[1]
def count_of_unigrams_and_bigrams_scores(bow_sentences, unigram=ADJS, bigram_word_1=ADVS, bigram_word_2=ADJS, positive=True):
    """Sum of matching unigram and bigram counts for a single polarity."""
    from_unigrams = count_of_unigrams_scores(
        bow_sentences, unigram=unigram, positive=positive)
    from_bigrams = count_of_bigrams_scores(
        bow_sentences,
        bigram_word_1=bigram_word_1,
        bigram_word_2=bigram_word_2,
        positive=positive)
    return from_unigrams + from_bigrams
def positive_to_negative_ratio_count_unigrams_and_bigrams_scores(bow_sentences, unigram=ADJS, bigram_word_1=ADVS, bigram_word_2=ADJS):
    """Positive minus negative combined unigram+bigram counts (a difference)."""
    def combined(flag):
        # One polarity's combined unigram+bigram count.
        return count_of_unigrams_and_bigrams_scores(
            bow_sentences, unigram=unigram,
            bigram_word_1=bigram_word_1, bigram_word_2=bigram_word_2,
            positive=flag)
    return combined(True) - combined(False)
# UNTESTED
def count_of_trigrams_scores(bow_sentences, trigram_word_1=ADVS, trigram_word_2=ADVS, trigram_word_3=ADJS, positive=True):
    """Count trigrams whose three words match the given POS classes and polarity.

    Marked UNTESTED upstream; mirrors count_of_bigrams_scores with a third
    word position.
    """
    pos_tags_codes_word_1 = set_pos_tags_codes(trigram_word_1)
    pos_tags_codes_word_2 = set_pos_tags_codes(trigram_word_2)
    pos_tags_codes_word_3 = set_pos_tags_codes(trigram_word_3)
    # NOTE(review): eval'd expression presumably references ``ngram`` —
    # locals must keep their names.
    polarity_eval_stm = set_ngram_polarity_statement(positive=positive)
    _count = 0
    for bs in bow_sentences:
        for ngram in bs:
            if pre.is_trigram(ngram) and \
                    (ngram.word_1.pos_tag in pos_tags_codes_word_1) and \
                    (ngram.word_2.pos_tag in pos_tags_codes_word_2) and \
                    (ngram.word_3.pos_tag in pos_tags_codes_word_3) and \
                    eval(polarity_eval_stm):
                _count += 1
    return _count
# UNTESTED
def count_of_unigrams_bigrams_and_trigrams_scores(bow_sentences,
                                                  unigram=ADJS,
                                                  bigram_word_1=ADVS, bigram_word_2=ADJS,
                                                  trigram_word_1=ADVS, trigram_word_2=ADVS, trigram_word_3=ADJS,
                                                  positive=True):
    """Sum of unigram+bigram count and trigram count for a single polarity."""
    uni_bi = count_of_unigrams_and_bigrams_scores(
        bow_sentences,
        unigram=unigram,
        bigram_word_1=bigram_word_1, bigram_word_2=bigram_word_2,
        positive=positive)
    tri = count_of_trigrams_scores(
        bow_sentences,
        trigram_word_1=trigram_word_1,
        trigram_word_2=trigram_word_2,
        trigram_word_3=trigram_word_3,
        positive=positive)
    return uni_bi + tri
# UNTESTED
def positive_to_negative_ratio_count_unigrams_bigrams_and_trigrams_scores(bow_sentences,
                                                                          unigram=ADJS,
                                                                          bigram_word_1=ADVS, bigram_word_2=ADJS,
                                                                          trigram_word_1=ADVS, trigram_word_2=ADVS, trigram_word_3=ADJS):
    """Positive minus negative combined 1/2/3-gram counts (a difference)."""
    def total(flag):
        # One polarity's combined unigram+bigram+trigram count.
        return count_of_unigrams_bigrams_and_trigrams_scores(
            bow_sentences,
            unigram=unigram,
            bigram_word_1=bigram_word_1, bigram_word_2=bigram_word_2,
            trigram_word_1=trigram_word_1, trigram_word_2=trigram_word_2, trigram_word_3=trigram_word_3,
            positive=flag)
    return total(True) - total(False)
def count_selected_ngrams(bow_sentences):
    """Total number of selected ngrams across all bag-of-words sentences.

    Parameters
    ----------
    bow_sentences : iterable of sized containers

    Returns
    -------
    int
        Sum of ``len(sentence)`` over all sentences; 0 for empty input.
    """
    # Idiomatic C-speed sum instead of the manual accumulator loop.
    return sum(len(bs) for bs in bow_sentences)
def document_size(bow_sentences):
    """Return the document word count recorded on the first recognized ngram.

    The first ngram classified as a unigram/bigram/trigram decides the
    result: its (last word's) ``doc_word_count`` when truthy, else 0.
    Returns 0 when no recognized ngram is found at all.
    """
    for sentence in bow_sentences:
        for ngram in sentence:
            if pre.is_unigram(ngram):
                return ngram.doc_word_count or 0
            if pre.is_bigram(ngram):
                return ngram.word_2.doc_word_count or 0
            if pre.is_trigram(ngram):
                return ngram.word_3.doc_word_count or 0
    return 0
def percentage_of_negated_ngrams_by_document_size(bow_sentences):
    """Feature: fraction of n-grams whose first word is a negation word,
    relative to the document's word count (0.0 when the count is unknown).
    """
    negated = 0
    doc_words = 0
    for sentence in bow_sentences:
        for ngram in sentence:
            # Only bigrams/trigrams carry a word_1 to inspect; unigrams are
            # skipped before word_1 is ever touched.
            if (pre.is_bigram(ngram) or pre.is_trigram(ngram)) \
                    and ngram.word_1.word in pre.NEGATION_WORDS:
                # Keeps the most recently seen doc_word_count — presumably
                # identical for every word of the document (TODO confirm).
                doc_words = ngram.word_1.doc_word_count
                negated += 1
    doc_words = float(doc_words)
    if doc_words > 0:
        return {'value': negated / doc_words, 'name': 'percentage_of_negated_ngrams_by_document_size'}
    return {'value': 0.0, 'name': 'percentage_of_negated_ngrams_by_document_size'}
""" ------------------------------ Features functions ------------------------------ """
def positive_adjectives_count(bow_sentences):
    """Feature: adjective unigrams counted with positive polarity."""
    value = count_of_unigrams_scores(bow_sentences, unigram=ADJS, positive=True)
    return {'value': value, 'name': 'positive_adjectives_count'}
def negative_adjectives_count(bow_sentences):
    """Feature: adjective unigrams counted with negative polarity."""
    value = count_of_unigrams_scores(bow_sentences, unigram=ADJS, positive=False)
    return {'value': value, 'name': 'negative_adjectives_count'}
def positive_adverbs_count(bow_sentences):
    """Feature: adverb unigrams counted with positive polarity."""
    value = count_of_unigrams_scores(bow_sentences, unigram=ADVS, positive=True)
    return {'value': value, 'name': 'positive_adverbs_count'}
def negative_adverbs_count(bow_sentences):
    """Feature: adverb unigrams counted with negative polarity."""
    value = count_of_unigrams_scores(bow_sentences, unigram=ADVS, positive=False)
    return {'value': value, 'name': 'negative_adverbs_count'}
def positive_verbs_count(bow_sentences):
    """Feature: verb unigrams counted with positive polarity."""
    value = count_of_unigrams_scores(bow_sentences, unigram=VERBS, positive=True)
    return {'value': value, 'name': 'positive_verbs_count'}
def negative_verbs_count(bow_sentences):
    """Feature: verb unigrams counted with negative polarity."""
    value = count_of_unigrams_scores(bow_sentences, unigram=VERBS, positive=False)
    return {'value': value, 'name': 'negative_verbs_count'}
def positive_to_negative_ratio_of_adjectives_count(bow_sentences):
    """Feature: positive-to-negative balance of adjective unigrams
    (as computed by positive_to_negative_ratio_count_unigrams_scores)."""
    value = positive_to_negative_ratio_count_unigrams_scores(bow_sentences, unigram=ADJS)
    return {'value': value, 'name': 'positive_to_negative_ratio_of_adjectives_count'}
def positive_to_negative_ratio_of_adverbs_count(bow_sentences):
    """Feature: positive-to-negative balance of adverb unigrams."""
    value = positive_to_negative_ratio_count_unigrams_scores(bow_sentences, unigram=ADVS)
    return {'value': value, 'name': 'positive_to_negative_ratio_of_adverbs_count'}
def positive_to_negative_ratio_of_verbs_count(bow_sentences):
    """Feature: positive-to-negative balance of verb unigrams."""
    value = positive_to_negative_ratio_count_unigrams_scores(bow_sentences, unigram=VERBS)
    return {'value': value, 'name': 'positive_to_negative_ratio_of_verbs_count'}
def positive_adjectives_count_and_bigrams_with_adjectives(bow_sentences):
    """Feature: positive adjectives plus ADV+ADJ bigram hits."""
    value = count_of_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ADJS, bigram_word_1=ADVS, bigram_word_2=ADJS, positive=True)
    return {'value': value, 'name': 'positive_adjectives_count_and_bigrams_with_adjectives'}
def negative_adjectives_count_and_bigrams_with_adjectives(bow_sentences):
    """Feature: negative adjectives plus ADV+ADJ bigram hits."""
    value = count_of_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ADJS, bigram_word_1=ADVS, bigram_word_2=ADJS, positive=False)
    return {'value': value, 'name': 'negative_adjectives_count_and_bigrams_with_adjectives'}
def positive_adverbs_count_and_bigrams_with_adverbs(bow_sentences):
    """Feature: positive adverbs plus ADV+ADV bigram hits."""
    value = count_of_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ADVS, bigram_word_1=ADVS, bigram_word_2=ADVS, positive=True)
    return {'value': value, 'name': 'positive_adverbs_count_and_bigrams_with_adverbs'}
def negative_adverbs_count_and_bigrams_with_adverbs(bow_sentences):
    """Feature: negative adverbs plus ADV+ADV bigram hits."""
    value = count_of_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ADVS, bigram_word_1=ADVS, bigram_word_2=ADVS, positive=False)
    return {'value': value, 'name': 'negative_adverbs_count_and_bigrams_with_adverbs'}
def positive_verbs_count_and_bigrams_with_verbs(bow_sentences):
    """Feature: positive verbs plus ADV+VERB bigram hits."""
    value = count_of_unigrams_and_bigrams_scores(
        bow_sentences, unigram=VERBS, bigram_word_1=ADVS, bigram_word_2=VERBS, positive=True)
    return {'value': value, 'name': 'positive_verbs_count_and_bigrams_with_verbs'}
def negative_verbs_count_and_bigrams_with_verbs(bow_sentences):
    """Feature: negative verbs plus ADV+VERB bigram hits."""
    value = count_of_unigrams_and_bigrams_scores(
        bow_sentences, unigram=VERBS, bigram_word_1=ADVS, bigram_word_2=VERBS, positive=False)
    return {'value': value, 'name': 'negative_verbs_count_and_bigrams_with_verbs'}
def positive_unigrams_and_bigrams_count(bow_sentences):
    """Feature: positive count over all unigram and bigram POS tags."""
    value = count_of_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ALL, bigram_word_1=ALL, bigram_word_2=ALL, positive=True)
    return {'value': value, 'name': 'positive_unigrams_and_bigrams_count'}
def negative_unigrams_and_bigrams_count(bow_sentences):
    """Feature: negative count over all unigram and bigram POS tags."""
    value = count_of_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ALL, bigram_word_1=ALL, bigram_word_2=ALL, positive=False)
    return {'value': value, 'name': 'negative_unigrams_and_bigrams_count'}
def positive_unigrams_bigrams_and_trigrams_count(bow_sentences):
    """Feature: positive count over all uni/bi/trigram POS tags."""
    value = count_of_unigrams_bigrams_and_trigrams_scores(
        bow_sentences, unigram=ALL, bigram_word_1=ALL, bigram_word_2=ALL,
        trigram_word_1=ALL, trigram_word_2=ALL, trigram_word_3=ALL, positive=True)
    return {'value': value, 'name': 'positive_unigrams_bigrams_and_trigrams_count'}
def negative_unigrams_bigrams_and_trigrams_count(bow_sentences):
    """Feature: negative count over all uni/bi/trigram POS tags."""
    value = count_of_unigrams_bigrams_and_trigrams_scores(
        bow_sentences, unigram=ALL, bigram_word_1=ALL, bigram_word_2=ALL,
        trigram_word_1=ALL, trigram_word_2=ALL, trigram_word_3=ALL, positive=False)
    return {'value': value, 'name': 'negative_unigrams_bigrams_and_trigrams_count'}
def positive_to_negative_ratio_of_adjectives_count_and_bigrams_with_adjectives(bow_sentences):
    """Feature: positive-to-negative balance for adjectives + ADV+ADJ bigrams."""
    value = positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ADJS, bigram_word_1=ADVS, bigram_word_2=ADJS)
    return {'value': value, 'name': 'positive_to_negative_ratio_of_adjectives_count_and_bigrams_with_adjectives'}
def positive_to_negative_ratio_of_adverbs_count_and_bigrams_with_adverbs(bow_sentences):
    """Feature: positive-to-negative balance for adverbs + ADV+ADV bigrams."""
    value = positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ADVS, bigram_word_1=ADVS, bigram_word_2=ADVS)
    return {'value': value, 'name': 'positive_to_negative_ratio_of_adverbs_count_and_bigrams_with_adverbs'}
def positive_to_negative_ratio_of_verbs_count_and_bigrams_with_verbs(bow_sentences):
    """Feature: positive-to-negative balance for verbs + ADV+VERB bigrams."""
    value = positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
        bow_sentences, unigram=VERBS, bigram_word_1=ADVS, bigram_word_2=VERBS)
    return {'value': value, 'name': 'positive_to_negative_ratio_of_verbs_count_and_bigrams_with_verbs'}
def positive_to_negative_ratio_of_unigrams_and_bigrams_count(bow_sentences):
    """Feature: positive-to-negative balance over all unigrams and bigrams."""
    value = positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
        bow_sentences, unigram=ALL, bigram_word_1=ALL, bigram_word_2=ALL)
    return {'value': value, 'name': 'positive_to_negative_ratio_of_unigrams_and_bigrams_count'}
def positive_to_negative_ratio_of_unigrams_bigrams_and_trigrams_count(bow_sentences):
    """Feature: positive-to-negative balance over all uni/bi/trigrams."""
    value = positive_to_negative_ratio_count_unigrams_bigrams_and_trigrams_scores(
        bow_sentences,
        unigram=ALL,
        bigram_word_1=ALL, bigram_word_2=ALL,
        trigram_word_1=ALL, trigram_word_2=ALL, trigram_word_3=ALL)
    return {'value': value, 'name': 'positive_to_negative_ratio_of_unigrams_bigrams_and_trigrams_count'}
def selected_ngrams_count(bow_sentences):
    """Feature: total number of selected n-grams."""
    total = count_selected_ngrams(bow_sentences)
    return {'value': total, 'name': 'selected_ngrams_count'}
def original_document_size(bow_sentences):
    """Feature: word count of the original document (0 when unknown)."""
    size = document_size(bow_sentences)
    return {'value': size, 'name': 'original_document_size'}
def all(bow_sentences,
        unigrams_only=True,
        unigrams_only_ratio=True,
        unigram_type=ALL,
        non_general_unigrams_and_bigrams=True,
        non_general_unigrams_and_bigrams_ratio=True,
        non_general_bigram_type=ALL_NON_GENERAL_BIGRAMS,
        ngrams_count=True,
        general_unigrams_and_bigrams=True,
        general_unigrams_and_bigrams_ratio=True,
        unigrams_and_bigrams_trigram=True,
        unigrams_and_bigrams_trigram_ratio=True):
    """Collect every enabled feature dict for *bow_sentences*.

    Each boolean flag toggles a family of features; *unigram_type* and
    *non_general_bigram_type* pick the POS-tag variants within a family.
    Unrecognized type values fall back to the full set of that family.
    NOTE: this function shadows the builtin all() inside this module.
    """
    features_list = []
    if unigrams_only:
        if unigram_type == ADJS:
            selected = [positive_adjectives_count, negative_adjectives_count]
        elif unigram_type == ADJS_AND_ADVS:
            selected = [positive_adjectives_count, negative_adjectives_count,
                        positive_adverbs_count, negative_adverbs_count]
        elif unigram_type == ADJS_AND_VERBS:
            selected = [positive_adjectives_count, negative_adjectives_count,
                        positive_verbs_count, negative_verbs_count]
        elif unigram_type == ADVS_AND_VERBS:
            selected = [positive_adverbs_count, negative_adverbs_count,
                        positive_verbs_count, negative_verbs_count]
        elif unigram_type == ADVS:
            selected = [positive_adverbs_count, negative_adverbs_count]
        elif unigram_type == VERBS:
            selected = [positive_verbs_count, negative_verbs_count]
        else:
            # Fallback: all three POS families.
            selected = [positive_adjectives_count, negative_adjectives_count,
                        positive_adverbs_count, negative_adverbs_count,
                        positive_verbs_count, negative_verbs_count]
        features_list.extend(feature(bow_sentences) for feature in selected)
    if unigrams_only_ratio:
        features_list.extend(feature(bow_sentences) for feature in (
            positive_to_negative_ratio_of_adjectives_count,
            positive_to_negative_ratio_of_adverbs_count,
            positive_to_negative_ratio_of_verbs_count))
    if non_general_unigrams_and_bigrams:
        if non_general_bigram_type == ADVS_AND_BI_ADV_ADV:
            selected = [positive_adverbs_count_and_bigrams_with_adverbs,
                        negative_adverbs_count_and_bigrams_with_adverbs]
        elif non_general_bigram_type == VERBS_AND_BI_ADV_VERB:
            selected = [positive_verbs_count_and_bigrams_with_verbs,
                        negative_verbs_count_and_bigrams_with_verbs]
        elif non_general_bigram_type == ADJS_AND_BI_ADV_ADJ:
            selected = [positive_adjectives_count_and_bigrams_with_adjectives,
                        negative_adjectives_count_and_bigrams_with_adjectives]
        else:
            # Fallback: every non-general unigram+bigram combination.
            selected = [positive_adverbs_count_and_bigrams_with_adverbs,
                        negative_adverbs_count_and_bigrams_with_adverbs,
                        positive_verbs_count_and_bigrams_with_verbs,
                        negative_verbs_count_and_bigrams_with_verbs,
                        positive_adjectives_count_and_bigrams_with_adjectives,
                        negative_adjectives_count_and_bigrams_with_adjectives]
        features_list.extend(feature(bow_sentences) for feature in selected)
    if non_general_unigrams_and_bigrams_ratio:
        features_list.extend(feature(bow_sentences) for feature in (
            positive_to_negative_ratio_of_adjectives_count_and_bigrams_with_adjectives,
            positive_to_negative_ratio_of_adverbs_count_and_bigrams_with_adverbs,
            positive_to_negative_ratio_of_verbs_count_and_bigrams_with_verbs))
    if ngrams_count:
        features_list.append(selected_ngrams_count(bow_sentences))
        features_list.append(original_document_size(bow_sentences))
    if general_unigrams_and_bigrams:
        features_list.append(positive_unigrams_and_bigrams_count(bow_sentences))
        features_list.append(negative_unigrams_and_bigrams_count(bow_sentences))
        features_list.append(percentage_of_negated_ngrams_by_document_size(bow_sentences))
    if general_unigrams_and_bigrams_ratio:
        features_list.append(positive_to_negative_ratio_of_unigrams_and_bigrams_count(bow_sentences))
    if unigrams_and_bigrams_trigram:
        features_list.append(positive_unigrams_bigrams_and_trigrams_count(bow_sentences))
        features_list.append(negative_unigrams_bigrams_and_trigrams_count(bow_sentences))
    if unigrams_and_bigrams_trigram_ratio:
        features_list.append(positive_to_negative_ratio_of_unigrams_bigrams_and_trigrams_count(bow_sentences))
    return features_list
| 48.87963
| 171
| 0.691324
| 2,574
| 21,116
| 5.115385
| 0.032634
| 0.119389
| 0.06972
| 0.07215
| 0.928913
| 0.877269
| 0.842333
| 0.803296
| 0.764411
| 0.686717
| 0
| 0.008986
| 0.241097
| 21,116
| 431
| 172
| 48.993039
| 0.812668
| 0.001231
| 0
| 0.439873
| 0
| 0
| 0.067897
| 0.053935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.120253
| false
| 0
| 0.012658
| 0.082278
| 0.275316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cb0fb9a66b11a896701283a1ee2ac90821edc15c
| 761
|
py
|
Python
|
pyid3tagger/__init__.py
|
pkla6/pyid3tagger
|
ca61136b319474d9c77339e514e5615f7343a30e
|
[
"MIT"
] | 1
|
2019-01-21T03:45:00.000Z
|
2019-01-21T03:45:00.000Z
|
pyid3tagger/__init__.py
|
pkla6/pyid3tagger
|
ca61136b319474d9c77339e514e5615f7343a30e
|
[
"MIT"
] | null | null | null |
pyid3tagger/__init__.py
|
pkla6/pyid3tagger
|
ca61136b319474d9c77339e514e5615f7343a30e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from const import *
from utilities import *
from tags import ID3v1Tag
from tags import ID3v1_1Tag
from tags import ID3v2_3Tag
from file_tags import FileTags
from id3v2_3frames import ID3v2_3_APIC_Frame
from id3v2_3frames import ID3v2_3_COMM_Frame
from id3v2_3frames import ID3v2_3_GEOB_Frame
from id3v2_3frames import ID3v2_3_TALB_Frame
from id3v2_3frames import ID3v2_3_TCON_Frame
from id3v2_3frames import ID3v2_3_TDAT_Frame
from id3v2_3frames import ID3v2_3_TIT2_Frame
from id3v2_3frames import ID3v2_3_TPE1_Frame
from id3v2_3frames import ID3v2_3_TPE2_Frame
from id3v2_3frames import ID3v2_3_TPOS_Frame
from id3v2_3frames import ID3v2_3_TRCK_Frame
from id3v2_3frames import ID3v2_3_TYER_Frame
from id3v2_3frames import ID3v2_3_WPUB_Frame
| 30.44
| 44
| 0.885677
| 132
| 761
| 4.689394
| 0.234848
| 0.248788
| 0.336026
| 0.462036
| 0.684976
| 0.684976
| 0.639742
| 0
| 0
| 0
| 0
| 0.132548
| 0.107753
| 761
| 24
| 45
| 31.708333
| 0.779087
| 0.015769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cb5561e38b694e1a9f16090b3f758e2b38b8bd37
| 104
|
py
|
Python
|
src/__init__.py
|
paul-buechner/magic-illustrator
|
afab2391c318f800128fad886372cc5f1601bd27
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
paul-buechner/magic-illustrator
|
afab2391c318f800128fad886372cc5f1601bd27
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
paul-buechner/magic-illustrator
|
afab2391c318f800128fad886372cc5f1601bd27
|
[
"MIT"
] | null | null | null |
from src.illustrator_config import *
def main():
    """Entry point for the illustrator app.

    `initialize` is provided by the star-import of src.illustrator_config;
    its behavior is not visible here.
    """
    # Initialize illustrator thread
    initialize()
| 14.857143
| 36
| 0.721154
| 11
| 104
| 6.727273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201923
| 104
| 6
| 37
| 17.333333
| 0.891566
| 0.278846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cb86d241b5a12e53dda69e152db6417d0b48050f
| 92
|
py
|
Python
|
Chapter09/testset1.py
|
ibiscum/Python-Parallel-Programming-Cookbook-Second-Edition
|
8fd583019778b4d797d4f948d091b5564e23f732
|
[
"MIT"
] | null | null | null |
Chapter09/testset1.py
|
ibiscum/Python-Parallel-Programming-Cookbook-Second-Edition
|
8fd583019778b4d797d4f948d091b5564e23f732
|
[
"MIT"
] | null | null | null |
Chapter09/testset1.py
|
ibiscum/Python-Parallel-Programming-Cookbook-Second-Edition
|
8fd583019778b4d797d4f948d091b5564e23f732
|
[
"MIT"
] | null | null | null |
# testset.py
from nose.tools import eq_
import unittest
def test_sum():
    """Sanity check: integer addition behaves as expected.

    Uses a plain ``assert`` instead of nose's ``eq_``: nose is unmaintained
    and incompatible with modern Python, and the builtin assert performs
    the identical equality check under any test runner.
    """
    assert 2 + 2 == 4
| 11.5
| 26
| 0.673913
| 16
| 92
| 3.6875
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.217391
| 92
| 7
| 27
| 13.142857
| 0.777778
| 0.108696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cbe2b6c21cb6f5b10154c5a8bb476316dcd1eedc
| 168
|
py
|
Python
|
doc/epsilon_pretty_gdb_load.py
|
Jojendersie/Epsilon-Intersection
|
23f020ddc9832742dc156c7dac038276070707b2
|
[
"MIT"
] | 14
|
2015-01-18T21:13:02.000Z
|
2022-01-19T17:24:29.000Z
|
doc/epsilon_pretty_gdb_load.py
|
Jojendersie/Epsilon-Intersection
|
23f020ddc9832742dc156c7dac038276070707b2
|
[
"MIT"
] | 28
|
2015-08-06T14:27:35.000Z
|
2022-03-21T09:03:44.000Z
|
doc/epsilon_pretty_gdb_load.py
|
Jojendersie/Epsilon-Intersection
|
23f020ddc9832742dc156c7dac038276070707b2
|
[
"MIT"
] | 5
|
2018-11-15T11:35:34.000Z
|
2021-08-16T03:38:41.000Z
|
import gdb.printing
import epsilon_pretty_printing
# Register the epsilon pretty-printers with GDB for the objfile currently
# being loaded.  NOTE(review): gdb.current_objfile() is non-None only when
# GDB auto-loads this script alongside a binary — presumably that is how
# this file is intended to be used; confirm against the project docs.
gdb.printing.register_pretty_printer(
    gdb.current_objfile(),
    epsilon_pretty_printing.build_pretty_printer())
| 28
| 51
| 0.839286
| 21
| 168
| 6.285714
| 0.47619
| 0.166667
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 168
| 6
| 51
| 28
| 0.862745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0.8
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
1dad134c96f2d058a653cbab605f583da26e4399
| 163
|
py
|
Python
|
app/main/__init__.py
|
VirginiaNdungu1/Taarifa-App
|
0a04bd0ddffd43a59cb92a136645cd9c8d4a1768
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
VirginiaNdungu1/Taarifa-App
|
0a04bd0ddffd43a59cb92a136645cd9c8d4a1768
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
VirginiaNdungu1/Taarifa-App
|
0a04bd0ddffd43a59cb92a136645cd9c8d4a1768
|
[
"MIT"
] | null | null | null |
# Blueprint lets this package register its views with the application factory.
from flask import Blueprint
# The package-level blueprint object; views attach their routes to it.
main = Blueprint('main', __name__)
# Imported after `main` is defined — presumably views.py imports `main`
# from this module, so importing earlier would be circular (TODO confirm).
from . import views
| 23.285714
| 34
| 0.785276
| 21
| 163
| 5.904762
| 0.52381
| 0.241935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153374
| 163
| 6
| 35
| 27.166667
| 0.898551
| 0.447853
| 0
| 0
| 0
| 0
| 0.046512
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
1dcd0e766d97687fda0373b608e56a082a7779f4
| 90
|
py
|
Python
|
3_team/tests/sample_doctest_ok.py
|
pyfirst/pymook-samplecode
|
82321237c34515d287f28bd51ea86f870c1f5514
|
[
"MIT"
] | 31
|
2017-09-27T14:54:39.000Z
|
2021-05-26T14:03:44.000Z
|
3_team/tests/sample_doctest_ok.py
|
pyfirst/pymook-samplecode
|
82321237c34515d287f28bd51ea86f870c1f5514
|
[
"MIT"
] | 11
|
2018-03-11T05:28:14.000Z
|
2022-03-11T23:19:36.000Z
|
3_team/tests/sample_doctest_ok.py
|
pyfirst/pymook-samplecode
|
82321237c34515d287f28bd51ea86f870c1f5514
|
[
"MIT"
] | 41
|
2017-10-21T04:45:56.000Z
|
2021-07-16T14:12:33.000Z
|
def get_ok():
    """Return the string 'OK'.

    >>> get_ok()
    'OK'
    """
    result = 'OK'
    return result
| 10
| 16
| 0.366667
| 11
| 90
| 2.818182
| 0.545455
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.411111
| 90
| 8
| 17
| 11.25
| 0.584906
| 0.344444
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
1dcfcfe873ef4b985d677df7eb2f2763cc77a4c7
| 227
|
py
|
Python
|
{{cookiecutter.repo_name}}/app.py
|
xhlulu/dash-template
|
c76debd0e7c8cf119b46bf6e7233ea967851cb78
|
[
"MIT"
] | 1
|
2021-04-07T17:27:26.000Z
|
2021-04-07T17:27:26.000Z
|
{{cookiecutter.repo_name}}/app.py
|
xhlulu/dash-template
|
c76debd0e7c8cf119b46bf6e7233ea967851cb78
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/app.py
|
xhlulu/dash-template
|
c76debd0e7c8cf119b46bf6e7233ea967851cb78
|
[
"MIT"
] | null | null | null |
{#- Dispatch to the matching app template based on the cookiecutter
    `format` option; this comment is stripped when the template renders. -#}
{%- if cookiecutter.format == 'bootstrap' -%}
{%- include 'cookiecutter_templates/app_bootstrap.py' -%}
{%- elif cookiecutter.format == 'regular' -%}
{%- include 'cookiecutter_templates/app_regular.py' -%}
{%- endif -%}
| 45.4
| 61
| 0.656388
| 21
| 227
| 6.904762
| 0.52381
| 0.248276
| 0.386207
| 0.427586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132159
| 227
| 5
| 62
| 45.4
| 0.736041
| 0
| 0
| 0
| 0
| 0
| 0.403509
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1dd6a3d04a7d00be735a4344b52b63f7d4e00faa
| 32
|
py
|
Python
|
atpg/utils/__init__.py
|
jstavr/SDN_Project
|
9fe5a65f46eadf15e1da43d9f8125b8c15161bbd
|
[
"Apache-2.0"
] | null | null | null |
atpg/utils/__init__.py
|
jstavr/SDN_Project
|
9fe5a65f46eadf15e1da43d9f8125b8c15161bbd
|
[
"Apache-2.0"
] | null | null | null |
atpg/utils/__init__.py
|
jstavr/SDN_Project
|
9fe5a65f46eadf15e1da43d9f8125b8c15161bbd
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Aug 14, 2011
'''
| 8
| 23
| 0.5625
| 5
| 32
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.24
| 0.21875
| 32
| 3
| 24
| 10.666667
| 0.48
| 0.71875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
380909b6935f2ea342b89b999d1e6343dc2c6c30
| 271
|
py
|
Python
|
flask_unchained/_code_templates/project/app/extensions/__init__.py
|
achiang/flask-unchained
|
12788a6e618904a25ff2b571eb05ff1dc8f1840f
|
[
"MIT"
] | 69
|
2018-10-10T01:59:11.000Z
|
2022-03-29T17:29:30.000Z
|
flask_unchained/_code_templates/project/app/extensions/__init__.py
|
achiang/flask-unchained
|
12788a6e618904a25ff2b571eb05ff1dc8f1840f
|
[
"MIT"
] | 18
|
2018-11-17T12:42:02.000Z
|
2021-05-22T18:45:27.000Z
|
flask_unchained/_code_templates/project/app/extensions/__init__.py
|
achiang/flask-unchained
|
12788a6e618904a25ff2b571eb05ff1dc8f1840f
|
[
"MIT"
] | 7
|
2018-10-12T16:20:25.000Z
|
2021-10-06T12:18:21.000Z
|
# Template for wiring Flask extensions into the app; uncomment and adapt:
# from vendor import ExtensionName
# extension_instance = ExtensionName()

# Maps extension name -> instance, or -> (instance, [dependency names])
# when initialization order matters.  NOTE(review): presumably consumed by
# Flask Unchained's extension loader — confirm against the framework docs.
EXTENSIONS = {
    # 'extension_name': extension_instance,
    # or, if an extension depends on other extension(s):
    # 'extension_name': (extension_instance, ['ext', 'dependency', 'names']),
}
| 30.111111
| 77
| 0.704797
| 28
| 271
| 6.642857
| 0.642857
| 0.274194
| 0.236559
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162362
| 271
| 8
| 78
| 33.875
| 0.819383
| 0.848708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
69cb4e0b611917997928e326ebefaae54a7252b8
| 113
|
py
|
Python
|
test/resources/hello_virtual/python/develop/hello.py
|
Manu343726/biicode-common
|
91b32c6fd1e4a72ce5451183f1766d313cd0e420
|
[
"MIT"
] | 17
|
2015-04-15T09:40:23.000Z
|
2017-05-17T20:34:49.000Z
|
test/resources/hello_virtual/python/develop/hello.py
|
Manu343726/biicode-common
|
91b32c6fd1e4a72ce5451183f1766d313cd0e420
|
[
"MIT"
] | 2
|
2015-04-22T11:29:36.000Z
|
2018-09-25T09:31:09.000Z
|
test/resources/hello_virtual/python/develop/hello.py
|
bowlofstew/common
|
45e9ca902be7bbbdd73dafe3ab8957bc4a006020
|
[
"MIT"
] | 22
|
2015-04-15T09:46:00.000Z
|
2020-09-29T17:03:31.000Z
|
'''
Created on 17/07/2013
@author: drodri
'''
import sys
def hello():
    """Write the develop marker string to standard output, without a newline."""
    message = "Develop: %PRINT%"
    sys.stdout.write(message)
| 11.3
| 40
| 0.637168
| 16
| 113
| 4.5
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 0.168142
| 113
| 9
| 41
| 12.555556
| 0.680851
| 0.336283
| 0
| 0
| 0
| 0
| 0.238806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
69cb8c70545ac2529f5d07b142a59d1bb203fbe5
| 44
|
py
|
Python
|
src/oop/Zoo/__init__.py
|
tborzyszkowski/TestAutomationInPython
|
843c71df796588e181466d9b9b549f03dd907a6e
|
[
"MIT"
] | 2
|
2020-10-08T09:44:12.000Z
|
2021-10-08T08:32:19.000Z
|
src/oop/Zoo/__init__.py
|
tborzyszkowski/TestAutomationInPython
|
843c71df796588e181466d9b9b549f03dd907a6e
|
[
"MIT"
] | null | null | null |
src/oop/Zoo/__init__.py
|
tborzyszkowski/TestAutomationInPython
|
843c71df796588e181466d9b9b549f03dd907a6e
|
[
"MIT"
] | 1
|
2020-10-19T14:08:00.000Z
|
2020-10-19T14:08:00.000Z
|
from .World import *
from .Position import *
| 22
| 23
| 0.75
| 6
| 44
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 2
| 23
| 22
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.