hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d6fd117c0e7ff9f0666672a6e3ed6dee73755e6c | 2,951 | py | Python | backend/app/ColorConsole.py | GJCav/thywy | 3c458bccdd23bab78b6a8bd65603c7845e643d70 | [
"MIT"
] | 8 | 2022-01-23T07:30:06.000Z | 2022-02-15T03:39:25.000Z | backend/app/ColorConsole.py | Dr-Left/thuwy | 3c458bccdd23bab78b6a8bd65603c7845e643d70 | [
"MIT"
] | 5 | 2022-01-21T03:31:22.000Z | 2022-03-04T00:01:59.000Z | backend/app/ColorConsole.py | Dr-Left/thuwy | 3c458bccdd23bab78b6a8bd65603c7845e643d70 | [
"MIT"
] | 2 | 2022-01-23T08:09:46.000Z | 2022-02-24T05:55:02.000Z | """
Format:
\033[0m -> reset to the default rendering
\033[<method>;<foreground>;<background>m -> formatted display
The three parameters may appear in any order because their value ranges do not overlap.
Display methods:
0 - default
1 - highlight (bold)
4 - underline
5 - blink
7 - reverse video
8 - hidden
Foreground colors:
30 - black
31 - red
32 - green
33 - yellow
34 - blue
35 - plum (magenta)
36 - cyan
37 - white
Background colors:
40 - black
(add 10 to the corresponding foreground color code)
"""
from copy import copy as _copy
METHOD_DEFAULT = -1
METHOD_BOLD = 1
METHOD_UNDERLINE = 4
METHOD_FLASH = 5
METHOD_REVERSE = 7
METHOD_HIDE = 8
FORE_BLACK = 30
FORE_RED = 31
FORE_GREEN = 32
FORE_YELLOW = 33
FORE_BLUE = 34
FORE_PLUM = 35
FORE_CYAN = 36
FORE_WHITE = 37
FORE_DEFAULT = -1
BACK_BLACK = 40
BACK_RED = 41
BACK_GREEN = 42
BACK_YELLOW = 43
BACK_BLUE = 44
BACK_PLUM = 45
BACK_CYAN = 46
BACK_WHITE = 47
BACK_DEFAULT = -1
def _ColorDecoratorAll(content, method, foreColor, backColor):
rtn = "\033["
if method != METHOD_DEFAULT:
rtn += str(method)
if foreColor != FORE_DEFAULT:
rtn += ";" + str(foreColor)
if backColor != BACK_DEFAULT:
rtn += ";" + str(backColor)
rtn += "m" + content + "\033[0m"
return rtn
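# Editor-added example of the string produced above (derived from the logic of
# _ColorDecoratorAll, not taken from the original source):
#     _ColorDecoratorAll("hi", METHOD_BOLD, FORE_RED, BACK_DEFAULT)
#     -> "\033[1;31mhi\033[0m"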
class _StrDecorator:
method = METHOD_DEFAULT
foreColor = FORE_DEFAULT
backColor = BACK_DEFAULT
def __init__(
self, method=METHOD_DEFAULT, foreColor=FORE_DEFAULT, backColor=BACK_DEFAULT
):
self.method = method
self.foreColor = foreColor
self.backColor = backColor
def __add__(self, ano):
rtn = _copy(self)
if ano.method != METHOD_DEFAULT:
rtn.method = ano.method
if ano.foreColor != FORE_DEFAULT:
rtn.foreColor = ano.foreColor
if ano.backColor != BACK_DEFAULT:
rtn.backColor = ano.backColor
return rtn
def __call__(self, text):  # renamed from `str` to avoid shadowing the built-in
return _ColorDecoratorAll(text, self.method, self.foreColor, self.backColor)
# Fore color
Black = _StrDecorator(foreColor=FORE_BLACK)
Red = _StrDecorator(foreColor=FORE_RED)
Green = _StrDecorator(foreColor=FORE_GREEN)
Yellow = _StrDecorator(foreColor=FORE_YELLOW)
Blue = _StrDecorator(foreColor=FORE_BLUE)
Plum = _StrDecorator(foreColor=FORE_PLUM)
Cyan = _StrDecorator(foreColor=FORE_CYAN)
White = _StrDecorator(foreColor=FORE_WHITE)
# Method
Bold = _StrDecorator(method=METHOD_BOLD)
Underline = _StrDecorator(method=METHOD_UNDERLINE)
Flash = _StrDecorator(method=METHOD_FLASH)
Reverse = _StrDecorator(method=METHOD_REVERSE)
Hide = _StrDecorator(method=METHOD_HIDE)
# Back Color
BackBlack = _StrDecorator(backColor=BACK_BLACK)
BackRed = _StrDecorator(backColor=BACK_RED)
BackGreen = _StrDecorator(backColor=BACK_GREEN)
BackYellow = _StrDecorator(backColor=BACK_YELLOW)
BackBlue = _StrDecorator(backColor=BACK_BLUE)
BackPlum = _StrDecorator(backColor=BACK_PLUM)
BackCyan = _StrDecorator(backColor=BACK_CYAN)
BackWhite = _StrDecorator(backColor=BACK_WHITE)
# Some shortcuts
FontInfo = _StrDecorator() # All default
FontStrength = _copy(Bold)
FontWarning = Yellow + Bold
FontError = Red + Bold
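# Illustrative usage (editor-added sketch, not part of the original module; assumes
# the file is importable as `ColorConsole` and a terminal that honors ANSI codes):
#     from ColorConsole import Red, Bold, FontError
#     print(Red("error"))               # red foreground
#     print((Red + Bold)("fatal"))      # decorators combine via __add__
#     print(FontError("disk failure"))  # shortcut equal to Red + Bold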
| 22.18797 | 83 | 0.689597 | 367 | 2,951 | 5.280654 | 0.280654 | 0.080495 | 0.103199 | 0.022704 | 0.060888 | 0.060888 | 0.060888 | 0.060888 | 0.060888 | 0 | 0 | 0.034677 | 0.218231 | 2,951 | 132 | 84 | 22.356061 | 0.805375 | 0.120637 | 0 | 0.024691 | 0 | 0 | 0.005821 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049383 | false | 0 | 0.012346 | 0.012346 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba3553670d430c80e3adc22fd5128171a993576f | 740 | py | Python | tests/api/fixtures.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 5 | 2020-09-24T20:29:08.000Z | 2022-03-17T14:50:56.000Z | tests/api/fixtures.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 126 | 2020-10-01T23:38:34.000Z | 2022-03-31T08:26:28.000Z | tests/api/fixtures.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 5 | 2020-09-29T21:29:46.000Z | 2021-07-27T20:34:58.000Z | import aiohttp
import pytest
from aiohttp import web
from virtool_workflow.api.client import JobApiHttpSession
from tests.api.mocks.mock_api import mock_routes
@pytest.fixture
def loop(event_loop):
return event_loop
@pytest.fixture
async def jobs_api_url():
return "/api"
@pytest.fixture
async def mock_jobs_api_app(loop):
app = web.Application(loop=loop)
for route_table in mock_routes:
app.add_routes(route_table)
return app
@pytest.fixture
async def http(mock_jobs_api_app, aiohttp_client) -> JobApiHttpSession:
"""Create an http client for accessing the mocked Jobs API."""
session = await aiohttp_client(mock_jobs_api_app, auto_decompress=False)
return JobApiHttpSession(session)
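# Illustrative usage (editor-added sketch; assumes pytest-asyncio/pytest-aiohttp test
# collection and that JobApiHttpSession forwards aiohttp-style request methods):
#     async def test_mock_api_reachable(http, jobs_api_url):
#         response = await http.get(f"{jobs_api_url}/jobs/example")
#         assert response.status == 200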
| 21.764706 | 76 | 0.768919 | 106 | 740 | 5.150943 | 0.386792 | 0.064103 | 0.098901 | 0.115385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156757 | 740 | 33 | 77 | 22.424242 | 0.875 | 0 | 0 | 0.190476 | 0 | 0 | 0.0059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.238095 | 0.047619 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba38dbf48279ca33d67ba94668726fa34f3bcd92 | 11,047 | py | Python | all_functions/configs/proxy_scraper/pygoogle-0.6/googletest.py | Heroku-elasa/-heroku-buildpack-python-ieee-new | 06ec2fda04d9e478ed2506400e460489b0ca91ab | [
"MIT"
] | null | null | null | all_functions/configs/proxy_scraper/pygoogle-0.6/googletest.py | Heroku-elasa/-heroku-buildpack-python-ieee-new | 06ec2fda04d9e478ed2506400e460489b0ca91ab | [
"MIT"
] | 15 | 2021-03-18T20:25:13.000Z | 2022-03-02T14:54:33.000Z | all_functions/configs/proxy_scraper/pygoogle-0.6/googletest.py | Heroku-elasa/heroku-buildpack-python-ieee-new | 06ec2fda04d9e478ed2506400e460489b0ca91ab | [
"MIT"
] | 1 | 2017-03-04T16:48:55.000Z | 2017-03-04T16:48:55.000Z | """Unit test for google.py"""
__author__ = "Mark Pilgrim (f8dy@diveintomark.org)"
__version__ = "$Revision: 1.4 $"
__date__ = "$Date: 2004/02/06 21:00:53 $"
__copyright__ = "Copyright (c) 2002 Mark Pilgrim"
__license__ = "Python"
import google
import unittest
import sys, os
import GoogleSOAPFacade
from StringIO import StringIO
class BaseClass(unittest.TestCase):
q = "python unit testing"
url = "http://www.python.org/"
phrase = "ptyhon"
searchparams = {"func":"doGoogleSearch"}
luckyparams = {}
luckyparams.update(searchparams)
luckyparams.update({"feelingLucky":1})
metaparams = {}
metaparams.update(searchparams)
metaparams.update({"showMeta":1})
reverseparams = {}
reverseparams.update(searchparams)
reverseparams.update({"reverseOrder":1})
cacheparams = {"func":"doGetCachedPage"}
spellingparams = {"func":"doSpellingSuggestion"}
envkey = "GOOGLE_LICENSE_KEY"
badkey = "a"
class Redirector(BaseClass):
def setUp(self):
self.savestdout = sys.stdout
self.output = StringIO()
sys.stdout = self.output
def tearDown(self):
sys.stdout = self.savestdout
class CommandLineTest(Redirector):
def lastOutput(self):
self.output.seek(0)
rc = self.output.read()
self.output.seek(0)
return rc
def testVersion(self):
"""-v should print version"""
google.main(["-v"])
commandLineAnswer = self.lastOutput()
google._version()
self.assertEqual(commandLineAnswer, self.lastOutput())
def testVersionLong(self):
"""--version should print version"""
google.main(["--version"])
commandLineAnswer = self.lastOutput()
google._version()
self.assertEqual(commandLineAnswer, self.lastOutput())
def testHelp(self):
"""-h should print usage"""
google.main(["-h"])
commandLineAnswer = self.lastOutput()
google._usage()
self.assertEqual(commandLineAnswer, self.lastOutput())
def testHelpLong(self):
"""--help should print usage"""
google.main(["--help"])
commandLineAnswer = self.lastOutput()
google._usage()
self.assertEqual(commandLineAnswer, self.lastOutput())
def testSearch(self):
"""-s should search"""
google.main(["-s %s" % self.q])
commandLineAnswer = self.lastOutput()
google._output(google.doGoogleSearch(self.q), self.searchparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testSearchLong(self):
"""--search should search"""
google.main(["--search", self.q])
commandLineAnswer = self.lastOutput()
google._output(google.doGoogleSearch(self.q), self.searchparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testSearchDefault(self):
"""no options + search phrase should search"""
google.main([self.q])
commandLineAnswer = self.lastOutput()
google._output(google.doGoogleSearch(self.q), self.searchparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testNoOptions(self):
"""no options at all should print usage"""
google.main([])
commandLineAnswer = self.lastOutput()
google._usage()
self.assertEqual(commandLineAnswer, self.lastOutput())
def testCache(self):
"""-c should retrieve cache"""
google.main(["-c", self.url])
commandLineAnswer = self.lastOutput()
google._output(google.doGetCachedPage(self.url), self.cacheparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testCacheLong(self):
"""--cache should retrieve cache"""
google.main(["--cache", self.url])
commandLineAnswer = self.lastOutput()
google._output(google.doGetCachedPage(self.url), self.cacheparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testSpelling(self):
"""-p should check spelling"""
google.main(["-p", self.phrase])
commandLineAnswer = self.lastOutput()
google._output(google.doSpellingSuggestion(self.phrase), self.spellingparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testSpellingLong(self):
"""--spelling should check spelling"""
google.main(["--spelling", self.phrase])
commandLineAnswer = self.lastOutput()
google._output(google.doSpellingSuggestion(self.phrase), self.spellingparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testLucky(self):
"""-l should return only first result"""
google.main(["-l", "-s", self.q])
commandLineAnswer = self.lastOutput()
google._output(google.doGoogleSearch(self.q), self.luckyparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testLucky1(self):
"""-1 should return only first result"""
google.main(["-1", "-s", self.q])
commandLineAnswer = self.lastOutput()
google._output(google.doGoogleSearch(self.q), self.luckyparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testLuckyLong(self):
"""--lucky should return only first result"""
google.main(["--lucky", "-s", self.q])
commandLineAnswer = self.lastOutput()
google._output(google.doGoogleSearch(self.q), self.luckyparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testMeta(self):
"""-m should return meta information"""
google.main(["-m", "-s", self.q])
commandLineAnswer = self.lastOutput()
commandLineAnswer = commandLineAnswer[:commandLineAnswer.index('searchTime')]
google._output(google.doGoogleSearch(self.q), self.metaparams)
realAnswer = self.lastOutput()
realAnswer = realAnswer[:realAnswer.index('searchTime')]
self.assertEqual(commandLineAnswer, realAnswer)
def testMetaLong(self):
"""--meta should return meta information"""
google.main(["--meta", "-s", self.q])
commandLineAnswer = self.lastOutput()
commandLineAnswer = commandLineAnswer[:commandLineAnswer.index('searchTime')]
google._output(google.doGoogleSearch(self.q), self.metaparams)
realAnswer = self.lastOutput()
realAnswer = realAnswer[:realAnswer.index('searchTime')]
self.assertEqual(commandLineAnswer, realAnswer)
def testReverse(self):
"""-r should reverse results"""
google.main(["-r", "-s", self.q])
commandLineAnswer = self.lastOutput()
google._output(google.doGoogleSearch(self.q), self.reverseparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
def testReverseLong(self):
"""--reverse should reverse results"""
google.main(["--reverse", "-s", self.q])
commandLineAnswer = self.lastOutput()
google._output(google.doGoogleSearch(self.q), self.reverseparams)
self.assertEqual(commandLineAnswer, self.lastOutput())
class LicenseKeyTest(Redirector):
licensefile = "googlekey.txt"
licensebackup = "googlekey.txt.bak"
def safeRename(self, dirname, old, new):
if dirname:
old = os.path.join(dirname, old)
new = os.path.join(dirname, new)
try:
os.rename(old, new)
except OSError:
pass
def safeDelete(self, dirname, filename):
if dirname:
filename = os.path.join(dirname, filename)
try:
os.remove(filename)
except OSError:
pass
def createfile(self, dirname, filename, content):
if dirname:
filename = os.path.join(dirname, filename)
fsock = open(filename, "w")
fsock.write(content)
fsock.close()
def rememberKeys(self):
self.moduleLicenseKey = google.LICENSE_KEY
self.envLicenseKey = os.environ.get(self.envkey, None)
self.safeRename(os.environ["HOME"], self.licensefile, self.licensebackup)
self.safeRename("", self.licensefile, self.licensebackup)
self.safeRename(google._getScriptDir(), self.licensefile, self.licensebackup)
def restoreKeys(self):
google.LICENSE_KEY = self.moduleLicenseKey
if self.envLicenseKey:
os.environ[self.envkey] = self.envLicenseKey
self.safeDelete(os.environ["HOME"], self.licensefile)
self.safeRename(os.environ["HOME"], self.licensebackup, self.licensefile)
self.safeDelete("", self.licensefile)
self.safeRename("", self.licensebackup, self.licensefile)
self.safeDelete(google._getScriptDir(), self.licensefile)
self.safeRename(google._getScriptDir(), self.licensebackup, self.licensefile)
def clearKeys(self):
google.setLicense(None)
if os.environ.get(self.envkey):
del os.environ[self.envkey]
def setUp(self):
Redirector.setUp(self)
self.rememberKeys()
self.clearKeys()
def tearDown(self):
Redirector.tearDown(self)
self.clearKeys()
self.restoreKeys()
def testNoKey(self):
"""having no license key should raise google.NoLicenseKey"""
self.assertRaises(google.NoLicenseKey, google.doGoogleSearch, q=self.q)
def testPassInvalidKey(self):
"""passing invalid license key should fail with faultType"""
self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q, license_key=self.badkey)
def testSetInvalidKey(self):
"""setting invalid module-level license key should fail with faultType"""
google.setLicense(self.badkey)
self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)
def testEnvInvalidKey(self):
"""invalid environment variable license key should fail with faultType"""
os.environ[self.envkey] = self.badkey
self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)
def testHomeDirKey(self):
"""invalid license key in home directory should fail with faultType"""
self.createfile(os.environ["HOME"], self.licensefile, self.badkey)
self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)
def testCurDirKey(self):
"""invalid license key in current directory should fail with faultType"""
self.createfile("", self.licensefile, self.badkey)
self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)
def testScriptDirKey(self):
"""invalid license key in script directory should fail with faultType"""
self.createfile(google._getScriptDir(), self.licensefile, self.badkey)
self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)
if __name__ == "__main__":
unittest.main()
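# Editor-added note: running `python googletest.py` invokes unittest.main(); these
# Python 2-era tests additionally require a GOOGLE_LICENSE_KEY (environment variable,
# key file, or module-level setLicense()) and the long-retired Google SOAP search API.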
| 37.962199 | 111 | 0.650674 | 1,080 | 11,047 | 6.603704 | 0.181481 | 0.074593 | 0.156478 | 0.088194 | 0.646242 | 0.563657 | 0.495794 | 0.460881 | 0.449103 | 0.439708 | 0 | 0.003384 | 0.224314 | 11,047 | 290 | 112 | 38.093103 | 0.828918 | 0.094505 | 0 | 0.404651 | 0 | 0 | 0.047431 | 0.002331 | 0 | 0 | 0 | 0 | 0.12093 | 1 | 0.172093 | false | 0.013953 | 0.023256 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba38dbf6245155e0bd5d6fb74ada7b2d40f61c9a | 1,591 | py | Python | services/processdata/processdata/server.py | matheusmercadante/space-hub | 6956d4fad5c92f2ce5903852bdd77e124d7941ef | [
"RSA-MD"
] | null | null | null | services/processdata/processdata/server.py | matheusmercadante/space-hub | 6956d4fad5c92f2ce5903852bdd77e124d7941ef | [
"RSA-MD"
] | null | null | null | services/processdata/processdata/server.py | matheusmercadante/space-hub | 6956d4fad5c92f2ce5903852bdd77e124d7941ef | [
"RSA-MD"
] | null | null | null | import sys
import asyncio
import tornado.ioloop
from classes.rabbitmq_tornado import TornadoAdapter
from tornado import gen
from services.read_sheet import read_sheet
RABBIT_URI = "amqp://guest:guest@localhost:5672/"
@gen.coroutine
def handle_message(logger, message):
logger.info("File request {}".format(message))
res = read_sheet(message)
logger.info("File result {}".format(res))
return res
if __name__ == "__main__":
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
configuration = dict(
publish=dict(
outgoing_1=dict(
exchange="processdata-rpc",
exchange_type="direct",
routing_key="processdata",
queue="process-data-finished",
durable=True,
auto_delete=False,
prefetch_count=1
)
),
receive=dict(
incoming=dict(
exchange="processdata-rpc",
exchange_type="direct",
routing_key="processdata",
queue="process-data-comming",
durable=True,
auto_delete=False,
prefetch_count=1
)
)
)
# Using Tornado IO Loop
io_loop = tornado.ioloop.IOLoop.current()
rabbit_connection = TornadoAdapter(rabbitmq_url=RABBIT_URI, configuration=configuration, io_loop=io_loop)
rabbit_connection.receive(handler=handle_message, queue=configuration["receive"]["incoming"]["queue"])
io_loop.start() | 30.596154 | 109 | 0.615336 | 160 | 1,591 | 5.9 | 0.4625 | 0.03178 | 0.036017 | 0.044492 | 0.256356 | 0.256356 | 0.256356 | 0.256356 | 0.17161 | 0.17161 | 0 | 0.007909 | 0.284727 | 1,591 | 52 | 110 | 30.596154 | 0.821617 | 0.013199 | 0 | 0.272727 | 0 | 0 | 0.128107 | 0.035054 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.136364 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
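# Editor-added flow summary (only TornadoAdapter.receive() appears in the source; any
# publish-side call is an assumption about the local classes.rabbitmq_tornado module):
#     a message published to exchange "processdata-rpc" with routing key "processdata"
#     lands in queue "process-data-comming", handle_message() runs read_sheet() on it,
#     and the result can be republished using the "outgoing_1" configuration block.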
ba3a19f10a71c2771193a02d9bae8cb86fc3ea41 | 16,428 | py | Python | second-floor.py | levabd/smart-climat-daemon | 8ff273eeb74fb03ea04fda11b0128fa13d35b500 | [
"MIT"
] | null | null | null | second-floor.py | levabd/smart-climat-daemon | 8ff273eeb74fb03ea04fda11b0128fa13d35b500 | [
"MIT"
] | 1 | 2021-06-02T03:55:13.000Z | 2021-06-02T03:55:13.000Z | second-floor.py | levabd/smart-climat-daemon | 8ff273eeb74fb03ea04fda11b0128fa13d35b500 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
import argparse
import re
import datetime
import paramiko
import requests
# cmd ['ssh', 'smart',
# 'mkdir -p /home/levabd/smart-home-temp-humidity-monitor;
# cat - > /home/levabd/smart-home-temp-humidity-monitor/lr.json']
from btlewrap import available_backends, BluepyBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
br_state = {}
cb_state = {}
f = open('/home/pi/smart-climat-daemon/ac_br_state.json')
br_state = json.load(f)
f = open('/home/pi/smart-climat-daemon/ac_cb_state.json')
cb_state = json.load(f)
dummy_ac_url = 'http://smart.levabd.pp.ua:2002'
def valid_mitemp_mac(mac, pat=re.compile(r"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
"""Check for valid mac addresses."""
if not pat.match(mac.upper()):
raise argparse.ArgumentTypeError(
'The MAC address "{}" seems to be in the wrong format'.format(mac))
return mac
# def turn_on_humidifier():
# """Turn on humidifier on a first floor."""
# hummidifier_plug = chuangmi_plug.ChuangmiPlug(
# ip='192.168.19.61',
# token='14f5b868a58ef4ffaef6fece61c65b16',
# start_id=0,
# debug=1,
# lazy_discover=True,
# model='chuangmi.plug.m1')
# hummidifier_plug.on()
#
#
# def turn_off_humidifier():
# """Turn off humidifier on a first floor."""
# hummidifier_plug = chuangmi_plug.ChuangmiPlug(
# ip='192.168.19.61',
# token='14f5b868a58ef4ffaef6fece61c65b16',
# start_id=0,
# debug=1,
# lazy_discover=True,
# model='chuangmi.plug.m1')
# hummidifier_plug.off()
def check_if_ac_off(room):
"""Check if AC is turned off."""
status_url = dummy_ac_url
if room == 'br':
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
elif room == 'cb':
status_url = 'http://smart.levabd.pp.ua:2002/status-office?key=27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
if 'Pow' in response.json():
print(response.json()['Pow'])
if response.json()['Pow'] == "ON":
return False
return True
return None
def check_if_ac_heat(room):
"""Check if AC is turned for a automate cooling."""
status_url = dummy_ac_url
if room == 'br':
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
elif room == 'cb':
status_url = 'http://smart.levabd.pp.ua:2002/status-office?key=27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
print(response.json())
if 'Pow' in response.json():
if (response.json()['Pow'] == "ON") and (response.json()['Mod'] == "HEAT"):
return True
return False
return None
def check_if_ac_cool(room):
"""Check if AC is turned for a automate cooling."""
status_url = dummy_ac_url
if room == 'br':
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
elif room == 'cb':
status_url = 'http://smart.levabd.pp.ua:2002/status-office?key=27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
print(response.json())
if 'Pow' in response.json():
if (response.json()['Pow'] == "ON") and (response.json()['Mod'] == "COOL"):
return True
return False
return None
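# Example status payload inferred from the checks above (editor-added illustration,
# not captured from a real device): {"Pow": "ON", "Mod": "COOL"}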
def set_cool_temp_ac(room, temp):
"""Set AC temerature of cooling if AC already turned cool."""
state = {}
state = br_state if room == 'br' else cb_state # 'cb'
if (not state['wasTurnedCool'] == 1 and check_if_ac_cool(room)) or (check_if_ac_heat('br')):
return
temp_url = dummy_ac_url
if room == 'br':
temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp='
elif room == 'cb':
temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp='
response = requests.get(temp_url + temp)
print(response)
def turn_on_cool_ac(room):
"""Turn on AC for a cooling if it was not."""
state = {}
state = br_state if room == 'br' else cb_state # 'cb'
ac_cool = check_if_ac_cool(room)
if ((state['wasTurnedCool'] == 1) and not state['triedTurnedCool'] == 1) or (ac_cool is None) or (check_if_ac_heat('br')):
return
if ac_cool and (state['triedTurnedCool'] == 1):
if room == 'br':
br_state['triedTurnedOff'] = 0
br_state['wasTurnedOff'] = 0
br_state['triedTurnedCool'] = 0
br_state['wasTurnedCool'] = 1
br_state['triedTurnedHeat'] = 0
br_state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
json.dump(br_state, file)
elif room == 'cb':
cb_state['triedTurnedOff'] = 0
cb_state['wasTurnedOff'] = 0
cb_state['triedTurnedCool'] = 0
cb_state['wasTurnedCool'] = 1
cb_state['triedTurnedHeat'] = 0
cb_state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
json.dump(cb_state, file)
return
cool_url = dummy_ac_url
turn_on_url = dummy_ac_url
temp_url = dummy_ac_url
if room == 'br':
turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-bedroom?key=27fbc501b51b47663e77c46816a'
cool_url = 'http://smart.levabd.pp.ua:2002/cool-bedroom?autoFan=false&key=27fbc501b51b47663e77c46816a'
temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp=26'
elif room == 'cb':
turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-office?key=27fbc501b51b47663e77c46816a'
cool_url = 'http://smart.levabd.pp.ua:2002/cool-office?autoFan=false&key=27fbc501b51b47663e77c46816a'
temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp=26'
if room == 'br':
br_state['triedTurnedCool'] = 1
br_state['wasTurnedCool'] = 0
with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
json.dump(br_state, file)
elif room == 'cb':
cb_state['triedTurnedCool'] = 1
cb_state['wasTurnedCool'] = 0
with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
json.dump(cb_state, file)
response = requests.get(temp_url)
print(response)
response = requests.get(cool_url)
print(response)
response = requests.get(turn_on_url)
print(response)
def turn_on_heat_ac(room):
"""Turn on AC for a heating if it was not."""
state = {}
state = br_state if room == 'br' else cb_state # 'cb'
ac_heat = check_if_ac_heat(room)
if ((state['wasTurnedHeat'] == 1) and not state['triedTurnedHeat'] == 1) or (ac_heat is None):
return
if ac_heat and (state['triedTurnedHeat'] == 1):
if room == 'br':
br_state['triedTurnedOff'] = 0
br_state['wasTurnedOff'] = 0
br_state['triedTurnedCool'] = 0
br_state['wasTurnedCool'] = 0
br_state['triedTurnedHeat'] = 0
br_state['wasTurnedHeat'] = 1
with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
json.dump(br_state, file)
elif room == 'cb':
cb_state['triedTurnedOff'] = 0
cb_state['wasTurnedOff'] = 0
cb_state['triedTurnedCool'] = 0
cb_state['wasTurnedCool'] = 0
cb_state['triedTurnedHeat'] = 0
cb_state['wasTurnedHeat'] = 1
with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
json.dump(cb_state, file)
return
heat_url = dummy_ac_url
turn_on_url = dummy_ac_url
temp_url = dummy_ac_url
if room == 'br':
turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-bedroom?key=27fbc501b51b47663e77c46816a'
heat_url = 'http://smart.levabd.pp.ua:2002/heat-bedroom?key=27fbc501b51b47663e77c46816a'
temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp=25'
elif room == 'cb':
turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-office?key=27fbc501b51b47663e77c46816a'
heat_url = 'http://smart.levabd.pp.ua:2002/heat-office?autoFan=false&key=27fbc501b51b47663e77c46816a'
temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp=25'
if room == 'br':
br_state['triedTurnedHeat'] = 1
br_state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
json.dump(br_state, file)
elif room == 'cb':
cb_state['triedTurnedHeat'] = 1
cb_state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
json.dump(cb_state, file)
response = requests.get(temp_url)
print(response)
response = requests.get(heat_url)
print(response)
response = requests.get(turn_on_url)
print(response)
def turn_off_ac(room):
"""Turn off AC ."""
state = {}
state = br_state if room == 'br' else cb_state # 'cb'
ac_off = check_if_ac_off(room)
if ((state['wasTurnedOff'] == 1) and not state['triedTurnedOff'] == 1) or (ac_off is None):
return
if ac_off and (state['triedTurnedCool'] == 1):
if room == 'br':
br_state['triedTurnedOff'] = 0
br_state['wasTurnedOff'] = 1
br_state['triedTurnedCool'] = 0
br_state['wasTurnedCool'] = 0
br_state['triedTurnedHeat'] = 0
br_state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
json.dump(br_state, file)
elif room == 'cb':
cb_state['triedTurnedOff'] = 0
cb_state['wasTurnedOff'] = 1
cb_state['triedTurnedCool'] = 0
cb_state['wasTurnedCool'] = 0
cb_state['triedTurnedHeat'] = 0
cb_state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
json.dump(cb_state, file)
turn_url = dummy_ac_url
if room == 'br':
turn_url = 'http://smart.levabd.pp.ua:2002/powerOff-bedroom?key=27fbc501b51b47663e77c46816a'
elif room == 'cb':
turn_url = 'http://smart.levabd.pp.ua:2002/powerOff-office?key=27fbc501b51b47663e77c46816a'
if room == 'br':
br_state['triedTurnedOff'] = 1
br_state['wasTurnedOff'] = 0
with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
json.dump(br_state, file)
elif room == 'cb':
cb_state['triedTurnedOff'] = 1
cb_state['wasTurnedOff'] = 0
with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
json.dump(cb_state, file)
response = requests.get(turn_url)
print(response)
def record_temp_humid(temperature, humidity, room):
"""Record temperature and humidity data for web interface monitor"""
dicty = {
"temperature": temperature,
"humidity": humidity
}
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('smart.levabd.pp.ua', port = 2001, username='levabd', password='vapipu280.')
sftp = ssh.open_sftp()
with sftp.open('smart-home-temp-humidity-monitor/' + room + '.json', 'w') as outfile:
json.dump(dicty, outfile)
ssh.close()
def poll_temp_humidity(room):
"""Poll data frstate['triedTurnedOff']om the sensor."""
today = datetime.datetime.today()
backend = BluepyBackend
mac = '58:2d:34:38:be:2e' if room == 'br' else '58:2d:34:39:27:4e' # 'cb'
poller = MiTempBtPoller(mac, backend)
temperature = poller.parameter_value(MI_TEMPERATURE)
humidity = poller.parameter_value(MI_HUMIDITY)
print("Month: {}".format(today.month))
print("Getting data from Mi Temperature and Humidity Sensor")
print("FW: {}".format(poller.firmware_version()))
print("Name: {}".format(poller.name()))
print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
return (today, temperature, humidity)
# def scan(args):
# """Scan for sensors."""
# backend = _get_backend(args)
# print('Scanning for 10 seconds...')
# devices = mitemp_scanner.scan(backend, 10)
# devices = []
# print('Found {} devices:'.format(len(devices)))
# for device in devices:
# print(' {}'.format(device))
def list_backends(_):
"""List all available backends."""
backends = [b.__name__ for b in available_backends()]
print('\n'.join(backends))
def main():
"""Main function."""
# check bedroom
(today, temperature, humidity) = poll_temp_humidity('br')
# if (humidity > 49) and (today.month < 10) and (today.month > 4):
# turn_off_humidifier()
# if (humidity < 31) and (today.month < 10) and (today.month > 4):
# turn_on_humidifier()
# if (humidity < 31) and ((today.month > 9) or (today.month < 5)):
# turn_on_humidifier()
# if (humidity > 49) and ((today.month > 9) or (today.month < 5)):
# turn_off_humidifier()
#
# Prevent Sleep of Xiaomi Smart Plug
# hummidifier_plug = chuangmi_plug.ChuangmiPlug(
# ip='192.168.19.59',
# token='14f5b868a58ef4ffaef6fece61c65b16',
# start_id=0,
# debug=0,
# lazy_discover=True,
# model='chuangmi.plug.m1')
# print(hummidifier_plug.status())
# Record temperature and humidity for monitor
record_temp_humid(temperature, humidity, 'br')
# clear env at night
if today.hour == 3:
br_state['triedTurnedOff'] = 0
br_state['wasTurnedOff'] = 0
br_state['triedTurnedCool'] = 0
br_state['wasTurnedCool'] = 0
br_state['triedTurnedHeat'] = 0
br_state['wasTurnedHeat'] = 0
cb_state['triedTurnedOff'] = 0
cb_state['wasTurnedOff'] = 0
cb_state['triedTurnedCool'] = 0
cb_state['wasTurnedCool'] = 0
cb_state['triedTurnedHeat'] = 0
cb_state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
json.dump(br_state, file)
with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
json.dump(cb_state, file)
# if (temperature > 24.0) and (today.month < 6) and (today.month > 3) and (today.hour < 11) and (today.hour > 3):
# turn_on_cool_ac('br')
if (temperature > 32) and (today.hour < 24) and (today.hour > 7):
turn_on_cool_ac('br')
if (temperature > 25.3) and (today.month < 10) and (today.month > 4) and (today.hour < 8) and (today.hour > 4):
turn_on_cool_ac('br')
if (temperature < 22) and (today.month == 10) and (today.hour < 9):
turn_on_heat_ac('br')
if (temperature < 22) and (today.month == 10) and (today.hour > 22):
turn_on_heat_ac('br')
if (temperature > 25) and (today.month == 10) and (today.hour < 9):
turn_off_ac('br')
if (temperature > 25) and (today.month == 10) and (today.hour > 22):
turn_off_ac('br')
if (today.month == 10) and (today.hour == 0) and (today.minute == 0):
turn_off_ac('br')
if (temperature < 23.3) and (today.hour < 8) and (today.hour > 4) and (not(check_if_ac_heat('br'))):
turn_off_ac('br')
if (temperature < 19) and (today.hour < 24) and (today.hour > 8) and (not(check_if_ac_heat('br'))):
turn_off_ac('br')
# if (temperature < 20) and ((today.month > 9) or (today.month < 5)) and (today.hour < 24) and (today.hour > 9):
# turn_on_heat_ac()
# if (temperature > 22) and ((today.month > 9) or (today.month < 5)):
# turn_off_ac()
# record the office room numbers
(_, temperature, humidity) = poll_temp_humidity('cb')
record_temp_humid(temperature, humidity, 'cb')
if __name__ == '__main__':
main()
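# Editor-added deployment note (the scheduling mechanism is an assumption; only the
# /home/pi/smart-climat-daemon/ paths appear in the source):
#     run this script periodically, e.g. from cron on the Raspberry Pi:
#     */10 * * * * /usr/bin/python3 /home/pi/smart-climat-daemon/second-floor.py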
| 40.562963 | 126 | 0.623204 | 2,166 | 16,428 | 4.560018 | 0.110342 | 0.036145 | 0.031589 | 0.036448 | 0.754885 | 0.691202 | 0.664068 | 0.616786 | 0.597854 | 0.57315 | 0 | 0.064908 | 0.229121 | 16,428 | 404 | 127 | 40.663366 | 0.715019 | 0.164475 | 0 | 0.589041 | 0 | 0.034247 | 0.294749 | 0.060597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041096 | false | 0.003425 | 0.027397 | 0 | 0.126712 | 0.065068 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba3ad00158e6db261842bb50d50fbeca583ec7db | 2,880 | py | Python | swot_simulator/error/orbital.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 17 | 2020-05-28T08:20:11.000Z | 2022-03-25T07:40:48.000Z | swot_simulator/error/orbital.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 7 | 2021-07-21T02:15:52.000Z | 2021-11-14T10:46:41.000Z | swot_simulator/error/orbital.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 8 | 2020-05-17T13:53:43.000Z | 2022-03-25T07:40:58.000Z | # Copyright (c) 2021 CNES/JPL
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Orbital error
-------------
"""
from typing import Dict, Tuple
#
import dask.array as da
import numpy as np
#
from .. import random_signal
from .. import settings
from .. import VOLUMETRIC_MEAN_RADIUS
#: Signal amplitude of the orbital error in micro-radians
AMPLITUDE = 100
#: Delta T of the spatial sampling in seconds
DT = 60
def _orbital_error_spectrum(
orbit_duration: np.timedelta64,
rng: np.random.Generator) -> Tuple[np.ndarray, float]:
"""Calculate orbital error spectrum
Args:
orbit_duration (np.timedelta64): Orbit duration
rng (np.random.Generator): Random number generator
Returns:
tuple: (yg, fmaxr)
"""
df = 1 / (1000 * 86400)
spatial_frequency = np.arange(df, 1 / DT, df)
orbital_frequency = 1 / float(
orbit_duration.astype("timedelta64[us]").astype("float64") * 1e-6)
sigma_peak = orbital_frequency / 1000
ps_orbital = np.exp(-0.5 * (spatial_frequency - orbital_frequency)**2 /
sigma_peak**2)
ps_orbital[ps_orbital < 1 / 1000] = 0.
ps_orbital /= np.sum(ps_orbital * df)
ps_orbital *= AMPLITUDE**2
return random_signal.gen_psd_1d(spatial_frequency,
ps_orbital,
rng,
alpha=10)
class Orbital:
"""
Simulate the orbital error
Args:
parameters (Parameters): Simulation parameters.
orbit_duration (np.timedelta64): Orbit duration.
"""
def __init__(self, parameters: settings.Parameters,
orbit_duration: np.timedelta64) -> None:
yg, self.fmaxr = _orbital_error_spectrum(orbit_duration,
parameters.rng())
self.yg = da.from_array(yg, name="orbital_error").persist()
assert parameters.height is not None
height = parameters.height * 1e-3
self.conversion_factor = (1 + height / VOLUMETRIC_MEAN_RADIUS) * 1e-3
def generate(
self,
time: np.ndarray,
x_ac: np.ndarray,
) -> Dict[str, np.ndarray]:
"""Generate orbital error
Args:
time (np.ndarray): time vector
Returns:
np.ndarray: orbital error
"""
time = time.astype("datetime64[us]").astype("float64") * 1e-6
xg = np.linspace(0, 0.5 / self.fmaxr * self.yg.shape[0],
self.yg.shape[0])
error_orbital = np.interp(np.mod(time, xg.max()), xg,
self.yg.compute())
return {
"simulated_error_orbital":
x_ac * error_orbital[:, np.newaxis] * self.conversion_factor,
}
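# Illustrative usage (editor-added sketch; the argument values, including the orbit
# duration, are assumptions and not taken from the source):
#     error = Orbital(parameters, np.timedelta64(6116, 's'))
#     fields = error.generate(time=time_vector, x_ac=across_track)
#     simulated = fields["simulated_error_orbital"]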
| 30.315789 | 77 | 0.587847 | 340 | 2,880 | 4.835294 | 0.382353 | 0.058394 | 0.036496 | 0.047445 | 0.105839 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0335 | 0.305556 | 2,880 | 94 | 78 | 30.638298 | 0.7885 | 0.246181 | 0 | 0 | 0 | 0 | 0.03848 | 0.011203 | 0 | 0 | 0 | 0 | 0.020833 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba3d14185541e8c15e9893b318d33f3a291b4fb0 | 20,940 | py | Python | backend/mlarchive/archive/thread.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 6 | 2022-03-09T23:10:28.000Z | 2022-03-21T05:32:40.000Z | backend/mlarchive/archive/thread.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 5 | 2022-03-11T09:39:47.000Z | 2022-03-30T16:48:09.000Z | backend/mlarchive/archive/thread.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 4 | 2022-03-04T15:36:19.000Z | 2022-03-28T23:45:44.000Z | '''This module implements the Zawinski threading algorithm.
https://www.jwz.org/doc/threading.html
The main function is process(), which takes a queryset, ie. all messages
in a list, and returns the root_node of a container tree representing
the thread. Use root_node.walk() to walk the container tree.
NOTE: There are certain circumstances where this container tree will
have empty containers at the root level:
1) When multiple top-level messages are found with the same base subject line
(all prefixes stripped away) they are collected under a top-level dummy
container. This is potentially confusing when there are messages with the
same subject line that aren't part of a thread, e.g. generic email
notifications that reuse the same subject line.
2) Currently, if a thread contains messages that were identified (correctly)
by the subject, and they have no references, we will get a top-level dummy
container that has these as siblings to the original first message of
the thread.
'''
from builtins import input
import re
from collections import defaultdict, namedtuple, OrderedDict
from operator import methodcaller
CONTAINER_COUNT = 0
DEBUG = False
MESSAGE_ID_RE = re.compile(r'<(.*?)>')
class Container(object):
'''Used to construct the thread ordering then discarded'''
def __init__(self, message=None):
self.message = message
self.parent = None
self.child = None
self.next = None
self.depth = None
def __str__(self):
if self.parent:
parent = self.parent.descriptor()
else:
parent = 'None'
if self.child:
child = self.child.descriptor()
else:
child = 'None'
if self.next:
next_ = self.next.descriptor()
else:
next_ = 'None'
return 'parent:{},message:{},child:{},next:{}'.format(
parent,
self.descriptor(),
child,
next_)
def descriptor(self):
'''Descriptive text for display of container object'''
if self.is_empty():
return 'Empty'
else:
subject = self.message.subject.encode('ascii', 'replace')
return '{} ({})'.format(subject, self.message.msgid)
def has_ancestor(self, target):
'''Returns True if target is an ancestor'''
if self.parent is None:
return False
elif self.parent == target:
return True
else:
return self.parent.has_ancestor(target)
def has_descendent(self, target):
'''Returns True if the target is a descendent'''
flat = [c for c in self.walk()]
return target in flat
def has_relative(self, target):
'''Returns True if target is either an ancestor or descendent'''
return self.has_descendent(target) or self.has_ancestor(target)
def is_empty(self):
'''Returns True if the container has no message'''
return self.message is None
def reverse_children(self):
'''Reverse order of children'''
if self.child:
prev = None
kid = self.child
rest = kid.next
while kid:
kid.next = prev
# continue
prev = kid
kid = rest
rest = None if rest is None else rest.next
self.child = prev
kid = self.child
while kid:
kid.reverse_children()
kid = kid.next
def sort_date(self):
'''Returns the date to use for sorting. Either the
date of self.message or if this is a dummy container,
the date of self.child.message
'''
if not self.is_empty():
return self.message.date
elif not self.child.is_empty():
return self.child.message.date
else:
return None
def walk(self, depth=0):
'''Returns a generator that walks the tree and returns
containers'''
container = self
while container:
container.depth = depth
yield container
if container.child:
for c in container.child.walk(depth=depth + 1):
yield c
if depth == 0:
break
container = container.next
def build_container(message, id_table, bogus_id_count):
'''Builds Container objects for messages'''
msgid = message.msgid
container = id_table.get(msgid, None)
if container:
if container.is_empty():
container.message = message
else:
# indicates a duplicate message-id
msgid = "Bogus-id:{}".format(bogus_id_count)
bogus_id_count += 1
container = None
if not container:
container = Container(message)
id_table[msgid] = container
# 1.B
# process references
parent_ref = None
# switch to message.get_references() after migration
for reference_id in get_references_or_in_reply_to(message):
ref = id_table.get(reference_id, None)
if not ref:
ref = Container()
id_table[reference_id] = ref
# init list
if DEBUG:
print("in message: {}".format(message.msgid))
print("checking reference: {}".format(reference_id))
print("checking {} for descendent {}".format(parent_ref, ref))
if (parent_ref and # there is a parent
ref.parent is None and # don't have a parent already
parent_ref != ref and # not a tight loop
not parent_ref.has_relative(ref)): # not a wide loop
ref.parent = parent_ref
ref.next = parent_ref.child
parent_ref.child = ref
parent_ref = ref
# At this point parent_ref is set to the container of the last element
# in the reference field. make that be the parent of this container,
# unless doing so would introduce circularity
if parent_ref and (parent_ref == container or
container.has_descendent(parent_ref)):
parent_ref = None
# If it has a parent already, that's there because we saw this message
# in a references field, and presumed a parent based on the other
# entries in that field. Now that we have the actual message, we can
# be more definitive, so throw away the old parent and use this new one.
# Find this container in the parent's child-list and unlink it
if container.parent:
prev = None
rest = container.parent.child
while rest:
if rest == container:
break
prev = rest
rest = rest.next
if rest is None:
raise Exception("Couldn't find {} in parent {}".format(
container,
container.parent))
if prev is None:
container.parent.child = container.next
else:
prev.next = container.next
container.next = None
container.parent = None
if parent_ref:
container.parent = parent_ref
container.next = parent_ref.child
parent_ref.child = container
if DEBUG:
root = find_root(container)
display_thread(root)
input("Press enter")
def build_subject_table(root_node):
'''Builds a mapping of base subject (subject stripped of prefixes) to
container'''
subject_table = {}
container = root_node.child
while container:
message = container.message
if message is None:
message = container.child.message
if message.base_subject:
existing = subject_table.get(message.base_subject)
# add this container to the table if:
# there is no container in the table with this subject
if not existing:
subject_table[message.base_subject] = container
# this one is a dummy container and the old one is not: the
# dummy one is more interesting as a root, so put it in the table
# instead
elif container.is_empty() and not existing.is_empty():
subject_table[message.base_subject] = container
# the container in the table has a "Re:" version of this subjet,
# and this container has a non-"Re:" version.
# the non-"Re:" version is the more interesting of the two
elif (existing.message and
subject_is_reply(existing.message) and
(container.message and
not subject_is_reply(container.message))):
subject_table[message.base_subject] = container
container = container.next
return subject_table
def compute_thread(thread):
'''Computes the thread tree for given thread (Thread object or list of messages).
Returns OrderedDict key=hashcode,value=(message,depth,order)
'''
if hasattr(thread, '__iter__'):
messages = thread
else:
messages = thread.message_set.all().order_by('date')
data = OrderedDict()
ThreadInfo = namedtuple('ThreadInfo', ['message', 'depth', 'order'])
root_node = process(messages)
for branch in get_root_set(root_node):
for order, container in enumerate(branch.walk()):
if container.is_empty():
pass
else:
message = container.message
data[message.hashcode] = ThreadInfo(message=message,
depth=container.depth,
order=order)
return data
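# Editor-added illustration of the mapping returned above (the values shown are
# hypothetical):
#     {"<hashcode>": ThreadInfo(message=<Message>, depth=1, order=3), ...}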
def reconcile_thread(thread_data):
'''Updates message.thread_depth and message.thread_order as needed, given
computed thread info
'''
for info in thread_data.values():
message = info.message
if (message.thread_order != info.order or message.thread_depth != info.depth):
message.thread_order = info.order
message.thread_depth = info.depth
message.save()
def container_stats(parent, id_table):
'''Show container stats for help in debugging'''
empty = 0
empty_top = 0
empty_top_nochild = 0
print("Length if id_table: {}".format(len(id_table)))
print("Length of walk(): {}".format(len(list(parent.walk()))))
for c in parent.walk():
if c.is_empty():
empty = empty + 1
if c.parent is None:
empty_top = empty_top + 1
if c.child is None:
empty_top_nochild = empty_top_nochild + 1
print(c)
print("Total empty: {}".format(empty))
print("Total empty top-level: {}".format(empty_top))
print("Total empty top-level no child: {}".format(empty_top_nochild))
display_thread(parent)
def count_root_set(parent):
'''Returns the number of top-level containers in the root set'''
container = parent.child
count = 1
while container.next:
count = count + 1
container = container.next
return count
def display_thread(parent):
'''Prints the thread.'''
for container in parent.walk():
if container.message:
print('{indent}{subject} {date}'.format(
indent=' ' * container.depth,
subject=get_ascii(container.message.subject),
date=container.message.date.strftime("%Y-%m-%d %H:%M")))
else:
if container.parent is None:
print("(Empty)")
else:
print(container)
def find_root(node):
'''Find the top level node'''
if not node.parent:
return node
else:
return find_root(node.parent)
def find_root_set(id_table):
'''Find the root set of Containers and return a root node.
A container is in the root set if it has no parents
Takes mapping of message-id to containers
'''
root = Container()
for container in id_table.values():
if container.parent is None:
if container.next is not None:
raise Exception('container.next is {}'.format(container.next))
container.next = root.child
root.child = container
return root
def gather_siblings(parent, siblings):
'''Build mapping of parent to list of children containers'''
container = parent.child
while container:
siblings[container.parent].append(container)
if container.child:
gather_siblings(container, siblings)
container = container.next
def gather_subjects(root_node):
'''If any two members of the root set have the same subject, merge them.
This is so that messages which don't have References headers at all
still get threaded (to the extent possible, at least.)
'''
subject_table = build_subject_table(root_node)
if len(subject_table) == 0:
return
# subject_table is now populated with one entry for each subject which
# occurs in the root set. Now iterate over the root set, and gather
# together messages that share a base subject
prev = None
container = root_node.child
rest = container.next
while container:
message = container.message
if message is None:
message = container.child.message
subject = message.base_subject
if subject:
old = subject_table.get(subject)
if old != container:
# remove the "second" mssage from the root set.
if prev is None:
root_node.child = container.next
else:
prev.next = container.next
container.next = None
# if both are dummies, merge them
if old.message is None and container.message is None:
tail = Container()
tail = old.child
while tail and tail.next:
tail = tail.next
tail.next = container.child
tail = container.child
while tail:
tail.parent = old
tail = tail.next
container.child = None
# if old is empty and container is reply and old is not
elif old.message is None or (container.message and
subject_is_reply(container.message) and
not subject_is_reply(old.message)):
container.parent = old
container.next = old.child
old.child = container
# Otherwise, make a new dummy container and make both messages be a
# child of it. This catches the both-are-replies and neither-are-
# replies cases, and makes them be siblings instead of asserting
# a hierarchical relationship which might not be true
else:
new_container = Container()
new_container.message = old.message
new_container.child = old.child
tail = new_container.child
while tail:
tail.parent = new_container
tail = tail.next
old.message = None
old.child = None
container.parent = old
new_container.parent = old
old.child = container
container.next = new_container
container = prev
prev = container
container = rest
rest = None if rest is None else rest.next
def get_ascii(value):
'''Returns ascii of value'''
return value.encode('ascii', errors='replace')
def get_in_reply_to(message):
'''Returns a qualified message id from in_reply_to_value contents'''
if not message.in_reply_to_value:
return None
message_ids = parse_message_ids(message.in_reply_to_value)
if message_ids:
return message_ids[0]
def get_references(message):
'''Returns list of message-ids from References header'''
# remove all whitespace
refs = ''.join(message.references.split())
refs = parse_message_ids(refs)
# de-dupe
results = []
for ref in refs:
if ref not in results:
results.append(ref)
return results
def get_references_or_in_reply_to(message):
'''Returns list of message-ids from References header if it exists,
else In-Reply-To header if it exists'''
refs = get_references(message)
if refs:
return refs
in_reply_to = get_in_reply_to(message)
if in_reply_to:
return [in_reply_to]
else:
return []
def get_root_set(root_node):
'''Returns generator of top-level nodes given root_node'''
node = root_node.child
while node:
yield node
node = node.next
def parse_message_ids(text):
'''Returns message ids from header text'''
if not text:
return []
return MESSAGE_ID_RE.findall(text)
def prune_empty_containers(parent):
'''Walk through the threads and discard any empty container objects.
After calling this, there will only be empty container objects
at depth 0, and those will all have at least two kids
'''
prev = None
container = parent.child
if container is None:
return
next_ = container.next
while container:
# remove empty container with no children
if container.message is None and container.child is None:
if prev is None:
parent.child = container.next
else:
prev.next = container.next
container = prev
elif (container.message is None and
container.child and
(container.parent or container.child.next is None)):
tail = Container()
kids = container.child
if prev is None:
parent.child = kids
else:
prev.next = kids
# splice kids into the list in place of container
tail = kids
while tail.next:
tail.parent = container.parent
tail = tail.next
tail.parent = container.parent
tail.next = container.next
next_ = kids
container = prev
elif container.child:
prune_empty_containers(container)
# continue with loop
prev = container
container = next_
next_ = None if container is None else container.next
def process(queryset, display=False, debug=False):
'''Takes an iterable of messages and returns the threaded structure'''
global DEBUG
DEBUG = debug
id_table = {} # message-ids to container
bogus_id_count = 0 # use when there are duplicate message ids
for message in queryset:
build_container(message, id_table, bogus_id_count)
# 2 Find the root set
root_node = find_root_set(id_table)
# 3 Discard id_table
# 4 Prune Empty Containers
prune_empty_containers(root_node)
root_node.reverse_children()
# 5 Group the root set by subject
gather_subjects(root_node)
# 7 Sort
sort_thread(root_node)
# debug
if display:
display_thread(root_node)
print("messages count: {}".format(queryset.count()))
print("root set count: {}".format(count_root_set(root_node)))
print("total containers: {}".format(CONTAINER_COUNT))
return root_node
def sort_siblings(siblings, reverse=False):
'''Sort siblings (list of containers) by date. Set new order
by adjusting container.next. Returns sorted list.
* Has side-effects *
'''
sorted_siblings = sorted(
siblings,
key=methodcaller('sort_date'),
reverse=reverse)
sorted_siblings_iter = iter(sorted_siblings)
prev = next(sorted_siblings_iter)
for container in sorted_siblings_iter:
prev.next = container
prev = container
prev.next = None
return sorted_siblings
def sort_thread(root_node):
'''Sort messages in the thread. By default sort top-level, first
message in thread, by date descending, then sub-thread siblings
by date ascending
'''
siblings = defaultdict(list)
gather_siblings(root_node, siblings)
# sort root set (they have no parent)
root_set = siblings.pop(None)
root_node.child = sort_siblings(root_set, reverse=True)[0]
# sort remaining siblings
for parent, children in siblings.items():
if len(children) > 1:
parent.child = sort_siblings(children)[0]
def subject_is_reply(message):
'''Returns True if the subject indicates this message is a reply'''
return message.subject.startswith('Re: ')
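
# Hypothetical usage sketch (added for illustration; not part of the original
# module). It assumes a Django-style queryset of message objects exposing the
# headers this module reads (Message-ID, References/In-Reply-To, Subject, date).
def _example_build_thread(queryset):
    # Build the thread tree, then walk its top-level containers.
    thread_root = process(queryset, display=False, debug=False)
    for top_level in get_root_set(thread_root):
        subject = top_level.message.subject if top_level.message else '<dummy>'
        print(subject)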
| 33.397129 | 86 | 0.600621 | 2,551 | 20,940 | 4.823599 | 0.140729 | 0.017554 | 0.008046 | 0.005201 | 0.145713 | 0.104592 | 0.081349 | 0.048436 | 0.041934 | 0.033807 | 0 | 0.001976 | 0.323305 | 20,940 | 626 | 87 | 33.450479 | 0.866408 | 0.253964 | 0 | 0.245146 | 0 | 0 | 0.032644 | 0.002421 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07767 | false | 0.002427 | 0.009709 | 0 | 0.167476 | 0.036408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba3e99a2d1a0148697392807082e928f2f44d6e9 | 10,039 | py | Python | ci/scripts/python/nrf5_cmake/library.py | perfectco/cmake-nRF5x | 08b9158fa7bfa0c8641df468d48917dec46fb115 | ["MIT"] | 111 | 2017-11-21T06:21:18.000Z | 2022-03-30T07:40:03.000Z | ci/scripts/python/nrf5_cmake/library.py | perfectco/cmake-nRF5x | 08b9158fa7bfa0c8641df468d48917dec46fb115 | ["MIT"] | 41 | 2018-01-09T15:44:11.000Z | 2021-10-31T08:45:24.000Z | ci/scripts/python/nrf5_cmake/library.py | giuliocorradini/cmake-nRF5x | a5b5d489768dc397a7eddc57d4ad65e6b3039b08 | ["MIT"] | 39 | 2018-03-13T14:03:10.000Z | 2022-02-28T17:46:17.000Z |
from __future__ import annotations
from unittest import TestCase
from enum import Enum
from typing import Dict, Iterable, Optional, Set, List

from jsonschema import validate as validate_json
from nrf5_cmake.property import Access, Property
from nrf5_cmake.version import Version


class LibraryProperty(Enum):
    DEPENDENCIES = "dependencies"
    INCLUDES = "includes"
    CFLAGS = "cflags"
    ASMFLAGS = "asmflags"
    LDFLAGS = "ldflags"


class Library:

    props_json_schema = {
        "sources": {
            "type": "array",
            "items": {
                "type": "string"
            }
        },
        **{x.value: Property.json_schema for x in LibraryProperty}
    }

    json_schema = {
        "type": "object",
        "properties": props_json_schema
    }

    def __init__(self,
                 sources: Optional[Set[str]] = None,
                 dependencies: Optional[Property] = None,
                 includes: Optional[Property] = None,
                 cflags: Optional[Property] = None,
                 asmflags: Optional[Property] = None,
                 ldflags: Optional[Property] = None):
        self._sources: Set[str] = sources or set()
        self._props: Dict[LibraryProperty, Property] = {}
        self._props[LibraryProperty.DEPENDENCIES] = dependencies or Property()
        self._props[LibraryProperty.INCLUDES] = includes or Property()
        self._props[LibraryProperty.CFLAGS] = cflags or Property()
        self._props[LibraryProperty.ASMFLAGS] = asmflags or Property()
        self._props[LibraryProperty.LDFLAGS] = ldflags or Property()

    def __str__(self):
        return str(self.to_json())

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Library):
            return False
        if self._sources != other._sources:
            return False
        for prop in LibraryProperty:
            if self._props[prop] != other._props[prop]:
                return False
        return True

    @staticmethod
    def from_json(json_value: dict) -> Library:
        validate_json(instance=json_value,
                      schema=Library.json_schema)
        library_props = Library()
        if "sources" in json_value:
            library_props._sources = set(json_value["sources"])
        for property_name in LibraryProperty:
            if property_name.value in json_value:
                library_props._props[property_name] = Property.from_json(
                    json_value[property_name.value]
                )
        return library_props

    def to_json(self) -> dict:
        json_value = {}
        if len(self._sources) != 0:
            sources_json = list(self._sources)
            sources_json.sort()
            json_value["sources"] = sources_json
        for property_name in LibraryProperty:
            if len(self._props[property_name].get_all_items()) == 0:
                continue
            prop_json = self._props[property_name].to_json()
            json_value[property_name.value] = prop_json
        return json_value

    @property
    def sources(self) -> Set[str]:
        return self._sources

    @sources.setter
    def sources(self, sources: Set[str]):
        self._sources = sources

    def get_prop(self, property_name: LibraryProperty) -> Property:
        return self._props[property_name]

    def set_prop(self, property_name: LibraryProperty, prop: Property):
        self._props[property_name] = prop

    @staticmethod
    def _prop_action(libraries: Iterable[Library], set_action, prop_action):
        library = Library()
        sources: List[Set[str]] = []
        properties: Dict[LibraryProperty, List[Property]] = {
            prop: [] for prop in LibraryProperty
        }
        for lib in libraries:
            sources.append(lib._sources)
            for prop in LibraryProperty:
                properties[prop].append(lib._props[prop])
        if sources:
            library._sources = set_action(*sources)
        for prop in LibraryProperty:
            if properties[prop]:
                library._props[prop] = prop_action(
                    properties[prop],
                    Access.PUBLIC
                )
        return library

    @staticmethod
    def union(libraries: Iterable[Library]) -> Library:
        return Library._prop_action(libraries, set.union, Property.union)

    def union_update(self, library: Library):
        self._sources.update(library._sources)
        for prop in LibraryProperty:
            self._props[prop].union_update(
                library._props[prop],
                Access.PUBLIC
            )

    @staticmethod
    def intersection(libraries: Iterable[Library]) -> Library:
        return Library._prop_action(libraries, set.intersection, Property.intersection)

    def intersection_update(self, library: Library):
        self._sources.intersection_update(library._sources)
        for prop in LibraryProperty:
            self._props[prop].intersection_update(
                library._props[prop],
                Access.PUBLIC
            )

    @staticmethod
    def difference(libraries: Iterable[Library]) -> Library:
        return Library._prop_action(libraries, set.difference, Property.difference)

    def difference_update(self, library: Library):
        self._sources.difference_update(library._sources)
        for prop in LibraryProperty:
            self._props[prop].difference_update(
                library._props[prop],
                Access.PUBLIC
            )


class LibraryTestCase(TestCase):

    def setUp(self):
        self.lib1 = Library(
            sources={'s1', 's2'},
            includes=Property(
                public={"pub_inc1", "pub_inc2"},
                private={'prv_inc1', "prv_inc2"}
            )
        )
        self.lib2 = Library(
            sources={'s1', 's2', 's3'},
            includes=Property(
                public={"pub_inc1", "pub_inc2", "pub_inc3"},
                private={'prv_inc1', "prv_inc2", "prv_inc3"}
            ),
            dependencies=Property(
                public={"dep1", "dep2"}
            )
        )
        self.lib3 = Library(
            sources={'s2', 's3'},
            includes=Property(
                public={"pub_inc2", "pub_inc3"},
                private={'prv_inc2', "prv_inc3"}
            )
        )

    def test_json(self):
        json_value = {
            "sources": ["s1", "s2"],
            "dependencies": {
                "private": ["dep1", "dep2"]
            },
            "includes": {
                "public": ["inc1"]
            },
            "cflags": {
                "interface": ["int1"]
            },
            "asmflags": {
                "public": ["asm1"]
            },
            "ldflags": {
                "public": ["ld1"]
            }
        }
        value = Library.from_json(json_value)
        self.assertSetEqual(value.sources, {"s1", "s2"})
        LP = LibraryProperty
        self.assertEqual(
            value.get_prop(LP.DEPENDENCIES),
            Property(private={"dep1", "dep2"})
        )
        self.assertEqual(
            value.get_prop(LP.INCLUDES),
            Property(public={"inc1"})
        )
        self.assertEqual(
            value.get_prop(LP.CFLAGS),
            Property(interface={"int1"})
        )
        self.assertEqual(
            value.get_prop(LP.ASMFLAGS),
            Property(public={"asm1"})
        )
        self.assertEqual(
            value.get_prop(LP.LDFLAGS),
            Property(public={"ld1"})
        )
        self.assertEqual(json_value, value.to_json())

    def test_union(self):
        self.assertEqual(
            Library.union([]),
            Library()
        )
        union_lib = Library.union([self.lib1, self.lib2, self.lib3])
        self.assertEqual(
            union_lib,
            Library(
                sources={'s1', 's2', 's3'},
                includes=Property(
                    public={"pub_inc1", "pub_inc2", "pub_inc3"},
                    private={'prv_inc1', "prv_inc2", "prv_inc3"}
                ),
                dependencies=Property(
                    public={"dep1", "dep2"}
                )
            )
        )

    def test_union_update(self):
        self.lib1.union_update(self.lib2)
        self.assertEqual(
            self.lib1,
            Library(
                sources={'s1', 's2', 's3'},
                includes=Property(
                    public={"pub_inc1", "pub_inc2", "pub_inc3"},
                    private={'prv_inc1', "prv_inc2", "prv_inc3"}
                ),
                dependencies=Property(
                    public={"dep1", "dep2"}
                )
            )
        )

    def test_intersection(self):
        self.assertEqual(
            Library.intersection([]),
            Library()
        )
        intersection = Library.intersection([self.lib1, self.lib2, self.lib3])
        self.assertEqual(
            intersection,
            Library(
                sources={'s2'},
                includes=Property(
                    public={"pub_inc2"},
                    private={"prv_inc2"}
                )
            )
        )

    def test_intersection_update(self):
        self.lib1.intersection_update(self.lib2)
        self.assertEqual(
            self.lib1,
            Library(
                sources={'s1', 's2'},
                includes=Property(
                    public={"pub_inc1", "pub_inc2"},
                    private={"prv_inc1", "prv_inc2"}
                )
            )
        )

    def test_difference_update(self):
        self.lib2.difference_update(self.lib1)
        self.assertEqual(
            self.lib2,
            Library(
                sources={'s3'},
                includes=Property(
                    public={"pub_inc3"},
                    private={"prv_inc3"}
                ),
                dependencies=Property(
                    public={"dep1", "dep2"}
                )
            )
        )
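
# Hypothetical usage sketch (added for illustration; not part of the original
# file). It round-trips a Library through its JSON form and merges in a second
# library; the source and include values are invented.
def _example_library_roundtrip() -> None:
    lib = Library.from_json({
        "sources": ["main.c"],
        "includes": {"public": ["include/"]},
    })
    other = Library(sources={"extra.c"})
    lib.union_update(other)  # merge sources and properties in place
    print(lib.to_json())     # e.g. {'sources': ['extra.c', 'main.c'], ...}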
| 30.237952 | 87 | 0.526248 | 911 | 10,039 | 5.599341 | 0.113063 | 0.041168 | 0.038816 | 0.039208 | 0.405607 | 0.327191 | 0.23662 | 0.228583 | 0.19408 | 0.19408 | 0 | 0.015199 | 0.364279 | 10,039 | 331 | 88 | 30.329305 | 0.78408 | 0 | 0 | 0.271127 | 0 | 0 | 0.058472 | 0 | 0 | 0 | 0 | 0 | 0.049296 | 1 | 0.080986 | false | 0 | 0.024648 | 0.021127 | 0.18662 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba40978bf21bdd4277341b0362355d60c177f3a7 | 2,069 | py | Python | src/installer/src/tortuga/package/rpm.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | ["Apache-2.0"] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/installer/src/tortuga/package/rpm.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | ["Apache-2.0"] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/installer/src/tortuga/package/rpm.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | ["Apache-2.0"] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z |
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from tortuga.package.abstractPackage import AbstractPackage
from tortuga.os_utility.tortugaSubprocess import TortugaSubprocess


class RPM(AbstractPackage):
    def get_package_license(self, pkgFile):  # pylint: disable=no-self-use
        '''
        Returns the package's license (BSD, GPL, etc...)
        '''
        p = TortugaSubprocess(
            'rpm -qp --queryformat "%%{LICENSE}" %s 2>/dev/null' % (
                pkgFile))
        p.run()
        licensetxt = p.getStdOut()
        return licensetxt

    def get_rpm_license_files(self, pkgFile):  # pylint: disable=no-self-use
        '''
        Returns a list of license files found in the package
        '''
        p = TortugaSubprocess(
            'rpm2cpio %s | cpio -it | grep -e COPYING -e LICENSE || true' % (
                pkgFile))
        p.run()
        a = p.getStdOut().split("\n")
        while a and a[-1] == '':
            a.pop()  # There's always a blank line at the end
        return a

    def extract_license_file(self, pkgFile, path, license_fulldir,
                             txtfile):  # pylint: disable=no-self-use
        '''
        Extract it into the license_fulldir, changing all
        slashes to dashes, removing any leading punctuation,
        and adding an extension that makes browsers happy.
        '''
        p = TortugaSubprocess(
            'rpm2cpio %s | cpio -i --to-stdout %s > %s/%s' % (
                pkgFile, path, license_fulldir, txtfile))
        p.run()
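
# Hypothetical usage sketch (added for illustration; not part of the original
# module). The .rpm filename is invented, and the rpm/rpm2cpio/cpio tools must
# be on PATH for the shell pipelines above to work.
def _example_inspect_rpm(pkg_file='example-1.0-1.x86_64.rpm'):
    rpm = RPM()
    print(rpm.get_package_license(pkg_file))  # e.g. 'GPLv2'
    for license_path in rpm.get_rpm_license_files(pkg_file):
        print(license_path)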
| 31.348485 | 78 | 0.628323 | 259 | 2,069 | 4.976834 | 0.525097 | 0.054306 | 0.034911 | 0.04422 | 0.176881 | 0.062064 | 0.062064 | 0.062064 | 0 | 0 | 0 | 0.01066 | 0.274529 | 2,069 | 65 | 79 | 31.830769 | 0.848101 | 0.453359 | 0 | 0.333333 | 0 | 0 | 0.150632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba41f739b5b8880b18c2364e081ff83f4ded08cc | 1,248 | py | Python | main.py | TobisMa/GUI | b88ea203378be464f1daaad5c5d41baaecd43f82 | ["MIT"] | null | null | null | main.py | TobisMa/GUI | b88ea203378be464f1daaad5c5d41baaecd43f82 | ["MIT"] | null | null | null | main.py | TobisMa/GUI | b88ea203378be464f1daaad5c5d41baaecd43f82 | ["MIT"] | null | null | null |
import pygame, time
from pygame.constants import QUIT, WINDOWCLOSE
# from src import *

win = pygame.display.set_mode([800, 600], 16)
pygame.init()

quitcount = 0
while True:
    win.fill([200, 200, 200])
    for event in pygame.event.get():
        if event.type in (
            # pygame.QUIT,
            # pygame.WINDOWCLOSE,
            # pygame.WINDOWENTER,
            # pygame.WINDOWLEAVE,
            pygame.WINDOWMINIMIZED,
            pygame.WINDOWMAXIMIZED,
            pygame.WINDOWRESTORED,
            pygame.WINDOWEXPOSED,
            pygame.WINDOWRESIZED
        ):
            print(int(time.time()), event)
        if event.type == pygame.QUIT:
            quitcount += 1
            if quitcount >= 2:
                pygame.display.quit()
                exit()
    pygame.display.flip()

"""
if __name__ == '__main__':
    w2 = Widget("2", Vector3(10, 10), Vector2(20, 20), Style(fg=Color(255, 255, 255)))
    w = WidgetContainer("1", Vector3(40, 10), Vector2(40, 40), style=Style(fg=Color(0, 0, 0)))
    screen = pygame.display.set_mode((300, 300))
    screen.fill([200, 200, 200])
    print(w != w2)
    w.widgets.append(w2)
    w.draw(screen)
    pygame.display.flip()
    import time
    while True: ..."""
| 27.130435 | 95 | 0.559295 | 144 | 1,248 | 4.777778 | 0.423611 | 0.094477 | 0.046512 | 0.05814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.082759 | 0.302885 | 1,248 | 46 | 96 | 27.130435 | 0.708046 | 0.069712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba43ab4474fde3f20ff82136cb2e5742c53c8ff0 | 4,838 | py | Python | qc3/libpango/fonts.py | wtfo-guru/queconverter | fc3529e46d5af1d90840c52ed9f58fb3c255523b | ["BSD-2-Clause"] | null | null | null | qc3/libpango/fonts.py | wtfo-guru/queconverter | fc3529e46d5af1d90840c52ed9f58fb3c255523b | ["BSD-2-Clause"] | null | null | null | qc3/libpango/fonts.py | wtfo-guru/queconverter | fc3529e46d5af1d90840c52ed9f58fb3c255523b | ["BSD-2-Clause"] | 5 | 2020-06-10T05:13:46.000Z | 2021-07-29T03:38:55.000Z |
#
# Copyright (C) 2016-2020 by Ihor E. Novikov
# Copyright (C) 2020 by Krzysztof Broński
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import cairo
import html
import string
import typing as tp

from qc3 import qc3const

from . import _libpango
from .core import PANGO_LAYOUT

FAMILIES_LIST = []
FAMILIES_DICT = {}


def bbox_size(bbox: qc3const.ScreenBboxType) -> qc3const.SizeType:
    """Returns bounding box size

    :param bbox: (qc3const.ScreenBboxType) bounding box
    :return: (qc3const.SizeType) bounding box size
    """
    x0, y0, x1, y1 = bbox
    w = abs(x1 - x0)
    h = abs(y1 - y0)
    return w, h


def update_fonts(do_update: bool = True) -> None:
    """Updates font families list and font face dict

    :param do_update: (bool) update flag
    """
    if do_update:
        FAMILIES_LIST[:] = []
        FAMILIES_DICT.clear()
        font_map = _libpango.get_fontmap()
        for item in font_map:
            font_name = item[0]
            font_faces = item[1]
            if font_faces:
                FAMILIES_LIST.append(font_name)
                FAMILIES_DICT[font_name] = list(font_faces)
        FAMILIES_LIST.sort()


def get_fonts() -> tp.Tuple[tp.List[str], tp.Dict[str, tp.List[str]]]:
    """Returns actual font families list and font face dict.
    Updates them if needed.

    :return: (tuple) actual font families list and font face dict
    """
    update_fonts(do_update=not FAMILIES_LIST)
    return FAMILIES_LIST, FAMILIES_DICT


def find_font_family(family: str = None) -> tp.Tuple[str, tp.List[str]]:
    """Returns font family name and list of font faces for
    provided font family. If family is not found, uses
    fallback 'Sans' family.

    :param family: (str) font family name
    :return: (tuple) font family name and list of font faces
    """
    update_fonts(do_update=not FAMILIES_LIST)
    if not family or family not in FAMILIES_LIST:
        # TODO: here should be substitution stuff
        if family and string.capwords(family) in FAMILIES_LIST:
            family = string.capwords(family)
        elif family and string.capwords(family.lower()) in FAMILIES_LIST:
            family = string.capwords(family.lower())
        else:
            family = "Sans"
    return family, FAMILIES_DICT[family]


def find_font_and_face(family: str = None) -> tp.Tuple[str, str]:
    """Returns font family name and normal font face for
    provided font family. If family is not found, uses
    fallback 'Sans' family. Tries to find a 'Regular' or 'Normal' face.
    If neither exists, returns the first face name.

    :param family: (str) font family name
    :return: (tuple) font family name and normal font face
    """
    family, faces = find_font_family(family)
    a, b = "Regular", "Normal"
    font_face = a if a in faces else b if b in faces else faces[0]
    return family, font_face


# ---Font sampling

def _set_sample_layout(
        layout: qc3const.PyCapsule, text: str, family: str,
        fontsize: tp.Union[float, int]) -> None:
    """Helper function. Sets text on Pango layout.

    :param layout: (PyCapsule) Pango layout
    :param text: (str) text string
    :param family: (str) font family name
    :param fontsize: (float|int) font size
    """
    _libpango.set_layout_width(layout, -1)
    fnt_descr = family + ", " + str(fontsize)
    fnt_descr = _libpango.create_font_description(fnt_descr)
    _libpango.set_layout_font_description(layout, fnt_descr)
    markup = html.escape(text)
    _libpango.set_layout_markup(layout, markup)


def get_sample_size(
        text: str, family: str,
        fontsize: tp.Union[float, int]) -> tp.Tuple[int, int]:
    """Calculates sample text size in pixels (w, h)

    :param text: (str) sample text
    :param family: (str) font family name
    :param fontsize: (float|int) font size
    :return: (tuple) sample size in pixels
    """
    _set_sample_layout(PANGO_LAYOUT, text, family, fontsize)
    return _libpango.get_layout_pixel_size(PANGO_LAYOUT)


def render_sample(
        ctx: cairo.Context, text: str, family: str,
        fontsize: tp.Union[float, int]) -> None:
    """Renders sample text on provided Cairo context

    :param ctx: (cairo.Context) cairo context
    :param text: (str) sample text
    :param family: (str) font family name
    :param fontsize: (float|int) font size
    """
    ctx.new_path()
    ctx.move_to(0, 0)
    layout = _libpango.create_layout(ctx)
    _set_sample_layout(layout, text, family, fontsize)
    _libpango.layout_path(ctx, layout)

# ---Font sampling end
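
# Hypothetical usage sketch (added for illustration; not part of the original
# module). It renders a sample onto an invented in-memory Cairo surface; the
# sample string and font size are arbitrary, and _libpango must be available.
def _example_render_font_sample():
    family, face = find_font_and_face("Sans")
    width, height = get_sample_size("AaBbCc 123", family, 12)
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    ctx = cairo.Context(surface)
    render_sample(ctx, "AaBbCc 123", family, 12)
    ctx.fill()  # render_sample produces a path; fill it to actually draw
    return surface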
| 30.427673 | 84 | 0.713725 | 719 | 4,838 | 4.681502 | 0.258693 | 0.046346 | 0.037433 | 0.026738 | 0.335116 | 0.311646 | 0.292038 | 0.205288 | 0.16934 | 0.157754 | 0 | 0.008632 | 0.185821 | 4,838 | 158 | 85 | 30.620253 | 0.8459 | 0.462381 | 0 | 0.060606 | 0 | 0 | 0.007705 | 0 | 0 | 0 | 0 | 0.006329 | 0 | 1 | 0.121212 | false | 0 | 0.106061 | 0 | 0.30303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba43edc0fc43df598c8d46e089a42825fd6726ad | 2,137 | py | Python | runserver.py | charbec1/pokemapfuntimesyay | d8301930c7733041114ca33fe26117d7157d9149 | ["MIT"] | null | null | null | runserver.py | charbec1/pokemapfuntimesyay | d8301930c7733041114ca33fe26117d7157d9149 | ["MIT"] | null | null | null | runserver.py | charbec1/pokemapfuntimesyay | d8301930c7733041114ca33fe26117d7157d9149 | ["MIT"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-

import os
import logging
from threading import Thread

from pogom import config
from pogom.app import Pogom
from pogom.utils import get_args, insert_mock_data, load_credentials
from pogom.search import search_loop
from pogom.models import create_tables, Pokemon
from pogom.pgoapi.utilities import get_pos_by_name

log = logging.getLogger(__name__)
app = Pogom(__name__)


def start_locator_thread(args):
    search_thread = Thread(target=search_loop, args=(args,))
    search_thread.daemon = True
    search_thread.name = 'search_thread'
    search_thread.start()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
    logging.getLogger("peewee").setLevel(logging.INFO)
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
    logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.INFO)

    args = get_args()
    if args.debug:
        logging.getLogger("requests").setLevel(logging.DEBUG)
        logging.getLogger("pgoapi").setLevel(logging.DEBUG)
        logging.getLogger("rpc_api").setLevel(logging.DEBUG)

    create_tables()

    position = get_pos_by_name(args.location)
    log.info('Parsed location is: {:.4f}/{:.4f}/{:.4f} (lat/lng/alt)'.format(*position))

    config['ORIGINAL_LATITUDE'] = position[0]
    config['ORIGINAL_LONGITUDE'] = position[1]

    if args.ignore:
        Pokemon.IGNORE = [i.lower().strip() for i in args.ignore.split(',')]
    elif args.only:
        Pokemon.ONLY = [i.lower().strip() for i in args.only.split(',')]

    if not args.mock:
        start_locator_thread(args)
    else:
        insert_mock_data(args.location, 6)

    # app = Pogom(__name__)
    config['ROOT_PATH'] = app.root_path
    if args.gmaps_key is not None:
        config['GMAPS_KEY'] = args.gmaps_key
    else:
        config['GMAPS_KEY'] = load_credentials(os.path.dirname(os.path.realpath(__file__)))['gmaps_key']

    app.run(threaded=True, debug=args.debug, host=args.host, port=args.port)
| 30.971014 | 109 | 0.697707 | 284 | 2,137 | 5.03169 | 0.348592 | 0.089573 | 0.044087 | 0.016795 | 0.181945 | 0.097971 | 0.097971 | 0 | 0 | 0 | 0 | 0.005587 | 0.162377 | 2,137 | 68 | 110 | 31.426471 | 0.792737 | 0.027609 | 0 | 0.042553 | 0 | 0 | 0.133494 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.191489 | 0 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba45fe8e373630e80ff13ce801d49d7f931b8428 | 1,244 | py | Python | volatility/contrib/plugins/disablewarnings.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | ["MIT"] | 2 | 2018-07-16T13:30:40.000Z | 2018-07-17T12:02:05.000Z | volatility/contrib/plugins/disablewarnings.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | ["MIT"] | null | null | null | volatility/contrib/plugins/disablewarnings.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | ["MIT"] | null | null | null |
# Volatility
#
# Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#

import volatility.conf as conf
import logging

config = conf.ConfObject()


def disable_warnings(_option, _opt_str, _value, _parser):
    """Raises the root logger threshold above WARNING so that warning messages are suppressed"""
    rootlogger = logging.getLogger('')
    rootlogger.setLevel(logging.WARNING + 1)


config.add_option("WARNINGS", default=False, action="callback",
                  callback=disable_warnings,
                  short_option='W', nargs=0,
                  help="Disable warning messages")
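
# Hypothetical illustration (added; not part of the plugin). Volatility's
# ConfObject wraps optparse, so a "callback" action fires as soon as the flag
# is parsed; a plain-optparse equivalent of the registration above looks like:
def _example_plain_optparse():
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('-W', '--warnings', action='callback',
                      callback=disable_warnings,
                      help='Disable warning messages')
    parser.parse_args(['-W'])  # disable_warnings runs here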
| 34.555556 | 78 | 0.716238 | 173 | 1,244 | 5.098266 | 0.612717 | 0.017007 | 0.044218 | 0.064626 | 0.092971 | 0.063492 | 0 | 0 | 0 | 0 | 0 | 0.003024 | 0.202572 | 1,244 | 35 | 79 | 35.542857 | 0.886089 | 0.609325 | 0 | 0 | 0 | 0 | 0.088937 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba4893ab88ff1d7c68c6773f7219ebc4e78b9dfe | 697 | py | Python | icekit_events/migrations/0018_auto_20170314_1401.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | ["MIT"] | 52 | 2016-09-13T03:50:58.000Z | 2022-02-23T16:25:08.000Z | icekit_events/migrations/0018_auto_20170307_1458.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | ["MIT"] | 304 | 2016-08-11T14:17:30.000Z | 2020-07-22T13:35:18.000Z | icekit_events/migrations/0018_auto_20170314_1401.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | ["MIT"] | 12 | 2016-09-21T18:46:35.000Z | 2021-02-15T19:37:50.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import colorful.fields


class Migration(migrations.Migration):

    dependencies = [
        ('icekit_events', '0017_eventtype_color'),
    ]

    operations = [
        migrations.AlterField(
            model_name='eventtype',
            name='color',
            field=colorful.fields.RGBColorField(default=b'#cccccc', colors=[b'#00BBCC', b'#0055CC', b'#1100CC', b'#7600CC', b'#CC00BB', b'#CC0054', b'#CC1100', b'#CC7700', b'#BBCC00', b'#00CC77', b'#008C99', b'#003F99', b'#0C0099', b'#590099', b'#99008C', b'#99003F', b'#990C00', b'#995900', b'#8C9900', b'#009959']),
        ),
    ]
| 33.190476 | 317 | 0.61406 | 83 | 697 | 5.048193 | 0.626506 | 0.066826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16341 | 0.192253 | 697 | 20 | 318 | 34.85 | 0.580817 | 0.030129 | 0 | 0 | 0 | 0 | 0.287834 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba49b434abc0426d76176a6199f207345e908862 | 2,318 | py | Python | data/datagen/explicit_1d.py | verdverm/pypge | 7f94595735c08e147bd17056f15d944da61eec6d | ["MIT"] | 43 | 2015-09-09T21:22:01.000Z | 2021-05-04T08:15:10.000Z | data/datagen/explicit_1d.py | verdverm/pypge | 7f94595735c08e147bd17056f15d944da61eec6d | ["MIT"] | 10 | 2016-03-31T21:54:06.000Z | 2019-11-26T22:40:32.000Z | data/datagen/explicit_1d.py | verdverm/pypge | 7f94595735c08e147bd17056f15d944da61eec6d | ["MIT"] | 9 | 2016-06-13T16:14:32.000Z | 2020-02-26T14:26:42.000Z |
from pypge.benchmarks import explicit
import numpy as np

# visualization libraries
import matplotlib.pyplot as plt

# Set your output directories
img_dir = "../img/explicit/"
data_dir = "../benchmarks/explicit/"

names = [
    "koza_01",
    "koza_02",
    "koza_03",
    "lipson_01",
    "lipson_02",
    "lipson_03",
    "nguyen_01",
    "nguyen_02",
    "nguyen_03",
    "nguyen_04",
    "nguyen_05",
    "nguyen_06",
    "nguyen_07",
    "nguyen_08"
]


def get_generator(name):
    if name == "koza_01":
        return explicit.Koza_01
    elif name == "koza_02":
        return explicit.Koza_02
    elif name == "koza_03":
        return explicit.Koza_03
    elif name == "lipson_01":
        return explicit.Lipson_01
    elif name == "lipson_02":
        return explicit.Lipson_02
    elif name == "lipson_03":
        return explicit.Lipson_03
    elif name == "nguyen_01":
        return explicit.Nguyen_01
    elif name == "nguyen_02":
        return explicit.Nguyen_02
    elif name == "nguyen_03":
        return explicit.Nguyen_03
    elif name == "nguyen_04":
        return explicit.Nguyen_04
    elif name == "nguyen_05":
        return explicit.Nguyen_05
    elif name == "nguyen_06":
        return explicit.Nguyen_06
    elif name == "nguyen_07":
        return explicit.Nguyen_07
    elif name == "nguyen_08":
        return explicit.Nguyen_08


def output_graphs(prob):
    fig = plt.figure()
    fig.set_size_inches(16, 12)
    plt.plot(prob['xpts'][0], prob['ypure'], 'r.')
    plt.legend(loc='center left', bbox_to_anchor=(0.67, 0.12))
    plt.title(prob['name'] + " Clean", fontsize=36)
    plt.savefig(img_dir + prob['name'].lower() + "_clean.png", dpi=200)

    fig = plt.figure()
    fig.set_size_inches(16, 12)
    plt.plot(prob['xpts'][0], prob['ypts'], 'b.')
    plt.legend(loc='center left', bbox_to_anchor=(0.67, 0.12))
    plt.title(prob['name'] + " Noisy", fontsize=36)
    plt.savefig(img_dir + prob['name'].lower() + "_noisy.png", dpi=200)


def output_data(prob, ypts, label):
    data = np.array([prob['xpts'][0], ypts]).T
    cols = [['x', 'out']]
    out_data = cols + data.tolist()
    f_csv = open(data_dir + prob['name'].lower() + "_" + label + ".csv", 'w')
    for row in out_data:
        line = ", ".join([str(col) for col in row]) + "\n"
        f_csv.write(line)
    f_csv.close()


for name in names:
    print(name)
    gen = get_generator(name)
    prob = gen(noise=0.025, npts=1000)
    output_graphs(prob)
    output_data(prob, prob['ypure'], 'clean')
    output_data(prob, prob['ypts'], 'noisy')
| 21.462963 | 74 | 0.672994 | 357 | 2,318 | 4.173669 | 0.277311 | 0.131544 | 0.075168 | 0.032215 | 0.197315 | 0.197315 | 0.197315 | 0.197315 | 0.197315 | 0.144966 | 0 | 0.063678 | 0.153149 | 2,318 | 107 | 75 | 21.663551 | 0.695364 | 0.022002 | 0 | 0.075949 | 0 | 0 | 0.181858 | 0.010177 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037975 | false | 0 | 0.037975 | 0 | 0.253165 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba4ba7e002ea6d31161dc21ca21b7718fc4b612c | 1,193 | py | Python | build.py | ShawnFrueh/actions_test | c0fdfce36970a1d7e9620871d6a9de2b5c5ed1e9 | ["MIT"] | null | null | null | build.py | ShawnFrueh/actions_test | c0fdfce36970a1d7e9620871d6a9de2b5c5ed1e9 | ["MIT"] | null | null | null | build.py | ShawnFrueh/actions_test | c0fdfce36970a1d7e9620871d6a9de2b5c5ed1e9 | ["MIT"] | null | null | null |
from os import mkdir
from pathlib import Path
from shutil import rmtree
from zipfile import ZipFile, ZIP_DEFLATED

# Get the root path to this repo
repo_dir = Path(__file__).parent
# Get the kit directory
kit_dir = repo_dir / "test_kit"
# Get the build directory
build_dir = repo_dir / "build"
# Get the license file
license_file = repo_dir / "LICENSE"

with repo_dir.joinpath("VERSION").open("r") as version_file:
    version = version_file.read().strip()

# Get all files in the kit directory and make sure no pyc files come along
kit_files = [f for f in kit_dir.glob("**/*") if f.is_file() and not f.name.endswith(".pyc")]

# Clear the build directory
if build_dir.exists():
    rmtree(build_dir)
# Remake the build directory
mkdir(build_dir)

# Format the lpk file name with the version number from the VERSION file
lpk_name = f"test_kit_{version}.lpk"
lpk_path = build_dir / lpk_name

# Build the LPK file.
with ZipFile(lpk_path, mode="w", compression=ZIP_DEFLATED) as lpk:
    # Add the license
    lpk.write(license_file, "license")
    # Write all files into the lpk
    for file in kit_files:
        print(file.relative_to(kit_dir))
        lpk.write(file, file.relative_to(kit_dir))
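
# Hypothetical sanity check (added; not part of the original build script):
# reopen the archive we just wrote and list its entries to confirm the
# license and kit files made it in.
with ZipFile(lpk_path) as lpk_check:
    for entry in lpk_check.namelist():
        print(entry)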
| 30.589744 | 92 | 0.732607 | 199 | 1,193 | 4.211055 | 0.326633 | 0.041766 | 0.060859 | 0.040573 | 0.047733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.172674 | 1,193 | 38 | 93 | 31.394737 | 0.849037 | 0.300084 | 0 | 0 | 0 | 0 | 0.080194 | 0.026731 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba4cb12b8de5610b0cb63dfb5d497fc87e99f2ea | 1,965 | py | Python | cortaswamp/authentication/models.py | parthakonda/cortaswamp-backend | 5e6875cbe994931cd747ac0d614250e3a6649500 | ["MIT"] | null | null | null | cortaswamp/authentication/models.py | parthakonda/cortaswamp-backend | 5e6875cbe994931cd747ac0d614250e3a6649500 | ["MIT"] | null | null | null | cortaswamp/authentication/models.py | parthakonda/cortaswamp-backend | 5e6875cbe994931cd747ac0d614250e3a6649500 | ["MIT"] | null | null | null |
import uuid
from django.db import models
from cortaswamp import enums
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.contrib.postgres.fields import JSONField


class UserAccountManager(UserManager):

    def get_by_natural_key(self, username):
        """
        Match the username (email) case-insensitively
        """
        case_insensitive_username_field = '{}__iexact'.format(
            self.model.USERNAME_FIELD)
        return self.get(**{case_insensitive_username_field: username})


class User(AbstractBaseUser):
    objects = UserAccountManager()

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    first_name = models.CharField(
        help_text='First Name of user', max_length=200, null=True)
    last_name = models.CharField(
        help_text='Last Name of user', max_length=200, null=True)
    username = models.CharField(
        help_text='Username for the user',
        max_length=200,
        null=False,
        unique=True)
    email = models.EmailField(
        help_text='Email of the user', max_length=200, null=False, unique=True)
    login_attempts = models.IntegerField(
        help_text='To track no of invalid login attempts', default=0)

    USERNAME_FIELD = 'email'

    class Meta:
        db_table = 'user'


class ForgotPassword(models.Model):
    """
    To maintain all the password reset links
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    email = models.EmailField(
        help_text='Email of the user', max_length=200, null=False)
    valid_upto = models.DateTimeField(
        help_text='DateTime valid upto', null=False)
    expired = models.BooleanField(
        help_text='If True - Link can not be used, False - Link can be used',
        default=False)
    created_on = models.DateTimeField(
        help_text='Reset link creation date', auto_now_add=True)

    class Meta:
        db_table = 'forgot_password'
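
# Hypothetical usage sketch (added; not part of the original models). It
# assumes a configured Django project with this app installed and migrated,
# and illustrates the case-insensitive lookup defined on the manager above.
def find_user_case_insensitively(email: str) -> 'User':
    # 'someone@EXAMPLE.com' and 'Someone@example.com' resolve to the same row.
    return User.objects.get_by_natural_key(email)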
| 33.87931 | 79 | 0.690076 | 246 | 1,965 | 5.353659 | 0.386179 | 0.05467 | 0.049355 | 0.060744 | 0.311314 | 0.270311 | 0.270311 | 0.270311 | 0.224753 | 0.188307 | 0 | 0.011688 | 0.216285 | 1,965 | 57 | 80 | 34.473684 | 0.843506 | 0.037659 | 0 | 0.142857 | 0 | 0 | 0.140389 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0.047619 | 0.119048 | 0 | 0.595238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba4d573b166c76762e7e2415d9817e7041d732f6 | 341 | py | Python | tweetsender/util.py | Udomomo/tweetsender | ac26da8d43945031c62f194ee41652fa819ed02f | ["MIT"] | null | null | null | tweetsender/util.py | Udomomo/tweetsender | ac26da8d43945031c62f194ee41652fa819ed02f | ["MIT"] | 8 | 2019-01-15T02:15:02.000Z | 2021-06-25T15:33:11.000Z | tweetsender/util.py | Udomomo/tweetsender | ac26da8d43945031c62f194ee41652fa819ed02f | ["MIT"] | 1 | 2019-01-21T08:46:37.000Z | 2019-01-21T08:46:37.000Z |
import os, json
CONFIG_PATH = os.path.expanduser('~') + os.sep + '.tweetsender_config.json'


def load_config(path):
    if not os.path.exists(path):
        return {}
    with open(path, 'r') as f:
        config = json.load(f)
    return config


def update_config(config, path):
    with open(path, 'w') as f:
        json.dump(config, f)
| 22.733333 | 75 | 0.618768 | 51 | 341 | 4.058824 | 0.431373 | 0.144928 | 0.115942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.234604 | 341 | 15 | 76 | 22.733333 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0.078947 | 0.070175 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba4ed8f42161c47e07364d23da358704878de3a9 | 539 | py | Python | app/controller/yushu_book.py | dollarkillerx/PyFlaskLearning | b2c7d76572f9ec4a5ad17541f47aa06d22a2d153 | ["MIT"] | null | null | null | app/controller/yushu_book.py | dollarkillerx/PyFlaskLearning | b2c7d76572f9ec4a5ad17541f47aa06d22a2d153 | ["MIT"] | null | null | null | app/controller/yushu_book.py | dollarkillerx/PyFlaskLearning | b2c7d76572f9ec4a5ad17541f47aa06d22a2d153 | ["MIT"] | null | null | null |
from utils.http import HTTP
class YuShuBook:
    isbn_url = 'http://t.yushu.im/v2/book/isbn/{}'
    keyword_url = 'http://t.yushu.im/v2/book/search?q={}&count={}&start={}'

    @classmethod
    def search_by_isbn(cls, isbn):
        url = YuShuBook.isbn_url.format(isbn)
        result = HTTP.get(url)
        # result is a dict parsed from the JSON response
        return result

    @classmethod
    def search_by_keyword(cls, keyword, start=0, count=15):
        url = YuShuBook.keyword_url.format(keyword, count, start)
        result = HTTP.get(url)
        return result
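
# Hypothetical usage sketch (added; not part of the original controller). The
# ISBN and keyword are arbitrary, and the t.yushu.im API must be reachable.
def _example_book_search():
    by_isbn = YuShuBook.search_by_isbn('9787501524044')
    by_keyword = YuShuBook.search_by_keyword('python', start=0, count=5)
    return by_isbn, by_keyword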
| 26.95 | 75 | 0.634508 | 74 | 539 | 4.513514 | 0.405405 | 0.062874 | 0.095808 | 0.077844 | 0.125749 | 0.125749 | 0.125749 | 0 | 0 | 0 | 0 | 0.012019 | 0.2282 | 539 | 19 | 76 | 28.368421 | 0.790865 | 0.016698 | 0 | 0.428571 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba5a6263e9060a2bbe3f845d221d8dd4af1cb784 | 3,292 | py | Python | node/blockchain/utils/lock.py | thenewboston-developers/Node | e71a405f4867786a54dd17ddd97595dd3a630018 | ["MIT"] | 18 | 2021-11-30T04:02:13.000Z | 2022-03-24T12:33:57.000Z | node/blockchain/utils/lock.py | thenewboston-developers/Node | e71a405f4867786a54dd17ddd97595dd3a630018 | ["MIT"] | 1 | 2022-02-04T17:07:38.000Z | 2022-02-04T17:07:38.000Z | node/blockchain/utils/lock.py | thenewboston-developers/Node | e71a405f4867786a54dd17ddd97595dd3a630018 | ["MIT"] | 5 | 2022-01-31T05:28:13.000Z | 2022-03-08T17:25:31.000Z |
import functools
import logging
import time
from typing import Optional

from django.conf import settings
from django.db import transaction
from pymongo.errors import DuplicateKeyError

from node.core.database import get_database
from node.core.exceptions import BlockchainIsNotLockedError, BlockchainLockingError, BlockchainUnlockingError

logger = logging.getLogger(__name__)


def get_lock_collection():
    return get_database().lock


def make_filter(name):
    return {'_id': name}


def is_locked(name):
    return bool(get_lock_collection().find_one(make_filter(name)))


def insert_lock(name):
    get_lock_collection().insert_one(make_filter(name))


def create_lock(name, timeout_seconds: Optional[float] = None):
    # TODO(dmu) HIGH: Make sure that timeout works correctly in conjunction with async behavior (Daphne)
    #                 https://thenewboston.atlassian.net/browse/BC-258
    if timeout_seconds is None:  # shortcut
        try:
            insert_lock(name)
        except DuplicateKeyError:
            raise BlockchainLockingError('Lock could not be acquired: %s' % name)
        return

    sleep_seconds = timeout_seconds / 10
    timeout_moment = time.time() + timeout_seconds
    while True:
        if not is_locked(name):
            try:
                insert_lock(name)
                return
            except DuplicateKeyError:
                logger.warning('Could not manage to get the lock :(')

        logger.debug('Waiting to acquire lock: %s', name)
        time.sleep(sleep_seconds)
        if time.time() >= timeout_moment:  # this makes sure we have at least one iteration
            break

    raise BlockchainLockingError('Blockchain locking timeout for lock: %s' % name)


def delete_lock(name):
    logger.debug('Deleting lock: %s', name)
    result = get_lock_collection().delete_one(make_filter(name))
    if result.deleted_count < 1:
        logger.warning('Lock %s was not found', name)
    else:
        logger.debug('Deleted lock: %s', name)
    return result


def delete_all_locks():
    return get_lock_collection().remove()


def lock(name, expect_locked=False):
    outer_expect_locked = expect_locked

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            bypass_lock_validation = kwargs.pop('bypass_lock_validation', False)
            if bypass_lock_validation:
                return func(*args, **kwargs)

            inner_expect_locked = kwargs.pop('expect_locked', outer_expect_locked)
            if inner_expect_locked:
                is_already_locked = is_locked(name)
                if not is_already_locked:
                    raise BlockchainIsNotLockedError
                return func(*args, **kwargs)

            try:
                create_lock(name, timeout_seconds=settings.LOCK_DEFAULT_TIMEOUT_SECONDS)
                transaction.get_connection().on_rollback(lambda: delete_lock(name))
            except DuplicateKeyError:
                raise BlockchainLockingError

            return_value = func(*args, **kwargs)
            delete_result = delete_lock(name)
            if delete_result.deleted_count < 1:
                raise BlockchainUnlockingError
            return return_value

        return wrapper

    return decorator
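
# Hypothetical usage sketch (added; not part of the original module). The
# function and lock names are invented; it assumes Django settings and a
# MongoDB connection are configured as the module expects.
@lock('blockchain-example')
def _example_append_block(block):
    # This body runs only while the 'blockchain-example' lock document is
    # held; the lock is acquired before the call and deleted afterwards.
    logger.info('processing block %s under lock', block)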
| 29.132743 | 109 | 0.662515 | 376 | 3,292 | 5.593085 | 0.343085 | 0.034237 | 0.040418 | 0.024251 | 0.100808 | 0.055159 | 0 | 0 | 0 | 0 | 0 | 0.002869 | 0.258809 | 3,292 | 112 | 110 | 29.392857 | 0.859016 | 0.066525 | 0 | 0.16 | 0 | 0 | 0.072686 | 0.007171 | 0 | 0 | 0 | 0.008929 | 0 | 1 | 0.133333 | false | 0.026667 | 0.12 | 0.053333 | 0.413333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba5bd16d958d549e745c8628fc70adf5078acfe8 | 1,272 | py | Python | researches/ocr/textbox/tb_preset.py | loveorchids/sroie2019 | d6bec71cdf0d4b4f7fc24a9ed6f1838da6fada05 | ["Apache-2.0"] | 14 | 2019-05-06T11:28:29.000Z | 2020-05-18T22:36:09.000Z | researches/ocr/textbox/tb_preset.py | loveorchids/sroie2019 | d6bec71cdf0d4b4f7fc24a9ed6f1838da6fada05 | ["Apache-2.0"] | 3 | 2019-09-02T16:11:32.000Z | 2019-10-22T14:47:03.000Z | researches/ocr/textbox/tb_preset.py | whq-hqw/sroie2019 | d6bec71cdf0d4b4f7fc24a9ed6f1838da6fada05 | ["Apache-2.0"] | 5 | 2020-06-10T05:13:46.000Z | 2021-07-29T03:38:55.000Z |
def GeneralPattern(args):
    args.path = "~/Downloads/dataset/ocr"
    # this will create a folder named "_text_detection" under "~/Downloads/dataset/ocr"
    args.code_name = "_text_detection"
    # Set it to True to make experiment result reproducible
    args.deterministic_train = False
    args.cudnn_benchmark = False
    # Random seed for everything
    # If deterministic_train is disabled, then it will have no meaning
    args.seed = 1
    # Training hyperparameters
    args.learning_rate = 1e-4
    args.batch_size_per_gpu = 1
    args.loading_threads = 2
    args.img_channel = 3
    args.epoch_num = 2000
    args.finetune = True
    # Because augmentation operation is defined in tb_augment.py
    args.do_imgaug = False
    # Image Normalization
    args.img_mean = (0.5, 0.5, 0.5)
    args.img_std = (1.0, 1.0, 1.0)
    args.img_bias = (0.0, 0.0, 0.0)
    return args


def Unique_Patterns(args):
    args.train_sources = ["SROIE2019"]
    args.train_aux = [{"txt": "txt", "img": "jpg"}]
    args.fix_size = True
    return args


def Runtime_Patterns(args):
    args.model_prefix_finetune = "768"
    args.model_prefix = "768"
    return args


PRESET = {
    "general": GeneralPattern,
    "unique": Unique_Patterns,
    "runtime": Runtime_Patterns,
}
| 28.266667 | 86 | 0.673742 | 177 | 1,272 | 4.672316 | 0.542373 | 0.012092 | 0.01451 | 0.01451 | 0.007255 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038268 | 0.21934 | 1,272 | 45 | 87 | 28.266667 | 0.794562 | 0.258648 | 0 | 0.096774 | 0 | 0 | 0.090812 | 0.024573 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba5bf2bbd4612e848682a3b4059b75e3c19f0d5e | 5,243 | py | Python | src/ml_things/text_functions.py | techthiyanes/ml_things | ddeeb16c55cf1d55cf80963217a8d1bffd0913cc | ["Apache-2.0"] | 153 | 2020-10-10T05:12:16.000Z | 2022-03-17T07:48:42.000Z | src/ml_things/text_functions.py | techthiyanes/ml_things | ddeeb16c55cf1d55cf80963217a8d1bffd0913cc | ["Apache-2.0"] | 21 | 2020-09-15T22:52:43.000Z | 2022-02-21T15:27:16.000Z | src/ml_things/text_functions.py | techthiyanes/ml_things | ddeeb16c55cf1d55cf80963217a8d1bffd0913cc | ["Apache-2.0"] | 42 | 2020-10-11T07:33:32.000Z | 2022-03-11T01:43:54.000Z |
# coding=utf-8
# Copyright 2020 George Mihaila.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that deal with text/string"""

import re
import copy
import string


def clean_text(text, full_clean=False, punctuation=False, numbers=False, lower=False, extra_spaces=False,
               control_characters=False, tokenize_whitespace=False, remove_characters=''):
    r"""
    Clean text using various techniques.

    I took inspiration from the cleantext library `https://github.com/prasanthg3/cleantext`. I did not like the whole
    implementation so I made my own changes.

    Note:
        As in the original cleantext library I will add: stop words removal, stemming and
        negative-positive words removal.

    Arguments:
        text (:obj:`str`):
            String that needs cleaning.

        full_clean (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove: punctuation, numbers, extra space, control characters and lower case. This argument is optional and
            it has a default value attributed inside the function.

        punctuation (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove punctuation from text. This argument is optional and it has a default value attributed inside
            the function.

        numbers (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove digits from text. This argument is optional and it has a default value attributed inside
            the function.

        lower (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Lower case all text. This argument is optional and it has a default value attributed inside the function.

        extra_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove extra spaces - everything beyond one space. This argument is optional and it has a default value
            attributed inside the function.

        control_characters (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove characters like `\n`, `\t` etc. This argument is optional and it has a default value attributed
            inside the function.

        tokenize_whitespace (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Return a list of tokens split on whitespace. This argument is optional and it has a default value
            attributed inside the function.

        remove_characters (:obj:`str`, `optional`, defaults to :obj:`''`):
            Remove defined characters form text. This argument is optional and it has a default value attributed
            inside the function.

    Returns:
        :obj:`str`: Clean string.

    Raises:
        ValueError: If `text` is not of type string.

        ValueError: If `remove_characters` is not of type string.
    """
    if not isinstance(text, str):
        # `text` is not of type string
        raise ValueError("`text` is not of type str!")

    if not isinstance(remove_characters, str):
        # remove characters needs to be a string
        raise ValueError("`remove_characters` needs to be a string!")

    # all control characters like `\t` `\n` `\r` etc.
    # Stack Overflow: https://stackoverflow.com/a/8115378/11281368
    control_characters_list = ''.join([chr(char) for char in range(1, 32)])

    # define control characters table
    table_control_characters = str.maketrans(dict.fromkeys(control_characters_list))

    # remove punctuation table
    table_punctuation = str.maketrans(dict.fromkeys(string.punctuation))

    # remove numbers table
    table_digits = str.maketrans(dict.fromkeys('0123456789'))

    # remove certain characters table
    table_remove_characters = str.maketrans(dict.fromkeys(remove_characters))

    # make a copy of text to make sure it doesn't affect original text
    cleaned = copy.deepcopy(text)

    if full_clean or punctuation:
        # remove punctuation
        cleaned = cleaned.translate(table_punctuation)

    if full_clean or numbers:
        # remove numbers
        cleaned = cleaned.translate(table_digits)

    if full_clean or extra_spaces:
        # remove extra spaces - also removes control characters
        # Stack Overflow https://stackoverflow.com/a/2077906/11281368
        cleaned = re.sub(r'\s+', ' ', cleaned).strip()

    if full_clean or lower:
        # lowercase
        cleaned = cleaned.lower()

    if control_characters:
        # remove control characters
        cleaned = cleaned.translate(table_control_characters)

    if remove_characters:
        # remove these characters from text (done before tokenization so both
        # flags can be combined)
        cleaned = cleaned.translate(table_remove_characters)

    if tokenize_whitespace:
        # tokenize text on whitespace
        cleaned = re.split(r'\s+', cleaned)

    return cleaned
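
# Hypothetical usage sketch (added; not part of the original module). The
# sample string is arbitrary.
def _example_clean_text():
    messy = "Hello,\tWorld!!  Call 555-0100   now\n"
    print(clean_text(messy, full_clean=True))           # 'hello world call now'
    print(clean_text(messy, tokenize_whitespace=True))  # ['Hello,', 'World!!', ...]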
| 37.719424 | 119 | 0.682815 | 673 | 5,243 | 5.261516 | 0.289747 | 0.057611 | 0.040666 | 0.047444 | 0.310364 | 0.282689 | 0.26292 | 0.226207 | 0.193166 | 0.164925 | 0 | 0.013207 | 0.234599 | 5,243 | 138 | 120 | 37.992754 | 0.869175 | 0.657829 | 0 | 0 | 0 | 0 | 0.052566 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.096774 | 0 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba5efff6aa38c2a1a87edc12bea27198aedf9cbd | 1,027 | py | Python | samples/timeit_3.py | thierrydecker/learning-python | d67242740c33037e1ff270a8e2107f915e0fd44a | ["Apache-2.0"] | 1 | 2020-11-05T13:34:30.000Z | 2020-11-05T13:34:30.000Z | samples/timeit_3.py | thierrydecker/learning-python | d67242740c33037e1ff270a8e2107f915e0fd44a | ["Apache-2.0"] | null | null | null | samples/timeit_3.py | thierrydecker/learning-python | d67242740c33037e1ff270a8e2107f915e0fd44a | ["Apache-2.0"] | 1 | 2019-01-21T08:46:37.000Z | 2019-01-21T08:46:37.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import timeit
import ssl

from urllib.request import Request, urlopen


class Timer(object):

    def __init__(self, verbose=False):
        self.verbose = verbose
        self.timer = timeit.default_timer

    def __enter__(self):
        self.start = timeit.default_timer()
        return self

    def __exit__(self, *args):
        end = timeit.default_timer()
        self.elapsed_secs = end - self.start
        self.elapsed = self.elapsed_secs * 1000
        if self.verbose:
            print('elapsed time: {} ms'.format(self.elapsed))


def my_function():
    myssl = ssl.create_default_context()
    myssl.check_hostname = False
    myssl.verify_mode = ssl.CERT_NONE
    with Timer(verbose=True) as t:
        req = Request('https://tutorialedge.net', headers={'User-Agent': 'Mozilla/5.0'})
        response = urlopen(req, context=myssl)
    print("Elapsed Time: {} seconds".format(t.elapsed_secs))


def main():
    my_function()


if __name__ == '__main__':
    main()
| 24.452381 | 88 | 0.642648 | 129 | 1,027 | 4.860465 | 0.511628 | 0.070175 | 0.086124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008838 | 0.228822 | 1,027 | 41 | 89 | 25.04878 | 0.782828 | 0.040896 | 0 | 0 | 0 | 0 | 0.09766 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.107143 | 0 | 0.357143 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba65bc096367d297da217b8327a7fdb4c4c548e9 | 7,928 | py | Python | tests/test_nakfa.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | ["MIT"] | 2 | 2021-03-26T18:19:57.000Z | 2021-07-27T01:15:50.000Z | tests/test_nakfa.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | ["MIT"] | null | null | null | tests/test_nakfa.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
#
# copyright: 2020-2022, Frederico Martins
# author: Frederico Martins <http://github.com/fscm>
# license: SPDX-License-Identifier: MIT
"""Tests for the Nakfa currency representation(s)."""

from decimal import Context
from pytest import raises
from multicurrency import Currency
from multicurrency import (
    CurrencyMismatchException,
    CurrencyTypeException)

CONTEXT = Context(prec=28, rounding='ROUND_HALF_EVEN').copy()

"""Tests for the Nakfa representation."""
from multicurrency import Nakfa


class TestNakfa:
    """Nakfa currency tests."""

    def test_nakfa(self):
        """test_nakfa."""
        amount = CONTEXT.create_decimal(1) / CONTEXT.create_decimal(7)
        nakfa = Nakfa(amount=amount)
        decimal = CONTEXT.create_decimal(amount)
        assert nakfa.amount == decimal
        assert nakfa.numeric_code == '232'
        assert nakfa.alpha_code == 'ERN'
        assert nakfa.decimal_places == 2
        assert nakfa.decimal_sign == '.'
        assert nakfa.grouping_places == 3
        assert nakfa.grouping_sign == ','
        assert not nakfa.international
        assert nakfa.symbol == 'Nfk'
        assert nakfa.symbol_ahead
        assert nakfa.symbol_separator == '\u00A0'
        assert nakfa.localized_symbol == 'Nfk'
        assert nakfa.convertion == ''
        assert nakfa.__hash__() == hash(
            (nakfa.__class__, decimal, 'ERN', '232'))
        assert nakfa.__repr__() == (
            'Nakfa(amount: 0.1428571428571428571428571429, '
            'alpha_code: "ERN", '
            'symbol: "Nfk", '
            'symbol_ahead: True, '
            'symbol_separator: "\u00A0", '
            'localized_symbol: "Nfk", '
            'numeric_code: "232", '
            'decimal_places: "2", '
            'decimal_sign: ".", '
            'grouping_places: "3", '
            'grouping_sign: ",", '
            'convertion: "", '
            'international: False)')
        assert nakfa.__str__() == 'Nfk 0.14'

    def test_nakfa_negative(self):
        """test_nakfa_negative."""
        amount = -100
        nakfa = Nakfa(amount=amount)
        decimal = CONTEXT.create_decimal(amount)
        assert nakfa.numeric_code == '232'
        assert nakfa.alpha_code == 'ERN'
        assert nakfa.decimal_places == 2
        assert nakfa.decimal_sign == '.'
        assert nakfa.grouping_places == 3
        assert nakfa.grouping_sign == ','
        assert not nakfa.international
        assert nakfa.symbol == 'Nfk'
        assert nakfa.symbol_ahead
        assert nakfa.symbol_separator == '\u00A0'
        assert nakfa.localized_symbol == 'Nfk'
        assert nakfa.convertion == ''
        assert nakfa.__hash__() == hash(
            (nakfa.__class__, decimal, 'ERN', '232'))
        assert nakfa.__repr__() == (
            'Nakfa(amount: -100, '
            'alpha_code: "ERN", '
            'symbol: "Nfk", '
            'symbol_ahead: True, '
            'symbol_separator: "\u00A0", '
            'localized_symbol: "Nfk", '
            'numeric_code: "232", '
            'decimal_places: "2", '
            'decimal_sign: ".", '
            'grouping_places: "3", '
            'grouping_sign: ",", '
            'convertion: "", '
            'international: False)')
        assert nakfa.__str__() == 'Nfk -100.00'

    def test_nakfa_custom(self):
        """test_nakfa_custom."""
        amount = 1000
        nakfa = Nakfa(
            amount=amount,
            decimal_places=5,
            decimal_sign=',',
            grouping_places=2,
            grouping_sign='.',
            international=True,
            symbol_ahead=False,
            symbol_separator='_')
        decimal = CONTEXT.create_decimal(amount)
        assert nakfa.amount == decimal
        assert nakfa.numeric_code == '232'
        assert nakfa.alpha_code == 'ERN'
        assert nakfa.decimal_places == 5
        assert nakfa.decimal_sign == ','
        assert nakfa.grouping_places == 2
        assert nakfa.grouping_sign == '.'
        assert nakfa.international
        assert nakfa.symbol == 'Nfk'
        assert not nakfa.symbol_ahead
        assert nakfa.symbol_separator == '_'
        assert nakfa.localized_symbol == 'Nfk'
        assert nakfa.convertion == ''
        assert nakfa.__hash__() == hash(
            (nakfa.__class__, decimal, 'ERN', '232'))
        assert nakfa.__repr__() == (
            'Nakfa(amount: 1000, '
            'alpha_code: "ERN", '
            'symbol: "Nfk", '
            'symbol_ahead: False, '
            'symbol_separator: "_", '
            'localized_symbol: "Nfk", '
            'numeric_code: "232", '
            'decimal_places: "5", '
            'decimal_sign: ",", '
            'grouping_places: "2", '
            'grouping_sign: ".", '
            'convertion: "", '
            'international: True)')
        assert nakfa.__str__() == 'ERN 10,00.00000'

    def test_nakfa_changed(self):
        """test_nakfa_changed."""
        nakfa = Nakfa(amount=1000)
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.amount = 999
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.alpha_code = 'EUR'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.convertion = '0123456789,.'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.symbol = '€'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.symbol_ahead = False
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.symbol_separator = '_'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.localized_symbol = '€'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.numeric_code = '978'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.decimal_places = 3
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.decimal_sign = ','
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.grouping_places = 4
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.grouping_sign = '.'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.international = True

    def test_nakfa_math_add(self):
        """test_nakfa_math_add."""
        nakfa_one = Nakfa(amount=1)
        nakfa_two = Nakfa(amount=2)
        nakfa_three = Nakfa(amount=3)
        currency = Currency(amount=1, alpha_code='OTHER')
        with raises(
                CurrencyMismatchException,
                match='unsupported operation between currency ERN and OTHER.'):
            _ = nakfa_one + currency
        with raises(
                CurrencyTypeException,
                match=(
                    'unsupported operation between <class \'multicurrency.'
                    'nakfa.Nakfa\'> '
                    'and <class \'str\'>.')):
            _ = nakfa_one.__add__('1.00')
        assert (
            nakfa_one +
            nakfa_two) == nakfa_three

    def test_nakfa_slots(self):
        """test_nakfa_slots."""
        nakfa = Nakfa(amount=1000)
        with raises(
                AttributeError,
                match=(
                    '\'Nakfa\' '
                    'object has no attribute \'new_variable\'')):
            nakfa.new_variable = 'fail'  # pylint: disable=assigning-non-slot
| 34.620087 | 79 | 0.531282 | 731 | 7,928 | 5.533516 | 0.166895 | 0.122373 | 0.083066 | 0.100371 | 0.657849 | 0.632633 | 0.632633 | 0.603461 | 0.566873 | 0.517429 | 0 | 0.030868 | 0.350278 | 7,928 | 228 | 80 | 34.77193 | 0.754028 | 0.046418 | 0 | 0.61809 | 0 | 0 | 0.157768 | 0.004148 | 0 | 0 | 0 | 0 | 0.241206 | 1 | 0.030151 | false | 0 | 0.025126 | 0 | 0.060302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba673bdb3cd7a82d7bc50d66607572efdb6a45e2 | 3,443 | py | Python | server.py | dataculturegroup/news-entity-server | e0726098a46b70dac5a97dcd927e5f39c68e68d1 | [
"MIT"
] | null | null | null | server.py | dataculturegroup/news-entity-server | e0726098a46b70dac5a97dcd927e5f39c68e68d1 | [
"MIT"
] | 1 | 2022-03-14T21:01:33.000Z | 2022-03-29T13:54:35.000Z | server.py | dataculturegroup/news-entity-server | e0726098a46b70dac5a97dcd927e5f39c68e68d1 | [
"MIT"
] | null | null | null | import logging
import os
from dotenv import load_dotenv
import sentry_sdk
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.integrations.logging import ignore_logger
from typing import Optional
import helpers
import helpers.content as content
import helpers.entities as entities
from helpers.request import api_method
from fastapi import FastAPI, Form
# setup logging
logging.basicConfig(level=logging.INFO,
format="[%(asctime)s][%(levelname)s] %(name)s %(filename)s:%(funcName)s:%(lineno)d | %(message)s")
logger = logging.getLogger(__name__)
logger.info("---------------------------------------------------------------------------")
# load in config from local file or environment variables
load_dotenv()
app = FastAPI(
title="News Entity Server",
description="Extract entities from online news in multiple langauges",
version=helpers.VERSION,
license_info={
"name": "The MIT License"
}
)
SENTRY_DSN = os.environ.get('SENTRY_DSN', None) # optional centralized logging to Sentry
if SENTRY_DSN:
sentry_sdk.init(dsn=SENTRY_DSN, release=helpers.VERSION)
# make sure some errors we don't care about don't make it to sentry
ignore_logger("boilerpy3")
ignore_logger("trafilatura.utils")
ignore_logger("trafilatura.core")
ignore_logger("readability.readability")
logger.info(" SENTRY_DSN: {}".format(SENTRY_DSN))
try:
app.add_middleware(SentryAsgiMiddleware)
except Exception:
# pass silently if the Sentry integration failed
pass
else:
logger.info("Not logging errors to Sentry")
@app.get("/version")
@api_method
def version():
return {}
@app.get("/languages")
@api_method
def supported_languages():
return helpers.LANGUAGES
@app.post("/entities/from-url")
@api_method
def entities_from_url(url: str = Form(..., description="A publicly accessible web url of a news story."),
                      language: str = Form(..., description="One of the supported two-letter language codes.", min_length=2, max_length=2),
title: Optional[int] = Form(None, description="Optional 1 or 0 indicating if the title should be prefixed the content before checking for entities.",)):
"""
Return all the entities found in content extracted from the URL.
"""
article_info = content.from_url(url)
include_title = title == 1 if title is not None else False
article_text = ""
if include_title and (article_info['title'] is not None):
article_text += article_info['title'] + " "
article_text += article_info['text']
data = entities.from_text(article_text, language)
return data
@app.post("/content/from-url")
@api_method
def content_from_url(url: str = Form(..., description="A publicly accessible web url of a news story.")):
"""
Return the content found at the URL. This uses a fallback mechanism to iterate through a list of 3rd party content
extractors. It will try each until it finds one that succeeds.
"""
return content.from_url(url)
@app.post("/entities/from-content")
@api_method
def entities_from_content(text: str = Form(..., description="Raw text to check for entities."),
                          language: str = Form(..., description="One of the supported two-letter language codes.", min_length=2, max_length=2)):
"""
Return all the entities found in content passed in.
"""
return entities.from_text(text, language)
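# Illustrative request (assumes the app is served locally, e.g. via uvicorn on
# port 8000; the URL and language values are placeholders):
#   curl -X POST http://localhost:8000/entities/from-url \
#        -F "url=https://example.com/article" -F "language=en"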
| 34.089109 | 174 | 0.689805 | 452 | 3,443 | 5.143805 | 0.34292 | 0.04129 | 0.025806 | 0.021935 | 0.177204 | 0.147097 | 0.147097 | 0.117849 | 0.117849 | 0.117849 | 0 | 0.002496 | 0.185594 | 3,443 | 100 | 175 | 34.43 | 0.826676 | 0.15016 | 0 | 0.072464 | 0 | 0.014493 | 0.271936 | 0.064067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0.014493 | 0.173913 | 0.028986 | 0.318841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba676614bbcce93369400054be1dfe970a2717d6 | 1,319 | py | Python | tele_saavn.py | rsoorajs/Champ | eb6811bcb5bd0aff3464b1f996514419465dabfd | [
"MIT"
] | 17 | 2018-06-28T03:17:46.000Z | 2021-07-15T13:22:35.000Z | tele_saavn.py | rsoorajs/Champ | eb6811bcb5bd0aff3464b1f996514419465dabfd | [
"MIT"
] | null | null | null | tele_saavn.py | rsoorajs/Champ | eb6811bcb5bd0aff3464b1f996514419465dabfd | [
"MIT"
] | 23 | 2018-09-10T08:02:43.000Z | 2021-09-09T07:07:18.000Z | from bs4 import BeautifulSoup
import requests
def songs_info(res):
soup = BeautifulSoup(res.text, 'lxml')
data = soup.find('ol', {'class': 'content-list'})
return data
def get_songs(data, limit=10):
    song_list = []
    # zip() against the range caps iteration at `limit` result blocks
    for i, _ in zip(data.find_all('div', {'class': 'details'}), range(int(limit))):
        song = i.find('p', {'class': 'song-name'}).text
        album = i.find('p', {'class': 'album-name'}).text
        item = song
        if album != song:
            item = item + " (" + album + ")"
        song_list.append(item)
    return song_list
def saavn_tops(lang):
res = requests.get("https://www.saavn.com/s/featured/" + lang + "/Weekly+Top+Songs")
data = songs_info(res)
return get_songs(data)
def hindi_chartbusters():
res = requests.get("https://www.saavn.com/s/charts/Hindi-Chartbusters/u-75xwHI4ks_?&utm_content=wap%3Ahome%3Atop_charts%3Aplay%3Aclick&utm_page=home&utm_button=top_charts")
data = songs_info(res)
return get_songs(data)
def english_chartbusters():
res = requests.get("https://www.saavn.com/s/charts/English-Chartbusters/9J4ePDXBp8k_?utm_content=wap%3Aall_top_charts%3Atop_charts%3Aplay%3Aclick&utm_page=all_top_charts&utm_button=top_charts&")
data = songs_info(res)
return get_songs(data)
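# Minimal usage sketch (requires network access; JioSaavn's markup may have
# changed since this scraper was written):
#   if __name__ == '__main__':
#       for song in saavn_tops('hindi'):
#           print(song)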
| 30.674419 | 195 | 0.666414 | 189 | 1,319 | 4.486772 | 0.359788 | 0.053066 | 0.056604 | 0.067217 | 0.395047 | 0.395047 | 0.321934 | 0.321934 | 0.285377 | 0.238208 | 0 | 0.019301 | 0.175133 | 1,319 | 42 | 196 | 31.404762 | 0.76011 | 0 | 0 | 0.2 | 0 | 0.066667 | 0.337386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.066667 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba69a75af6fe44e2f9b818fc23054a99d5ef6411 | 6,735 | py | Python | bin/calc_word.py | hasibaasma/alfpy | c8c0c1300108015746320cede2207ac57e630d3e | [
"MIT"
] | 19 | 2017-02-20T17:42:02.000Z | 2021-12-16T19:07:17.000Z | bin/calc_word.py | eggleader/alfpy | e0782e9551458ef17ab29df8af13fc0f8925e894 | [
"MIT"
] | 3 | 2018-03-12T23:54:27.000Z | 2020-12-09T21:53:19.000Z | bin/calc_word.py | eggleader/alfpy | e0782e9551458ef17ab29df8af13fc0f8925e894 | [
"MIT"
] | 6 | 2016-12-06T09:12:04.000Z | 2021-09-24T14:40:47.000Z | #! /usr/bin/env python
# Copyright (c) 2016 Zielezinski A, combio.pl
import argparse
import sys
from alfpy import word_distance
from alfpy import word_pattern
from alfpy import word_vector
from alfpy.utils import distmatrix
from alfpy.utils import seqrecords
from alfpy.version import __version__
def get_parser():
parser = argparse.ArgumentParser(
description='''Calculate distances between DNA/protein sequences based
on subsequence (words) occurrences.''',
add_help=False, prog='calc_word.py'
)
group = parser.add_argument_group('REQUIRED ARGUMENTS')
group.add_argument('--fasta', '-f',
help='input FASTA sequence filename', required=True,
type=argparse.FileType('r'), metavar="FILE")
group = parser.add_argument_group(' Choose between the two options')
g1 = group.add_mutually_exclusive_group()
g1.add_argument('--word_size', '-s', metavar="N",
help='word size for creating word patterns',
type=int)
g1.add_argument('--word_pattern', '-w',
help='input filename w/ pre-computed word patterns',
type=argparse.FileType('r'), metavar="FILE")
group = parser.add_argument_group('OPTIONAL ARGUMENTS')
distlist = word_distance.Distance.get_disttypes()
group.add_argument('--distance', '-d', choices=distlist,
help='choose from: {} [DEFAULT: %(default)s]'.format(
", ".join(distlist)),
metavar='', default="google")
veclist = ['counts', 'freqs', 'freqs_std']
group.add_argument('--vector', '-v', choices=veclist,
help='choose from: {} [DEFAULT: %(default)s]'.format(
", ".join(veclist)),
metavar='', default="freqs")
group.add_argument('--char_weights', '-W', metavar="FILE",
help='''file w/ weights of background sequence
characters (nt/aa)''',
type=argparse.FileType('r'))
group = parser.add_argument_group('FREQUENCY MODEL ARGUMENTS',
''' Required for vector \'freqs_std\'.
Specify one of the two options:''')
group.add_argument('--char_freqs', '-F', metavar="FILE",
help='''file w/ frequencies of background sequence
characters (nt/aa)''',
type=argparse.FileType('r'))
group.add_argument('--alphabet_size', '-a', metavar="N",
help='alphabet size', type=int)
group = parser.add_argument_group('OUTPUT ARGUMENTS')
group.add_argument('--out', '-o', help="output filename",
metavar="FILE")
group.add_argument('--outfmt', choices=['phylip', 'pairwise'],
default='phylip',
help='distances output format [DEFAULT: %(default)s]')
group = parser.add_argument_group("OTHER OPTIONS")
group.add_argument("-h", "--help", action="help",
help="show this help message and exit")
group.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
if len(sys.argv[1:]) == 0:
# parser.print_help()
parser.print_usage()
parser.exit()
return parser
def validate_args(parser):
args = parser.parse_args()
if args.word_size:
if args.word_size < 1:
parser.error('word size must be >= 1')
elif args.word_pattern:
pass
else:
parser.error("Specify either: --word_size or --word_pattern.")
if args.distance == 'kld' and args.vector != 'freqs':
parser.error("--distance kld requires --vector freqs.")
if args.char_weights is not None:
if args.vector == 'freqs_std':
e = '--char_weights requires a vector of either \'freqs\''
e += ' or \'counts\''
parser.error(e)
else:
try:
weights = word_vector.read_weightfile(args.char_weights)
args.char_weights = weights
except Exception:
e = 'Invalid format for --char_weights {0}'.format(
args.char_weights.name)
parser.error(e)
if args.vector == 'freqs_std':
if args.char_freqs is None and args.alphabet_size is None:
e = "freqs_std requires either --alphabet_size or --char_freqs"
parser.error(e)
elif args.char_freqs is not None:
try:
freqs = word_vector.read_freqfile(args.char_freqs)
args.char_freqs = freqs
except Exception:
e = 'Invalid format for --char_freqs {0}'.format(
args.char_freqs.name)
parser.error(e)
elif args.alphabet_size < 2:
parser.error('Alphabet size must be >=2.')
else:
if args.char_freqs is not None:
parser.error("Option --char_freqs requires --vector freqs_std ")
if args.alphabet_size is not None:
parser.error("Option --alphabet_size requires --vector freqs_std ")
return args
def main():
parser = get_parser()
args = validate_args(parser)
seq_records = seqrecords.read_fasta(args.fasta)
if args.word_size:
p = word_pattern.create(seq_records.seq_list, args.word_size)
else:
p = word_pattern.read(args.word_pattern)
veccls = {'counts': word_vector.Counts,
'freqs': word_vector.Freqs}
vecclsw = {'counts': word_vector.CountsWeight,
'freqs': word_vector.FreqsWeight
}
if args.vector == 'counts' or args.vector == 'freqs':
if args.char_weights is None:
vec = veccls[args.vector](seq_records.length_list, p)
else:
weightmodel = word_vector.WeightModel(
char_weights=args.char_weights)
vec = vecclsw[args.vector](seq_records.length_list, p, weightmodel)
else:
if args.alphabet_size:
freqmodel = word_vector.EqualFreqs(
alphabet_size=args.alphabet_size)
else:
freqmodel = word_vector.EquilibriumFreqs(args.char_freqs)
vec = word_vector.FreqsStd(seq_records.length_list, p, freqmodel)
dist = word_distance.Distance(vec, args.distance)
matrix = distmatrix.create(seq_records.id_list, dist)
if args.out:
oh = open(args.out, 'w')
matrix.write_to_file(oh, args.outfmt)
oh.close()
else:
matrix.display(args.outfmt)
if __name__ == '__main__':
main()
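# Example invocation (assumes a FASTA file 'seqs.fasta' in the working
# directory; the flags mirror the argparse definitions above):
#   python calc_word.py --fasta seqs.fasta --word_size 2 \
#       --distance google --vector freqs --out dist.txt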
| 37.837079 | 79 | 0.580401 | 760 | 6,735 | 4.968421 | 0.236842 | 0.052436 | 0.042373 | 0.034958 | 0.245763 | 0.158104 | 0.135064 | 0.083686 | 0.06303 | 0.06303 | 0 | 0.00318 | 0.299629 | 6,735 | 177 | 80 | 38.050847 | 0.797329 | 0.012621 | 0 | 0.193103 | 0 | 0 | 0.215302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02069 | false | 0.006897 | 0.055172 | 0 | 0.089655 | 0.006897 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba6aeda61d95d834eae2422e11670a7111135c85 | 18,770 | py | Python | dd_pose/evaluation_helpers.py | kevinsu628/dd-pose | 889d117170fd0bc86e1ca7fd5b429c54b225f35b | [
"MIT"
] | null | null | null | dd_pose/evaluation_helpers.py | kevinsu628/dd-pose | 889d117170fd0bc86e1ca7fd5b429c54b225f35b | [
"MIT"
] | null | null | null | dd_pose/evaluation_helpers.py | kevinsu628/dd-pose | 889d117170fd0bc86e1ca7fd5b429c54b225f35b | [
"MIT"
] | null | null | null | import numpy as np
import zipfile
from io import StringIO
import os
import json
import pandas as pd
import transformations as tr
from multiprocess import Pool
import plotly
import plotly.graph_objs as go
from dd_pose.dataset_item import DatasetItem, StampedTransforms
# a coordinate frame which allows for identity transformation for a head frontally looking inside the camera
# (x pointing inside the camera (opposite to camera viewing direction)
# (y pointing towards right in camera image)
# (z pointing upwards in camera image)
T_camdriver_headfrontal = np.array([
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, -1.0, 0.0],
[-1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0]
])
T_headfrontal_camdriver = np.linalg.inv(T_camdriver_headfrontal)
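# Cheap sanity check (illustrative): the two matrices are mutual inverses, so
# composing them yields the identity, and a head looking frontally into the
# camera maps to the identity pose in the headfrontal frame:
#   assert np.allclose(T_camdriver_headfrontal.dot(T_headfrontal_camdriver), np.eye(4))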
class FilePredictor:
def __init__(self, predictions_dir, di_dict=None):
self.predictions_file = os.path.join(predictions_dir,\
'subject-%02d' % di_dict['subject'],\
'scenario-%02d' % di_dict['scenario'],\
di_dict['humanhash'],\
't-camdriver-head-predictions.json')
with open(self.predictions_file) as fp:
self.predictions = StampedTransforms(fp)
try:
with open(os.path.join(predictions_dir, 'metadata.json')) as fp:
self.metadata = json.load(fp)
except:
self.metadata = dict()
def get_T_camdriver_head(self, stamp):
return self.predictions.get_transform(stamp)
def get_t_camdriver_head(self, stamp):
T_camdriver_head = self.get_T_camdriver_head(stamp)
if T_camdriver_head is None:
return None
return T_camdriver_head[0:3,3]
def get_T_headfrontal_head(self, stamp):
T_camdriver_head = self.get_T_camdriver_head(stamp)
if T_camdriver_head is None:
return None
T_headfrontal_head = np.dot(T_headfrontal_camdriver, T_camdriver_head)
return T_headfrontal_head
class ZipFilePredictor(FilePredictor):
def __init__(self, zip_file, di_dict=None):
self.zf = zipfile.ZipFile(zip_file)
self.predictions_file = os.path.join('subject-%02d' % di_dict['subject'],\
'scenario-%02d' % di_dict['scenario'],\
di_dict['humanhash'],\
't-camdriver-head-predictions.json')
# read predictions json file from within zip file in memory
# wrap in StringIO to make file-like object for StampedTransforms
        sio = StringIO(self.zf.read(self.predictions_file).decode('utf-8'))
try:
self.predictions = StampedTransforms(sio)
        except ValueError as e:
            raise ValueError(
                'File %s is malformed json' % self.predictions_file) from e
try:
self.metadata = json.loads(self.zf.read('metadata.json'))
except:
self.metadata = dict()
class EvaluationData:
"""
    EvaluationData holds ground truth and hypotheses in a pandas DataFrame.
It allows to filter to subsets (easy, moderate, hard) and compute metrics.
Correspondence of ground truth and hypotheses is given via integer stamp.
"""
def __init__(self):
self.df = pd.DataFrame()
self.df.index.name = 'stamp'
self.name = ""
def load(self, di_dict, predictor):
di = DatasetItem(di_dict)
self.df['subject'] = pd.Series(data=di.get_subject(), index=di.get_stamps())
self.df['scenario'] = di.get_scenario()
self.df['humanhash'] = di.get_humanhash()
for stamp in di.get_stamps():
T_camdriver_head = di.get_T_camdriver_head(stamp)
assert T_camdriver_head is not None
T_headfrontal_head = T_headfrontal_camdriver.dot(T_camdriver_head)
self.df.at[stamp, 'gt_roll'], self.df.at[stamp, 'gt_pitch'], self.df.at[stamp, 'gt_yaw'] = tr.euler_from_matrix(T_headfrontal_head)
self.df.at[stamp, 'gt_x'], self.df.at[stamp, 'gt_y'], self.df.at[stamp, 'gt_z'] = T_camdriver_head[0:3,3]
gt_angle_from_zero, _, _ = tr.rotation_from_matrix(T_headfrontal_head)
self.df.at[stamp, 'gt_angle_from_zero'] = abs(gt_angle_from_zero)
self.df.at[stamp, 'occlusion_state'] = di.get_occlusion_state(stamp)
hypo_T_headfrontal_head = predictor.get_T_headfrontal_head(stamp)
if hypo_T_headfrontal_head is None:
self.df.at[stamp, 'hypo_roll'] = None
self.df.at[stamp, 'hypo_pitch'] = None
self.df.at[stamp, 'hypo_yaw'] = None
self.df.at[stamp, 'angle_diff'] = None
self.df.at[stamp, 'hypo_x'] = None
self.df.at[stamp, 'hypo_y'] = None
self.df.at[stamp, 'hypo_z'] = None
else:
self.df.at[stamp, 'hypo_roll'], self.df.at[stamp, 'hypo_pitch'], self.df.at[stamp, 'hypo_yaw'] = tr.euler_from_matrix(hypo_T_headfrontal_head)
angle_difference, _, _ = tr.rotation_from_matrix(tr.inverse_matrix(T_headfrontal_head).dot(hypo_T_headfrontal_head))
self.df.at[stamp, 'angle_diff'] = abs(angle_difference)
self.df.at[stamp, 'hypo_x'], self.df.at[stamp, 'hypo_y'], self.df.at[stamp, 'hypo_z'] = predictor.get_t_camdriver_head(stamp)
# print gt_angle_from_zero, angle_difference, np.rad2deg(angle_difference), position_difference
@staticmethod
def load_evaluation_data(di_dict, predictor_class, predictor_kwargs):
"""
Factory method creating an EvaluationData object with loaded ground truth and predictions from predictor.
"""
ed = EvaluationData()
predictor_kwargs.update({'di_dict': di_dict})
predictor = predictor_class(**predictor_kwargs)
ed.load(di_dict, predictor)
return ed
def load_all(self, di_dicts, predictor_class, predictor_kwargs, is_parallel=True):
"""
Load both ground truth and predictions for all di_dicts.
"""
        if is_parallel:
            # context manager ensures the worker pool is terminated afterwards
            with Pool(12) as p:
                eds = p.map(lambda di_dict: EvaluationData.load_evaluation_data(di_dict, predictor_class, predictor_kwargs), di_dicts)
else:
eds = map(lambda di_dict: EvaluationData.load_evaluation_data(di_dict, predictor_class, predictor_kwargs), di_dicts)
self.df = pd.concat([e.df for e in eds], sort=True)
del eds
diff = self.df[['gt_x','gt_y', 'gt_z']].values - self.df[['hypo_x', 'hypo_y', 'hypo_z']].values
self.df['pos_diff'] = np.linalg.norm(diff, axis=1)
def get_dx(self):
return abs((self.df.hypo_x - self.df.gt_x)).mean()
def get_dy(self):
return abs((self.df.hypo_y - self.df.gt_y)).mean()
def get_dz(self):
return abs((self.df.hypo_z - self.df.gt_z)).mean()
def get_dxyz(self):
"""
Get mean absoulte L2 distance.
"""
return abs(self.df.pos_diff).mean()
def get_recall(self):
"""
Get recall, i.e. ratio of available predictions and ground truth measurements.
"""
n_gt = self.df.gt_x.count()
n_pos = self.df[~self.df.gt_x.isna()].hypo_x.count()
if n_gt > 0:
recall = float(n_pos)/n_gt
else:
recall = np.nan
return recall
def get_drpy(self):
# rad
return (self.df[['gt_roll','gt_pitch', 'gt_yaw']].values - self.df[['hypo_roll', 'hypo_pitch', 'hypo_yaw']]).abs().mean().values
def get_mae(self):
mae = self.df.angle_diff.mean()
return mae
def new_by_angle_range(self, angle_rad_min, angle_rad_max):
ed = EvaluationData()
ed.df = self.df[(self.df.gt_angle_from_zero >= angle_rad_min) & (self.df.gt_angle_from_zero < angle_rad_max)]
ed.name = self.name + "%.0f<=a<%.0f" % (angle_rad_min, angle_rad_max)
return ed
def new_by_roll_range(self, angle_rad_min, angle_rad_max):
ed = EvaluationData()
ed.df = self.df[(self.df.gt_roll.abs() >= angle_rad_min) & (self.df.gt_roll.abs() < angle_rad_max)]
return ed
def new_by_pitch_range(self, angle_rad_min, angle_rad_max):
ed = EvaluationData()
ed.df = self.df[(self.df.gt_pitch.abs() >= angle_rad_min) & (self.df.gt_pitch.abs() < angle_rad_max)]
return ed
def new_by_yaw_range(self, angle_rad_min, angle_rad_max):
ed = EvaluationData()
ed.df = self.df[(self.df.gt_yaw.abs() >= angle_rad_min) & (self.df.gt_yaw.abs() < angle_rad_max)]
return ed
def new_by_occlusion_none(self):
ed = EvaluationData()
ed.df = self.df[(self.df.occlusion_state == 'none-auto') | (self.df.occlusion_state == 'none')]
ed.name = self.name + " occl=none"
return ed
def new_by_occlusion_none_partial(self):
ed = EvaluationData()
ed.df = self.df[(self.df.occlusion_state == 'none-auto') | (self.df.occlusion_state == 'none') | (self.df.occlusion_state == 'partial') | (self.df.occlusion_state == 'partial-auto')]
ed.name = self.name + " occl<=partial"
return ed
def new_by_dist_z(self, min_z, max_z=None):
ed = EvaluationData()
ed.df = self.df[self.df.gt_z >= min_z]
ed.name = self.name + " z>=%.2f" % min_z
if max_z is not None:
ed.df = ed.df[ed.df.gt_z < max_z]
ed.name += " z<%.2f" % max_x
return ed
def new_easy(self):
"""Easy subset: angle in [0..35), occlusion none, min dist 0.4m"""
ed = self.new_by_angle_range(np.deg2rad(0), np.deg2rad(35))
ed = ed.new_by_occlusion_none()
ed.name = self.name + " easy"
return ed
def new_moderate(self):
"""Moderate subset: angle in [35..60), occlusion none or partial, min dist 0.4m"""
ed = self.new_by_angle_range(np.deg2rad(0), np.deg2rad(60))
ed = ed.new_by_occlusion_none_partial()
# remove easy ones
ed.df = ed.df[~((ed.df.gt_angle_from_zero < np.deg2rad(35)) & ((ed.df.occlusion_state == 'none') | (ed.df.occlusion_state == 'none-auto')))]
ed.name = self.name + " mod"
return ed
def new_hard(self):
"""Hard subset: angle in [60..inf) or <0.4m, occlusion all types"""
ed = EvaluationData()
ed.df = self.df[(self.df.gt_angle_from_zero >= np.deg2rad(60)) | (self.df.occlusion_state == 'full') | (self.df.occlusion_state == 'full-auto')]
ed.name = self.name + " hard"
return ed
def new_test_split(self):
"""Test split"""
ed = EvaluationData()
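        # NOTE: `test_subjects` is expected to be defined at module scope
        # (e.g. imported from the dataset configuration); it is not defined
        # in this excerpt.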
ed.df = self.df[self.df.subject.isin(test_subjects)]
ed.name = self.name + " test"
return ed
def new_trainval_split(self):
"""Trainval split"""
ed = EvaluationData()
ed.df = self.df[~self.df.subject.isin(test_subjects)]
ed.name = self.name + " trainval"
return ed
def get_angle_recalls(self, d=5, k=75):
"""deg!"""
bins = dict()
for i in range(0, k-1, d):
bins[i] = self.new_by_angle_range(np.deg2rad(i), np.deg2rad(i+d)).get_recall()
angles, recalls = zip(*[(k,v) for k,v in sorted(bins.items()) if not np.isnan(v)])
angles = np.array(angles)
return angles, recalls
def get_angle_maes(self, d=5, k=75):
"""deg!"""
bins = dict()
for i in range(0, k-1, d):
bins[i] = self.new_by_angle_range(np.deg2rad(i), np.deg2rad(i+d)).get_mae()
angles, maes = zip(*[(k,v) for k,v in sorted(bins.items()) if not np.isnan(v)])
angles = np.array(angles)
maes = np.rad2deg(np.array(maes))
return angles, maes
def get_angle_rpys(self, d=5, k=75):
"""deg!"""
bins = dict()
for i in range(0, k-1, d):
bins[i] = self.new_by_angle_range(np.deg2rad(i), np.deg2rad(i+d)).get_drpy()
angles, rpys = zip(*[(k,v) for k,v in sorted(bins.items()) if not np.any(np.isnan(v))])
angles = np.array(angles)
rpys = np.rad2deg(np.array(rpys))
return angles, rpys
def get_angle_rolls(self, d=5, k=75):
"""deg!"""
bins = dict()
for i in range(0, k-1, d):
bins[i] = self.new_by_roll_range(np.deg2rad(i), np.deg2rad(i+d)).get_drpy()
angles, rpys = zip(*[(k,v) for k,v in sorted(bins.items()) if not np.any(np.isnan(v))])
angles = np.array(angles)
rpys = np.rad2deg(np.array(rpys))
return angles, rpys[:,0] # ROLL
def get_angle_pitches(self, d=5, k=75):
"""deg!"""
bins = dict()
for i in range(0, k-1, d):
bins[i] = self.new_by_pitch_range(np.deg2rad(i), np.deg2rad(i+d)).get_drpy()
angles, rpys = zip(*[(k,v) for k,v in sorted(bins.items()) if not np.any(np.isnan(v))])
angles = np.array(angles)
rpys = np.rad2deg(np.array(rpys))
return angles, rpys[:,1] # PITCH
def get_angle_yaws(self, d=5, k=75):
"""deg!"""
bins = dict()
for i in range(0, k-1, d):
bins[i] = self.new_by_yaw_range(np.deg2rad(i), np.deg2rad(i+d)).get_drpy()
angles, rpys = zip(*[(k,v) for k,v in sorted(bins.items()) if not np.any(np.isnan(v))])
angles = np.array(angles)
rpys = np.rad2deg(np.array(rpys))
return angles, rpys[:,2] # YAW
def get_bmae(self, d=5, k=75):
"""deg!"""
_, maes_deg = self.get_angle_maes(d, k)
count = sum(not np.isnan(mae) for mae in maes_deg) # number on nonempty bins
        if count != (k // d):
print("Warn: empty MAEs when computing BMAE!")
bmae = 1.0/float(count) * sum(maes_deg)
return bmae
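# Minimal usage sketch (names are illustrative; `di_dicts` is a list of
# dataset-item dicts and 'predictions.zip' a benchmark submission archive):
#   ed = EvaluationData()
#   ed.load_all(di_dicts, ZipFilePredictor, {'zip_file': 'predictions.zip'})
#   print(ed.new_easy().get_mae(), ed.new_hard().get_recall())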
class Plotter:
def __init__(self, subset_eds):
"""
subset_eds: dict which maps from name to evaluation data objects
"""
self.subset_eds = subset_eds
def get_maes_figure(self):
data = []
binsize = 5
for name, ed in self.subset_eds.items():
x, y = ed.get_angle_maes(d=binsize)
x = x + float(binsize)/2.0
data.append(go.Scatter(x=x, y=y, name=name))
layout = go.Layout(
xaxis=dict(
title='angle from frontal (deg), binsize = %d deg' % binsize,
nticks=16, # or tickvals,
titlefont=dict(
family='serif',
size=35,
),
tickfont=dict(
family='serif',
size=30
)
),
yaxis=dict(
title='MAE within bin (deg)',
titlefont=dict(
family='serif',
size=35,
),
tickfont=dict(
family='serif',
size=30
),
range=[-0.1,70]
),
margin=dict(l=80, r=0, t=10, b=85),
legend=dict(
x=0.05,
y=0.95,
font=dict(
family='serif',
size=30,
),
borderwidth=1
)
)
fig = go.Figure(data=data, layout=layout)
return fig
def get_recalls_figure(self):
data = []
binsize = 5
for name, ed in self.subset_eds.items():
x, y = ed.get_angle_recalls(d=binsize)
x = x + float(binsize)/2.0
data.append(go.Scatter(x=x, y=y, name=name))
layout = go.Layout(
xaxis=dict(
title='angle from frontal (deg), binsize = %d deg' % binsize,
nticks=16,
titlefont=dict(
family='serif',
size=35,
),
tickfont=dict(
family='serif',
size=30
)
),
yaxis=dict(
title='recall within bin',
titlefont=dict(
family='serif',
size=35,
),
tickfont=dict(
family='serif',
size=30
),
range=[-0.01,1.05]
),
margin=dict(l=80, r=0, t=10, b=85),
legend=dict(
x=0.87,
y=0.92,
# x=0.04,
# y=0.03,
font=dict(
family='serif',
size=25,
),
borderwidth=1,
# bgcolor = 'rgba(255,255,255,0.3)' #transparent bg
)
)
fig = go.Figure(data=data, layout=layout)
return fig
def get_rpys_figure(self):
# mae for RPY
data = []
binsize = 5
for name, ed in self.subset_eds.items():
x, y = ed.get_angle_rpys(d=binsize)
x = x + float(binsize)/2.0
data.append(go.Scatter(x=x, y=y[:,0], name=name + ' roll'))
data.append(go.Scatter(x=x, y=y[:,1], name=name + ' pitch'))
data.append(go.Scatter(x=x, y=y[:,2], name=name + ' yaw'))
layout = go.Layout(
xaxis=dict(
title='angle from frontal (deg), binsize = %d deg' % binsize,
nticks=16, # or tickvals,
titlefont=dict(
family='serif',
size=35,
),
tickfont=dict(
family='serif',
size=30
)
),
yaxis=dict(
title='MAE within bin (deg)',
titlefont=dict(
family='serif',
size=35,
),
tickfont=dict(
family='serif',
size=30
),
range=[-0.1,70]
),
margin=dict(l=80, r=0, t=10, b=85),
legend=dict(
x=0.05,
y=0.95,
font=dict(
family='serif',
size=30,
),
borderwidth=1
)
)
fig = go.Figure(data=data, layout=layout)
return fig
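# Illustrative rendering of one of the figures (assumes `subset_eds` maps
# subset names to EvaluationData objects):
#   fig = Plotter(subset_eds).get_recalls_figure()
#   plotly.offline.plot(fig)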
| 35.752381 | 190 | 0.533724 | 2,449 | 18,770 | 3.917926 | 0.127399 | 0.046274 | 0.018343 | 0.029807 | 0.566441 | 0.509015 | 0.45284 | 0.425221 | 0.413757 | 0.39531 | 0 | 0.021194 | 0.338892 | 18,770 | 524 | 191 | 35.820611 | 0.752035 | 0.079435 | 0 | 0.476804 | 0 | 0 | 0.057044 | 0.003869 | 0 | 0 | 0 | 0 | 0.002577 | 1 | 0.100515 | false | 0 | 0.028351 | 0.012887 | 0.229381 | 0.002577 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba6b4ad51525e09923ed5cb2a1d71dc3c0634e6a | 1,376 | py | Python | fw-rde/mnist/test_adver.py | morgankohler/FrankWolfe.jl | b878dc2c2038a3b4486d5f7cec47f1a8e024192e | [
"MIT"
] | null | null | null | fw-rde/mnist/test_adver.py | morgankohler/FrankWolfe.jl | b878dc2c2038a3b4486d5f7cec47f1a8e024192e | [
"MIT"
] | null | null | null | fw-rde/mnist/test_adver.py | morgankohler/FrankWolfe.jl | b878dc2c2038a3b4486d5f7cec47f1a8e024192e | [
"MIT"
] | null | null | null | import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from models import load_model, load_adfmodel
from PIL import Image
# GENERAL PARAMETERS
MODE = 'diag' # 'diag', 'half', or 'full'
# LOAD MODEL
print(os.getcwd())
adfmodel = load_adfmodel(mode=MODE)
model = load_model(path='mnist/mnist-convnet-avgpool-weights.hdf5')
#
# x = np.array(Image.open('mnist/results/untargeted_ktest/img.png'))
# x = (x - 37.96046)[:, :, 0]
#
# k = np.array(Image.open('mnist/results/untargeted_ktest/diag-mode-rate50-nx.png'))
# k = k[:, :, 0]
s = np.load('/home/Morgan/fw-rde/mnist/results/784.npy')
s = np.expand_dims(np.expand_dims(s, axis=0), axis=3)
x = np.load('/home/Morgan/fw-rde/mnist/results/x.npy')
print(np.max(x))
print(np.min(x))
noise = (1 - s)
rand = np.random.normal(size=s.shape)
noise = noise * rand / np.max(rand) * np.max(x)
new = x + noise
new[new>np.max(x)] = np.max(x)
new[new<np.min(x)] = np.min(x)
# new = (new - np.min(new)) / (np.max(new) - np.min(new)) * (np.max(x) - np.min(x)) + np.min(x)
print(np.max(new))
print(np.min(new))
# new = np.expand_dims(new, axis=0)
# new = np.expand_dims(new, axis=3)
plt.figure()
plt.imshow(new.squeeze(), cmap='gray', vmin=np.min(new), vmax=np.max(new))
plt.show()
# new =
pred = model.predict(new)
print(pred)
_=0
| 27.52 | 96 | 0.653343 | 241 | 1,376 | 3.684647 | 0.327801 | 0.050676 | 0.033784 | 0.023649 | 0.308559 | 0.283784 | 0.171171 | 0.171171 | 0 | 0 | 0 | 0.017903 | 0.147529 | 1,376 | 49 | 97 | 28.081633 | 0.73913 | 0.302326 | 0 | 0 | 0 | 0 | 0.142857 | 0.133929 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.233333 | 0 | 0.233333 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba6bc29f591786e97baca19f293cddeea2e887b4 | 2,210 | py | Python | dataApp.py | gauravpgaurav/CS412_HW5 | 0aee3fed7f0f1314d1f67ca59c73f137335b05fc | [
"MIT"
] | null | null | null | dataApp.py | gauravpgaurav/CS412_HW5 | 0aee3fed7f0f1314d1f67ca59c73f137335b05fc | [
"MIT"
] | null | null | null | dataApp.py | gauravpgaurav/CS412_HW5 | 0aee3fed7f0f1314d1f67ca59c73f137335b05fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 20:03:22 2018
@author: gauravpant
"""
import numpy as np
import pandas as pd
df=pd.read_csv('data/responses.csv', sep=',',header=0)
#f = open('data/responses.csv')
#csv_f = csv.reader(f)
#
#headers = []
#data = []
#
#
#for i, row in enumerate(csv_f):
# if i == 0:
# headers = row
# else:
# data.append(row)
#
#print(headers)
# x is your dataset
# x = numpy.random.rand(100, 5)
#numpy.random.shuffle(df)
#training, test = df[:80,:], df[80:,:]
# encode all categorical survey columns as integer codes
categorical_columns = [
    'Smoking', 'Alcohol', 'Punctuality', 'Lying', 'Internet usage',
    'Gender', 'Left - right handed', 'Education', 'Only child',
    'Village - town', 'House - block of flats',
]
for col in categorical_columns:
    df[col] = pd.Categorical(df[col])
    df[col] = df[col].cat.codes
#msk = np.random.rand(len(df)) < 0.6
#training = df[msk]
#other = df[~msk]
#msk2 = np.random.rand(len(other)) < 0.5
#dev = other[msk2]
#test = other[~msk2]
training_percent = 0.6
dev_test_percent = 0.2
np.random.seed(seed=None)
perm = np.random.permutation(df.index)
length = len(df.index)
training_end = int(training_percent * length)
dev_end = int(dev_test_percent * length) + training_end
training = df.loc[perm[:training_end]]
dev = df.loc[perm[training_end:dev_end]]
test = df.loc[perm[dev_end:]]
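# The permutation above gives an approximate 60/20/20 train/dev/test split;
# e.g. with 1010 survey rows this yields 606 training, 202 dev and 202 test.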
#print(training) | 26.309524 | 75 | 0.658824 | 339 | 2,210 | 4.247788 | 0.283186 | 0.099306 | 0.114583 | 0.047222 | 0.201389 | 0.188194 | 0.075 | 0.075 | 0.039583 | 0.039583 | 0 | 0.01821 | 0.130317 | 2,210 | 84 | 76 | 26.309524 | 0.731009 | 0.252489 | 0 | 0 | 0 | 0 | 0.318098 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba6f8c76a23c650a23be073851e814f43b243d61 | 3,388 | py | Python | src/analysis/directionAnalysis.py | mortbopet/NetCracker | 8b5c1dbe1780c111d1f6810d3ef13400f26f9cb0 | [
"MIT"
] | 9 | 2021-02-21T13:27:03.000Z | 2021-12-22T17:21:43.000Z | src/analysis/directionAnalysis.py | mortbopet/NetCracker | 8b5c1dbe1780c111d1f6810d3ef13400f26f9cb0 | [
"MIT"
] | null | null | null | src/analysis/directionAnalysis.py | mortbopet/NetCracker | 8b5c1dbe1780c111d1f6810d3ef13400f26f9cb0 | [
"MIT"
] | 5 | 2021-02-22T02:55:44.000Z | 2021-12-22T17:23:09.000Z | from src.switchbox import *
from src.point import *
from src.netcrackerformat import *
from src.sbhelper import *
from src.direction import *
from src.logger import *
from src.analysis.posBasedFilter import *
from src.analysis.analysispass import *
# ============================== Analysis results ==============================
DIRECTION_ANALYSIS_RES = "direction analysis" # Type: map containing the below results as keys
DIRECTION_ANALYSIS_RES_CARDINAL_PJS = "cardinal PJs" # Type: {Direction : {Point : [PIPJunction]}}
DIRECTION_ANALYSIS_RES_NON_CARDINAL_PJS = "non-cardinal PJs" # Type: {Direction : {Point : [PIPJunction]}}
# ==============================================================================
DIRECTION_RESULT_FILE = "direction_analysis"
class DirectionAnalysis(AnalysisPass):
def __init__(self):
super().__init__(
description="Determine source and sink location of in/out PIP junctions of a switchbox",
key="direction",
depends=[],
produces=[DIRECTION_ANALYSIS_RES]
)
def run(self, sb, debug=True):
dirPJs = {}
for d in Direction:
dirPJs[d] = {}
for pj in sb.PIPJunctions:
extOuts, extIns = sb.getExternalPJsForPJ(pj)
if len(extOuts) == 0: # and len(extIns) == 0:
continue
extListToConsider = extOuts if extOuts else extIns
# Consider the PJ which is furthest away
externalPJ = None
for extPjToConsider in extListToConsider:
if externalPJ is None:
externalPJ = extPjToConsider
else:
if sb.PJPosDifference(externalPJ).length < sb.PJPosDifference(extPjToConsider).length:
externalPJ = extPjToConsider
# Get the direction of the vector between this switchbox and the external PJ
extVector = sb.PJPosDifference(externalPJ)
posDifference = sb.PJPosDifference(externalPJ)
if posDifference not in dirPJs[extVector.dir]:
dirPJs[extVector.dir][posDifference] = []
dirPJs[extVector.dir][posDifference].append(pj)
# Create dictionaries for the wire counts of diagonal and rectilinear wires
cardinalPJDicts = {k: v for k, v in dirPJs.items() if k.isCardinal()}
nonCardinalPJDicts = {k: v for k, v in dirPJs.items() if not k.isCardinal()}
# Record analysis results
sb.results[DIRECTION_ANALYSIS_RES] = {}
sb.results[DIRECTION_ANALYSIS_RES][DIRECTION_ANALYSIS_RES_CARDINAL_PJS] = cardinalPJDicts
sb.results[DIRECTION_ANALYSIS_RES][DIRECTION_ANALYSIS_RES_NON_CARDINAL_PJS] = nonCardinalPJDicts
if(debug):
# Do debug printing
logResult(sb.name, DIRECTION_RESULT_FILE, "Global Direction Analysis debug output")
for k, v in dirPJs.items():
logResult(sb.name, DIRECTION_RESULT_FILE, "Direction: " + k.name)
for distance, pjs in v.items():
logResult(sb.name, DIRECTION_RESULT_FILE, str(distance) + ":")
for pj in pjs:
logResult(sb.name, DIRECTION_RESULT_FILE, pj.name, end=', ')
logResult(sb.name, DIRECTION_RESULT_FILE, "\n")
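# Illustrative usage (assumes `sb` is a parsed Switchbox instance):
#   DirectionAnalysis().run(sb)
#   cardinal = sb.results[DIRECTION_ANALYSIS_RES][DIRECTION_ANALYSIS_RES_CARDINAL_PJS]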
| 41.317073 | 111 | 0.61157 | 351 | 3,388 | 5.763533 | 0.31339 | 0.10084 | 0.088977 | 0.059318 | 0.290163 | 0.255067 | 0.157192 | 0.070193 | 0.02175 | 0 | 0 | 0.000807 | 0.2683 | 3,388 | 81 | 112 | 41.82716 | 0.815248 | 0.160862 | 0 | 0.035714 | 0 | 0 | 0.070646 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0.035714 | 0.160714 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba70c1544876182a4809d2b55655af0fcfecfac0 | 422 | py | Python | actions/get_project_components.py | AnushkaKamerkar/stackstorm-jira | 44063968dcfa2e117599b3afaa67007bade9e4ae | [
"Apache-2.0"
] | null | null | null | actions/get_project_components.py | AnushkaKamerkar/stackstorm-jira | 44063968dcfa2e117599b3afaa67007bade9e4ae | [
"Apache-2.0"
] | null | null | null | actions/get_project_components.py | AnushkaKamerkar/stackstorm-jira | 44063968dcfa2e117599b3afaa67007bade9e4ae | [
"Apache-2.0"
] | null | null | null | from lib.base import BaseJiraAction
from lib.formatters import to_project_dict
__all__ = [
'GetJiraProjectComponentsAction'
]
class GetJiraProjectComponentsAction(BaseJiraAction):
def run(self, project_key):
projects = self._client.project_components(project_key)
print(projects)
results = []
        for component in projects:  # avoid shadowing the `project_key` argument
            results.append(to_project_dict(project_key=component))
return results
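# Illustrative StackStorm invocation (the action alias is assumed to follow
# the pack's naming convention):
#   st2 run jira.get_project_components project_key=PROJ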
| 22.210526 | 64 | 0.78436 | 48 | 422 | 6.583333 | 0.520833 | 0.158228 | 0.082278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14218 | 422 | 18 | 65 | 23.444444 | 0.872928 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.384615 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba711fe318893ea8e97e7dc75d344bc0d3740047 | 2,773 | py | Python | mwcp_parsers/_blzpack.py | CybercentreCanada/assemblyline-service-configextractor | ab456ed6bac2ae60dea56890b0e5d0cc42c7c519 | [
"MIT"
] | 2 | 2021-06-18T14:53:21.000Z | 2021-07-03T11:45:42.000Z | mwcp_parsers/_blzpack.py | CybercentreCanada/assemblyline-service-configextractor | ab456ed6bac2ae60dea56890b0e5d0cc42c7c519 | [
"MIT"
] | 5 | 2020-11-04T16:06:38.000Z | 2022-01-28T16:17:38.000Z | mwcp_parsers/_blzpack.py | CybercentreCanada/assemblyline-service-configextractor | ab456ed6bac2ae60dea56890b0e5d0cc42c7c519 | [
"MIT"
] | 2 | 2021-05-30T11:37:25.000Z | 2021-06-24T12:57:35.000Z | #included from https://github.com/sysopfb/brieflz
import os
from ctypes import *
import binascii
import zlib
import struct
CURR_DIR = os.path.abspath(os.path.dirname(__file__))
LIB_PATH = os.path.join(CURR_DIR, 'blzpack_lib.so')
brieflz = cdll.LoadLibrary(LIB_PATH)
DEFAULT_BLOCK_SIZE = 1024 * 1024
def compress_data(data, blocksize, level):
compressed_data = ""
while len(data) > 0:
buf = create_string_buffer(data[:blocksize])
cb = c_int(len(buf))
cbOut = brieflz.blz_max_packed_size(blocksize)
packed = create_string_buffer(cbOut)
workmem = create_string_buffer(brieflz.blz_workmem_size_level(blocksize,1))
cbOut = c_int(cbOut)
retval = brieflz.blz_pack_level(byref(buf), byref(packed), cb, byref(workmem), level)
if retval > 0:
temp = packed.raw[:retval]
tempret = struct.pack(">IIIIII", 1651276314, level, len(temp), zlib.crc32(temp) % (1<<32), len(buf), zlib.crc32(data[:blocksize])%(1<<32)) + temp
compressed_data += tempret
else:
print("Compression Error")
return None
data = data[blocksize:]
return compressed_data
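# Note: each compressed block written above is preceded by a 24-byte
# big-endian header: magic 0x626C7A1A ('blz\x1a'), compression level, packed
# size, CRC32 of the packed data, depacked size and CRC32 of the raw block;
# decompress_data() below parses exactly this layout.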
def decompress_data(data, blocksize=DEFAULT_BLOCK_SIZE, level=1):
decompressed_data = b""
    max_packed_size = brieflz.blz_max_packed_size(blocksize)
(magic,level,packedsize,crc,hdr_depackedsize,crc2) = struct.unpack_from('>IIIIII', data)
data = data[24:]
while magic == 0x626C7A1A and len(data) > 0:
compressed_data = create_string_buffer(data[:packedsize])
workdata = create_string_buffer(blocksize)
depackedsize = brieflz.blz_depack(byref(compressed_data), byref(workdata), c_int(hdr_depackedsize))
if depackedsize != hdr_depackedsize:
print("Decompression error")
print("DepackedSize: "+str(depackedsize) + "\nHdrVal: "+str(hdr_depackedsize))
return None
decompressed_data += workdata.raw[:depackedsize]
data = data[packedsize:]
if len(data) > 0:
(magic,level,packedsize,crc,hdr_depackedsize,crc2) = struct.unpack_from('>IIIIII', data)
data = data[24:]
else:
break
return decompressed_data
def main():
#blocksize = DEFAULT_BLOCK_SIZE
blocksize = 100
level = 1
data = "This is a test of brieflz compression"*100
retval = compress_data(data, blocksize, level)
if retval != None:
print("Compression SUCCESS!\nCompressed Data: ")
print(binascii.hexlify(retval))
retval = decompress_data(retval, blocksize, level)
if retval != None and retval == data:
print("Decompress SUCCESS!\nDecompress Data: ")
print(retval)
if __name__ == "__main__":
main()
| 36.973333 | 157 | 0.658853 | 332 | 2,773 | 5.295181 | 0.298193 | 0.040956 | 0.051195 | 0.028441 | 0.180887 | 0.125142 | 0.088737 | 0.088737 | 0.088737 | 0.088737 | 0 | 0.024732 | 0.227191 | 2,773 | 74 | 158 | 37.472973 | 0.795614 | 0.028128 | 0 | 0.126984 | 0 | 0 | 0.080579 | 0 | 0 | 0 | 0.003713 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.079365 | 0 | 0.190476 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba7602227a8f014510f10efc052090714dcb5668 | 8,261 | py | Python | home-assistant/custom_components/yahoofinance/__init__.py | square-spade/SmartHouse | 1566fe6153321908ff4cda48f6ff5cdf5de8fe67 | [
"MIT"
] | 136 | 2019-06-27T08:11:47.000Z | 2022-03-11T12:26:53.000Z | home-assistant/custom_components/yahoofinance/__init__.py | square-spade/SmartHouse | 1566fe6153321908ff4cda48f6ff5cdf5de8fe67 | [
"MIT"
] | 5 | 2020-05-30T00:19:22.000Z | 2022-03-25T18:49:47.000Z | home-assistant/custom_components/yahoofinance/__init__.py | square-spade/SmartHouse | 1566fe6153321908ff4cda48f6ff5cdf5de8fe67 | [
"MIT"
] | 63 | 2019-07-15T21:11:58.000Z | 2022-03-13T09:43:24.000Z | """
The Yahoo finance component.
https://github.com/iprak/yahoofinance
"""
from datetime import timedelta
import logging
from typing import Union
import async_timeout
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
import voluptuous as vol
from .const import (
BASE,
CONF_DECIMAL_PLACES,
CONF_SHOW_TRENDING_ICON,
CONF_SYMBOLS,
CONF_TARGET_CURRENCY,
DATA_REGULAR_MARKET_PRICE,
DEFAULT_CONF_SHOW_TRENDING_ICON,
DEFAULT_DECIMAL_PLACES,
DOMAIN,
HASS_DATA_CONFIG,
HASS_DATA_COORDINATOR,
NUMERIC_DATA_KEYS,
SERVICE_REFRESH,
STRING_DATA_KEYS,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_SCAN_INTERVAL = timedelta(hours=6)
MINIMUM_SCAN_INTERVAL = timedelta(seconds=30)
WEBSESSION_TIMEOUT = 15
BASIC_SYMBOL_SCHEMA = vol.All(cv.string, vol.Upper)
COMPLEX_SYMBOL_SCHEMA = vol.All(
dict,
vol.Schema(
{
vol.Required("symbol"): BASIC_SYMBOL_SCHEMA,
vol.Optional(CONF_TARGET_CURRENCY): BASIC_SYMBOL_SCHEMA,
}
),
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_SYMBOLS): vol.All(
cv.ensure_list,
[vol.Any(BASIC_SYMBOL_SCHEMA, COMPLEX_SYMBOL_SCHEMA)],
),
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): vol.Any("none", "None", cv.positive_time_period),
vol.Optional(CONF_TARGET_CURRENCY): vol.All(cv.string, vol.Upper),
vol.Optional(
CONF_SHOW_TRENDING_ICON, default=DEFAULT_CONF_SHOW_TRENDING_ICON
): cv.boolean,
vol.Optional(
CONF_DECIMAL_PLACES, default=DEFAULT_DECIMAL_PLACES
): vol.Coerce(int),
}
)
},
# The complete HA configuration is passed down to`async_setup`, allow the extra keys.
extra=vol.ALLOW_EXTRA,
)
def parse_scan_interval(scan_interval: Union[timedelta, str]) -> timedelta:
"""Parse and validate scan_interval."""
    if isinstance(scan_interval, str):
        if scan_interval.lower() == "none":
            scan_interval = None
        else:
            raise vol.Invalid(
                f"Invalid {CONF_SCAN_INTERVAL} specified: {scan_interval}"
            )
elif scan_interval < MINIMUM_SCAN_INTERVAL:
raise vol.Invalid("Scan interval should be at least 30 seconds.")
return scan_interval
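# Examples (illustrative):
#   parse_scan_interval("none") -> None
#   parse_scan_interval(timedelta(seconds=10)) raises vol.Invalid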
def normalize_input(defined_symbols):
"""Normalize input and remove duplicates."""
symbols = set()
normalized_symbols = []
for value in defined_symbols:
if isinstance(value, str):
            if value not in symbols:
symbols.add(value)
normalized_symbols.append({"symbol": value})
else:
if not (value["symbol"] in symbols):
symbols.add(value["symbol"])
normalized_symbols.append(value)
return (list(symbols), normalized_symbols)
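# Example (illustrative): plain tickers and dicts are merged and de-duplicated
# (the symbol list order is unspecified because a set is used internally):
#   normalize_input(["IBM", {"symbol": "IBM"}, {"symbol": "XYZ"}])
#   -> (["IBM", "XYZ"], [{"symbol": "IBM"}, {"symbol": "XYZ"}])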
async def async_setup(hass, config) -> bool:
domain_config = config.get(DOMAIN, {})
defined_symbols = domain_config.get(CONF_SYMBOLS, [])
symbols, normalized_symbols = normalize_input(defined_symbols)
domain_config[CONF_SYMBOLS] = normalized_symbols
scan_interval = parse_scan_interval(domain_config.get(CONF_SCAN_INTERVAL))
# Populate parsed value into domain_config
domain_config[CONF_SCAN_INTERVAL] = scan_interval
coordinator = YahooSymbolUpdateCoordinator(symbols, hass, scan_interval)
# Refresh coordinator to get initial symbol data
_LOGGER.info(
f"Requesting data from coordinator with update interval of {scan_interval}."
)
await coordinator.async_refresh()
# Pass down the coordinator and config to platforms.
hass.data[DOMAIN] = {
HASS_DATA_COORDINATOR: coordinator,
HASS_DATA_CONFIG: domain_config,
}
async def handle_refresh_symbols(_call):
"""Refresh symbol data."""
_LOGGER.info("Processing refresh_symbols")
await coordinator.async_request_refresh()
hass.services.async_register(
DOMAIN,
SERVICE_REFRESH,
handle_refresh_symbols,
)
if not coordinator.last_update_success:
_LOGGER.debug("Coordinator did not report any data, requesting async_refresh")
hass.async_create_task(coordinator.async_request_refresh())
hass.async_create_task(
discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config)
)
return True
class YahooSymbolUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage Yahoo finance data update."""
@staticmethod
def parse_symbol_data(symbol_data):
"""Return data pieces which we care about, use 0 for missing numeric values."""
data = {}
# get() ensures that we have an entry in symbol_data.
for value in NUMERIC_DATA_KEYS:
key = value[0]
data[key] = symbol_data.get(key, 0)
for key in STRING_DATA_KEYS:
data[key] = symbol_data.get(key)
return data
def __init__(self, symbols, hass, update_interval) -> None:
"""Initialize."""
self._symbols = symbols
self.data = None
self.loop = hass.loop
self.websession = async_get_clientsession(hass)
super().__init__(
hass,
_LOGGER,
name="YahooSymbolUpdateCoordinator",
update_method=self._async_update,
update_interval=update_interval,
)
def get_symbols(self):
"""Return symbols tracked by the coordinator."""
return self._symbols
def add_symbol(self, symbol):
"""Add symbol to the symbol list."""
if symbol not in self._symbols:
self._symbols.append(symbol)
# Request a refresh to get data for the missing symbol.
# This would have been called while data for sensor was being parsed.
self.hass.async_create_task(self.async_request_refresh())
_LOGGER.info(f"Added symbol {symbol} and requested update")
return True
return False
async def get_json(self):
"""Get the JSON data."""
json = None
async with async_timeout.timeout(WEBSESSION_TIMEOUT, loop=self.loop):
response = await self.websession.get(BASE + ",".join(self._symbols))
json = await response.json()
_LOGGER.debug("Data = %s", json)
return json
async def _async_update(self):
"""
Return updated data if new JSON is valid.
Don't catch any exceptions, they get properly handled in the caller
(DataUpdateCoordinator.async_refresh) which also updates last_update_success.
UpdateFailed is raised if JSON is invalid.
"""
json = await self.get_json()
if json is None:
raise UpdateFailed("No data received")
if "quoteResponse" not in json:
raise UpdateFailed("Data invalid, 'quoteResponse' not found.")
quoteResponse = json["quoteResponse"] # pylint: disable=invalid-name
if "error" in quoteResponse:
if quoteResponse["error"] is not None:
raise UpdateFailed(quoteResponse["error"])
if "result" not in quoteResponse:
raise UpdateFailed("Data invalid, no 'result' found")
result = quoteResponse["result"]
if result is None:
raise UpdateFailed("Data invalid, 'result' is None")
data = {}
for symbol_data in result:
symbol = symbol_data["symbol"]
data[symbol] = self.parse_symbol_data(symbol_data)
_LOGGER.debug(
"Updated %s (%s)",
symbol,
data[symbol][DATA_REGULAR_MARKET_PRICE],
)
_LOGGER.info("Data updated")
return data
| 31.056391 | 89 | 0.642658 | 926 | 8,261 | 5.49892 | 0.241901 | 0.058916 | 0.015711 | 0.015711 | 0.105656 | 0.028672 | 0 | 0 | 0 | 0 | 0 | 0.001666 | 0.273575 | 8,261 | 265 | 90 | 31.173585 | 0.846859 | 0.093209 | 0 | 0.103825 | 0 | 0 | 0.082115 | 0.003937 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0 | 0.060109 | 0 | 0.147541 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba78d40a2e3b13b764928d8aba27030d20c5d6fc | 2,485 | py | Python | dev/backend/src/mod_audio_recognition/bean.py | dannyphan2910/music-lyrics | 19ca60793648a12f62ed92f3198c1e8cf12a4af4 | [
"MIT"
] | null | null | null | dev/backend/src/mod_audio_recognition/bean.py | dannyphan2910/music-lyrics | 19ca60793648a12f62ed92f3198c1e8cf12a4af4 | [
"MIT"
] | 6 | 2021-03-10T12:32:51.000Z | 2022-03-02T06:32:13.000Z | dev/backend/src/mod_audio_recognition/bean.py | dannyphan2910/music-lyrics | 19ca60793648a12f62ed92f3198c1e8cf12a4af4 | [
"MIT"
] | 1 | 2021-04-23T15:55:54.000Z | 2021-04-23T15:55:54.000Z | import json
import tempfile
from acrcloud.recognizer import ACRCloudRecognizeType
from acrcloud.recognizer import ACRCloudRecognizer
from mod_track_search.bean import get_track_id
from model.Track import Track
def get_tracks_from_audio(file):
response = ({}, 404)
if file is None or file == '':
print('invalid audio file')
response = ({}, 400)
else:
config = {
'host': 'identify-us-west-2.acrcloud.com',
'access_key': os.environ.get('ACCESS_KEY'),
'access_secret': os.environ.get('ACCESS_SECRET'),
'recognize_type': ACRCloudRecognizeType.ACR_OPT_REC_BOTH,
'debug': False,
'timeout': 10 # seconds
}
        '''This module can recognize most audio/video files via ACRCloud.
        Audio: mp3, wav, m4a, flac, aac, amr, ape, ogg ...
        Video: mp4, mkv, wmv, flv, ts, avi ...'''
recognizer = ACRCloudRecognizer(config)
f = tempfile.NamedTemporaryFile()
f.write(file.read())
duration = ACRCloudRecognizer.get_duration_ms_by_file(str(f.name))
print("duration_ms=" + str(duration))
if duration // 1000 > 10:
max_duration = max(10, (duration * 20 // 100) // 1000)
else:
max_duration = 10
result = json.loads(recognizer.recognize_by_file(str(f.name), 0, max_duration))
print(result)
f.close()
tracks = process_metadata(result)
data = {
'data': tracks
}
response = (data, 200)
print(json.dumps(response[0], indent=4))
return response
def process_metadata(result):
tracks = []
if result['status']['msg'] == "Success":
tracks_dict = result['metadata']['music']
for item in tracks_dict:
if 'spotify' in item['external_metadata']:
track = get_track_id(item['external_metadata']['spotify']['track']['id'])
if track is None:
artist = ''
for this_artist in item['artists']:
artist += this_artist['name'] + ','
artist = artist[:len(artist) - 1]
track = Track(item['title'], artist, item['album']['name'])
track_to_append = {
'track': track.get(),
'score': item['score']
}
tracks.append(track_to_append)
return tracks
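# Illustrative caller (e.g. from a Flask/FastAPI upload handler; the field
# name 'audio' is a placeholder):
#   payload, status = get_tracks_from_audio(request.files.get('audio'))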
| 24.85 | 89 | 0.55171 | 267 | 2,485 | 4.988764 | 0.434457 | 0.015766 | 0.033033 | 0.042042 | 0.021021 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02266 | 0.325151 | 2,485 | 99 | 90 | 25.10101 | 0.771616 | 0.002817 | 0 | 0.034483 | 0 | 0 | 0.11631 | 0.013555 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.103448 | 0 | 0.172414 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba7f1880c2b90a49559f5960814c055b80809594 | 5,201 | py | Python | federatedscope/gfl/fedsageplus/utils.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | 9 | 2022-03-24T07:59:37.000Z | 2022-03-31T06:47:52.000Z | federatedscope/gfl/fedsageplus/utils.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | 1 | 2022-03-28T13:52:17.000Z | 2022-03-28T13:52:17.000Z | federatedscope/gfl/fedsageplus/utils.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | null | null | null | import torch
from torch_geometric.data import Data
from torch_geometric.transforms import BaseTransform
from torch_geometric.utils import to_networkx, from_networkx
import networkx as nx
import numpy as np
from federatedscope.core.configs.config import global_cfg
class HideGraph(BaseTransform):
r"""
    Generate an impaired graph, with labels and features for training NeighGen,
    by hiding a portion of the validation-set nodes from the raw graph.
Arguments:
hidden_portion (int): hidden_portion of validation set.
        num_pred (int): hyperparameter capping the number of predicted missing neighbors per node
:returns:
filled_data : impaired graph with attribute "num_missing"
:rtype:
nx.Graph
"""
def __init__(self, hidden_portion=0.5, num_pred=5):
self.hidden_portion = hidden_portion
self.num_pred = num_pred
def __call__(self, data):
val_ids = torch.where(data.val_mask == True)[0]
hide_ids = np.random.choice(val_ids,
int(len(val_ids) * self.hidden_portion),
replace=False)
remaining_mask = torch.ones(data.num_nodes, dtype=torch.bool)
remaining_mask[hide_ids] = False
remaining_nodes = torch.where(remaining_mask == True)[0].numpy()
data.ids_missing = [[] for _ in range(data.num_nodes)]
G = to_networkx(data,
node_attrs=[
'x', 'y', 'train_mask', 'val_mask', 'test_mask',
'index_orig', 'ids_missing'
],
to_undirected=True)
for missing_node in hide_ids:
neighbors = G.neighbors(missing_node)
for i in neighbors:
G.nodes[i]['ids_missing'].append(missing_node)
for i in G.nodes:
ids_missing = G.nodes[i]['ids_missing']
del G.nodes[i]['ids_missing']
G.nodes[i]['num_missing'] = np.array([len(ids_missing)],
dtype=np.float32)
if len(ids_missing) > 0:
if len(ids_missing) <= self.num_pred:
G.nodes[i]['x_missing'] = np.vstack(
(data.x[ids_missing],
np.zeros((self.num_pred - len(ids_missing),
data.x.shape[1]))))
else:
G.nodes[i]['x_missing'] = data.x[
ids_missing[:self.num_pred]]
else:
G.nodes[i]['x_missing'] = np.zeros(
(self.num_pred, data.x.shape[1]))
return from_networkx(nx.subgraph(G, remaining_nodes))
def __repr__(self):
return f'{self.__class__.__name__}({self.hidden_portion})'
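# --- Hedged usage sketch (illustrative, not part of the original file) ---
def _example_hide_graph(data):
    """Minimal sketch, assuming `data` is a PyG Data object that already
    carries x/y/train_mask/val_mask/test_mask/index_orig attributes."""
    transform = HideGraph(hidden_portion=0.5, num_pred=5)
    # returns the impaired graph with `num_missing`/`x_missing` per node
    return transform(data)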
def FillGraph(impaired_data, original_data, pred_missing, pred_feats,
num_pred):
# Mend the original data
original_data = original_data.detach().cpu()
new_features = original_data.x
new_edge_index = original_data.edge_index.T
pred_missing = pred_missing.detach().cpu().numpy()
pred_feats = pred_feats.detach().cpu().reshape(
(-1, num_pred, original_data.num_node_features))
start_id = original_data.num_nodes
for node in range(len(pred_missing)):
num_fill_node = np.around(pred_missing[node]).astype(np.int32).item()
if num_fill_node > 0:
new_ids_i = np.arange(start_id,
start_id + min(num_pred, num_fill_node))
org_id = impaired_data.index_orig[node]
org_node = torch.where(
original_data.index_orig == org_id)[0].item()
new_edges = torch.tensor([[org_node, fill_id]
for fill_id in new_ids_i],
dtype=torch.int64)
new_features = torch.vstack(
(new_features, pred_feats[node][:num_fill_node]))
new_edge_index = torch.vstack((new_edge_index, new_edges))
start_id = start_id + min(num_pred, num_fill_node)
new_y = torch.zeros(new_features.shape[0], dtype=torch.int64)
new_y[:original_data.num_nodes] = original_data.y
filled_data = Data(
x=new_features,
edge_index=new_edge_index.T,
train_idx=torch.where(original_data.train_mask == True)[0],
valid_idx=torch.where(original_data.val_mask == True)[0],
test_idx=torch.where(original_data.test_mask == True)[0],
y=new_y,
)
return filled_data
@torch.no_grad()
def GraphMender(model, impaired_data, original_data):
r"""Mend the graph with generation model
Arguments:
model (torch.nn.module): trained generation model
impaired_data (PyG.Data): impaired graph
original_data (PyG.Data): raw graph
:returns:
filled_data : Graph after Data Enhancement
:rtype:
PyG.data
"""
device = impaired_data.x.device
model = model.to(device)
pred_missing, pred_feats, _ = model(impaired_data)
return FillGraph(impaired_data, original_data, pred_missing, pred_feats,
global_cfg.fedsageplus.num_pred) | 38.813433 | 87 | 0.594886 | 650 | 5,201 | 4.469231 | 0.22 | 0.070224 | 0.016867 | 0.030293 | 0.179346 | 0.09432 | 0.060585 | 0.060585 | 0.060585 | 0.024096 | 0 | 0.006362 | 0.304941 | 5,201 | 134 | 88 | 38.813433 | 0.797234 | 0.126322 | 0 | 0.021277 | 0 | 0 | 0.038003 | 0.010794 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053191 | false | 0 | 0.074468 | 0.010638 | 0.180851 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba85db38c48467a473b1f5022b7dfe1c25bcc9e4 | 15,516 | py | Python | adversarials/test_attack.py | vergilus/NJUNMT-pytorch | 85cc8d4f1aae04541d2e30ec2ca2b9b9fe2bea60 | [
"MIT"
] | 2 | 2020-02-16T02:56:55.000Z | 2020-03-08T15:04:31.000Z | adversarials/test_attack.py | vergilus/NJUNMT-pytorch | 85cc8d4f1aae04541d2e30ec2ca2b9b9fe2bea60 | [
"MIT"
] | null | null | null | adversarials/test_attack.py | vergilus/NJUNMT-pytorch | 85cc8d4f1aae04541d2e30ec2ca2b9b9fe2bea60 | [
"MIT"
] | null | null | null | from adversarials.adversarial_utils import *
from adversarials import attacker
from src.utils.logging import *
from src.utils.common_utils import *
from src.data.dataset import TextLineDataset
from src.data.data_iterator import DataIterator
from src.models import build_model
from src.decoding import beam_search
import argparse
import torch
parser = argparse.ArgumentParser()
#
parser.add_argument("--source_path", type=str, default="/home/public_data/nmtdata/nist_zh-en_1.34m/test/mt02.src", # /zouw/pycharm_project_NMT_torch/adversarials/attack_zh2en_tf_log/mt02/perturbed_src
help="the path for input files")
parser.add_argument("--model_path", type=str,
default="/home/zouw/pycharm_project_NMT_torch/adversarials/attack_zh2en_tf_log/ACmodel.final")
parser.add_argument("--config_path", type=str,
default="/home/zouw/pycharm_project_NMT_torch/configs/nist_zh2en_attack.yaml",
help="the path to attack config file.")
parser.add_argument("--save_to", type=str, default="/home/zouw/pycharm_project_NMT_torch/adversarials/attack_zh2en_tf_log",
help="the path for result saving.")
parser.add_argument("--batch_size", type=int, default=50,
help="test batch_size")
parser.add_argument("--unk_ignore", action="store_true", default=False,
help="Don't replace target words using UNK (default as false)")
parser.add_argument("--use_gpu", action="store_true", default=False,
help="Whether to use GPU.(default as false)")
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
"""
pad seqs into torch tensor
:param seqs_x:
:param seqs_y:
:param cuda:
:param batch_first:
:return:
"""
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if batch_first is False:
x_np = np.transpose(x_np, [1, 0])
x = torch.tensor(x_np)
if cuda is True:
x = x.cuda()
return x
seqs_x = list(map(lambda s: [BOS] + s + [EOS], seqs_x))
x = _np_pad_batch_2D(samples=seqs_x, pad=PAD,
cuda=cuda, batch_first=batch_first)
if seqs_y is None:
return x
seqs_y = list(map(lambda s: [BOS] + s + [EOS], seqs_y))
y = _np_pad_batch_2D(seqs_y, pad=PAD,
cuda=cuda, batch_first=batch_first)
return x, y
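# --- Hedged example (illustrative, not part of the original file) ---
# prepare_data wraps each sequence as [BOS, ..., EOS] and right-pads with
# PAD to the longest sequence in the batch; assuming BOS=1, EOS=2, PAD=0:
#   prepare_data([[4, 5], [6]]) -> tensor([[1, 4, 5, 2],
#                                          [1, 6, 2, 0]])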
def calculate_cummulate_survive(max_len, gamma, surrogate_step_survival):
"""
    estimate overall surrogate survival values
    :param max_len: maximum sequence length to estimate for
    :param gamma: discount factor used in the reinforced rewards
    :param surrogate_step_survival: surrogate single-step survival reward
    :return: a list of cumulative survival values for every step, where
        estimate_accumulate_survive[t] is the accumulated survival of a
        sentence of length t
"""
estimate_accumulate_survive = [surrogate_step_survival]
for i in range(1,max_len):
estimate_accumulate_survive.append(
estimate_accumulate_survive[i-1]*gamma+surrogate_step_survival
)
return torch.tensor(estimate_accumulate_survive)
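# --- Hedged note (illustrative, not part of the original file) ---
# The recurrence s[t] = gamma * s[t-1] + r with s[0] = r has the closed
# form s[t] = r * (1 - gamma**(t + 1)) / (1 - gamma) for gamma != 1
# (a truncated geometric series); the loop above simply materialises it.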
def test_attack():
"""
    during the test phase, the attacker modifies inputs without constraints
:return:
"""
timer = Timer()
args = parser.parse_args()
with open(args.config_path) as f:
        configs = yaml.safe_load(f)  # yaml.load without a Loader is removed in PyYAML >= 6
attack_configs = configs["attack_configs"]
attacker_configs = configs["attacker_configs"]
attacker_model_configs = attacker_configs["attacker_model_configs"]
# for modification
GlobalNames.SEED = attack_configs["seed"]
torch.manual_seed(GlobalNames.SEED)
# the Global variable of USE_GPU is mainly used for environments
GlobalNames.USE_GPU = args.use_gpu
INFO("build vocabularies and data set")
with open(attack_configs["victim_configs"], "r") as victim_f:
        victim_configs = yaml.safe_load(victim_f)
data_configs = victim_configs["data_configs"]
src_vocab = Vocabulary(**data_configs["vocabularies"][0])
trg_vocab = Vocabulary(**data_configs["vocabularies"][1])
print("attack ", args.source_path)
    dataset = TextLineDataset(data_path=args.source_path,
                              vocabulary=src_vocab)
    test_iterator = DataIterator(dataset=dataset,
batch_size=args.batch_size,
use_bucket=attack_configs["use_bucket"],
buffer_size=attack_configs["buffer_size"],
numbering=True)
total_amount = len(test_iterator)
test_iterator = test_iterator.build_generator()
_, w2vocab = load_or_extract_near_vocab(config_path=attack_configs["victim_configs"],
model_path=attack_configs["victim_model"],
init_perturb_rate=attack_configs["init_perturb_rate"],
save_to=os.path.join(args.save_to, "near_vocab"),
save_to_full=os.path.join(args.save_to, "full_near_vocab"),
top_reserve=12,
emit_as_id=True)
if attack_configs["pinyin_data"] != "" and not args.unk_ignore:
# for Chinese we adopt
INFO("collect pinyin data for gen_UNK, this would take a while")
char2pyDict, py2charDict = collect_pinyin(pinyin_path=attack_configs["pinyin_data"],
src_path=data_configs["train_data"][0])
else:
INFO("test without pinyin")
char2pyDict, py2charDict = None, None
INFO("build and reload attacker model parameters")
global_attacker = attacker.Attacker(src_vocab.max_n_words,
**attacker_model_configs)
attacker_param = load_model_parameters(args.model_path)
global_attacker.eval()
global_attacker.load_state_dict(attacker_param)
INFO("Build and reload translator...")
nmt_model = build_model(n_src_vocab=src_vocab.max_n_words,
n_tgt_vocab=trg_vocab.max_n_words,
**victim_configs["model_configs"])
nmt_model.eval()
nmt_param = load_model_parameters(attack_configs["victim_model"])
nmt_model.load_state_dict(nmt_param)
if args.use_gpu:
# collect available devices and distribute env on the available gpu
global_attacker.cuda()
nmt_model = nmt_model.cuda()
result_indices = [] # to resume ordering
origin_results = [] # original translation
perturbed_seqs = [] # adversarial src
perturbed_results = [] # adversarial translation
overall_values = [] # attacker value estimation on first step: indicates overall degradation
# translate all sentences and collect all adversarial src
with open(os.path.join(args.save_to, "perturbed_src"), "w") as perturbed_src, \
open(os.path.join(args.save_to, "perturbed_trans"), "w") as perturbed_trans, \
open(os.path.join(args.save_to, "origin_trans"), "w") as origin_trans:
i = 0
timer.tic()
for batch in test_iterator:
i += 1
if i:
print(i * args.batch_size, "/", total_amount, " finished")
numbers, seqs_x = batch
# print(seqs_x)
batch_size = len(seqs_x)
x = prepare_data(seqs_x=seqs_x, cuda=args.use_gpu)
x_mask = x.detach().eq(PAD).long()
cummulate_survive = calculate_cummulate_survive(max_len=x.shape[1],
gamma=attack_configs["gamma"],
surrogate_step_survival=0)
# x_len = (1 - x_mask).sum(dim=-1).float()
with torch.no_grad():
word_ids = beam_search(nmt_model=nmt_model, beam_size=5, max_steps=150,
src_seqs=x, alpha=-1.0)
word_ids = word_ids.cpu().numpy().tolist() # in shape [batch_size, beam_size, max_len]
# remove PAD and append result with its indices
# we only take top-one final results from beam
for sent_t in word_ids:
top_result = [trg_vocab.id2token(wid) for wid in sent_t[0] if wid not in [PAD, EOS]]
origin_results.append(trg_vocab.tokenizer.detokenize(top_result))
result_indices += numbers
# calculate adversarial value functions for each src position
attack_results = []
critic_results = []
with torch.no_grad():
for t in range(1, x.shape[1]-1):
attack_out, critic_out = global_attacker(x, label=x[:, t-1:t+1])
attack_results.append(attack_out.argmax(dim=1).unsqueeze(dim=1))
# print(mask_len.shape, critic_out.shape)
critic_results.append(critic_out)
attack_results = torch.cat(attack_results, dim=1)
temp_mask = (1-x_mask)[:, 1:x.shape[1]-1]
attack_results *= temp_mask
critic_results = torch.cat(critic_results, dim=1)*(1-x_mask)[:, 1:x.shape[1]-1].float()
critic_results *= temp_mask.float()
# critic_results = critic_results.cpu().numpy().tolist()
# print(attack_results)
# print(critic_results)
# get adversarial samples for the src
with torch.no_grad():
perturbed_x_ids = x.clone().detach()
batch_size, max_steps = x.shape
for t in range(1, max_steps - 1): # ignore BOS and EOS
inputs = x[:, t - 1:t + 1]
attack_out, critic_out = global_attacker(x=perturbed_x_ids, label=inputs)
actions = attack_out.argmax(dim=-1)
if t == 1:
overall_values += (critic_out - cummulate_survive[-t-2]).cpu().numpy().tolist()
# action is masked if the corresponding value estimation is negative
actions *= (critic_out-cummulate_survive[-t-2]).gt(0).squeeze().long() # - cummulate_survive[-t-2]
target_of_step = []
for batch_index in range(batch_size):
word_id = inputs[batch_index][1]
# select least similar candidate based on victim embedding
target_word_id = w2vocab[word_id.item()][0] #[np.random.choice(len(w2vocab[word_id.item()]), 1)[0]]
# select nearest candidate based on victim embedding
# choose least similar candidates
# origin_emb = global_attacker.src_embedding(word_id)
# candidates_emb = global_attacker.src_embedding(torch.tensor(w2vocab[word_id.item()]).cuda())
# nearest = candidates_emb.matmul(origin_emb)\
# .div((candidates_emb*candidates_emb).sum(dim=-1))\
# .argmax(dim=-1).item()
# target_word_id = w2vocab[word_id.item()][nearest]
if args.unk_ignore and target_word_id == UNK:
# undo this attack if UNK is set to be ignored
target_word_id = word_id.item()
target_of_step += [target_word_id]
# override the perturbed results with choice from candidates
perturbed_x_ids[:, t] *= (1 - actions)
adjustification_ = torch.tensor(target_of_step, device=inputs.device)
if GlobalNames.USE_GPU:
adjustification_ = adjustification_.cuda()
perturbed_x_ids[:, t] += adjustification_ * actions
# re-tokenization and validate UNK
inputs = perturbed_x_ids.cpu().numpy().tolist()
new_inputs = []
for origin_indices, indices in zip(x.cpu().numpy().tolist(), inputs):
new_line_token = [] # for output files
# remove BOS, EOS, PAD, and detokenize to sentence
for origin_word_id, word_id in zip(origin_indices, indices):
if word_id not in [BOS, EOS, PAD]:
if word_id == UNK and origin_word_id != UNK:
# validate UNK induced by attack and append
new_line_token.append(gen_UNK(src_token=src_vocab.id2token(origin_word_id),
vocab=src_vocab,
char2pyDict=char2pyDict, py2charDict=py2charDict))
else:
new_line_token.append(src_vocab.id2token(word_id))
new_line_token = src_vocab.tokenizer.detokenize(new_line_token)
perturbed_seqs.append(new_line_token)
# tokenization must ignore original <UNK>
if not hasattr(src_vocab.tokenizer, "bpe"):
new_line = new_line_token.strip().split()
else:
new_token = []
for w in new_line_token.strip().split():
if w != src_vocab.id2token(UNK):
new_token.append(src_vocab.tokenizer.bpe.segment_word(w))
else:
new_token.append([w])
new_line = sum(new_token, [])
new_line = [src_vocab.token2id(t) for t in new_line]
new_inputs.append(new_line)
# override perturbed_x_ids
perturbed_x_ids = prepare_data(seqs_x=new_inputs,
cuda=args.use_gpu)
# batch translate perturbed_src
word_ids = beam_search(nmt_model=nmt_model, beam_size=5, max_steps=150,
src_seqs=perturbed_x_ids, alpha=-1.0)
word_ids = word_ids.cpu().numpy().tolist() # in shape [batch_size, beam_size, max_len]
# translate adversarial inputs
for sent_t in word_ids:
top_result = [trg_vocab.id2token(wid) for wid in sent_t[0] if wid not in [PAD, EOS]]
perturbed_results.append(trg_vocab.tokenizer.detokenize(top_result))
print(timer.toc(return_seconds=True), "sec")
# resume original ordering and output to files
origin_order = np.argsort(result_indices).tolist()
for line in [origin_results[ii] for ii in origin_order]:
origin_trans.write(line+"\n")
for line, value in [(perturbed_seqs[ii], overall_values[ii]) for ii in origin_order]:
perturbed_src.write(line+"\n") # +" "+str(value)
for line in [perturbed_results[ii] for ii in origin_order]:
perturbed_trans.write(line+"\n")
if __name__ == "__main__":
test_attack()
| 51.892977 | 200 | 0.586685 | 1,859 | 15,516 | 4.629909 | 0.182894 | 0.013245 | 0.012083 | 0.008133 | 0.22807 | 0.170326 | 0.139886 | 0.11967 | 0.07552 | 0.07552 | 0 | 0.009458 | 0.318574 | 15,516 | 298 | 201 | 52.067114 | 0.804597 | 0.162477 | 0 | 0.086364 | 0 | 0 | 0.08439 | 0.0231 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.045455 | 0 | 0.081818 | 0.013636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba86b3218164d82acdf2e535950af7f6a5fd6761 | 1,845 | py | Python | gs-engine/gse_api_server/controller/service_mesh.py | gedge-platform/GEdge-Platform | b5cbe63089cf3d3263683cbcd5ec3d10ad85779b | [
"Apache-2.0"
] | 13 | 2020-10-14T07:45:08.000Z | 2021-10-01T08:19:56.000Z | gs-engine/gse_api_server/controller/service_mesh.py | gedge-platform/GEdge-Platform | b5cbe63089cf3d3263683cbcd5ec3d10ad85779b | [
"Apache-2.0"
] | null | null | null | gs-engine/gse_api_server/controller/service_mesh.py | gedge-platform/GEdge-Platform | b5cbe63089cf3d3263683cbcd5ec3d10ad85779b | [
"Apache-2.0"
] | 17 | 2020-11-09T05:16:42.000Z | 2021-12-28T08:04:33.000Z | from flask import Blueprint, request, jsonify
import json
import yaml
import app_conf
from tools.db_connector import DBConnector as mysql
from service import service_mesh as sm_service
service_mesh = Blueprint('service_mesh', __name__)
# set logger
logger = app_conf.Log.get_logger(__name__)
conn = mysql.instance()
@service_mesh.route('', methods=['get'])
def list_service_mesh():
namespace = request.headers.get('namespace', None)
details = request.args.get('details') == 'true'
cnt_from = request.args.get('from', None, int)
cnt_to = request.args.get('to', None, int)
search_name = request.args.get('name', None, str)
sort = json.loads(request.args.get('sort', "null", str))
result = sm_service.get_service_meshes(details, cnt_from, cnt_to, namespace, search_name, sort)
return jsonify(result)
@service_mesh.route('', methods=['post'])
def create_service_mesh():
content_type = request.headers.get("Content-Type")
namespace = request.headers.get('namespace', 'default')
if "yaml" in content_type:
# schema validation
body = yaml.load(request.data, Loader=yaml.Loader)
else:
body = json.loads(request.data)
sm = body['serviceMesh']
result = sm_service.create_service_mesh(namespace, sm)
return jsonify(result)
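# --- Hedged usage sketch (illustrative, not part of the original file) ---
# Assuming the blueprint is mounted at /service-mesh, a mesh could be
# created with a request like the following (payload fields beyond the
# 'serviceMesh' key are hypothetical):
#
#   curl -X POST http://localhost:5000/service-mesh \
#        -H "Content-Type: application/json" \
#        -H "namespace: default" \
#        -d '{"serviceMesh": {"name": "demo-mesh"}}'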
@service_mesh.route('/<mesh_name>', methods=['get'])
def get_service_mesh(mesh_name):
namespace = request.headers.get('namespace', None)
result = sm_service.get_service_mesh(namespace, mesh_name)
return jsonify(result)
@service_mesh.route('/<mesh_name>', methods=['delete'])
def delete_service_mesh(mesh_name):
namespace = request.headers.get('namespace', None)
result = sm_service.delete_service_mesh(namespace, mesh_name)
return jsonify(result)
| 30.245902 | 100 | 0.694851 | 238 | 1,845 | 5.159664 | 0.264706 | 0.125407 | 0.069218 | 0.084691 | 0.372964 | 0.321661 | 0.261401 | 0.261401 | 0.200326 | 0.118893 | 0 | 0 | 0.17832 | 1,845 | 60 | 101 | 30.75 | 0.810026 | 0.015176 | 0 | 0.175 | 0 | 0 | 0.086089 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.35 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba87a2336a8f0ec71ea84a395469860438887992 | 2,363 | py | Python | tests/test_multiple_pubtopics.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | 7 | 2016-09-27T00:21:46.000Z | 2017-03-18T20:04:29.000Z | tests/test_multiple_pubtopics.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | null | null | null | tests/test_multiple_pubtopics.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | 2 | 2017-03-16T21:47:43.000Z | 2020-10-20T22:58:03.000Z | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""
Build a filter that takes an input stream and dispatches to one of several
output topics based on the input value.
"""
import asyncio
import unittest
from antevents.base import Publisher, DefaultSubscriber, Scheduler
from utils import make_test_publisher
import antevents.linq.where
import antevents.linq.output
class SplitPublisher(Publisher, DefaultSubscriber):
"""Here is a filter that takes a sequence of sensor events as its input
    and splits it into one of three output topics: 'below' if the
value is below one standard deviation from the mean, 'above'
if the value is above one standard deviation from the mean, and
'within' if the value is within a standard deviation from the mean.
"""
def __init__(self, mean=100.0, stddev=20.0):
Publisher.__init__(self, topics=['above', 'below', 'within'])
self.mean = mean
self.stddev = stddev
def on_next(self, x):
val = x[2]
if val < (self.mean-self.stddev):
#print("split: value=%s dispatching to below" % val)
self._dispatch_next(val, topic='below')
elif val > (self.mean+self.stddev):
#print("split: value=%s dispatching to above" % val)
self._dispatch_next(val, topic='above')
else:
#print("split: value=%s dispatching to within" % val)
self._dispatch_next(val, topic='within')
def __str__(self):
return "SplitPublisher"
class TestMultiplePubtopics(unittest.TestCase):
def test_case(self):
sensor = make_test_publisher(1, stop_after_events=10)
split= SplitPublisher()
sensor.subscribe(split)
split.subscribe(lambda x: print("above:%s" % x),
topic_mapping=('above','default'))
split.subscribe(lambda x: print("below:%s" % x),
topic_mapping=('below', 'default'))
split.subscribe(lambda x: print("within:%s" % x),
topic_mapping=('within', 'default'))
scheduler = Scheduler(asyncio.get_event_loop())
scheduler.schedule_periodic(sensor, 1)
sensor.print_downstream()
scheduler.run_forever()
print("that's all")
if __name__ == '__main__':
unittest.main()
| 35.80303 | 75 | 0.64452 | 301 | 2,363 | 4.920266 | 0.362126 | 0.023633 | 0.020257 | 0.024308 | 0.264686 | 0.228224 | 0.067522 | 0.067522 | 0.067522 | 0.067522 | 0 | 0.010147 | 0.249259 | 2,363 | 65 | 76 | 36.353846 | 0.82469 | 0.289886 | 0 | 0 | 0 | 0 | 0.076829 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.153846 | 0.025641 | 0.333333 | 0.128205 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba886223d7d5b936770b403beef641b7928034bb | 12,515 | py | Python | pe/packrat.py | goodmami/pe | ea2ff69742c7f76f370cdfa8e3f7ae0860d5e6fa | [
"MIT"
] | 18 | 2020-03-25T09:10:24.000Z | 2022-02-21T02:46:10.000Z | pe/packrat.py | goodmami/pe | ea2ff69742c7f76f370cdfa8e3f7ae0860d5e6fa | [
"MIT"
] | 21 | 2020-03-18T08:03:04.000Z | 2021-10-11T04:29:09.000Z | pe/packrat.py | goodmami/pe | ea2ff69742c7f76f370cdfa8e3f7ae0860d5e6fa | [
"MIT"
] | 1 | 2021-09-02T16:09:29.000Z | 2021-09-02T16:09:29.000Z |
"""
Packrat Parsing
"""
# NOTE: attempting to use exceptions instead of FAIL codes resulted in
# almost a 2x slowdown, so it's probably not a good idea
from typing import (Union, List, Dict, Callable, Iterable, Any)
from collections import defaultdict
import re
import inspect
from pe._constants import (
FAIL,
MAX_MEMO_SIZE,
DEL_MEMO_SIZE,
Operator,
Flag,
)
from pe._errors import Error, ParseError
from pe._definition import Definition
from pe._match import Match, determine
from pe._types import RawMatch, Memo
from pe._grammar import Grammar
from pe._parser import Parser
from pe._optimize import optimize, regex
from pe._debug import debug
from pe._misc import ansicolor
from pe.actions import Action
_Matcher = Callable[[str, int, Memo], RawMatch]
class PackratParser(Parser):
def __init__(self, grammar: Grammar, flags: Flag = Flag.NONE):
super().__init__(grammar, flags=flags)
grammar = optimize(grammar,
inline=flags & Flag.INLINE,
common=flags & Flag.COMMON,
regex=flags & Flag.REGEX)
if flags & Flag.DEBUG:
grammar = debug(grammar)
self.modified_grammar = grammar
self._exprs: Dict[str, Callable] = {}
self._grammar_to_packrat(grammar)
@property
def start(self):
return self.grammar.start
def __contains__(self, name: str) -> bool:
return name in self._exprs
def match(self,
s: str,
pos: int = 0,
flags: Flag = Flag.MEMOIZE | Flag.STRICT) -> Union[Match, None]:
memo: Union[Memo, None] = None
if flags & Flag.MEMOIZE:
memo = defaultdict(dict)
end, args, kwargs = self._exprs[self.start](s, pos, memo)
if end < 0:
if flags & Flag.STRICT:
failpos, message = _get_furthest_fail(args, memo)
if failpos >= 0:
exc = ParseError.from_pos(failpos, s, message=message)
else:
exc = ParseError(message=message)
raise exc
else:
return None
args = tuple(args or ())
if kwargs is None:
kwargs = {}
return Match(s, pos, end, self.grammar[self.start], args, kwargs)
def _grammar_to_packrat(self, grammar):
exprs = self._exprs
for name, _def in grammar.definitions.items():
expr = self._def_to_expr(_def)
# if name is already in exprs, that means it was seen as a
# nonterminal in some other rule, so don't replace the object
# or the call chain will break.
if name in exprs:
if isinstance(expr, Rule):
action = expr.action
expr = expr.expression
else:
action = None
exprs[name].expression = expr
exprs[name].action = action
else:
exprs[name] = expr
# ensure all symbols are defined
for name, expr in exprs.items():
if expr is None or (isinstance(expr, Rule)
and expr.expression is None):
raise Error(f'undefined rule: {name}')
return exprs
def _def_to_expr(self, definition: Definition):
op = definition.op
if op == Operator.SYM:
name = definition.args[0]
return self._exprs.setdefault(name, Rule(name))
else:
try:
meth = self._op_map[op]
except KeyError:
raise Error(f'invalid definition: {definition!r}')
else:
return meth(self, definition)
def _terminal(self, definition: Definition) -> _Matcher:
definition = regex(definition)
_re = re.compile(definition.args[0], flags=definition.args[1])
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
m = _re.match(s, pos)
retval: RawMatch
if m:
retval = m.end(), (), None
else:
retval = FAIL, (pos, definition), None
if memo is not None:
memo[pos][id(_match)] = retval
return retval
return _match
def _sequence(self, definition: Definition) -> _Matcher:
items: Iterable[Definition] = definition.args[0]
expressions = [self._def_to_expr(defn) for defn in items]
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
args: List = []
kwargs: Dict[str, Any] = {}
for expr in expressions:
end, _args, _kwargs = expr(s, pos, memo)
if end < 0:
return FAIL, _args, None
else:
args.extend(_args)
if _kwargs:
kwargs.update(_kwargs)
pos = end
return pos, tuple(args), kwargs
return _match
def _choice(self, definition: Definition) -> _Matcher:
items: Iterable[Definition] = definition.args[0]
expressions = [self._def_to_expr(defn) for defn in items]
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
_id = id(_match)
if memo and pos in memo and _id in memo[pos]:
# packrat memoization check
end, args, kwargs = memo[pos][_id]
else:
# clear memo beyond size limit
if memo and len(memo) > MAX_MEMO_SIZE:
for _pos in sorted(memo)[:DEL_MEMO_SIZE]:
del memo[_pos]
for e in expressions:
end, args, kwargs = e(s, pos, memo)
if end >= 0:
break
if memo is not None:
memo[pos][_id] = (end, args, kwargs)
return end, args, kwargs # end may be FAIL
return _match
def _repeat(self, definition: Definition, min: int) -> _Matcher:
expression = self._def_to_expr(definition)
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
guard = len(s) - pos # simple guard against runaway left-recursion
args: List = []
kwargs: Dict[str, Any] = {}
ext = args.extend
upd = kwargs.update
end, _args, _kwargs = expression(s, pos, memo)
if end < 0 and min > 0:
return FAIL, _args, None
while end >= 0 and guard > 0:
ext(_args)
if _kwargs:
upd(_kwargs)
pos = end
guard -= 1
end, _args, _kwargs = expression(s, pos, memo)
return pos, tuple(args), kwargs
return _match
def _star(self, definition: Definition) -> _Matcher:
return self._repeat(definition.args[0], 0)
def _plus(self, definition: Definition) -> _Matcher:
return self._repeat(definition.args[0], 1)
def _optional(self, definition: Definition) -> _Matcher:
expression = self._def_to_expr(definition.args[0])
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
end, args, kwargs = expression(s, pos, memo)
if end < 0:
return pos, (), None
return end, args, kwargs
return _match
def _lookahead(self, definition: Definition, polarity: bool) -> _Matcher:
"""An expression that may match but consumes no input."""
expression = self._def_to_expr(definition)
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
end, args, kwargs = expression(s, pos, memo)
passed = end >= 0
if polarity ^ passed:
if passed: # negative lookahead failed
return FAIL, (pos, expression), None
else: # positive lookahead failed
return FAIL, args, None
return pos, (), None
return _match
def _and(self, definition: Definition) -> _Matcher:
return self._lookahead(definition.args[0], True)
def _not(self, definition: Definition) -> _Matcher:
return self._lookahead(definition.args[0], False)
def _capture(self, definition: Definition) -> _Matcher:
expression = self._def_to_expr(definition.args[0])
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
end, args, kwargs = expression(s, pos, memo)
if end < 0:
return FAIL, args, None
return end, (s[pos:end],), None
return _match
def _bind(self, definition: Definition) -> _Matcher:
bound: Definition = definition.args[0]
expression = self._def_to_expr(bound)
name: str = definition.args[1]
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
end, args, kwargs = expression(s, pos, memo)
if end < 0:
return FAIL, args, None
if not kwargs:
kwargs = {}
kwargs[name] = determine(args)
return end, (), kwargs
return _match
def _rule(self, definition: Definition) -> _Matcher:
subdef: Definition
action: Action
name: str
subdef, action, name = definition.args
expression = self._def_to_expr(subdef)
return Rule(name, expression, action)
def _debug(self, definition: Definition) -> _Matcher:
subdef: Definition = definition.args[0]
expression = self._def_to_expr(subdef)
def _match(s: str, pos: int, memo: Memo) -> RawMatch:
# for proper printing, only terminals can print after
# knowing the result
if subdef.op.precedence == 6 and subdef.op != Operator.SYM:
end, args, kwargs = expression(s, pos, memo)
indent = ' ' * len(inspect.stack(0))
color = 'green' if end >= 0 else 'red'
defstr = ansicolor(color, str(subdef))
print(f'{s[pos:pos+10]:<12} | {indent}{defstr}')
else:
print('{:<12} | {}{!s}'.format(
s[pos:pos+10],
' ' * len(inspect.stack(0)),
str(subdef)))
end, args, kwargs = expression(s, pos, memo)
return end, args, kwargs
return _match
_op_map = {
Operator.DOT: _terminal,
Operator.LIT: _terminal,
Operator.CLS: _terminal,
Operator.RGX: _terminal,
# Operator.SYM: _,
Operator.OPT: _optional,
Operator.STR: _star,
Operator.PLS: _plus,
Operator.AND: _and,
Operator.NOT: _not,
Operator.CAP: _capture,
Operator.BND: _bind,
Operator.SEQ: _sequence,
Operator.CHC: _choice,
Operator.RUL: _rule,
Operator.DBG: _debug,
}
# Recursion and Rules
class Rule:
"""
A grammar rule is a named expression with an optional action.
The *name* field is more relevant for the grammar than the rule
itself, but it helps with debugging.
"""
def __init__(self,
name: str,
expression: _Matcher = None,
action: Action = None):
self.name = name
self.expression = expression
self.action = action
def __call__(self, s: str, pos: int, memo: Memo) -> RawMatch:
expression = self.expression
if expression:
end, args, kwargs = expression(s, pos, memo)
action = self.action
if end >= 0 and action:
if not kwargs:
kwargs = {}
args, kwargs = action(s, pos, end, args, kwargs)
return end, args, kwargs
else:
raise NotImplementedError
def _get_furthest_fail(args, memo):
failpos = -1
message = 'failed to parse; use memoization for more details'
# assuming we're here because of a failure, the max memo position
# should be the furthest failure
if memo:
memopos = max(memo)
fails = []
if memopos > failpos:
fails = [args[1]
for pos, args, _ in memo[memopos].values()
if pos < 0]
if fails:
failpos = memopos
message = ', '.join(map(str, fails))
return failpos, message
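# --- Hedged usage sketch (illustrative, not part of the original file) ---
# The packrat parser is normally reached through pe's public API rather
# than constructed directly; roughly (API assumed from pe's documentation):
#
#   import pe
#   parser = pe.compile(r'Float <- ~( [0-9]+ ("." [0-9]*)? )')
#   m = parser.match('3.14')   # a Match on success, None on failure
#
# With Flag.MEMOIZE, (end, args, kwargs) triples are cached per input
# position so each choice expression is attempted at most once there.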
| 32.42228 | 79 | 0.54159 | 1,402 | 12,515 | 4.699715 | 0.180456 | 0.034907 | 0.037487 | 0.056458 | 0.323721 | 0.298224 | 0.260434 | 0.236303 | 0.207163 | 0.187434 | 0 | 0.005914 | 0.365002 | 12,515 | 385 | 80 | 32.506494 | 0.823204 | 0.072233 | 0 | 0.285714 | 0 | 0 | 0.014711 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111498 | false | 0.010453 | 0.052265 | 0.020906 | 0.310105 | 0.006969 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba8a3d42e7689a2e026e951ecc04b58e3c117678 | 582 | py | Python | tests/utils.py | davidmcnabnz/aiohttp-rpc | 25c3b61f48e45a8f2c5586f3e97ac16fd15f2c86 | [
"MIT"
] | 22 | 2020-05-24T08:54:51.000Z | 2022-02-16T13:03:14.000Z | tests/utils.py | davidmcnabnz/aiohttp-rpc | 25c3b61f48e45a8f2c5586f3e97ac16fd15f2c86 | [
"MIT"
] | 7 | 2020-08-31T19:40:21.000Z | 2021-08-02T06:50:05.000Z | tests/utils.py | davidmcnabnz/aiohttp-rpc | 25c3b61f48e45a8f2c5586f3e97ac16fd15f2c86 | [
"MIT"
] | 2 | 2020-05-24T12:18:19.000Z | 2021-08-01T11:30:43.000Z | import aiohttp
from aiohttp import web
import aiohttp_rpc
async def make_client(aiohttp_client, rpc_server: aiohttp_rpc.JsonRpcServer) -> aiohttp.ClientSession:
app = web.Application()
app.router.add_post('/rpc', rpc_server.handle_http_request)
return await aiohttp_client(app)
async def make_ws_client(aiohttp_client, rpc_server: aiohttp_rpc.WsJsonRpcServer) -> aiohttp.ClientSession:
app = web.Application()
app.router.add_get('/rpc', rpc_server.handle_http_request)
app.on_shutdown.append(rpc_server.on_shutdown)
return await aiohttp_client(app)
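# --- Hedged usage sketch (illustrative, not part of the original file) ---
# In a pytest-aiohttp test, `aiohttp_client` is the fixture injected by
# the plugin; the method name in the payload is hypothetical:
#
#   async def test_ping(aiohttp_client):
#       server = aiohttp_rpc.JsonRpcServer()
#       client = await make_client(aiohttp_client, server)
#       resp = await client.post(
#           '/rpc', json={'jsonrpc': '2.0', 'method': 'ping', 'id': 1})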
| 32.333333 | 107 | 0.785223 | 80 | 582 | 5.425 | 0.35 | 0.103687 | 0.0553 | 0.101382 | 0.658986 | 0.534562 | 0.400922 | 0.225806 | 0 | 0 | 0 | 0 | 0.123711 | 582 | 17 | 108 | 34.235294 | 0.85098 | 0 | 0 | 0.333333 | 0 | 0 | 0.013746 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba90dc2cb2e51a3de479c4158575a47f4042bca2 | 852 | py | Python | LeetCode/01_Easy/lc_243.py | Zubieta/CPP | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 8 | 2017-03-02T07:56:45.000Z | 2021-08-07T20:20:19.000Z | LeetCode/01_Easy/lc_243.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | null | null | null | LeetCode/01_Easy/lc_243.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 1 | 2021-08-07T20:20:20.000Z | 2021-08-07T20:20:20.000Z | # 243 - Shortest Word Distance (Easy)
# https://leetcode.com/problems/shortest-word-distance/
class Solution(object):
def shortestDistance(self, words, word1, word2):
"""
:type words: List[str]
:type word1: str
:type word2: str
:rtype: int
"""
# Find the shortest separation between two words in an array,
# such words are guaranteed to happen but also may happen more
# than once. Also the two words are distinct.
i1, i2 = -1, -1
minDist = 1<<31
        for index in range(len(words)):  # range, not the Python-2-only xrange
word = words[index]
if word == word1:
i1 = index
if word == word2:
i2 = index
if i1 != -1 and i2 != -1:
minDist = min(minDist, abs(i1 - i2))
return minDist
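# --- Hedged worked example (illustrative, not part of the original file) ---
# words = ["practice", "makes", "perfect", "coding", "makes"]
# word1 = "coding" (index 3), word2 = "practice" (index 0)
# Solution().shortestDistance(words, "coding", "practice")  # -> |3 - 0| = 3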
| 34.08 | 70 | 0.524648 | 102 | 852 | 4.382353 | 0.558824 | 0.04698 | 0.089485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045283 | 0.377934 | 852 | 25 | 71 | 34.08 | 0.798113 | 0.380282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba91558aefd2df242fdc6737e72408a5090c1f04 | 2,539 | py | Python | showdetector.py | squeakus/motiontracker | 9a3e744893de691f2af8f33372911ccf9c6ee5e0 | [
"BSD-2-Clause"
] | 1 | 2017-03-17T12:43:26.000Z | 2017-03-17T12:43:26.000Z | showdetector.py | squeakus/motiontracker | 9a3e744893de691f2af8f33372911ccf9c6ee5e0 | [
"BSD-2-Clause"
] | null | null | null | showdetector.py | squeakus/motiontracker | 9a3e744893de691f2af8f33372911ccf9c6ee5e0 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import numpy as np
import cv2
import sys
import imutils
from imutils.video import VideoStream
import argparse
import time
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
help="whether or not the Raspberry Pi camera should be used")
ap.add_argument("-d", "--detector", required=True,
help="choose detector: sift, surf, orb, akaze, brisk")
args = vars(ap.parse_args())
#set up detector
detstr = args["detector"]
print("Using", detstr, "for feature detection")
if detstr == 'sift':
detector = cv2.xfeatures2d.SIFT_create()
norm = cv2.NORM_L2
elif detstr == 'surf':
detector = cv2.xfeatures2d.SURF_create()
norm = cv2.NORM_L2
elif detstr == 'orb':
detector = cv2.ORB_create(100000)
norm = cv2.NORM_HAMMING
elif detstr == 'akaze':
detector = cv2.AKAZE_create()
norm = cv2.NORM_HAMMING
elif detstr == 'brisk':
detector = cv2.BRISK_create()
norm = cv2.NORM_HAMMING
    elif detstr == 'daisy':
        # NOTE: DAISY (and FREAK/LATCH/LUCID/VGG below) are descriptor
        # extractors without their own detect(); detector.detect() in the
        # loop below will fail for them unless paired with a keypoint
        # detector.
        detector = cv2.xfeatures2d.DAISY_create()
elif detstr == 'freak':
detector = cv2.xfeatures2d.FREAK_create()
norm = cv2.NORM_HAMMING
elif detstr == 'latch':
detector = cv2.xfeatures2d.LATCH_create()
norm = cv2.NORM_HAMMING
elif detstr == 'lucid':
detector = cv2.xfeatures2d.LUCID_create()
norm = cv2.NORM_HAMMING
elif detstr == 'vgg':
detector = cv2.xfeatures2d.VGG_create()
norm = cv2.NORM_HAMMING
else:
print("Cannot find detector",detstr)
        sys.exit(1)
#webcam or pycam?
cap = VideoStream(usePiCamera=args["picamera"] > 0).start()
print("letting camera warm up")
time.sleep(2.0)
img = None
framecnt = 0
while True:
framecnt += 1
frame = cap.read()
frame = imutils.resize(frame, width=640)
framecnt = 0
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
kp = detector.detect(gray,None)
img = cv2.drawKeypoints(gray,kp, frame, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Display the resulting frame
print("keypoints", len(kp))
cv2.imshow('frame',img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.stop()
cv2.destroyAllWindows()
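# --- Hedged usage note (illustrative, not part of the original file) ---
# Example invocations with the flags defined above:
#   python showdetector.py --detector orb          # default webcam
#   python showdetector.py --detector sift -p 1    # Raspberry Pi camera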
if __name__ == '__main__':
main()
| 29.183908 | 97 | 0.620717 | 307 | 2,539 | 5 | 0.416938 | 0.071661 | 0.064495 | 0.088599 | 0.18241 | 0.166775 | 0.148534 | 0 | 0 | 0 | 0 | 0.029443 | 0.264277 | 2,539 | 86 | 98 | 29.523256 | 0.792291 | 0.054746 | 0 | 0.157143 | 0 | 0 | 0.114405 | 0 | 0 | 0 | 0.00167 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.114286 | 0 | 0.128571 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba918a951cdd349ee4cd6bbade1526c24a7533de | 2,106 | py | Python | parse/bsoup.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 4 | 2016-12-17T20:06:10.000Z | 2021-11-19T04:45:29.000Z | parse/bsoup.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 11 | 2021-01-06T05:35:11.000Z | 2022-03-11T23:28:31.000Z | parse/bsoup.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 3 | 2015-06-12T10:44:16.000Z | 2021-07-26T18:39:47.000Z | __author__ = 'thorwhalen'
"""
functions that work on soup, soup tags, etc.
"""
import bs4
from ut.pgenerator.get import last_element
from tempfile import mkdtemp
import os
import ut.pstr.to as strto
import ut.parse.util as parse_util
import ut.pstr.trans as pstr_trans
def root_parent(s):
return last_element(s.parents)
def open_tag_in_firefox(tag):
save_file = os.path.join(mkdtemp(), 'tmp.html')
strto.file(tag.prettify(), save_file)
parse_util.open_in_firefox(save_file)
def add_text_to_parse_dict(soup, parse_dict, key, name, attrs, text_transform=pstr_trans.strip):
tag = soup.find(name=name, attrs=attrs)
if tag:
if text_transform:
parse_dict[key] = text_transform(tag.text)
else:
parse_dict[key] = tag.text
return parse_dict
def get_element(node, path_to_element):
for p in path_to_element:
if isinstance(p, str):
p = p.split('.')
if isinstance(p, dict):
node = node.find(**p)
else:
node = node.find(*p)
return node
def get_elements(nodes, path_to_element):
"""
    Recursively get elements from soup, soup tags, result sets, etc. by specifying a node (or nodes) and
a list of paths to follow.
:param nodes:
:param path_to_element: list of paths. A path can be a period-separated string, a list (of findAll args), or a
dict (of findAll kwargs)
:return: a list of elements that were found
"""
if not isinstance(nodes, (bs4.element.ResultSet, tuple, list)):
nodes = [nodes]
cumul = []
for node in nodes:
for i, p in enumerate(path_to_element):
if isinstance(p, str):
p = p.split('.')
if isinstance(p, dict):
_nodes = node.findAll(**p)
else:
_nodes = node.findAll(*p)
_path_to_element = path_to_element[(i + 1):]
if len(_path_to_element) > 0:
cumul.extend(get_elements(_nodes, _path_to_element))
else:
cumul.extend(_nodes)
return cumul
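# --- Hedged usage sketch (illustrative, not part of the original file) ---
def _example_get_elements(html):
    """Illustrative only: collect every <a> under each <div class="row">."""
    soup = bs4.BeautifulSoup(html, 'html.parser')
    path = [{'name': 'div', 'attrs': {'class': 'row'}},  # dict -> findAll kwargs
            ['a']]                                       # list -> findAll args
    return get_elements(soup, path)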
| 28.459459 | 114 | 0.623457 | 299 | 2,106 | 4.197324 | 0.32107 | 0.043028 | 0.093227 | 0.023904 | 0.130677 | 0.130677 | 0.084462 | 0.084462 | 0.084462 | 0.084462 | 0 | 0.002623 | 0.275878 | 2,106 | 73 | 115 | 28.849315 | 0.820328 | 0.154796 | 0 | 0.204082 | 0 | 0 | 0.01182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0 | 0.142857 | 0.020408 | 0.326531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba918ece5de60730c0129575be92e67c044ff8ec | 1,130 | py | Python | interview_query/utils/faker_sat_scores.py | mhetrerajat/ds-challenge | 3208df5c29612b0dfe60c1c082da1f31ad220b49 | [
"MIT"
] | null | null | null | interview_query/utils/faker_sat_scores.py | mhetrerajat/ds-challenge | 3208df5c29612b0dfe60c1c082da1f31ad220b49 | [
"MIT"
] | 1 | 2021-05-18T07:30:16.000Z | 2021-05-18T07:30:16.000Z | interview_query/utils/faker_sat_scores.py | mhetrerajat/ds-challenge | 3208df5c29612b0dfe60c1c082da1f31ad220b49 | [
"MIT"
] | null | null | null | from faker import Faker
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy_utils import create_database, database_exists
connection_string = "mysql+mysqlconnector://root:@127.0.0.1:3306/sat_scores"
fake = Faker()
engine = create_engine(connection_string)
Session = sessionmaker(bind=engine)
session = Session()
if not database_exists(engine.url):
create_database(engine.url)
Base = declarative_base()
class Student(Base):
__tablename__ = "students"
id = Column(Integer, primary_key=True)
student = Column("student", String(128))
score = Column("score", Integer)
def main():
Base.metadata.create_all(engine)
count = 1000
session.bulk_insert_mappings(
Student,
[
{
"student": fake.name(),
"score": fake.pyint(min_value=1700, max_value=2200, step=1),
}
for _ in range(count)
],
)
session.commit()
if __name__ == "__main__":
main()
| 25.111111 | 86 | 0.679646 | 132 | 1,130 | 5.583333 | 0.507576 | 0.075984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029246 | 0.213274 | 1,130 | 44 | 87 | 25.681818 | 0.799775 | 0 | 0 | 0 | 0 | 0 | 0.083186 | 0.047788 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.147059 | 0 | 0.323529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba9443740a1bddede8f3fe0b990e93d6b3615856 | 2,319 | py | Python | scripts/leetcode-submission.py | ZihuanLing/zihuanling.github.io | d1c308039bab43bf4966100e1783c486ec2f105d | [
"MIT"
] | 1 | 2022-02-24T07:05:19.000Z | 2022-02-24T07:05:19.000Z | scripts/leetcode-submission.py | ZihuanLing/zihuanling.github.io | d1c308039bab43bf4966100e1783c486ec2f105d | [
"MIT"
] | null | null | null | scripts/leetcode-submission.py | ZihuanLing/zihuanling.github.io | d1c308039bab43bf4966100e1783c486ec2f105d | [
"MIT"
] | null | null | null | # coding: utf-8
# Crawl LeetCode submission records
import os
import json
import requests
import time
def parse_submissions(leetcode_session):
url = "https://leetcode.cn/api/submissions/"
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-language": "zh,en;q=0.9,zh-CN;q=0.8",
"cache-control": "max-age=0",
"sec-ch-ua": "\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"101\", \"Google Chrome\";v=\"101\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"macOS\"",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "none",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"cookie": f"LEETCODE_SESSION={leetcode_session}",
}
limit, offset = 100, 0
submissions = []
with requests.Session() as session:
while True:
resp = session.get(url, headers=headers, params={'limit': limit, 'offset': offset})
if resp.status_code != 200:
print(f"Get submissions from leetcode-cn failed: {resp.content.decode()}")
break
data = resp.json()
submissions += data['submissions_dump']
if not data['has_next']:
print("Finished requests")
break
offset += limit
print(f"parsing next, offset = {offset}")
time.sleep(1)
if not submissions:
print("no submissions to dump to file.")
return
# filter submissions
_submissions = []
exists = set()
for sub in submissions:
key = (sub['title'], sub['lang'])
if sub['status_display'] != 'Accepted' or key in exists:
continue
exists.add(key)
_submissions.append(sub)
print(f"All done, total {len(submissions)} submissions fetched.")
# output data to json
with open('static/leetcode-submissions.json', 'w') as f:
json.dump(_submissions, f)
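# --- Hedged usage note (illustrative, not part of the original file) ---
# LEETCODE_SESSION is the session cookie copied from a logged-in
# leetcode.cn browser session, e.g.:
#   LEETCODE_SESSION=<cookie value> python leetcode-submission.py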
def main():
leetcode_session = os.environ.get("LEETCODE_SESSION")
if not leetcode_session:
print("leetcode session not set.")
return
parse_submissions(leetcode_session)
if __name__ == '__main__':
main()
| 32.208333 | 156 | 0.580854 | 281 | 2,319 | 4.708185 | 0.455516 | 0.090703 | 0.006803 | 0.046863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018713 | 0.262613 | 2,319 | 71 | 157 | 32.661972 | 0.754971 | 0.028892 | 0 | 0.068966 | 0 | 0.017241 | 0.353072 | 0.121549 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.137931 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba94a64403dabcab291a47f945af3b92e3234033 | 3,179 | py | Python | milestone1.py | kaiicheng/United-States-Population-Name-Dashboard | 9019538fcb58c7e97a3dc67d3b27cb8ad180e448 | [
"MIT"
] | null | null | null | milestone1.py | kaiicheng/United-States-Population-Name-Dashboard | 9019538fcb58c7e97a3dc67d3b27cb8ad180e448 | [
"MIT"
] | null | null | null | milestone1.py | kaiicheng/United-States-Population-Name-Dashboard | 9019538fcb58c7e97a3dc67d3b27cb8ad180e448 | [
"MIT"
] | null | null | null | """
File: Milestone1.py
Name:
-----------------------
This file tests milestone 1 of
our babyname.py project.
"""
import sys
def add_data_for_name(name_data, year, rank, name):
# Compare the rank of certain name which already exists in the name_data dictionary.
final_rank = int(rank)
    # print(name_data[name])
    # print(class(rank)) would be a SyntaxError: `class` is a reserved
    # keyword, so it can never be called like a function; use type(rank)
    # to inspect an object's class instead.
    print(type(rank))  # rank arrives as a str, hence the int() conversion above
if name in name_data:
if year in name_data[name]:
old_rank = int(name_data[name][year])
#print(old_rank)
new_rank = int(final_rank)
#print(new_rank)
            # Comparing the raw strings would also run without error,
            # because Python compares strings lexicographically
            # (character by character) -- which is exactly why it would be
            # wrong here: '90' > '200' as strings. Converting to int gives
            # the numeric comparison we actually want.
if new_rank <= old_rank:
final_rank = new_rank
# print(final_rank)
                # Parameters like `rank` are ordinary local names; rebinding
                # them inside the function never affects the caller.
else: # 200 > 90
final_rank = old_rank
# print(final_rank)
# print(final_rank)
# Store new data into name_data list
if name not in name_data:
new_dict = {year: str(final_rank)}
# new_dict = {}
# new_dict[year] = rank
name_data[name] = new_dict
else:
name_data[name][year] = str(final_rank)
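# --- Hedged worked example (illustrative, not part of the original file) ---
# name_data = {'Kylie': {'2010': '57'}}
# add_data_for_name(name_data, '2010', '30', 'Kylie')
# -> int('30') <= int('57'), so the better (smaller) rank is kept:
#    {'Kylie': {'2010': '30'}}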
# ------------- DO NOT EDIT THE CODE BELOW THIS LINE ---------------- #
def test1():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
print('--------------------test1----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test2():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test2----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test3():
name_data = {'Kylie': {'2010': '57'}, 'Sammy': {'1980': '451', '1990': '90'}}
add_data_for_name(name_data, '1990', '200', 'Sammy')
print('-------------------test3-----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test4():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
add_data_for_name(name_data, '2000', '108', 'Kate')
add_data_for_name(name_data, '1990', '200', 'Sammy')
add_data_for_name(name_data, '1990', '90', 'Sammy')
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test4----------------------')
print(str(name_data))
print('-----------------------------------------------')
def main():
args = sys.argv[1:]
if len(args) == 1 and args[0] == 'test1':
test1()
elif len(args) == 1 and args[0] == 'test2':
test2()
elif len(args) == 1 and args[0] == 'test3':
test3()
elif len(args) == 1 and args[0] == 'test4':
test4()
if __name__ == "__main__":
main()
| 31.475248 | 88 | 0.493551 | 382 | 3,179 | 3.89267 | 0.259162 | 0.139879 | 0.060525 | 0.084734 | 0.38534 | 0.372562 | 0.282448 | 0.201076 | 0.201076 | 0.155346 | 0 | 0.058848 | 0.240956 | 3,179 | 100 | 89 | 31.79 | 0.557397 | 0.215791 | 0 | 0.333333 | 0 | 0 | 0.238636 | 0.152597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.017544 | 0 | 0.122807 | 0.22807 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba951d4ecafedfb08c55ff85b92950bc2b527ea0 | 1,003 | py | Python | Ballot.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
] | null | null | null | Ballot.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
] | null | null | null | Ballot.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
] | null | null | null | from typing import List, Set
from CandidateScore import CandidateScore
from Candidate import Candidate
from Voter import Voter
from ElectionConfig import ElectionConfig
class Ballot:
def __init__(self, voter: Voter, candidates: List[Candidate], config: ElectionConfig):
self.voter = voter
scores = list(map(lambda c: voter.score(c, config), candidates))
cs = list(map(lambda c: CandidateScore(c[0], c[1]), zip(candidates, scores)))
cs.sort(key=lambda c: c.score, reverse=True)
self.ordered_candidates = cs
def active_choice(self, active_candidates: Set[Candidate]) -> Candidate:
for c in self.ordered_candidates:
if c.candidate in active_candidates:
return c.candidate
        # assert with a tuple, e.g. assert(False, "msg"), is always truthy
        # and would never fire; raise explicitly instead.
        raise ValueError("no candidate in active candidates")
def print(self):
for cs in self.ordered_candidates:
print("\t %6s ideology: % 7.2f score: % 7.2f" % (cs.candidate.name, cs.candidate.ideology.vec[0], cs.score))
| 37.148148 | 120 | 0.680957 | 131 | 1,003 | 5.137405 | 0.358779 | 0.031204 | 0.093611 | 0.041605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010191 | 0.217348 | 1,003 | 26 | 121 | 38.576923 | 0.847134 | 0 | 0 | 0 | 0 | 0 | 0.06986 | 0 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.15 | false | 0 | 0.25 | 0 | 0.5 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ba9b2619a41c953c21ce58a65c8f71171bae6e85 | 5,853 | py | Python | experiments/RASF_pretraining/reconstruction.py | seanywang0408/RASF | 2437ace3f19812d1fe852651358b3cbb9325efb7 | [
"Apache-2.0"
] | 14 | 2022-03-16T13:00:38.000Z | 2022-03-28T11:53:34.000Z | experiments/RASF_pretraining/reconstruction.py | seanywang0408/RASF | 2437ace3f19812d1fe852651358b3cbb9325efb7 | [
"Apache-2.0"
] | null | null | null | experiments/RASF_pretraining/reconstruction.py | seanywang0408/RASF | 2437ace3f19812d1fe852651358b3cbb9325efb7 | [
"Apache-2.0"
] | null | null | null | import os
import time
from tqdm import tqdm
import trimesh
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import _init_path
from config import cfg
from RASF import RASF
from pointclouds.datasets.shapenetpart import ShapenetPartDataset, to_categorical
from utils.training_utils import backup_terminal_outputs, backup_code, set_seed
from utils.chamfer_distance import ChamferDistance
save_path = os.path.join('./log/recon', time.strftime("%y%m%d_%H%M%S"))
os.makedirs(save_path, exist_ok=True)
print('save_path', save_path)
backup_terminal_outputs(save_path)
backup_code(save_path)
batch_size = 64
num_workers = 0
num_epochs = 150
num_input_points = 24
rasf_resolution = cfg.rasf_resolution
rasf_channel = cfg.rasf_channel
num_local_points = 64 # total_points = 2048
data_path = cfg.ShapeNetPart_path
train_set = ShapenetPartDataset(data_path, npoints=2048, split='trainval')
test_set = ShapenetPartDataset(data_path, npoints=2048, split='test')
train_loader = DataLoader(train_set,
batch_size=batch_size, shuffle=True,
num_workers=num_workers, pin_memory=True)
val_loader = DataLoader(test_set,
batch_size=batch_size,
num_workers=num_workers, pin_memory=True)
class Generator(nn.Module):
def __init__(self, rasf_channel):
super().__init__()
self.conv1 = nn.Conv1d(rasf_channel+3, rasf_channel*2, 1)
self.conv2 = nn.Conv1d(rasf_channel*2, rasf_channel*4, 1)
self.conv3 = nn.Conv1d(rasf_channel*4, rasf_channel*8, 1)
self.fc1 = nn.Linear(rasf_channel*8, rasf_channel*8*2)
self.fc2 = nn.Linear(rasf_channel*8*2, 1024*3)
def forward(self, x):
x = self.conv1(x)
x = F.leaky_relu(x, negative_slope=0.02, inplace=True)
x = self.conv2(x)
x = F.leaky_relu(x, negative_slope=0.02, inplace=True)
x = self.conv3(x)
x = F.leaky_relu(x, negative_slope=0.02, inplace=True)
x = x.max(-1)[0]
x = self.fc1(x)
x = F.leaky_relu(x, negative_slope=0.02, inplace=True)
x = self.fc2(x)
x = x.view(x.shape[0], -1, 3)
return x
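# --- Hedged shape note (illustrative, not part of the original file) ---
# Generator.forward expects x of shape [B, rasf_channel + 3, N]: the xyz
# coordinates (transposed to channels-first) concatenated with per-point
# RASF features along the channel dim; it returns [B, 1024, 3] point
# coordinates via the final fc2 -> view(B, -1, 3).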
model = Generator(rasf_channel=rasf_channel).cuda()
field = RASF(resolution=(rasf_resolution, rasf_resolution, rasf_resolution), channel=rasf_channel, num_local_points=num_local_points).cuda()
optimizer = torch.optim.Adam(list(model.parameters())+list(field.parameters()), lr=0.001)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50,100], gamma=0.2)
start_time = time.time()
best_loss = 20
chamfer_dist = ChamferDistance()
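# --- Hedged note (illustrative, not part of the original file) ---
# ChamferDistance is assumed to return the two directed nearest-neighbour
# squared-distance sets d1 (pred -> target) and d2 (target -> pred), so
# d1.mean() + d2.mean() below is the symmetric Chamfer distance:
#   CD(P, Q) = mean_{p in P} min_{q in Q} ||p - q||^2
#            + mean_{q in Q} min_{p in P} ||q - p||^2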
for e in range(num_epochs):
print('###################')
print('Epoch:', e)
print('###################')
train_loss = 0.
train_accuracy = 0.
num_batches = 0
model.train()
field.train()
for idx, (data, category, seg) in enumerate(tqdm(train_loader)):
category = category.cuda()
data = data.cuda()
points = data
data = torch.cat([data.transpose(2,1), field.batch_samples(data)], 1)
select_points = torch.ones(data.shape[0], data.shape[2]).multinomial(num_samples=num_input_points).cuda()
data = data.gather(-1, select_points.unsqueeze(1).expand(-1, data.shape[1], -1))
output = model(data)
d1, d2 = chamfer_dist(output, points)
loss = (d1.mean() + d2.mean())
train_loss += loss.item()
loss.backward()
optimizer.step()
optimizer.zero_grad()
        num_batches += 1
    # step the LR schedule once per epoch, after the optimizer updates
    # (the required ordering since PyTorch 1.1)
    scheduler.step()
print(train_loss/num_batches)
os.makedirs(os.path.join(save_path, 'epoch_%d'%e))
for i, (y_points, pred_points) in enumerate(zip(points.cpu().detach(), output.cpu().detach())):
trimesh.PointCloud(y_points.numpy(), colors=np.zeros(y_points.shape)).export(os.path.join(save_path, 'epoch_%d'%e, 'train_%d_y.ply'%i))
trimesh.PointCloud(pred_points.numpy(), colors=np.zeros(pred_points.shape)).export(os.path.join(save_path, 'epoch_%d'%e, 'train_%d_pred.ply'%i))
print('Train loss:', train_loss / num_batches)
val_loss = 0.
val_accuracy = 0.
num_batches = 0
model.eval()
field.eval()
with torch.no_grad():
for idx, (data, category, seg) in enumerate(tqdm(val_loader)):
category = category.cuda()
data = data.cuda()
points = data
data = torch.cat([data.transpose(2,1), field.batch_samples(data)], 1)
select_points = torch.ones(data.shape[0], data.shape[2]).multinomial(num_samples=num_input_points).cuda()
data = data.gather(-1, select_points.unsqueeze(1).expand(-1, data.shape[1], -1))
# data = data.max(-1)[0]
output = model(data)
d1, d2 = chamfer_dist(output, points)
loss = (d1.mean() + d2.mean())
val_loss += loss.item()
num_batches += 1
for i, (y_points, pred_points) in enumerate(zip(points.cpu().detach(), output.cpu().detach())):
# points.shape == [n_points, 3]
trimesh.PointCloud(y_points.numpy(), colors=np.zeros(y_points.shape)).export(os.path.join(save_path, 'epoch_%d'%e, 'test_%d_y.ply'%i))
trimesh.PointCloud(pred_points.numpy(), colors=np.zeros(pred_points.shape)).export(os.path.join(save_path, 'epoch_%d'%e, 'test_%d_pred.ply'%i))
print('Val loss:', val_loss / num_batches)
# print('Val accuracy:', val_accuracy / num_batches)
if best_loss >= val_loss / num_batches:
best_loss = val_loss / num_batches
torch.save(field.state_dict(), os.path.join(save_path, "recon_weights.pt"))
end_time = time.time()
print('Training time: {}'.format(end_time - start_time))
print('best loss: ', best_loss)
# --- geoware/models/timezone.py (repo: un33k/django-geoware, license: MIT) ---
from django.utils.translation import ugettext as _
from slugify import slugify
from .base import models
class Timezone(models.Model):
"""
Timezone Model Class.
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
country = models.ForeignKey(
"Country",
verbose_name=_("Country"),
related_name='%(app_label)s_%(class)s_country',
null=True,
blank=True,
)
name_id = models.CharField(
_("Name"),
db_index=True,
max_length=254,
)
slug = models.CharField(
_('Slug'),
max_length=254,
null=True,
blank=True,
)
gmt_offset = models.FloatField(
_("GMT Offset (Jan 1)"),
default=0.0,
)
dst_offset = models.FloatField(
_("DST Offset (Jul 1)"),
default=0.0,
)
raw_offset = models.FloatField(
_("Raw Offset"),
default=0.0,
)
url = models.URLField(
_('URL'),
max_length=254,
null=True,
blank=True,
)
info = models.TextField(
_('Details'),
null=True,
blank=True,
)
is_active = models.BooleanField(
_('Active'),
default=True,
)
class Meta:
app_label = 'geoware'
db_table = '{app}-{type}'.format(app=app_label, type='timezone')
verbose_name = _('Timezone')
verbose_name_plural = _('Timezones')
unique_together = [('name_id',)]
def save(self, *args, **kwargs):
self.slug = slugify(self.name_id)
super().save(*args, **kwargs)
def __str__(self):
return self.name_id
# --- tools/ransac.py (repo: OhJaeKwang/gaze_estimation, license: MIT) ---
import os
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from lib.utils import utils_inference
from lib.utils import utils_landmarks
from lib.ransac import ransac
### Eye Landmarks Detection
method = 'unityeyes_angle'
ckpt = 13
model = utils_inference.get_model_by_name('C:/Users/yklee/eye_landmarks_detection/tools/output/unityeyes/eye_alignment_unityeyes_hrnet_w18/backup/' + method + '/checkpoint_{}.pth'.format(ckpt),
'C:/Users/yklee/eye_landmarks_detection/experiments/unityeyes/eye_alignment_unityeyes_hrnet_w18.yaml',
device='cuda')
# img = plt.imread('C:/Users/yklee/eye_landmarks_detection/data/unityeyes/images/40001.jpg')
img = plt.imread('C:/Users/yklee/eye_landmarks_detection/data/sample/1.jpg')
crop_size = 192
img_shape = img.shape
if img_shape[0] != crop_size or img_shape[1] != crop_size:
cen_x = int(img_shape[1] / 2)
cen_y = int(img_shape[0] / 2)
img = img[cen_y-int(crop_size/2):cen_y+int(crop_size/2), cen_x-int(crop_size/2):cen_x+int(crop_size/2)]
lmks, conf_score = utils_inference.get_lmks_by_img(model, img, conf_score=True)
utils_landmarks.show_landmarks(img, lmks)
### Ellipse RANSAC
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # plt.imread returns RGB, not BGR
pnts = list(lmks[18:])
# Using only a subset of the landmarks -> not recommended
# lmks, conf_score = list(lmks), np.reshape(np.array(conf_score), 50)
# iris_lmks, iris_score = lmks[18:], conf_score[18:]
#
# conf_argsort = iris_score.argsort()
#
# pnts = []
# for i in range(16):
# pnts.append(iris_lmks[conf_argsort[i]])
ellipse_params = ransac.FitEllipse_RANSAC(np.array(pnts), gray)
# for circle in pnts:
# cv2.circle(img, (int(np.round(circle[0])), int(np.round(circle[1]))), 2, (0, 0, 255), -1)
cv2.ellipse(img, ellipse_params, (255, 0, 0), 1)
plt.imshow(img)
plt.show()
#!/usr/bin/python
# --- test_scripts/pub100.py (repo: talih0/dps-for-iot, license: Apache-2.0) ---
from common import *
import atexit
atexit.register(cleanup)
subs = [
sub('1.1.#'),
sub('1.1.#'),
sub('1.1.#')
]
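# Publish to topics 1.1.0 .. 1.1.99; each of the three wildcard ('1.1.#') subscribers
# should receive every publication, which the '1.1.\d+' pattern (x100) asserts.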
for i in range(100):
pub('1.1.{}'.format(i))
expect_pub_received(subs, ['1.1.\d+'] * 100)
# --- borda.py (repo: thesis-wisard/thesis_libs, license: MIT) ---
import wisardpkg as wp
import random
import numpy as np
import time
from astropy.stats import bootstrap
from astropy.utils import NumpyRNGContext
LOW_N = 5
HIGH_N = 31
MIN_SCORE = 0.1
GROW_INTERVAL = 100
MAX_DISCRIMINATOR_LIMIT = 10
class BordaBagging(object):
def __init__(self, train_dataset, learners, partitions = "undefined", voting = "borda0"):
self.train_dataset = train_dataset
self.learners = learners
self.nets = []
self.partitions = partitions
if(partitions == "undefined"):
self.partitions = int(len(train_dataset)/75)
if(self.partitions == 0):
self.partitions = 1
self.entry_size = len(train_dataset.get(0))
self.voting = voting
self.training_time = 0
self.ensemble()
def random_wisard(self):
return wp.ClusWisard(np.random.randint(LOW_N, HIGH_N), 0.1, 10, 1)
def generate_dataset(self):
boot = []
for i in range(len(self.train_dataset)):
boot.append(i)
with NumpyRNGContext(1):
bootresult = bootstrap(np.array(boot), self.learners, int(len(self.train_dataset)*self.partitions))
dataset = []
for samples in bootresult:
d = wp.DataSet()
for sample in samples:
d.add(self.train_dataset.get(int(sample)), self.train_dataset.getLabel(int(sample)))
dataset.append(d)
return dataset
def ensemble(self):
dataset = self.generate_dataset()
for i in range(0, self.learners):
net = self.random_wisard()
training_time = time.time()
net.train(dataset[i])
self.training_time = self.training_time + time.time() - training_time
self.nets.append(net)
def get_training_time(self):
return self.training_time
@staticmethod
def get_labels(out):
labels = []
for label in out[0]:
labels.append(label)
return labels
@staticmethod
def borda_count_0(scores, labels):
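        # Borda-0 count: each learner's top prediction increments its label's tally,
        # tallies are rank-normalised to [0, 1], and the best label wins. Toy example
        # with labels ['a', 'b', 'c'] and votes ['a', 'a', 'b']: tallies [2, 1, 0]
        # -> ranks [2, 1, 0] -> scores [1.0, 0.5, 0.0] -> 'a' wins.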
score_labels = [0] * len(labels)
for i in range(len(scores)):
for j in range(len(labels)):
if(scores[i] == labels[j]):
score_labels[j] += 1
scores_template = sorted(set(score_labels))
new_scores = []
for i in range(len(score_labels)):
vote = scores_template.index(score_labels[i])
new_scores.append(vote/(len(labels)-1))
return labels[new_scores.index(max(new_scores))]
@staticmethod
def borda_count_1(scores, labels):
score_labels = [0] * len(labels)
for i in range(len(scores)):
for j in range(len(labels)):
if(scores[i] == labels[j]):
score_labels[j] += 1
scores_template = sorted(set(score_labels))
new_scores = []
for i in range(len(score_labels)):
vote = scores_template.index(score_labels[i])
new_scores.append((vote+1)/len(labels))
return labels[new_scores.index(max(new_scores))]
@staticmethod
def dowdall(scores, labels):
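        # Dowdall (harmonic) count: the label ranked k-th from the top scores
        # 1 / (k + 1), e.g. tallies [2, 1, 0] -> scores [1, 1/2, 1/3].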
score_labels = [0] * len(labels)
for i in range(len(scores)):
for j in range(len(labels)):
if(scores[i] == labels[j]):
score_labels[j] += 1
scores_template = sorted(set(score_labels), reverse = True)
new_scores = []
for i in range(len(score_labels)):
vote = scores_template.index(score_labels[i])
new_scores.append(1/(vote+1))
return labels[new_scores.index(max(new_scores))]
def classify(self, test_dataset):
results = []
for i in range(0, len(test_dataset)):
scores = []
test = wp.DataSet()
bi = wp.BinInput(test_dataset.get(i))
test.add(bi, test_dataset.getLabel(i))
for j in range(0, len(self.nets)):
scores.append(self.nets[j].classify(test)[0])
out = self.nets[0].getAllScores(test)
labels = self.get_labels(out)
result = 0
if(self.voting == "borda0"):
result = self.borda_count_0(scores, labels)
else:
if(self.voting == "borda1"):
result = self.borda_count_1(scores, labels)
else:
result = self.dowdall(scores, labels)
results.append(result)
return results
# --- repositories.bzl (repo: jesseschalken/rules_proto_grpc, license: Apache-2.0) ---
"""Common dependencies for rules_proto_grpc."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("//internal:common.bzl", "check_bazel_minimum_version")
# Versions
MINIMUM_BAZEL_VERSION = "3.0.0"
ENABLE_VERSION_NAGS = False
PROTOBUF_VERSION = "3.19.1" # When updating, also update JS requirements, JS rulegen in js.go, Ruby requirements and C#/F# requirements
GRPC_VERSION = "1.42.0" # When updating, also update grpc hash, grpc-java hash, Go repositories.bzl, Ruby requirements and C#/F# requirements
VERSIONS = {
# Core
"rules_proto": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_proto",
"ref": "4.0.0",
"sha256": "66bfdf8782796239d3875d37e7de19b1d94301e8972b3cbd2446b332429b4df1",
},
"com_google_protobuf": {
"type": "github",
"org": "protocolbuffers",
"repo": "protobuf",
"ref": "v{}".format(PROTOBUF_VERSION),
"sha256": "87407cd28e7a9c95d9f61a098a53cf031109d451a7763e7dd1253abf8b4df422",
},
"com_github_grpc_grpc": {
"type": "github",
"org": "grpc",
"repo": "grpc",
"ref": "v{}".format(GRPC_VERSION),
"sha256": "b2f2620c762427bfeeef96a68c1924319f384e877bc0e084487601e4cc6e434c",
},
"zlib": {
"type": "http",
"urls": [
"https://mirror.bazel.build/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
"sha256": "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
"strip_prefix": "zlib-1.2.11",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.zlib",
},
"rules_python": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_python",
"ref": "0.5.0",
"sha256": "a2fd4c2a8bcf897b718e5643040b03d9528ac6179f6990774b7c19b2dc6cd96b",
},
"build_bazel_rules_swift": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_swift",
"ref": "0.24.0",
"sha256": "56f79e7f1b075b0ba9c046db0ff290ad2b5696c47c683ea3faf414bf70e0fa9b",
},
"bazel_skylib": {
"type": "github",
"org": "bazelbuild",
"repo": "bazel-skylib",
"ref": "1.1.1",
"sha256": "07b4117379dde7ab382345c3b0f5edfc6b7cff6c93756eac63da121e0bbcc5de",
},
# Android
"build_bazel_rules_android": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_android",
"ref": "9ab1134546364c6de84fc6c80b4202fdbebbbb35",
"sha256": "f329928c62ade05ceda72c4e145fd300722e6e592627d43580dd0a8211c14612",
},
# Buf
"protoc_gen_buf_breaking_darwin_x86_64": {
"type": "http_file",
"urls": ["https://github.com/bufbuild/buf/releases/download/v0.56.0/protoc-gen-buf-breaking-Darwin-x86_64"],
"sha256": "d7b12a2ccd663f00a068b19cbd2c1e81f4983ea33bd9a92980485e2c4693b75a",
"executable": True,
},
"protoc_gen_buf_breaking_linux_x86_64": {
"type": "http_file",
"urls": ["https://github.com/bufbuild/buf/releases/download/v0.56.0/protoc-gen-buf-breaking-Linux-x86_64"],
"sha256": "8463f63626327d81f72b4a2ad08b97898753a1ee14899e63728df9e2d110d5bf",
"executable": True,
},
"protoc_gen_buf_lint_darwin_x86_64": {
"type": "http_file",
"urls": ["https://github.com/bufbuild/buf/releases/download/v0.56.0/protoc-gen-buf-lint-Darwin-x86_64"],
"sha256": "3ff939636e5857f6fe3dcaeae816538fcee41cec66b10b62df5ccb65d0f79e7f",
"executable": True,
},
"protoc_gen_buf_lint_linux_x86_64": {
"type": "http_file",
"urls": ["https://github.com/bufbuild/buf/releases/download/v0.56.0/protoc-gen-buf-lint-Linux-x86_64"],
"sha256": "a7ab67a5bcc5906366bde424ba63fdcf604e07d4825e5720c8e5b3ab1530bbf7",
"executable": True,
},
# C
"upb": {
"type": "github",
"org": "protocolbuffers",
"repo": "upb",
"ref": "982f26aad42291064878ff64cb5a43d69723f91c",
"sha256": "72d25e544bce0e350612184096ba4cd3454d63c048e5c18a682038c075c947a4",
},
# C#/F#
"io_bazel_rules_dotnet": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_dotnet",
"ref": "a07119eedbba3aee95cefda1f4db0d6a48c53071",
"sha256": "75a9c7292e93a7c1b86f59cf457bea5c6e7d6899150e42dbb900ba755f1cbd84",
},
# D
"io_bazel_rules_d": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_d",
"ref": "73a7fc7d1884b029a4723bef2a0bb1f3f93c3fb6",
"sha256": "53bbc348ac8e8e66003dee887b2536e45739f649196733eb936991e53fdaac72",
},
"com_github_dcarp_protobuf_d": {
"type": "http",
"urls": ["https://github.com/dcarp/protobuf-d/archive/v0.6.2.tar.gz"],
"sha256": "5509883fa042aa2e1c8c0e072e52c695fb01466f572bd828bcde06347b82d465",
"strip_prefix": "protobuf-d-0.6.2",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_dcarp_protobuf_d",
},
# Doc
"protoc_gen_doc_darwin_x86_64": {
"type": "http",
"urls": ["https://github.com/pseudomuto/protoc-gen-doc/releases/download/v1.5.0/protoc-gen-doc-1.5.0.darwin-amd64.go1.16.6.tar.gz"],
"sha256": "5b74f2b2b98f2c9a0978f42dc1d931e03fc51dd112e56ff9a6252f87fdb879c9",
"strip_prefix": "protoc-gen-doc-1.5.0.darwin-amd64.go1.16.6",
"build_file_content": """exports_files(glob(["protoc-gen-doc*"]))""",
},
"protoc_gen_doc_linux_x86_64": {
"type": "http",
"urls": ["https://github.com/pseudomuto/protoc-gen-doc/releases/download/v1.5.0/protoc-gen-doc-1.5.0.linux-amd64.go1.16.6.tar.gz"],
"sha256": "5455f066af1197a7cd3753eed5d8096b310b69b7b3d0f9b81c38223f4e0e5f10",
"strip_prefix": "protoc-gen-doc-1.5.0.linux-amd64.go1.16.6",
"build_file_content": """exports_files(glob(["protoc-gen-doc*"]))""",
},
"protoc_gen_doc_windows_x86_64": {
"type": "http",
"urls": ["https://github.com/pseudomuto/protoc-gen-doc/releases/download/v1.5.0/protoc-gen-doc-1.5.0.windows-amd64.go1.16.6.tar.gz"],
"sha256": "b6cc89ed9b9d037433f35a1ae5b593bf528db86e1d07f96533a9be33af9e9a6f",
"strip_prefix": "protoc-gen-doc-1.5.0.windows-amd64.go1.16.6",
"build_file_content": """exports_files(glob(["protoc-gen-doc*"]))""",
},
# Go
# When updating, update go version for go_register_toolchains in WORKSPACE and go.go
"io_bazel_rules_go": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_go",
"ref": "v0.29.0",
"sha256": "7a89df64b765721be9bb73b3aa52c15209af3b6628cae4344b9516e8b21c2b8b",
},
"bazel_gazelle": {
"type": "github",
"org": "bazelbuild",
"repo": "bazel-gazelle",
"ref": "v0.24.0",
"sha256": "fc4c319b9e32ea44be8a5e1a46746d93e8b6a8b104baf7cb6a344a0a08386fed",
},
# grpc-gateway
"grpc_ecosystem_grpc_gateway": {
"type": "github",
"org": "grpc-ecosystem",
"repo": "grpc-gateway",
"ref": "v2.6.0",
"sha256": "4a1a50fcb2dafb0134db0be669d3d8d8dd0d6933f88a3e580fee2727ccf5ebc2",
},
# Java
"io_grpc_grpc_java": {
"type": "github",
"org": "grpc",
"repo": "grpc-java",
"ref": "v{}".format(GRPC_VERSION),
"sha256": "1289abd750bee2ebc80679435301e046d587bdf0c0802a76907119725d18eef0",
},
"rules_jvm_external": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_jvm_external",
"ref": "4.2",
"sha256": "2cd77de091e5376afaf9cc391c15f093ebd0105192373b334f0a855d89092ad5",
},
# JavaScript
# Use .tar.gz in release assets, not the Github generated source .tar.gz
"build_bazel_rules_nodejs": {
"type": "http",
"urls": ["https://github.com/bazelbuild/rules_nodejs/releases/download/4.4.6/rules_nodejs-4.4.6.tar.gz"],
"sha256": "cfc289523cf1594598215901154a6c2515e8bf3671fd708264a6f6aefe02bf39",
},
"grpc_web_plugin_darwin": {
"type": "http_file", # When updating, also update in package.json and vice-versa
"urls": ["https://github.com/grpc/grpc-web/releases/download/1.3.0/protoc-gen-grpc-web-1.3.0-darwin-x86_64"],
"sha256": "4b8962af0e26047271858c731589825f92d4973d4a47ed9a0c544dd24c292b15",
"executable": True,
},
"grpc_web_plugin_linux": {
"type": "http_file", # When updating, also update in package.json and vice-versa
"urls": ["https://github.com/grpc/grpc-web/releases/download/1.3.0/protoc-gen-grpc-web-1.3.0-linux-x86_64"],
"sha256": "ab26bdf1326236df9b35941608ca309e949233b2c442e3cd973a341d3331cf90",
"executable": True,
},
"grpc_web_plugin_windows": {
"type": "http_file", # When updating, also update in package.json and vice-versa
"urls": ["https://github.com/grpc/grpc-web/releases/download/1.3.0/protoc-gen-grpc-web-1.3.0-windows-x86_64.exe"],
"sha256": "899a087d7d5592fcb547b29aa986e86a8989c9e7f1500bc0f3b5f45b09a87c85",
"executable": True,
},
# Python
"subpar": {
"type": "github",
"org": "google",
"repo": "subpar",
"ref": "2.0.0",
"sha256": "b80297a1b8d38027a86836dbadc22f55dc3ecad56728175381aa6330705ac10f",
},
"six": {
"type": "http",
"urls": ["https://pypi.python.org/packages/source/s/six/six-1.16.0.tar.gz"],
"sha256": "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
"strip_prefix": "six-1.16.0",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.six",
},
# Ruby
"bazelruby_rules_ruby": {
"type": "github",
"org": "bazelruby",
"repo": "rules_ruby",
"ref": "v0.6.0",
"sha256": "5035393cb5043d49ca9de78acb9e8c8622a193f6463a57ad02383a622b6dc663",
},
# Rust
"rules_rust": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_rust",
"ref": "87b74a1d72612e90441fd75a364a6e61bcf80ca6",
"sha256": "43d2ce2da5ad4def3a48bd5b7f0a732e0f116887d9487c45eefceee31ef8d054",
},
# Scala
"io_bazel_rules_scala": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_scala",
"ref": "17791a18aa966cdf2babb004822e6c70a7decc76",
"sha256": "6899cddf7407d09266dddcf6faf9f2a8b414de5e2b35ef8b294418f559172f28",
},
# Swift
"com_github_grpc_grpc_swift": {
"type": "github",
"org": "grpc",
"repo": "grpc-swift",
"ref": "1.6.0",
"sha256": "f08729b656dd1e7c1e273f2362a907d3ce6721348a4cd347574cd1ef28a95983",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_grpc_grpc_swift",
},
"com_github_apple_swift_log": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-log",
"ref": "1.4.2",
"sha256": "de51662b35f47764b6e12e9f1d43e7de28f6dd64f05bc30a318cf978cf3bc473",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_log",
},
"com_github_apple_swift_nio": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio",
"ref": "2.32.3",
"sha256": "d6b41f67b907b458a4c1c86d3c8549835242cf40c49616b8d7531db002336835",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio",
},
"com_github_apple_swift_nio_extras": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio-extras",
"ref": "1.10.2",
"sha256": "2f37596dcf26532b867aee3dbd8c5354108a076174751f4e6a72a0b6506df05e",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio_extras",
},
"com_github_apple_swift_nio_http2": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio-http2",
"ref": "1.18.3",
"sha256": "497882ef4fd6980bd741a7c91783592bbee3bfac15278434cc17753c56d5dc63",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio_http2",
},
"com_github_apple_swift_nio_ssl": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio-ssl",
"ref": "2.15.1",
"sha256": "eefce9af7904b2e627219b9c78356d0bd3d659f06cdf2b45d931d832b21dcd46",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio_ssl",
},
"com_github_apple_swift_nio_transport_services": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio-transport-services",
"ref": "1.11.3",
"sha256": "1ac6867fb9251a3d4da2834b080c1cf90cf0fbdeccd66ef39b7a315e5d5612b6",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio_transport_services",
},
}
def _generic_dependency(name, **kwargs):
if name not in VERSIONS:
fail("Name {} not in VERSIONS".format(name))
dep = VERSIONS[name]
existing_rules = native.existing_rules()
if dep["type"] == "github":
# Resolve ref and sha256
ref = kwargs.get(name + "_ref", dep["ref"])
sha256 = kwargs.get(name + "_sha256", dep["sha256"])
# Fix GitHub naming normalisation in path
stripped_ref = ref
if stripped_ref.startswith("v"):
stripped_ref = ref[1:]
stripped_ref = stripped_ref.replace("@", "-")
# Generate URLs
urls = [
"https://github.com/{}/{}/archive/{}.tar.gz".format(dep["org"], dep["repo"], ref),
]
# Check for existing rule
if name not in existing_rules:
http_archive(
name = name,
strip_prefix = dep["repo"] + "-" + stripped_ref,
urls = urls,
sha256 = sha256,
**{k: v for k, v in dep.items() if k in ["build_file", "patch_cmds"]}
)
elif existing_rules[name]["kind"] != "http_archive":
if ENABLE_VERSION_NAGS:
print("Dependency '{}' has already been declared with a different rule kind. Found {}, expected http_archive".format(
name,
existing_rules[name]["kind"],
)) # buildifier: disable=print
elif existing_rules[name]["urls"] != tuple(urls):
if ENABLE_VERSION_NAGS:
print("Dependency '{}' has already been declared with a different version. Found urls={}, expected {}".format(
name,
existing_rules[name]["urls"],
tuple(urls),
)) # buildifier: disable=print
elif dep["type"] == "http":
if name not in existing_rules:
args = {k: v for k, v in dep.items() if k in ["urls", "sha256", "strip_prefix", "build_file", "build_file_content"]}
http_archive(name = name, **args)
elif existing_rules[name]["kind"] != "http_archive":
if ENABLE_VERSION_NAGS:
print("Dependency '{}' has already been declared with a different rule kind. Found {}, expected http_archive".format(
name,
existing_rules[name]["kind"],
)) # buildifier: disable=print
elif existing_rules[name]["urls"] != tuple(dep["urls"]):
if ENABLE_VERSION_NAGS:
print("Dependency '{}' has already been declared with a different version. Found urls={}, expected {}".format(
name,
existing_rules[name]["urls"],
tuple(dep["urls"]),
)) # buildifier: disable=print
elif dep["type"] == "http_file":
if name not in existing_rules:
args = {k: v for k, v in dep.items() if k in ["urls", "sha256", "executable"]}
http_file(name = name, **args)
elif existing_rules[name]["kind"] != "http_file":
if ENABLE_VERSION_NAGS:
print("Dependency '{}' has already been declared with a different rule kind. Found {}, expected http_file".format(
name,
existing_rules[name]["kind"],
)) # buildifier: disable=print
elif existing_rules[name]["urls"] != tuple(dep["urls"]):
if ENABLE_VERSION_NAGS:
print("Dependency '{}' has already been declared with a different version. Found urls={}, expected {}".format(
name,
existing_rules[name]["urls"],
tuple(dep["urls"]),
)) # buildifier: disable=print
elif dep["type"] == "local":
if name not in existing_rules:
args = {k: v for k, v in dep.items() if k in ["path"]}
native.local_repository(name = name, **args)
elif existing_rules[name]["kind"] != "local_repository":
if ENABLE_VERSION_NAGS:
print("Dependency '{}' has already been declared with a different rule kind. Found {}, expected local_repository".format(
name,
existing_rules[name]["kind"],
)) # buildifier: disable=print
elif existing_rules[name]["path"] != dep["path"]:
if ENABLE_VERSION_NAGS:
print("Dependency '{}' has already been declared with a different version. Found path={}, expected {}".format(
name,
existing_rules[name]["path"],
dep["urls"],
)) # buildifier: disable=print
else:
fail("Unknown dependency type {}".format(dep))
if "binds" in dep:
for bind in dep["binds"]:
if bind["name"] not in native.existing_rules():
native.bind(
name = bind["name"],
actual = bind["actual"],
)
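# Example: any dependency can be pinned to a different version by passing
# "<name>_ref" / "<name>_sha256" keyword arguments through the repo macros
# (the sha256 below is a placeholder for the matching archive):
#
#   rules_proto_grpc_repos(
#       rules_proto_ref = "5.0.0",
#       rules_proto_sha256 = "<sha256 of the 5.0.0 archive>",
#   )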
#
# Toolchains
#
def rules_proto_grpc_toolchains(name = ""):
"""Register the rules_proto_grpc toolchains."""
check_bazel_minimum_version(MINIMUM_BAZEL_VERSION)
native.register_toolchains(str(Label("//protobuf:protoc_toolchain")))
#
# Core
#
def rules_proto_grpc_repos(**kwargs):
"""Load the rules_proto_grpc common dependencies.""" # buildifier: disable=function-docstring-args
check_bazel_minimum_version(MINIMUM_BAZEL_VERSION)
rules_proto(**kwargs)
rules_python(**kwargs)
build_bazel_rules_swift(**kwargs)
bazel_skylib(**kwargs)
six(**kwargs)
com_google_protobuf(**kwargs)
com_github_grpc_grpc(**kwargs)
external_zlib(**kwargs)
def rules_proto(**kwargs):
_generic_dependency("rules_proto", **kwargs)
def rules_python(**kwargs):
_generic_dependency("rules_python", **kwargs)
def build_bazel_rules_swift(**kwargs):
_generic_dependency("build_bazel_rules_swift", **kwargs)
def com_google_protobuf(**kwargs):
_generic_dependency("com_google_protobuf", **kwargs)
def com_github_grpc_grpc(**kwargs):
_generic_dependency("com_github_grpc_grpc", **kwargs)
def external_zlib(**kwargs):
_generic_dependency("zlib", **kwargs)
#
# Misc
#
def bazel_skylib(**kwargs):
_generic_dependency("bazel_skylib", **kwargs)
#
# Android
#
def build_bazel_rules_android(**kwargs):
_generic_dependency("build_bazel_rules_android", **kwargs)
#
# Buf
#
def protoc_gen_buf_breaking_darwin_x86_64(**kwargs):
_generic_dependency("protoc_gen_buf_breaking_darwin_x86_64", **kwargs)
def protoc_gen_buf_breaking_linux_x86_64(**kwargs):
_generic_dependency("protoc_gen_buf_breaking_linux_x86_64", **kwargs)
def protoc_gen_buf_lint_darwin_x86_64(**kwargs):
_generic_dependency("protoc_gen_buf_lint_darwin_x86_64", **kwargs)
def protoc_gen_buf_lint_linux_x86_64(**kwargs):
_generic_dependency("protoc_gen_buf_lint_linux_x86_64", **kwargs)
#
# C
#
def upb(**kwargs):
_generic_dependency("upb", **kwargs)
#
# C#
#
def io_bazel_rules_dotnet(**kwargs):
_generic_dependency("io_bazel_rules_dotnet", **kwargs)
#
# D
#
def io_bazel_rules_d(**kwargs):
_generic_dependency("io_bazel_rules_d", **kwargs)
def com_github_dcarp_protobuf_d(**kwargs):
_generic_dependency("com_github_dcarp_protobuf_d", **kwargs)
#
# Doc
#
def protoc_gen_doc_darwin_x86_64(**kwargs):
_generic_dependency("protoc_gen_doc_darwin_x86_64", **kwargs)
def protoc_gen_doc_linux_x86_64(**kwargs):
_generic_dependency("protoc_gen_doc_linux_x86_64", **kwargs)
def protoc_gen_doc_windows_x86_64(**kwargs):
_generic_dependency("protoc_gen_doc_windows_x86_64", **kwargs)
#
# Go
#
def io_bazel_rules_go(**kwargs):
_generic_dependency("io_bazel_rules_go", **kwargs)
def bazel_gazelle(**kwargs):
_generic_dependency("bazel_gazelle", **kwargs)
#
# gRPC gateway
#
def grpc_ecosystem_grpc_gateway(**kwargs):
_generic_dependency("grpc_ecosystem_grpc_gateway", **kwargs)
#
# Java
#
def io_grpc_grpc_java(**kwargs):
_generic_dependency("io_grpc_grpc_java", **kwargs)
def rules_jvm_external(**kwargs):
_generic_dependency("rules_jvm_external", **kwargs)
#
# JavaScript
#
def build_bazel_rules_nodejs(**kwargs):
_generic_dependency("build_bazel_rules_nodejs", **kwargs)
def grpc_web_plugin_darwin(**kwargs):
_generic_dependency("grpc_web_plugin_darwin", **kwargs)
def grpc_web_plugin_linux(**kwargs):
_generic_dependency("grpc_web_plugin_linux", **kwargs)
def grpc_web_plugin_windows(**kwargs):
_generic_dependency("grpc_web_plugin_windows", **kwargs)
#
# Python
#
def subpar(**kwargs):
_generic_dependency("subpar", **kwargs)
def six(**kwargs):
_generic_dependency("six", **kwargs)
#
# Ruby
#
def bazelruby_rules_ruby(**kwargs):
_generic_dependency("bazelruby_rules_ruby", **kwargs)
#
# Rust
#
def rules_rust(**kwargs):
_generic_dependency("rules_rust", **kwargs)
#
# Scala
#
def io_bazel_rules_scala(**kwargs):
_generic_dependency("io_bazel_rules_scala", **kwargs)
#
# Swift
#
def com_github_grpc_grpc_swift(**kwargs):
_generic_dependency("com_github_grpc_grpc_swift", **kwargs)
def com_github_apple_swift_log(**kwargs):
_generic_dependency("com_github_apple_swift_log", **kwargs)
def com_github_apple_swift_nio(**kwargs):
_generic_dependency("com_github_apple_swift_nio", **kwargs)
def com_github_apple_swift_nio_extras(**kwargs):
_generic_dependency("com_github_apple_swift_nio_extras", **kwargs)
def com_github_apple_swift_nio_http2(**kwargs):
_generic_dependency("com_github_apple_swift_nio_http2", **kwargs)
def com_github_apple_swift_nio_ssl(**kwargs):
_generic_dependency("com_github_apple_swift_nio_ssl", **kwargs)
def com_github_apple_swift_nio_transport_services(**kwargs):
_generic_dependency("com_github_apple_swift_nio_transport_services", **kwargs)
#!/usr/bin/env python
# --- adventuredocs/adocs.py (repo: hypatia-software-org/adventuredocs, license: MIT) ---
# -*- coding: utf-8 -*-
"""AdventureDocs
Choose Your Own Adventure style software
documentation from markdown.
Use markdown files to represent a section of instructions,
and options to skip to a section, or just go to the next
section.
Load a directory of markdown files, which also includes a
file named ORDER which specifies the default order of the
markdown files. The ORDER enables us to have a "next
section" link per section (while you can still present
options to jump to other sections).
Usage:
adocs <source> [<destination>]
"""
import os
import glob
import docopt
import markdown
import pkgutil
import datetime
from adventuredocs import plugins
from bs4 import BeautifulSoup
from jinja2 import Environment, FileSystemLoader
class Section(object):
""""
Attributes:
index (int): --
name (str): --
path (str): --
soup (BeautifulSoup): --
"""
def __init__(self, index, name, path, soup, title, unit, type):
self.index = index
self.name = name
self.path = path
self.soup = soup
self.title = title
self.unit = unit
self.type = type
@property
def contents(self):
return self.soup.prettify()
@classmethod
def from_file(cls, section_index, path_to_markdown_file):
"""Create a section object by reading
in a markdown file from path!
Arguments:
section_index (int):
path_to_markdown_file (str): --
Returns:
Section
"""
with open(path_to_markdown_file) as f:
# markdown module strictly only
# supports UTF-8
file_contents = unicode(f.read(), 'utf-8')
html = markdown.markdown(file_contents)
section_soup = BeautifulSoup(html, "html.parser")
# get the file name without the extension
__, section_file_name = os.path.split(path_to_markdown_file)
section_name, __ = os.path.splitext(section_file_name)
section_title = file_contents.split('\n', 1)[0]
section_unit = section_title
section_type = 'normal'
if 'hint' in section_name:
section_type = 'hint'
if '-' in section_title:
section_unit = section_title.split('-', 1)[0]
section_title = section_title.split('-', 1)[1]
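            # e.g. a first line of "Unit 1 - Setup" gives unit "Unit 1 " and
            # title " Setup" (surrounding whitespace survives the split on '-').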
return cls(index=section_index,
path=path_to_markdown_file,
soup=section_soup,
name=section_name,
title=section_title,
unit=section_unit,
type=section_type,
)
class AdventureDoc(object):
"""A directory of markdown files, with an ORDER file.
"""
SECTION_CHOICE_KEYWORD = "NEXT_SECTION:"
TEMPLATE = pkgutil.get_data("adventuredocs", "layout.html")
def __init__(self, sections):
self.sections = sections
def build(self):
for section_soup in self.sections:
section_soup = self.use_plugins(section_soup)
# Use collected sections with jinja
return (Environment().from_string(self.TEMPLATE)
.render(title=u'AdventureDocs',
headercomment=u"NOTICE! This file was automatically generated by AdventureDocs on {:%Y-%m-%d %H:%M:%S}. Changes to this file may be overwritten by adocs, please use adocs to manage this file!".format(datetime.datetime.now()),
sections=self.sections)).encode('UTF-8')
@staticmethod
def get_sections(directory):
"""Collect the files specified in the
ORDER file, returning a list of
dictionary representations of each file.
Returns:
list[Section]: list of sections which
"""
with open(os.path.join(directory, "ORDER")) as f:
order_file_lines = f.readlines()
ordered_section_file_paths = []
for line_from_order_file in order_file_lines:
section_path = os.path.join(directory, line_from_order_file)
ordered_section_file_paths.append(section_path.strip())
sections = []
for i, section_file_path in enumerate(ordered_section_file_paths):
sections.append(Section.from_file(i, section_file_path))
return sections
# NOTE: this currently actually changes the section's
# beautiful soup but should make copy instead!
def use_plugins(self, section):
for _, module_name, _ in pkgutil.iter_modules(plugins.__path__):
module_name = "adventuredocs.plugins." + module_name
            plugin = __import__(module_name, fromlist=["change_soup"])
            change_soup_function = getattr(plugin, "change_soup")
            change_soup_function(self, section)
return section
@classmethod
def from_directory(cls, directory):
ordered_sections = cls.get_sections(directory)
return AdventureDoc(ordered_sections)
def main():
arguments = docopt.docopt(__doc__)
source_directory = arguments["<source>"]
adoc = AdventureDoc.from_directory(source_directory)
destination = arguments["<destination>"] or "adocs-output.html"
with open(destination, 'w') as f:
f.write(adoc.build())
# --- scripts/detectron2_inference.py (repo: openem-team/openem, license: MIT) ---
import argparse
import json
import logging
import multiprocessing as mp
import os
import time
from typing import List
from detectron2.structures import BoxMode
from detectron2 import model_zoo
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode
from detectron2.utils.visualizer import Visualizer
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor, DefaultTrainer
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from detectron2.utils.visualizer import ColorMode
from detectron2.modeling import build_model
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
import numpy as np
import pandas as pd
import torch
import torchvision
from utils.frame_reader import FrameReaderMgrBase
from utils.file_downloader import FileDownloader
import tator
log_filename = "detectron2_inference.log"
logging.basicConfig(
handlers=[logging.FileHandler(log_filename, mode="w"), logging.StreamHandler()],
format="%(asctime)s %(levelname)s:%(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
class FrameReaderMgr(FrameReaderMgrBase):
def __init__(
self,
*,
augmentation: T.Augmentation,
**kwargs,
):
super().__init__(**kwargs)
self._augmentation = augmentation
def _format_img(self, img, frame_num):
h, w = img.shape[:2]
img = self._augmentation.get_transform(img).apply_image(img)
img = torch.as_tensor(img.astype("float32").transpose(2, 0, 1))
return {"image": img, "height": h, "width": w, "frame_num": frame_num}
class LocalizationGenerator:
def __init__(self, model_nms, nms_threshold, localization_type):
self._model_nms = model_nms
self._nms_threshold = nms_threshold
self._localization_type = localization_type
def __call__(self, element, frame, media_id):
"""
Yields `LocalizationSpec`s from the model detections in a video frame.
"""
element["instances"] = element["instances"][
self._model_nms(
element["instances"].pred_boxes.tensor,
element["instances"].scores,
self._nms_threshold,
)
.to("cpu")
.tolist()
]
instance_dict = element["instances"].get_fields()
pred_boxes = instance_dict["pred_boxes"]
scores = instance_dict["scores"]
pred_classes = instance_dict["pred_classes"]
# TODO check attribute names and determine if they should be dynamic
# yield LocalizationSpec
for box, score, cls in zip(pred_boxes, scores, pred_classes):
x1, y1, x2, y2 = box.tolist()
yield {
"type": self._localization_type,
"media_id": media_id,
"frame": frame,
"x": x1,
"y": y1,
"width": x2 - x1,
"height": y2 - y1,
"Species": cls,
"Score": score,
}
def parse_args():
parser = argparse.ArgumentParser(description="Testing script for testing video data.")
parser.add_argument("video_path", help="Path to video file")
parser.add_argument(
"--inference-config",
help="Path to inference config file.",
# TODO remove default here
default="/mnt/md0/Projects/Fathomnet/Training_Files/2021-06-29-Detectron/detectron_files/fathomnet_config.yaml",
)
parser.add_argument(
"--builtin-model-config",
help="Path to built-in model config file.",
# TODO remove default here
default="COCO-Detection/retinanet_R_50_FPN_3x.yaml",
)
parser.add_argument(
"--model-weights",
help="Path to the trained model weights",
# TODO remove default here
default="/home/hugh/mycode/detectron/out/model_0076543.pth",
)
parser.add_argument(
"--gpu", help="Id of the GPU to use (as reported by nvidia-smi).", default=0, type=int
)
parser.add_argument(
"--score-threshold", help="Threshold to filter detections", default=0.7, type=float
)
parser.add_argument(
"--batch-size", help="batch size for frames to process at a time", default=4, type=int
)
parser.add_argument(
"--nms-threshold", help="threshold for NMS routine to suppress", default=0.55, type=float
)
parser.add_argument("--media-ids", help="The ids of the media to process", nargs="+", type=int)
parser.add_argument(
"--localization-type", help="The id of the localization type to generate", type=int
)
parser.add_argument("--host", type=str, help="Tator host to use")
parser.add_argument("--token", type=str, help="Token to use for tator.")
parser.add_argument(
"--work-dir", type=str, help="The name of the directory to use for local storage"
)
return parser.parse_args()
def main(
*,
inference_config: str,
builtin_model_config: str,
model_weights: str,
video_path: str,
batch_size: int,
nms_threshold: float,
score_threshold: float,
gpu: int,
media_ids: List[int],
localization_type: int,
host: str,
token: str,
work_dir: str,
):
# Download associated media
api = tator.get_api(host=host, token=token)
download = FileDownloader(work_dir, api)
media_paths = download(media_ids)
# Instantiate the model
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(builtin_model_config))
cfg.merge_from_file(inference_config)
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = score_threshold
cfg.MODEL.WEIGHTS = model_weights
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model = build_model(cfg) # returns a torch.nn.Module
checkpointer = DetectionCheckpointer(model)
checkpointer.load(cfg.MODEL.WEIGHTS)
model.eval()
# Separate NMS layer
model_nms = torchvision.ops.nms
aug = T.ResizeShortestEdge(
short_edge_length=[cfg.INPUT.MIN_SIZE_TEST],
max_size=cfg.INPUT.MAX_SIZE_TEST,
sample_style="choice",
)
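    # This mirrors DefaultPredictor's preprocessing: resize the short edge to
    # MIN_SIZE_TEST while capping the long edge at MAX_SIZE_TEST.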
localization_generator = LocalizationGenerator(model_nms, nms_threshold, localization_type)
frame_reader = FrameReaderMgr(augmentation=aug)
results = []
for media_id, media_path in zip(media_ids, media_paths):
with frame_reader(media_path):
logger.info(f"Generating detections for {media_id}")
st = time.time()
while True:
try:
batch = frame_reader.get_frames(batch_size)
                except Exception:  # assumed: the frame reader raises once frames run out
break
else:
frames = [ele["frame_num"] for ele in batch]
with torch.no_grad():
model_outputs = model(batch)
results.extend(
loc
for frame_detections, frame in zip(model_outputs, frames)
for loc in localization_generator(frame_detections, frame, media_id)
)
    if results:
        created_ids = []
        # NOTE: `project` was previously undefined here; assume all media share one
        # project and look it up from the first media object.
        project = api.get_media(media_ids[0]).project
        for response in tator.util.chunked_create(
            api.create_localization_list, project, localization_spec=results
        ):
created_ids += response.id
n_requested = len(results)
n_created = len(created_ids)
if n_created == n_requested:
logger.info(f"Created {n_created} localizations for {media_id}!")
else:
logger.warning(
f"Requested the creation of {n_requested} localizations, but only {n_created} were created for {media_id}"
)
else:
logger.info(f"No detections for media {media_id}")
if __name__ == "__main__":
# parse arguments
args = parse_args()
main(**vars(args))
logger.info("Finished")
# --- redbot/type.py (repo: kinow/redbot, license: MIT) ---
from typing import Any, Callable, Dict, List, Tuple
try:
from typing_extensions import Protocol
except ImportError:
from typing import Protocol # type: ignore
StrHeaderListType = List[Tuple[str, str]]
RawHeaderListType = List[Tuple[bytes, bytes]]
HeaderDictType = Dict[str, Any]
ParamDictType = Dict[str, str]
AddNoteMethodType = Callable[..., None]
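# HttpResponseExchange below is a structural type (PEP 544 Protocol): any object
# exposing matching response_start/response_body/response_done methods satisfies it
# without having to inherit from it.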
class HttpResponseExchange(Protocol):
def response_start(
self, status_code: bytes, status_phrase: bytes, res_hdrs: RawHeaderListType
) -> None:
...
def response_body(self, chunk: bytes) -> None:
...
def response_done(self, trailers: RawHeaderListType) -> None:
...
# --- beatsaver/models/users.py (repo: Sirspam/BeatSaver.py, license: MIT) ---
from dataclasses import dataclass
from typing import Union
NoneType = type(None)
@dataclass
class UserDiffStats:
def __init__(self, data):
self.easy=data["easy"]
self.expert=data["expert"]
self.expertPlus=data["expertPlus"]
self.hard=data["hard"]
self.normal=data["normal"]
self.total=data["total"]
easy: int
expert: int
expertPlus: int
hard: int
normal: int
total: int
@dataclass
class UserStats:
def __init__(self, data):
self.totalUpvotes=data["totalUpvotes"]
self.totalDownvotes=data["totalDownvotes"]
self.totalMaps=data["totalMaps"]
self.rankedMaps=data["rankedMaps"]
self.avgBpm=data["avgBpm"]
self.avgDuration=data["avgDuration"]
self.avgScore=data["avgScore"]
self.firstUpload=data["firstUpload"]
self.lastUpload=data["lastUpload"]
self.diffStats=UserDiffStats(data["diffStats"])
totalUpvotes: int
totalDownvotes: int
totalMaps: int
rankedMaps: int
avgBpm: float
avgDuration: float
avgScore: float
firstUpload: str
lastUpload: str
diffStats: UserDiffStats
@dataclass
class UserDetail:
def __init__(self, data):
self.id=data["id"]
self.name=data["name"]
self.hash=None
if "hash" in data: # Hashes are a legacy field for old beatsaver accounts
self.hash=data["hash"]
self.avatar=data["avatar"]
self.stats=None
if "stats" in data:
self.stats=UserStats(data["stats"])
id: str
name: str
hash: Union[str, NoneType]
avatar: str
    stats: UserStats

# --- models/swin_transformer.py (repo: rosinality/vision-transformers-pytorch, license: MIT) ---
import math
from typing import Sequence, Tuple
import torch
from torch import nn
from torch.nn import functional as F
from tensorfn.config import config_model
from pydantic import StrictInt, StrictFloat
from .layer import DropPath, tuple2, PositionwiseFeedForward
LayerNorm = lambda x: nn.LayerNorm(x, eps=1e-6)
def patchify(input, size):
batch, height, width, dim = input.shape
return (
input.view(batch, height // size, size, width // size, size, dim)
.permute(0, 1, 3, 2, 4, 5)
.reshape(batch, height // size, width // size, -1)
)
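# patchify reshapes (B, H, W, C) -> (B, H/size, W/size, size*size*C): every output
# position holds one flattened, non-overlapping size x size patch.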
class MultiHeadedLocalAttention(nn.Module):
def __init__(
self, dim, n_head, dim_head, input_size, window_size, shift, dropout=0
):
super().__init__()
self.dim_head = dim_head
self.n_head = n_head
self.weight = nn.Linear(dim, n_head * dim_head * 3, bias=True)
self.linear = nn.Linear(n_head * dim_head, dim)
self.input_size = input_size
self.window_size = window_size
self.dropout = dropout
self.shift = shift
y_pos, x_pos, local_mask = self.make_mask_pos(input_size, window_size, shift)
pos_size = y_pos.shape[0]
pos = y_pos * (2 * window_size - 1) + x_pos
self.register_buffer("pos", pos[0].reshape(window_size ** 2, window_size ** 2))
self.rel_pos = nn.Embedding((2 * window_size - 1) ** 2, n_head)
self.rel_pos.weight.detach().zero_()
if shift:
self.register_buffer(
"local_mask",
~local_mask.reshape(pos_size, window_size ** 2, window_size ** 2),
)
def make_mask_pos(self, input_size, window_size, shift):
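        # For every window, compute the pairwise (dy, dx) offsets between tokens; these
        # index a learned (2 * window_size - 1)^2 relative-position bias table. For
        # shifted windows, also build the mask that blocks attention between tokens
        # that were only brought together by the cyclic roll.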
h, w = input_size
h //= window_size
w //= window_size
yy, xx = torch.meshgrid(
torch.arange(window_size * h), torch.arange(window_size * w)
)
if shift:
roll = -math.floor(window_size / 2)
yy = torch.roll(yy, (roll, roll), (0, 1))
xx = torch.roll(xx, (roll, roll), (0, 1))
y_c = (
yy.view(h, window_size, w, window_size)
.permute(0, 2, 1, 3)
.reshape(-1, window_size, window_size)
)
x_c = (
xx.view(h, window_size, w, window_size)
.permute(0, 2, 1, 3)
.reshape(-1, window_size, window_size)
)
x_diff = (
x_c.transpose(1, 2).unsqueeze(1) - x_c.transpose(1, 2).unsqueeze(2)
).transpose(2, 3)
x_flag = x_diff.abs() < window_size
y_diff = y_c.unsqueeze(1) - y_c.unsqueeze(2)
y_flag = y_diff.abs() < window_size
x_diff = x_diff.unsqueeze(1)
y_diff = y_diff.unsqueeze(2)
if shift:
local_mask = x_flag.unsqueeze(1) & y_flag.unsqueeze(2)
x_diff = x_diff * local_mask
y_diff = y_diff * local_mask
else:
local_mask = None
x_diff = x_diff.expand(-1, window_size, -1, -1, -1)
y_diff = y_diff.expand(-1, -1, window_size, -1, -1)
x_pos = x_diff + (window_size - 1)
y_pos = y_diff + (window_size - 1)
return y_pos, x_pos, local_mask
def forward(self, input):
batch, height, width, dim = input.shape
h_stride = height // self.window_size
w_stride = width // self.window_size
window = self.window_size
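        # Shifted-window attention: cyclically roll the feature map by half a window so
        # the fixed window partition below mixes tokens across window borders; the roll
        # is undone after attention.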
if self.shift:
roll = -math.floor(window / 2)
input = torch.roll(input, (roll, roll), (1, 2))
def reshape(input):
return (
input.reshape(
batch,
h_stride,
window,
w_stride,
window,
self.n_head,
self.dim_head,
)
.permute(0, 1, 3, 5, 2, 4, 6)
.reshape(batch, -1, self.n_head, window * window, self.dim_head)
)
query, key, value = self.weight(input).chunk(3, dim=-1) # B, S, H, W^2, D
query = reshape(query)
key = reshape(key).transpose(-2, -1)
value = reshape(value)
score = query @ key / math.sqrt(self.dim_head) # B, S, H, W^2, W^2
rel_pos = self.rel_pos(self.pos) # W^2, W^2, H
score = score + rel_pos.permute(2, 0, 1).unsqueeze(0).unsqueeze(1)
if self.shift:
score = score.masked_fill(
self.local_mask.unsqueeze(0).unsqueeze(2), float("-inf")
)
attn = F.softmax(score, -1)
attn = F.dropout(attn, self.dropout, training=self.training)
out = attn @ value # B, S, H, W^2, D
out = (
out.view(
batch, h_stride, w_stride, self.n_head, window, window, self.dim_head
)
.permute(0, 1, 4, 2, 5, 3, 6)
.reshape(batch, height, width, self.n_head * self.dim_head)
)
out = self.linear(out)
if self.shift:
out = torch.roll(out, (-roll, -roll), (1, 2))
return out
class TransformerLayer(nn.Module):
def __init__(
self,
dim,
n_head,
dim_head,
dim_ff,
input_size,
window_size,
shift,
activation=nn.SiLU,
drop_ff=0,
drop_attn=0,
drop_path=0,
):
super().__init__()
self.norm_attn = LayerNorm(dim)
self.attn = MultiHeadedLocalAttention(
dim, n_head, dim_head, input_size, window_size, shift, drop_attn
)
self.drop_path = DropPath(drop_path)
self.norm_ff = LayerNorm(dim)
self.ff = PositionwiseFeedForward(
dim, dim_ff, activation=activation, dropout=drop_ff
)
def set_drop_path(self, p):
self.drop_path.p = p
def forward(self, input):
out = input + self.drop_path(self.attn(self.norm_attn(input)))
out = out + self.drop_path(self.ff(self.norm_ff(out)))
return out
class PatchEmbedding(nn.Module):
def __init__(self, in_dim, out_dim, window_size):
super().__init__()
self.window_size = window_size
self.linear = nn.Linear(in_dim * window_size * window_size, out_dim)
self.norm = nn.LayerNorm(out_dim)
def forward(self, input):
out = patchify(input, self.window_size)
out = self.linear(out)
out = self.norm(out)
return out
class PatchMerge(nn.Module):
def __init__(self, in_dim, out_dim, window_size):
super().__init__()
self.window_size = window_size
self.norm = nn.LayerNorm(in_dim * window_size * window_size)
self.linear = nn.Linear(in_dim * window_size * window_size, out_dim, bias=False)
def forward(self, input):
out = patchify(input, self.window_size)
out = self.norm(out)
out = self.linear(out)
return out
def reduce_size(size, reduction):
return (size[0] // reduction, size[1] // reduction)
@config_model(name="swin_transformer", namespace="model", use_type=True)
class SwinTransformer(nn.Module):
def __init__(
self,
image_size: Tuple[StrictInt, StrictInt],
n_class: StrictInt,
depths: Tuple[StrictInt, StrictInt, StrictInt, StrictInt],
dims: Tuple[StrictInt, StrictInt, StrictInt, StrictInt],
dim_head: StrictInt,
n_heads: Tuple[StrictInt, StrictInt, StrictInt, StrictInt],
dim_ffs: Tuple[StrictInt, StrictInt, StrictInt, StrictInt],
window_size: StrictInt,
drop_ff: StrictFloat = 0.0,
drop_attn: StrictFloat = 0.0,
drop_path: StrictFloat = 0.0,
):
super().__init__()
self.depths = depths
def make_block(i, in_dim, input_size, reduction):
return self.make_block(
depths[i],
in_dim,
dims[i],
n_heads[i],
dim_head,
dim_ffs[i],
input_size,
window_size,
reduction,
drop_ff,
drop_attn,
)
self.patch_embedding = PatchEmbedding(3, dims[0], 4)
self.block1 = make_block(0, 3, reduce_size(image_size, 4), 1)
self.block2 = make_block(1, dims[0], reduce_size(image_size, 4), 2)
self.block3 = make_block(2, dims[1], reduce_size(image_size, 4 * 2), 2)
self.block4 = make_block(3, dims[2], reduce_size(image_size, 4 * 2 * 2), 2)
self.final_linear = nn.Sequential(nn.LayerNorm(dims[-1]))
linear = nn.Linear(dims[-1], n_class)
nn.init.normal_(linear.weight, std=0.02)
nn.init.zeros_(linear.bias)
self.classifier = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(1), linear)
self.apply(self.init_weights)
self.set_dropout(None, drop_path)
    def set_dropout(self, dropout, drop_path):
        # `dropout` is accepted for API compatibility but unused here.
        # Stochastic depth: drop-path probability grows linearly with block depth.
        n_blocks = sum(self.depths)
        dp_rate = [drop_path * float(i) / n_blocks for i in range(n_blocks)]
        i = 0
        for block in (*self.block1, *self.block2, *self.block3, *self.block4):
            try:
                block.set_drop_path(dp_rate[i])
                i += 1
            except AttributeError:
                # PatchMerge layers have no set_drop_path; skip them.
                continue
def init_weights(self, module):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=0.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
def make_block(
self,
depth,
in_dim,
dim,
n_head,
dim_head,
dim_ff,
input_size,
window_size,
reduction,
drop_ff,
drop_attn,
):
block = []
if reduction > 1:
block.append(PatchMerge(in_dim, dim, reduction))
for i in range(depth):
block.append(
TransformerLayer(
dim,
n_head,
dim_head,
dim_ff,
reduce_size(input_size, reduction),
window_size,
                    shift=i % 2 == 1,  # plain windows on even layers, shifted on odd, as in Swin
drop_ff=drop_ff,
drop_attn=drop_attn,
)
)
return nn.Sequential(*block)
def forward(self, input):
        out = self.patch_embedding(input.permute(0, 2, 3, 1))  # NCHW -> NHWC; attention runs channel-last
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.block4(out)
out = self.final_linear(out).permute(0, 3, 1, 2)
out = self.classifier(out)
return out
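# --- Usage sketch (not part of the original file). The hyperparameters below are
# --- Swin-T-like assumptions, and `config_model` is assumed to leave the class
# --- directly instantiable.
if __name__ == "__main__":
    model = SwinTransformer(
        image_size=(224, 224),
        n_class=1000,
        depths=(2, 2, 6, 2),
        dims=(96, 192, 384, 768),
        dim_head=32,
        n_heads=(3, 6, 12, 24),
        dim_ffs=(384, 768, 1536, 3072),
        window_size=7,
    )
    logits = model(torch.randn(2, 3, 224, 224))  # NCHW input
    print(logits.shape)  # expected: torch.Size([2, 1000])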
| 30.523684 | 89 | 0.52358 | 1,423 | 11,599 | 4.0513 | 0.119466 | 0.091934 | 0.038855 | 0.027754 | 0.335473 | 0.26418 | 0.192368 | 0.181266 | 0.170165 | 0.15889 | 0 | 0.023507 | 0.36917 | 11,599 | 379 | 90 | 30.604222 | 0.764384 | 0.005259 | 0 | 0.339934 | 0 | 0 | 0.003407 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062706 | false | 0 | 0.026403 | 0.009901 | 0.141914 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baae0bd8562b640b832596855eba8d94415bbcc3 | 2,676 | py | Python | mnist/mnist_reader.py | Amathlog/RLTorch | 51fbfe26644d0ad06a6a1e6654e42c4221b09b56 | [
"MIT"
] | 1 | 2019-03-11T10:36:23.000Z | 2019-03-11T10:36:23.000Z | mnist/mnist_reader.py | Amathlog/RLTorch | 51fbfe26644d0ad06a6a1e6654e42c4221b09b56 | [
"MIT"
] | null | null | null | mnist/mnist_reader.py | Amathlog/RLTorch | 51fbfe26644d0ad06a6a1e6654e42c4221b09b56 | [
"MIT"
] | null | null | null | import gzip
from pathlib import Path
import numpy as np
data_path = Path(__file__).parent / '..' / 'data'
train_images_file = data_path / 'train-images-idx3-ubyte.gz'
train_labels_file = data_path / 'train-labels-idx1-ubyte.gz'
test_images_file = data_path / 't10k-images-idx3-ubyte.gz'
test_labels_file = data_path / 't10k-labels-idx1-ubyte.gz'
def gz_to_npz(file):
return Path(str(file)[:-3] + '.npz')
train_images_file_array = gz_to_npz(train_images_file)
train_labels_file_array = gz_to_npz(train_labels_file)
test_images_file_array = gz_to_npz(test_images_file)
test_labels_file_array = gz_to_npz(test_labels_file)
def read_int(f, size=1):
    # use read() rather than read1(): read1() may return fewer bytes than requested
    return int.from_bytes(f.read(size), 'big', signed=False)
def read_images(file, magic_number):
    print('Read images', str(file))
    with gzip.open(str(file)) as f:
        # IDX image file: magic number (2051), then image/row/col counts, all big-endian
        assert magic_number == read_int(f, 4)
        n_images = read_int(f, 4)
        n_rows = read_int(f, 4)
        n_cols = read_int(f, 4)
        images = []
        for _ in range(n_images):
            raw = f.read(n_rows * n_cols)  # read() guarantees the full pixel block, unlike read1()
            data = np.frombuffer(raw, dtype=np.ubyte, count=n_rows * n_cols).reshape(n_rows, n_cols)
            images.append(data)
return images
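# Note: a vectorized alternative to the per-image loop above (an untested
# sketch, not the author's code) would read the whole block at once:
#   pixels = np.frombuffer(f.read(n_images * n_rows * n_cols), dtype=np.ubyte)
#   images = pixels.reshape(n_images, n_rows, n_cols)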
def read_labels(file, magic_number, n_images):
    print('Read labels', str(file))
    with gzip.open(str(file)) as f:
        # IDX label file: magic number (2049), then the item count, big-endian
        assert magic_number == read_int(f, 4)
        assert n_images == read_int(f, 4)
labels = []
for n in range(n_images):
labels.append(read_int(f))
return labels
def get_data():
    if not test_images_file_array.exists():  # one .npz checked as a proxy for all four caches
print('Pre-extracted data does not exist... Creating data....')
train_images = read_images(train_images_file, 2051)
train_labels = read_labels(train_labels_file, 2049, len(train_images))
test_images = read_images(test_images_file, 2051)
test_labels = read_labels(test_labels_file, 2049, len(test_images))
np.savez_compressed(str(train_images_file_array), data=train_images)
np.savez_compressed(str(train_labels_file_array), data=train_labels)
np.savez_compressed(str(test_images_file_array), data=test_images)
np.savez_compressed(str(test_labels_file_array), data=test_labels)
return np.load(train_images_file_array)['data'], \
np.load(train_labels_file_array)['data'], \
np.load(test_images_file_array)['data'], \
np.load(test_labels_file_array)['data']
if __name__ == "__main__":
import matplotlib.pyplot as plt
train_img, train_lbl, test_img, test_lbl = get_data()
plt.imshow(train_img[1], cmap='Greys')
plt.title('Number: ' + str(train_lbl[1]))
plt.show()
| 31.482353 | 123 | 0.689836 | 413 | 2,676 | 4.121065 | 0.198547 | 0.082256 | 0.037603 | 0.031727 | 0.356639 | 0.268508 | 0.06463 | 0.06463 | 0.06463 | 0.06463 | 0 | 0.016559 | 0.187593 | 2,676 | 84 | 124 | 31.857143 | 0.766329 | 0 | 0 | 0.103448 | 0 | 0 | 0.085202 | 0.038117 | 0 | 0 | 0 | 0 | 0.051724 | 1 | 0.086207 | false | 0 | 0.068966 | 0.034483 | 0.241379 | 0.051724 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bab2cdd97624bd764104d4c41b7ad0ceafb4df27 | 5,063 | py | Python | examples/tutorial_pt1.py | apoz00003/arcana | 23a8e8ce469cf541f2ed4703c1e9c1d10291d4a6 | [
"Apache-2.0"
] | 3 | 2018-11-12T05:50:38.000Z | 2020-02-03T04:25:05.000Z | examples/tutorial_pt1.py | apoz00003/arcana | 23a8e8ce469cf541f2ed4703c1e9c1d10291d4a6 | [
"Apache-2.0"
] | 72 | 2018-09-07T06:03:12.000Z | 2020-11-03T00:47:04.000Z | examples/tutorial_pt1.py | apoz00003/arcana | 23a8e8ce469cf541f2ed4703c1e9c1d10291d4a6 | [
"Apache-2.0"
] | 3 | 2018-02-12T05:07:35.000Z | 2018-03-02T03:11:29.000Z | from __future__ import absolute_import
from __future__ import print_function
import os.path
import numpy
# from nipype.interfaces.base import (
# TraitedSpec, traits, File, isdefined,
# CommandLineInputSpec, CommandLine)
from nipype.interfaces.base import (
TraitedSpec, traits, BaseInterface, File, isdefined,
Directory, CommandLineInputSpec, CommandLine, InputMultiPath)
class GrepInputSpec(CommandLineInputSpec):
match_str = traits.Str(argstr='%s', position=0,
desc="The string to search for")
in_file = File(argstr='%s', position=1,
desc="The file to search")
out_file = File(genfile=True, argstr='> %s', position=2,
desc=("The file to contain the search results"))
class GrepOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="The search results")
class Grep(CommandLine):
"""Creates a zip repository from a given folder"""
_cmd = 'grep'
input_spec = GrepInputSpec
output_spec = GrepOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_file'] = self._gen_filename('out_file')
return outputs
def _gen_filename(self, name):
if name == 'out_file':
if isdefined(self.inputs.out_file):
fname = self.inputs.out_file
else:
fname = os.path.join(os.getcwd(), 'search_results.txt')
else:
assert False
return fname
class AwkInputSpec(CommandLineInputSpec):
format_str = traits.Str(argstr="'%s'", position=0,
desc="The string to search for")
in_file = File(argstr='%s', position=1,
desc="The file to parse")
out_file = File(genfile=True, argstr='> %s', position=2,
desc=("The file to contain the parsed results"))
class AwkOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="The parsed results")
class Awk(CommandLine):
"""Creates a zip repository from a given folder"""
_cmd = 'awk'
input_spec = AwkInputSpec
output_spec = AwkOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_file'] = self._gen_filename('out_file')
return outputs
def _gen_filename(self, name):
if name == 'out_file':
if isdefined(self.inputs.out_file):
fname = self.inputs.out_file
else:
fname = os.path.join(os.getcwd(), 'awk_results.txt')
else:
assert False
return fname
class ConcatFloatsInputSpec(TraitedSpec):
in_files = InputMultiPath(desc='file name')
class ConcatFloatsOutputSpec(TraitedSpec):
out_list = traits.List(traits.Float, desc='input floats')
class ConcatFloats(BaseInterface):
"""Joins values from a list of files into a single list"""
input_spec = ConcatFloatsInputSpec
output_spec = ConcatFloatsOutputSpec
def _list_outputs(self):
out_list = []
for path in self.inputs.in_files:
with open(path) as f:
val = float(f.read())
out_list.append(val)
outputs = self._outputs().get()
outputs['out_list'] = out_list
return outputs
def _run_interface(self, runtime):
# Do nothing
return runtime
class ExtractMetricsInputSpec(TraitedSpec):
in_list = traits.List(traits.Float, desc='input floats')
class ExtractMetricsOutputSpec(TraitedSpec):
std = traits.Float(desc="The standard deviation")
avg = traits.Float(desc="The average")
class ExtractMetrics(BaseInterface):
"""Joins values from a list of files into a single list"""
input_spec = ExtractMetricsInputSpec
output_spec = ExtractMetricsOutputSpec
def _list_outputs(self):
values = self.inputs.in_list
outputs = self._outputs().get()
outputs['std'] = numpy.std(values)
outputs['avg'] = numpy.average(values)
return outputs
def _run_interface(self, runtime):
# Do nothing
return runtime
grep = Grep()
grep.inputs.match_str = 'height'
grep.inputs.in_file = '/Users/tclose/Desktop/arcana_tutorial/subject1/visit1/metrics.txt'
grep.inputs.out_file = '/Users/tclose/Desktop/test-out.txt'
grep.run()
awk = Awk()
awk.inputs.format_str = '{print $2}'
awk.inputs.in_file = '/Users/tclose/Desktop/test-out.txt'
awk.inputs.out_file = '/Users/tclose/Desktop/test-awk.txt'
awk.run()
concat_floats = ConcatFloats()
concat_floats.inputs.in_files = [
'/Users/tclose/Desktop/arcana_tutorial/subject1/visit1/awk.txt',
'/Users/tclose/Desktop/arcana_tutorial/subject1/visit2/awk.txt',
'/Users/tclose/Desktop/arcana_tutorial/subject2/visit1/awk.txt']
result = concat_floats.run()
print('Output list {}'.format(result.outputs.out_list))
extract_metrics = ExtractMetrics()
extract_metrics.inputs.in_list = result.outputs.out_list
result = extract_metrics.run()
print('Average: {}'.format(result.outputs.avg))
print('Std.: {}'.format(result.outputs.std))
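# The same chain could run as a nipype Workflow instead of manual .run() calls
# (a sketch under assumptions; node names and the base_dir here are hypothetical):
#   import nipype.pipeline.engine as pe
#   wf = pe.Workflow(name='metrics', base_dir='/tmp')
#   grep_node = pe.Node(Grep(match_str='height'), name='grep')
#   awk_node = pe.Node(Awk(format_str='{print $2}'), name='awk')
#   wf.connect(grep_node, 'out_file', awk_node, 'in_file')
#   wf.run()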
| 30.317365 | 89 | 0.661466 | 606 | 5,063 | 5.372937 | 0.207921 | 0.034398 | 0.038698 | 0.015971 | 0.541462 | 0.532862 | 0.514742 | 0.401106 | 0.351966 | 0.324324 | 0 | 0.003816 | 0.223583 | 5,063 | 166 | 90 | 30.5 | 0.824472 | 0.066364 | 0 | 0.347826 | 0 | 0 | 0.165887 | 0.074436 | 0 | 0 | 0 | 0 | 0.017391 | 1 | 0.069565 | false | 0 | 0.043478 | 0.017391 | 0.486957 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bab5be57e22359586e87739856f23dc498c9a1a3 | 1,245 | py | Python | tests/integration/views/test_session.py | ONSdigital/census-survey-runner | 9f8cd3d664db5c5b49d348bdf48c58d1a3492aab | [
"MIT"
] | null | null | null | tests/integration/views/test_session.py | ONSdigital/census-survey-runner | 9f8cd3d664db5c5b49d348bdf48c58d1a3492aab | [
"MIT"
] | 3 | 2018-10-10T08:19:07.000Z | 2018-10-29T11:43:08.000Z | tests/integration/views/test_session.py | ONSdigital/census-survey-runner | 9f8cd3d664db5c5b49d348bdf48c58d1a3492aab | [
"MIT"
] | 1 | 2021-04-11T08:04:22.000Z | 2021-04-11T08:04:22.000Z | import time
from tests.integration.integration_test_case import IntegrationTestCase
from app.settings import RESPONDENT_ACCOUNT_URL
class TestSession(IntegrationTestCase):
def test_session_expired(self):
self.get('/session-expired')
self.assertInPage('Your session has expired')
def test_session_signed_out(self):
self.get('/signed-out')
self.assertInPage('Your survey answers have been saved')
self.assertInPage(RESPONDENT_ACCOUNT_URL)
    def test_session_signed_out_with_overridden_account_url(self):
self.launchSurvey(account_service_url='https://ras.ons.gov.uk')
self.get('/signed-out')
self.assertInPage('Your survey answers have been saved')
self.assertNotInPage(RESPONDENT_ACCOUNT_URL)
self.assertInPage('https://ras.ons.gov.uk')
    def test_session_signed_out_with_none_overridden_account_url(self):
self.launchSurvey(account_service_url=None)
self.get('/signed-out')
self.assertInPage('Your survey answers have been saved')
self.assertInPage(RESPONDENT_ACCOUNT_URL)
def test_session_jti_token_expired(self):
self.launchSurvey(exp=time.time() - float(60))
self.assertStatusUnauthorised()
| 37.727273 | 71 | 0.734137 | 152 | 1,245 | 5.763158 | 0.309211 | 0.127854 | 0.079909 | 0.068493 | 0.569635 | 0.506849 | 0.461187 | 0.461187 | 0.461187 | 0.33105 | 0 | 0.001938 | 0.171084 | 1,245 | 32 | 72 | 38.90625 | 0.846899 | 0 | 0 | 0.32 | 0 | 0 | 0.178313 | 0 | 0 | 0 | 0 | 0 | 0.36 | 1 | 0.2 | false | 0 | 0.12 | 0 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bab70bc3ca929ef53c40db322323ae3e7fc22459 | 557 | py | Python | test_ifo_env.py | medric49/sharingan | f6b85118016d45456fc1467c6706731562c0f0d7 | [
"MIT"
] | null | null | null | test_ifo_env.py | medric49/sharingan | f6b85118016d45456fc1467c6706731562c0f0d7 | [
"MIT"
] | null | null | null | test_ifo_env.py | medric49/sharingan | f6b85118016d45456fc1467c6706731562c0f0d7 | [
"MIT"
] | null | null | null | import os
from gym.envs.mujoco import reacher3dof
from rllab.envs.gym_env import GymEnv
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
os.environ['MUJOCO_GL'] = 'egl'  # headless (EGL) rendering for MuJoCo
env = GymEnv("Reacher3DOF-v1", mode='oracle', force_reset=True)
time_step = env.reset()
print(time_step)
try:
    while True:
        env.render()
        time_step = env.step(env.action_space.sample())
        # action = policy(observation)
        # observation, reward, done, info = env.step(action)
        #
        # if done:
        #     observation, info = env.reset(return_info=True)
        print(time_step)
except KeyboardInterrupt:
    # the loop above never terminates on its own; Ctrl-C lands here
    pass
env.close()
| 24.217391 | 63 | 0.696589 | 79 | 557 | 4.759494 | 0.493671 | 0.085106 | 0.087766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008602 | 0.165171 | 557 | 22 | 64 | 25.318182 | 0.8 | 0.249551 | 0 | 0.153846 | 0 | 0 | 0.135922 | 0.055825 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bab7f429091cfd8a1ee991d9f7990039209eef13 | 2,773 | py | Python | preprocessing/clause_splitter.py | cniklaus/argumentation-learning | d81c6b9f0f26ccee373994dacefd5b575fc3e763 | [
"MIT"
] | null | null | null | preprocessing/clause_splitter.py | cniklaus/argumentation-learning | d81c6b9f0f26ccee373994dacefd5b575fc3e763 | [
"MIT"
] | null | null | null | preprocessing/clause_splitter.py | cniklaus/argumentation-learning | d81c6b9f0f26ccee373994dacefd5b575fc3e763 | [
"MIT"
] | null | null | null | import spacy
from spacy.lang.de.examples import sentences
#from collections import OrderedDict
#import numpy as np
nlp = spacy.load('de_core_news_sm')
doc = nlp("Weil die Sonne scheint, ist es warm, nachdem ich ein Eis, das sehr lecker war, gegessen habe.")
print(doc.text)
#for token in doc:
# print(token.text, token.pos_, token.dep_)
#TODO add recursion!
#TODO check for empty main clauses!
def split_relative_clauses(sentence):
    """Split off relative clauses (dependency label 'rc' in the TIGER scheme used by the German model)."""
relc = []
main = []
rc_left = []
rc_right = []
start = 0
for token in sentence:
print(token, token.i, token.dep_)
if token.dep_ == "rc":
start = token.left_edge.i
rel_clause = sentence[token.left_edge.i: token.right_edge.i+1]
rc_right.append(token.i+1)
rc_left.append(token.left_edge.i)
relc.append(rel_clause)
count = 0
for j in rc_left:
print(start, rc_left, rc_right)
end = j
if start == end:
end = rc_left[count]
main1 = sentence[start: rc_right[count]]
start = rc_right[count]
count += 1
if len(main1) > 1:
main.append(main1)
print("main: ", main)
print("relcl: ", relc)
def split_adverbial_clauses(sentence):
    """Split off adverbial clauses (introduced by a complementizer, label 'cp'), then recurse into relative clauses."""
advclauses = []
main = []
advcl_left = []
advcl_right = []
for token in sentence:
if token.dep_ == "cp":
adverbial_clause = sentence[token.left_edge.i : token.head.i+1]
advcl_right.append(token.head.i+1)
advcl_left.append(token.left_edge.i)
advclauses.append(adverbial_clause)
start = 0
count = 0
for j in advcl_left:
end = j
main1 = sentence[start: end]
start = advcl_right[count]
count += 1
if len(main1) > 1:
main.append(main1)
print(main)
print(advclauses)
for a in advclauses:
split_relative_clauses(a)
def split_coordinate_clauses1(sentence):
    """Split at clausal objects (label 'oc')."""
for token in sentence:
if token.dep_ == "oc":
rel_clause = sentence[token.left_edge.i : token.head.i+1]
main1 = sentence[:token.left_edge.i]
main2 = sentence[token.head.i+1: ]
print(rel_clause)
print(main1)
print(main2)
def split_coordinate_clauses2(sentence):
    """Split at coordinating conjunctions (label 'cd')."""
for token in sentence:
if token.dep_ == "cd":
rel_clause = sentence[token.left_edge.i : token.head.i+1]
main1 = sentence[:token.left_edge.i]
main2 = sentence[token.i: ]
print(rel_clause)
print(main1)
print(main2)
#def split_into_clauses(sentence):
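# A possible body for the commented-out function above (an untested sketch,
# not the author's code):
#   def split_into_clauses(sentence):
#       split_adverbial_clauses(sentence)  # also recurses into relative clauses
#       split_coordinate_clauses1(sentence)
#       split_coordinate_clauses2(sentence)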
#split_relative_clauses(doc)
split_adverbial_clauses(doc)
#split_coordinate_clauses1(doc)
#split_coordinate_clauses2(doc) | 26.409524 | 106 | 0.60476 | 367 | 2,773 | 4.395095 | 0.231608 | 0.030998 | 0.072536 | 0.078115 | 0.389337 | 0.361438 | 0.33168 | 0.314321 | 0.247365 | 0.195288 | 0 | 0.016726 | 0.288496 | 2,773 | 105 | 107 | 26.409524 | 0.800811 | 0.102777 | 0 | 0.373333 | 0 | 0.013333 | 0.052037 | 0 | 0 | 0 | 0 | 0.009524 | 0 | 1 | 0.053333 | false | 0 | 0.026667 | 0 | 0.08 | 0.173333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bab96adff00e5592d6dd50d6d5dcfa735edf0250 | 805 | py | Python | encryptedpickle/utils.py | ai-are-better-than-humans/encrypted-pickle-python | 7656233598e02e65971f69e11849a0f288b2b2a5 | [
"MIT"
] | 4 | 2016-05-23T08:07:31.000Z | 2020-02-26T17:07:15.000Z | encryptedpickle/utils.py | ai-are-better-than-humans/encrypted-pickle-python | 7656233598e02e65971f69e11849a0f288b2b2a5 | [
"MIT"
] | null | null | null | encryptedpickle/utils.py | ai-are-better-than-humans/encrypted-pickle-python | 7656233598e02e65971f69e11849a0f288b2b2a5 | [
"MIT"
] | 8 | 2016-05-23T23:17:22.000Z | 2021-05-12T18:13:10.000Z | # -*- coding: utf-8 -*-
'''
Some common, generic utilities
'''
from __future__ import absolute_import
from base64 import urlsafe_b64encode, urlsafe_b64decode
def urlsafe_nopadding_b64encode(data):
    '''URL safe Base64 encode without padding (=)'''
    # bytes literal keeps this correct on Python 3, where b64encode returns bytes
    return urlsafe_b64encode(data).rstrip(b'=')
def urlsafe_nopadding_b64decode(data):
    '''URL safe Base64 decode without padding (=)'''
    padding = len(data) % 4
    if padding != 0:
        padding = 4 - padding
    padding = b'=' * padding  # bytes literal for Python 3 compatibility
    data = data + padding
    return urlsafe_b64decode(data)
def const_equal(str_a, str_b):
    '''Constant time string comparison (the stdlib's hmac.compare_digest is the modern equivalent)'''
    if len(str_a) != len(str_b):
        return False
    result = True
    for i in range(len(str_a)):
        # no short-circuit: every position is compared regardless of earlier mismatches
        result &= (str_a[i] == str_b[i])
return result
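# --- Usage sketch (assumption, not part of the original module) ---
if __name__ == '__main__':
    token = urlsafe_nopadding_b64encode(b'some binary data')
    assert urlsafe_nopadding_b64decode(token) == b'some binary data'
    assert const_equal('abc', 'abc') and not const_equal('abc', 'abd')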
| 21.184211 | 55 | 0.650932 | 103 | 805 | 4.883495 | 0.446602 | 0.031809 | 0.075547 | 0.067594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035256 | 0.224845 | 805 | 37 | 56 | 21.756757 | 0.770833 | 0.212422 | 0 | 0 | 0 | 0 | 0.003273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.111111 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
babaa94cdad5e340c2f91ecb33bb6c6a3444d673 | 1,655 | py | Python | payments/migrations/0004_expand_email_scope.py | jakereps/workshops.qiime2.org | 5941e4db8b63c3518db2b85d5c45afbea5781bfc | [
"BSD-3-Clause"
] | null | null | null | payments/migrations/0004_expand_email_scope.py | jakereps/workshops.qiime2.org | 5941e4db8b63c3518db2b85d5c45afbea5781bfc | [
"BSD-3-Clause"
] | null | null | null | payments/migrations/0004_expand_email_scope.py | jakereps/workshops.qiime2.org | 5941e4db8b63c3518db2b85d5c45afbea5781bfc | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('payments', '0003_workshop_location'),
]
operations = [
migrations.RenameField(
model_name='order',
old_name='email',
new_name='contact_email',
),
migrations.AddField(
model_name='orderitem',
name='email',
field=models.EmailField(default='example@example.com', max_length=254),
preserve_default=False,
),
migrations.AddField(
model_name='workshop',
name='closing_date',
field=models.DateField(default=datetime.datetime(2016, 8, 7, 23, 54, 27, 693604, tzinfo=utc)),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='orderitem',
unique_together=set([('order', 'rate', 'email')]),
),
migrations.AlterUniqueTogether(
name='workshop',
unique_together=set([('title', 'slug')]),
),
migrations.RemoveField(
model_name='orderitem',
name='quantity',
),
]
| 30.648148 | 106 | 0.536556 | 145 | 1,655 | 5.986207 | 0.586207 | 0.041475 | 0.052995 | 0.062212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028783 | 0.265257 | 1,655 | 53 | 107 | 31.226415 | 0.685033 | 0.215106 | 0 | 0.358974 | 0 | 0 | 0.126357 | 0.017054 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.102564 | 0 | 0.179487 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
babb781d7744991028ae717034b56c6166172a1f | 1,304 | py | Python | src/ddo_transform/ddo_transform/standardize.py | bricrsa/datadevops | a6431d30f2ae283197ec91efd6b2052fff9452ea | [
"MIT"
] | null | null | null | src/ddo_transform/ddo_transform/standardize.py | bricrsa/datadevops | a6431d30f2ae283197ec91efd6b2052fff9452ea | [
"MIT"
] | null | null | null | src/ddo_transform/ddo_transform/standardize.py | bricrsa/datadevops | a6431d30f2ae283197ec91efd6b2052fff9452ea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Main module."""
from pyspark.sql import DataFrame
from pyspark.sql.functions import lit, col, to_timestamp
def standardize_parking_bay(parkingbay_sdf: DataFrame, load_id, loaded_on):
t_parkingbay_sdf = (
parkingbay_sdf
.withColumn("last_edit", to_timestamp("last_edit", "YYYYMMddHHmmss"))
.select(
col("bay_id").cast("int").alias("bay_id"),
"last_edit",
"marker_id",
"meter_id",
"rd_seg_dsc",
col("rd_seg_id").cast("int").alias("rd_seg_id"),
"the_geom",
lit(load_id).alias("load_id"),
lit(loaded_on.isoformat()).alias("loaded_on")
)
)
return t_parkingbay_sdf
def standardize_sensordata(sensordata_sdf: DataFrame, load_id, loaded_on):
t_sensordata_sdf = (
sensordata_sdf
.select(
col("bay_id").cast("int").alias("bay_id"),
"st_marker_id",
col("lat").cast("float").alias("lat"),
col("lon").cast("float").alias("lon"),
"location",
"status",
lit(load_id).alias("load_id"),
lit(loaded_on.isoformat()).alias("loaded_on")
)
)
return t_sensordata_sdf
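# Usage sketch (hypothetical values and path; only the column contract above is real):
#   t_sdf = standardize_parking_bay(parkingbay_sdf, load_id='0001',
#                                   loaded_on=datetime.datetime.now(datetime.timezone.utc))
#   t_sdf.write.mode('append').parquet('dbfs:/mnt/datalake/interim/parking_bay')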
| 29.636364 | 78 | 0.546012 | 148 | 1,304 | 4.493243 | 0.337838 | 0.054135 | 0.040602 | 0.063158 | 0.354887 | 0.354887 | 0.354887 | 0.273684 | 0.273684 | 0.180451 | 0 | 0.001105 | 0.305982 | 1,304 | 43 | 79 | 30.325581 | 0.733702 | 0.02684 | 0 | 0.235294 | 0 | 0 | 0.169672 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
babde36aa5e6a7922b35c485f8bf74af0c0cb0ed | 6,702 | py | Python | lib/surface/functions/get_logs.py | ianel20/google-cloud-sdk | 36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3 | [
"Apache-2.0"
] | null | null | null | lib/surface/functions/get_logs.py | ianel20/google-cloud-sdk | 36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3 | [
"Apache-2.0"
] | null | null | null | lib/surface/functions/get_logs.py | ianel20/google-cloud-sdk | 36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:23:41.000Z | 2020-07-25T12:23:41.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions get-logs' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
class GetLogs(base.ListCommand):
"""Show logs produced by functions.
This command is deprecated. Please use `gcloud preview app logs read` instead.
This command displays log entries produced by all functions running in a
region, or by a single function if it is specified through a command argument.
By default, when no extra flags are specified, the most recent 20 log entries
are displayed.
"""
SEVERITIES = ['DEBUG', 'INFO', 'ERROR']
@staticmethod
def Args(parser):
"""Register flags for this command."""
base.LIMIT_FLAG.RemoveFromParser(parser)
parser.add_argument(
'name', nargs='?',
help=('Name of the function which logs are to be displayed. If no name '
'is specified, logs from all functions are displayed.'))
parser.add_argument(
'--execution-id',
help=('Execution ID for which logs are to be displayed.'))
parser.add_argument(
'--start-time', required=False, type=arg_parsers.Datetime.Parse,
help=('Return only log entries which timestamps are not earlier than '
'the specified time. The timestamp must be in RFC3339 UTC "Zulu" '
'format. If --start-time is specified, the command returns '
'--limit earliest log entries which appeared after '
'--start-time.'))
parser.add_argument(
'--end-time', required=False, type=arg_parsers.Datetime.Parse,
help=('Return only log entries which timestamps are not later than '
'the specified time. The timestamp must be in RFC3339 UTC "Zulu" '
'format. If --end-time is specified but --start-time is not, the '
'command returns --limit latest log entries which appeared '
'before --end-time.'))
parser.add_argument(
'--limit', required=False, type=arg_parsers.BoundedInt(1, 1000),
default=20,
help=('Number of log entries to be fetched; must not be greater than '
'1000.'))
parser.add_argument(
'--min-log-level', choices=GetLogs.SEVERITIES,
help=('Minimum level of logs to be fetched; can be one of DEBUG, INFO, '
'ERROR.'))
parser.add_argument(
'--show-log-levels', action='store_true', default=True,
help=('Print a log level of each log entry.'))
parser.add_argument(
'--show-function-names', action='store_true', default=True,
help=('Print a function name before each log entry.'))
parser.add_argument(
'--show-execution-ids', action='store_true', default=True,
help=('Print an execution ID before each log entry.'))
parser.add_argument(
'--show-timestamps', action='store_true', default=True,
help=('Print a UTC timestamp before each log entry.'))
@util.CatchHTTPErrorRaiseHTTPException
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Yields:
Objects representing log entries.
"""
log.warn('This command is deprecated. '
'Please use `gcloud preview app logs read` instead.')
logging_client = self.context['logging_client']
logging = self.context['logging_messages']
project = properties.VALUES.core.project.Get(required=True)
log_filter = (
'resource.type="cloud_function" '
'labels."cloudfunctions.googleapis.com/region"="{0}" '
.format(args.region))
if args.name:
log_filter += (
'labels."cloudfunctions.googleapis.com/function_name"="{0}" '
.format(args.name))
if args.execution_id:
log_filter += 'labels."execution_id"="{0}" '.format(args.execution_id)
if args.min_log_level:
log_filter += 'severity>={0} '.format(args.min_log_level)
if args.start_time:
order = 'asc'
start_time = args.start_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
log_filter += 'timestamp>="{0}" '.format(start_time)
else:
order = 'desc'
if args.end_time:
end_time = args.end_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
log_filter += 'timestamp<="{0}" '.format(end_time)
# TODO(user): Consider using paging for listing more than 1000 log entries.
# However, reversing the order of received latest N entries before a
# specified timestamp would be problematic with paging.
request = logging.ListLogEntriesRequest(
projectIds=[project], filter=log_filter,
orderBy='timestamp {0}'.format(order), pageSize=args.limit)
response = logging_client.entries.List(request=request)
entries = response.entries if order == 'asc' else reversed(response.entries)
for entry in entries:
row = dict(
log=entry.textPayload
)
if entry.severity:
severity = str(entry.severity)
if severity in GetLogs.SEVERITIES:
# Use short form (first letter) for expected severities.
row['level'] = severity[0]
else:
# Print full form of unexpected severities.
row['level'] = severity
for label in entry.labels.additionalProperties:
if label.key == 'cloudfunctions.googleapis.com/function_name':
row['name'] = label.value
if label.key == 'execution_id':
row['execution_id'] = label.value
if entry.timestamp:
row['time_utc'] = util.FormatTimestamp(entry.timestamp)
yield row
def Format(self, args):
fields = []
if args.show_log_levels:
fields.append('level')
if args.show_function_names:
fields.append('name')
if args.show_execution_ids:
fields.append('execution_id')
if args.show_timestamps:
fields.append('time_utc')
fields.append('log')
return 'table({0})'.format(','.join(fields))
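# For reference (values here are illustrative, not from the source): with a function
# name and `--min-log-level=ERROR`, the assembled filter resembles:
#   resource.type="cloud_function"
#   labels."cloudfunctions.googleapis.com/region"="us-central1"
#   labels."cloudfunctions.googleapis.com/function_name"="my-func"
#   severity>=ERROR timestamp>="2015-01-01T00:00:00.000000Z"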
| 40.618182 | 80 | 0.662787 | 862 | 6,702 | 5.082367 | 0.310905 | 0.020543 | 0.038804 | 0.019174 | 0.212052 | 0.188085 | 0.176672 | 0.16115 | 0.118694 | 0.118694 | 0 | 0.008102 | 0.2265 | 6,702 | 164 | 81 | 40.865854 | 0.836998 | 0.222769 | 0 | 0.121739 | 0 | 0 | 0.330216 | 0.05299 | 0 | 0 | 0 | 0.006098 | 0 | 1 | 0.026087 | false | 0 | 0.043478 | 0 | 0.095652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bacd225ac1d42e9cb81fbdcf9ad0ce8c4ea2e152 | 428 | py | Python | f8a_report/dbtable_cleanup_main.py | rafiu007/f8a-stacks-report | d7b8d24a67aaaeb36556fe9de71e997074e52daf | [
"Apache-2.0"
] | null | null | null | f8a_report/dbtable_cleanup_main.py | rafiu007/f8a-stacks-report | d7b8d24a67aaaeb36556fe9de71e997074e52daf | [
"Apache-2.0"
] | 1 | 2020-10-29T08:00:39.000Z | 2020-10-29T08:03:46.000Z | f8a_report/dbtable_cleanup_main.py | practice-fabric8-analytics/f8a-stacks-report | 433402eb017201495654a4885c89ce6f378a1cd9 | [
"Apache-2.0"
] | 1 | 2020-10-28T16:07:21.000Z | 2020-10-28T16:07:21.000Z | """Daily clean up of DB tables."""
import logging
from helpers.report_helper import ReportHelper
logger = logging.getLogger(__file__)
def main():
"""Regular clean up of database tables."""
r = ReportHelper()
try:
r.cleanup_db_tables()
    except Exception:
        logger.exception("Exception encountered when trying to clean up DB tables")
        raise  # bare raise preserves the original traceback
if __name__ == '__main__':
main()
| 19.454545 | 84 | 0.668224 | 54 | 428 | 5.018519 | 0.62963 | 0.077491 | 0.066421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.233645 | 428 | 21 | 85 | 20.380952 | 0.82622 | 0.151869 | 0 | 0 | 0 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bacd2e3df389119769eb29fc4b7a05c4e560d95f | 13,356 | py | Python | losses.py | ProbIOU/PROBIOU-EFFICIENTDET | 1906964f5ac82b73ad120ede1b5eef47bc520598 | [
"Apache-2.0"
] | 2 | 2021-09-02T01:56:58.000Z | 2021-11-19T14:42:41.000Z | losses.py | ProbIOU/PROBIOU-EFFICIENTDET | 1906964f5ac82b73ad120ede1b5eef47bc520598 | [
"Apache-2.0"
] | null | null | null | losses.py | ProbIOU/PROBIOU-EFFICIENTDET | 1906964f5ac82b73ad120ede1b5eef47bc520598 | [
"Apache-2.0"
] | 2 | 2021-12-18T01:11:01.000Z | 2022-02-14T23:00:38.000Z | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
import math
from tensorflow import keras
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
from utils.anchors import anchors_for_shape
from layers import RegressBoxes
def focal(alpha=0.25, gamma=1.5):
"""
Create a functor for computing the focal loss.
Args
alpha: Scale the focal weight with alpha.
gamma: Take the power of the focal weight with gamma.
Returns
A functor that computes the focal loss using the alpha and gamma.
"""
def _focal(y_true, y_pred):
"""
Compute the focal loss given the target tensor and the predicted tensor.
As defined in https://arxiv.org/abs/1708.02002
Args
y_true: Tensor of target data from the generator with shape (B, N, num_classes).
y_pred: Tensor of predicted data from the network with shape (B, N, num_classes).
Returns
The focal loss of y_pred w.r.t. y_true.
"""
labels = y_true[:, :, :-1]
# -1 for ignore, 0 for background, 1 for object
anchor_state = y_true[:, :, -1]
classification = y_pred
# filter out "ignore" anchors
indices = tf.where(keras.backend.not_equal(anchor_state, -1))
labels = tf.gather_nd(labels, indices)
classification = tf.gather_nd(classification, indices)
# compute the focal loss
alpha_factor = keras.backend.ones_like(labels) * alpha
alpha_factor = tf.where(keras.backend.equal(labels, 1), alpha_factor, 1 - alpha_factor)
# (1 - 0.99) ** 2 = 1e-4, (1 - 0.9) ** 2 = 1e-2
focal_weight = tf.where(keras.backend.equal(labels, 1), 1 - classification, classification)
focal_weight = alpha_factor * focal_weight ** gamma
cls_loss = focal_weight * keras.backend.binary_crossentropy(labels, classification)
# compute the normalizer: the number of positive anchors
normalizer = tf.where(keras.backend.equal(anchor_state, 1))
normalizer = keras.backend.cast(keras.backend.shape(normalizer)[0], keras.backend.floatx())
normalizer = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer)
return keras.backend.sum(cls_loss) / normalizer
#loss = tf.math.divide_no_nan(keras.backend.sum(cls_loss), normalizer)
#return tf.where(tf.math.is_nan(loss), 0., loss)
return _focal
def smooth_l1(sigma=3.0):
"""
Create a smooth L1 loss functor.
Args
sigma: This argument defines the point where the loss changes from L2 to L1.
Returns
A functor for computing the smooth L1 loss given target data and predicted data.
"""
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
""" Compute the smooth L1 loss of y_pred w.r.t. y_true.
Args
y_true: Tensor from the generator of shape (B, N, 5). The last value for each box is the state of the anchor (ignore, negative, positive).
y_pred: Tensor from the network of shape (B, N, 4).
Returns
The smooth L1 loss of y_pred w.r.t. y_true.
"""
# separate target and state
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# filter out "ignore" anchors
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
# compute smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
regression_loss = tf.where(
keras.backend.less(regression_diff, 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),
regression_diff - 0.5 / sigma_squared
)
# compute the normalizer: the number of positive anchors
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
return keras.backend.sum(regression_loss) / normalizer
return _smooth_l1
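# Usage sketch (an assumption about the surrounding training script, not code
# from this file): the loss functors plug directly into Keras:
#   model.compile(optimizer='adam',
#                 loss={'regression': smooth_l1(), 'classification': focal()})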
def smooth_l1_quad(sigma=3.0):
"""
Create a smooth L1 loss functor.
Args
sigma: This argument defines the point where the loss changes from L2 to L1.
Returns
A functor for computing the smooth L1 loss given target data and predicted data.
"""
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
""" Compute the smooth L1 loss of y_pred w.r.t. y_true.
Args
y_true: Tensor from the generator of shape (B, N, 5). The last value for each box is the state of the anchor (ignore, negative, positive).
y_pred: Tensor from the network of shape (B, N, 4).
Returns
The smooth L1 loss of y_pred w.r.t. y_true.
"""
# separate target and state
regression = y_pred
regression = tf.concat([regression[..., :4], tf.sigmoid(regression[..., 4:9])], axis=-1)
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# filter out "ignore" anchors
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
# compute smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
box_regression_loss = tf.where(
keras.backend.less(regression_diff[..., :4], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., :4], 2),
regression_diff[..., :4] - 0.5 / sigma_squared
)
alpha_regression_loss = tf.where(
keras.backend.less(regression_diff[..., 4:8], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., 4:8], 2),
regression_diff[..., 4:8] - 0.5 / sigma_squared
)
ratio_regression_loss = tf.where(
keras.backend.less(regression_diff[..., 8], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., 8], 2),
regression_diff[..., 8] - 0.5 / sigma_squared
)
# compute the normalizer: the number of positive anchors
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
box_regression_loss = tf.reduce_sum(box_regression_loss) / normalizer
alpha_regression_loss = tf.reduce_sum(alpha_regression_loss) / normalizer
ratio_regression_loss = tf.reduce_sum(ratio_regression_loss) / normalizer
return box_regression_loss + alpha_regression_loss + 16 * ratio_regression_loss
return _smooth_l1
''' ProbIoU '''
EPS = 1e-3
def helinger_dist(x1, y1, a1, b1, x2, y2, a2, b2, freezed=False):  # sic: Hellinger distance
    '''
    Hellinger distance between two axis-aligned Gaussians, computed from the
    Bhattacharyya distance Db:
        Dh = sqrt(1 - exp(-Db))
        Db = 1/4*((x1-x2)^2/(a1+a2) + (y1-y2)^2/(b1+b2))
             + 1/2*ln((a1+a2)*(b1+b2)) - 1/4*ln(a1*a2*b1*b2) - ln(2)
    '''
if freezed:
B1 = 1/4.*(tf.math.pow(x1-x2, 2.)/(a1+a2+EPS) + tf.math.pow(y1-y2, 2.)/(b1+b2+EPS))
B2 = 1/2.*tf.math.log((a1+a2)*(b1+b2)+EPS)
B3 = 1/4.*tf.math.log(a1*a2*b1*b2+EPS)
Db = B1 + B2 - B3 - tf.math.log(2.)
else:
Db = tf.math.pow(x1-x2, 2.)/(2*a1+EPS) + tf.math.pow(y1-y2, 2.)/(2*b1+EPS)
Db = tf.clip_by_value(Db, EPS, 100.)
return tf.math.sqrt(1 - tf.math.exp(-Db) + EPS)
def get_probiou_values(array):
    # boxes come in as (xmin, ymin, xmax, ymax)
    xmin = array[:, 0]; ymin = array[:, 1]
    xmax = array[:, 2]; ymax = array[:, 3]
    # Gaussian parameters: box center and per-axis variances; w**2/12 is the
    # variance of a uniform distribution over an interval of width w
    x = (xmin + xmax)/2.
    y = (ymin + ymax)/2.
    a = tf.math.pow((xmax - xmin), 2.)/12.
    b = tf.math.pow((ymax - ymin), 2.)/12.
    return x, y, a, b
def calc_probiou(mode, target, pred, freezed=False):
l1 = helinger_dist(
*get_probiou_values(target),
*get_probiou_values(pred),
freezed=freezed
)
if mode=='probioul1':
return l1
l2 = tf.math.pow(l1, 2.)
l2 = - tf.math.log(1. - l2 + EPS)
return l2
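# Sanity check sketch (not from the original file): identical boxes give a
# Bhattacharyya distance of ~0 (clipped to EPS), so the l1 variant returns
# roughly sqrt(2*EPS) ~ 0.045 rather than exactly 0:
#   box = tf.constant([[0., 0., 10., 10.]])
#   print(calc_probiou('probioul1', box, box))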
def calc_diou_ciou(mode, bboxes1, bboxes2):
# xmin, ymin, xmax, ymax
rows = tf.cast(tf.shape(bboxes1)[0], 'float32')
cols = tf.cast(tf.shape(bboxes2)[0], 'float32')
cious = tf.zeros((rows, cols), dtype='float32')
dious = tf.zeros((rows, cols), dtype='float32')
if rows * cols == 0:
return cious
exchange = False
if rows > cols:
bboxes1, bboxes2 = bboxes2, bboxes1
cious = tf.zeros((cols, rows), dtype='float32')
dious = tf.zeros((cols, rows), dtype='float32')
exchange = True
w1 = bboxes1[:, 2] - bboxes1[:, 0]
h1 = bboxes1[:, 3] - bboxes1[:, 1]
w2 = bboxes2[:, 2] - bboxes2[:, 0]
h2 = bboxes2[:, 3] - bboxes2[:, 1]
area1 = w1 * h1
area2 = w2 * h2
center_x1 = (bboxes1[:, 2] + bboxes1[:, 0]) / 2.
center_y1 = (bboxes1[:, 3] + bboxes1[:, 1]) / 2.
center_x2 = (bboxes2[:, 2] + bboxes2[:, 0]) / 2.
center_y2 = (bboxes2[:, 3] + bboxes2[:, 1]) / 2.
inter_max_xy = tf.math.minimum(bboxes1[:, 2:],bboxes2[:, 2:])
inter_min_xy = tf.math.maximum(bboxes1[:, :2],bboxes2[:, :2])
out_max_xy = tf.math.maximum(bboxes1[:, 2:],bboxes2[:, 2:])
out_min_xy = tf.math.minimum(bboxes1[:, :2],bboxes2[:, :2])
inter = inter_max_xy - inter_min_xy
inter = tf.where(inter<0., 0., inter)
inter_area = inter[:, 0] * inter[:, 1]
inter_diag = (center_x2 - center_x1)**2. + (center_y2 - center_y1)**2.
outer = out_max_xy - out_min_xy
outer = tf.where(outer<0., 0., outer)
outer_diag = (outer[:, 0] ** 2.) + (outer[:, 1] ** 2.)
union = area1+area2-inter_area
if mode=='diou':
dious = inter_area / union - (inter_diag) / outer_diag
dious = tf.clip_by_value(dious, -1.0, 1.0)
if exchange:
dious = tf.transpose(dious)
return 1. - dious
u = (inter_diag) / outer_diag
iou = inter_area / union
v = (4. / (math.pi ** 2.)) * tf.math.pow((tf.math.atan(w2 / h2) - tf.math.atan(w1 / h1)), 2.)
S = tf.stop_gradient(1. - iou)
alpha = tf.stop_gradient(v / (S + v))
cious = iou - (u + alpha * v)
cious = tf.clip_by_value(cious, -1.0, 1.0)
if exchange:
cious = tf.transpose(cious)
return 1. - cious
def iou_loss(mode, phi, weight, anchor_parameters=None, freeze_iterations=0):
assert phi in range(7)
image_sizes = [512, 640, 768, 896, 1024, 1280, 1408]
input_size = float(image_sizes[phi])
it = 0
def _iou(y_true, y_pred):
nonlocal it
# separate target and state
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# convert to boxes values: xmin, ymin, xmax, ymax
anchors = anchors_for_shape((input_size, input_size), anchor_params=anchor_parameters)
anchors_input = np.expand_dims(anchors, axis=0)
regression = RegressBoxes(name='boxes')([anchors_input, regression[..., :4]])
regression_target = RegressBoxes(name='boxes')([anchors_input, regression_target[..., :4]])
# filter out "ignore" anchors
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
if 'probiou' in mode:
loss = calc_probiou(mode, regression_target, regression, freezed=freeze_iterations>it)
it += 1
elif mode in ('diou', 'ciou'):
loss = calc_diou_ciou(mode, regression, regression_target)
else:
# requires: y_min, x_min, y_max, x_max
xmin, ymin, xmax, ymax = tf.unstack(regression, axis=-1)
regression = tf.stack([ymin,xmin,ymax,xmax], axis=-1)
xmin, ymin, xmax, ymax = tf.unstack(regression_target, axis=-1)
regression_target = tf.stack([ymin,xmin,ymax,xmax], axis=-1)
loss = tfa.losses.GIoULoss(mode=mode, reduction=tf.keras.losses.Reduction.NONE) (regression_target, regression)
return tf.cast(weight, 'float32') * loss
return _iou | 37.516854 | 150 | 0.609763 | 1,858 | 13,356 | 4.249193 | 0.160926 | 0.053198 | 0.01064 | 0.026472 | 0.474858 | 0.451172 | 0.403547 | 0.382521 | 0.365801 | 0.319569 | 0 | 0.042627 | 0.262279 | 13,356 | 356 | 151 | 37.516854 | 0.758652 | 0.250749 | 0 | 0.194737 | 0 | 0 | 0.00901 | 0 | 0 | 0 | 0 | 0 | 0.005263 | 1 | 0.063158 | false | 0 | 0.036842 | 0 | 0.178947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bacda3db4aab0d0f6e89b164fd1966fe8e3f70d2 | 3,093 | py | Python | pomodoro.py | Mattynb/PomodoroGUI | b6c67a0f059497f17fad5cdc4c6b9089d63d29a8 | [
"MIT"
] | null | null | null | pomodoro.py | Mattynb/PomodoroGUI | b6c67a0f059497f17fad5cdc4c6b9089d63d29a8 | [
"MIT"
] | null | null | null | pomodoro.py | Mattynb/PomodoroGUI | b6c67a0f059497f17fad5cdc4c6b9089d63d29a8 | [
"MIT"
] | null | null | null | import PySimpleGUI as pg
import time
import sys
from pygame import mixer
# Section Popup
def win2m():
lay2 = [[pg.T(f'', key='T')], [pg.OK()]]
win2 = pg.Window('Popup', lay2, location=(250 ,0), no_titlebar=True)
return win2
def sound():
    # plays a chime; expects notification.mp3 alongside this script
    mixer.init()
    mixer.music.load("notification.mp3")
    mixer.music.set_volume(0.7)
    mixer.music.play()
def main():
# Color thingy
pg.theme('dark amber')
# Main Window
layout = [
[pg.Text('Timer = 0', key='timer', visible = False), pg.DropDown([(0.05, 0.05), (25, 5), (15, 2)], key='drop', )],
[pg.B('CLOSE'), pg.B('START')]
]
win = pg.Window('Pomodoro', layout, location=(0,0), finalize=True, no_titlebar=True)
while True:
# Reads for events and values
e, v = win.read()
# Closes the program
if e == pg.WINDOW_CLOSED or e == 'CLOSE':
win.close()
sys.exit()
# Starts the counter upon pressing START
if e == 'START':
# Defines how long each section is
WORK_T, BREAK_T = v['drop']
# Hides Elements
win['drop'].update(visible = False)
win['START'].hide_row()
win['timer'].update(visible = True)
            # Count elapsed minutes from 0.00 up to WORK_T
            # (busy-wait: the window does not process events while counting)
            M = 0
            T = time.time()
            while M < WORK_T:
                M = round((time.time() - T) / 60, 2)
                win['timer'].update(M)
                win.refresh()
            # Popup window to indicate break time
sound()
if M >= WORK_T:
win2 = win2m()
win2.finalize()
win2['T'].update(f'GOOD JOB!\nENJOY YOUR {BREAK_T} MINUTE BREAK NOW!')
e2, v2 = win2.read()
                if e2 in (pg.WINDOW_CLOSED, 'OK'):
                    win2.close()
                # Count elapsed minutes from 0.00 up to BREAK_T
                M = 0
                win['timer'].update(M)
                win.refresh()
                T = time.time()
                while M < BREAK_T:
                    M = round((time.time() - T) / 60, 2)
                    win['timer'].update(M)
                    win.refresh()
# Resets win to default
if M >= BREAK_T:
sound()
win2 = win2m()
win2.finalize()
win2['T'].update(f'GOOD JOB!\nSECTION IS OVER.')
win2.refresh()
e2, v2 = win2.read()
                    if e2 in (pg.WINDOW_CLOSED, 'OK'):
                        win2.close()
win['drop'].update(visible = True)
win['START'].unhide_row()
win['timer'].update(visible = False)
e, v = win.read()
if __name__ == '__main__':
main()
| 30.93 | 126 | 0.430973 | 350 | 3,093 | 3.737143 | 0.334286 | 0.030581 | 0.053517 | 0.036697 | 0.327982 | 0.268349 | 0.249235 | 0.249235 | 0.249235 | 0.249235 | 0 | 0.038261 | 0.442289 | 3,093 | 99 | 127 | 31.242424 | 0.72 | 0.106369 | 0 | 0.405797 | 0 | 0 | 0.081071 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.057971 | 0 | 0.115942 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bacf8f6d3d4b525ddd175a8b2492963e5de1c2a0 | 4,703 | py | Python | addons/blender-skeletal-motion-animate/panels/retargeting.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | [
"MIT"
] | null | null | null | addons/blender-skeletal-motion-animate/panels/retargeting.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | [
"MIT"
] | null | null | null | addons/blender-skeletal-motion-animate/panels/retargeting.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | [
"MIT"
] | null | null | null | import bpy
from .main import ToolPanel
from ..operators import retargeting, detector
from ..core.icon_manager import Icons
from ..core.retargeting import get_target_armature
from bpy.types import PropertyGroup, UIList
from bpy.props import StringProperty
# Retargeting panel
class RetargetingPanel(ToolPanel, bpy.types.Panel):
bl_idname = 'VIEW3D_PT_rsl_retargeting_v2'
bl_label = 'Retargeting'
def draw(self, context):
layout = self.layout
layout.use_property_split = False
row = layout.row(align=True)
row.label(text='Select the armatures:')
row = layout.row(align=True)
row.prop(context.scene, 'rsl_retargeting_armature_source', icon='ARMATURE_DATA')
row = layout.row(align=True)
row.prop(context.scene, 'rsl_retargeting_armature_target', icon='ARMATURE_DATA')
anim_exists = False
for obj in bpy.data.objects:
if obj.animation_data and obj.animation_data.action:
anim_exists = True
if not anim_exists:
row = layout.row(align=True)
row.label(text='No animated armature found!', icon='INFO')
return
if not context.scene.rsl_retargeting_armature_source or not context.scene.rsl_retargeting_armature_target:
self.draw_import_export(layout)
return
if not context.scene.rsl_retargeting_bone_list:
row = layout.row(align=True)
row.scale_y = 1.2
row.operator(retargeting.BuildBoneList.bl_idname, icon_value=Icons.CALIBRATE.get_icon())
self.draw_import_export(layout)
return
subrow = layout.row(align=True)
row = subrow.row(align=True)
row.scale_y = 1.2
row.operator(retargeting.BuildBoneList.bl_idname, text='Rebuild Bone List', icon_value=Icons.CALIBRATE.get_icon())
row = subrow.row(align=True)
row.scale_y = 1.2
row.alignment = 'RIGHT'
row.operator(retargeting.ClearBoneList.bl_idname, text="", icon='X')
layout.separator()
row = layout.row(align=True)
row.template_list("RSL_UL_BoneList", "Bone List", context.scene, "rsl_retargeting_bone_list", context.scene, "rsl_retargeting_bone_list_index", rows=1, maxrows=10)
row = layout.row(align=True)
row.prop(context.scene, 'rsl_retargeting_auto_scaling')
row = layout.row(align=True)
row.label(text='Use Pose:')
row.prop(context.scene, 'rsl_retargeting_use_pose', expand=True)
row = layout.row(align=True)
row.scale_y = 1.4
row.operator(retargeting.RetargetAnimation.bl_idname, icon_value=Icons.CALIBRATE.get_icon())
self.draw_import_export(layout)
row = layout.row(align=True)
row.scale_y = 1.4
row.operator(retargeting.RenameVRMBones.bl_idname, text='Rename VRM Bones', icon_value=Icons.CALIBRATE.get_icon())
row = layout.row(align=True)
row.scale_y = 1.4
row.operator(retargeting.RenameVRMBonesStandard.bl_idname, text='Rename VRM Bones to Standard', icon_value=Icons.CALIBRATE.get_icon())
def draw_import_export(self, layout):
layout.separator()
row = layout.row(align=True)
row.label(text='Custom Naming Schemes:')
row.operator(detector.SaveCustomBonesRetargeting.bl_idname, text='Save')
subrow = layout.row(align=True)
row = subrow.row(align=True)
row.scale_y = 0.9
row.operator(detector.ImportCustomBones.bl_idname, text='Import')
row.operator(detector.ExportCustomBones.bl_idname, text='Export')
row = subrow.row(align=True)
row.scale_y = 0.9
row.alignment = 'RIGHT'
row.operator(detector.ClearCustomBones.bl_idname, text='', icon='X')
class BoneListItem(PropertyGroup):
"""Properties of the bone list items"""
bone_name_source: StringProperty(
name="Source Bone",
description="The source bone name",
default="Undefined")
bone_name_target: StringProperty(
name="Target Bone",
description="The target bone name",
default="")
bone_name_key: StringProperty(
name="Auto Detection Key",
description="The automatically detected bone key",
default="")
class RSL_UL_BoneList(UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
armature_target = get_target_armature()
layout = layout.split(factor=0.36, align=True)
layout.label(text=item.bone_name_source)
if armature_target:
layout.prop_search(item, 'bone_name_target', armature_target.pose, "bones", text='')
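# Registration sketch (assumption: actual registration lives in the add-on's
# __init__.py; property names mirror those referenced above):
#   for cls in (RetargetingPanel, BoneListItem, RSL_UL_BoneList):
#       bpy.utils.register_class(cls)
#   bpy.types.Scene.rsl_retargeting_bone_list = bpy.props.CollectionProperty(type=BoneListItem)
#   bpy.types.Scene.rsl_retargeting_bone_list_index = bpy.props.IntProperty()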
| 35.900763 | 171 | 0.670423 | 590 | 4,703 | 5.164407 | 0.225424 | 0.056121 | 0.070889 | 0.088612 | 0.481457 | 0.453889 | 0.380374 | 0.336396 | 0.25041 | 0.247457 | 0 | 0.006566 | 0.222836 | 4,703 | 130 | 172 | 36.176923 | 0.827086 | 0.011057 | 0 | 0.395833 | 0 | 0 | 0.124004 | 0.042626 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.125 | 0 | 0.270833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bad1baa27ef9fe52644d371d1f406ee906b6cb17 | 4,128 | py | Python | repos/system_upgrade/common/actors/selinux/selinuxprepare/tests/component_test_selinuxprepare.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/common/actors/selinux/selinuxprepare/tests/component_test_selinuxprepare.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 1 | 2022-03-07T15:34:11.000Z | 2022-03-07T15:35:15.000Z | repos/system_upgrade/common/actors/selinux/selinuxprepare/tests/component_test_selinuxprepare.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | null | null | null | import os
import pytest
from leapp.libraries.stdlib import api, CalledProcessError, run
from leapp.models import SELinuxModule, SELinuxModules
from leapp.reporting import Report
from leapp.snactor.fixture import current_actor_context
TEST_MODULES = [
['400', 'mock1'],
['99', 'mock1'],
['300', 'mock1'],
['400', 'mock2'],
['999', 'mock3'],
]
TEST_TEMPLATES = [
['200', 'base_container']
]
SEMANAGE_COMMANDS = [
['fcontext', '-t', 'httpd_sys_content_t', '"/web(/.*)?"'],
['fcontext', '-t', 'cgdcbxd_var_run_t', '"/ganesha(/.*)?"'],
['fcontext', '-t', 'mock_file_type_t', '"/mock_directory(/.*)?"'],
['port', '-t', 'http_port_t', '-p', 'udp', '81'],
['permissive', 'abrt_t']
]
testmoduledir = 'tests/mock_modules/'
def _run_cmd(cmd, logmsg='', split=False):
try:
return run(cmd, split=split).get('stdout', '')
except CalledProcessError as e:
if logmsg:
api.current_logger().warning('{}: {}'.format(logmsg, e.stderr))
return None
@pytest.fixture(scope='module')
def semodule_lfull_initial():
yield _run_cmd(['semodule', '-lfull'], logmsg='Error listing SELinux customizations')
@pytest.fixture(scope='module')
def semanage_export_initial():
yield _run_cmd(['semanage', 'export'], logmsg='Error listing SELinux customizations')
@pytest.fixture(scope='function')
def destructive_selinux_env():
tests_dir = os.path.join(os.getenv('PYTEST_CURRENT_TEST').rsplit(os.path.sep, 2)[0], testmoduledir)
# try to install compatibility module - needed on newer systems - failure to install is expected on rhel 7
_run_cmd(['semodule', '-X', '100', '-i', os.path.join(tests_dir, 'compat.cil')])
semodule_command = ['semodule']
for priority, module in TEST_MODULES + TEST_TEMPLATES:
semodule_command.extend(['-X', priority, '-i', os.path.join(tests_dir, module + '.cil')])
_run_cmd(semodule_command, logmsg='Error installing mock modules')
for command in SEMANAGE_COMMANDS:
_run_cmd(['semanage', command[0], '-a'] + command[1:], logmsg='Error applying selinux customizations')
yield
for command in SEMANAGE_COMMANDS:
_run_cmd(['semanage', command[0], '-d'] + command[1:])
semodule_command = ['semodule']
for priority, module in reversed(TEST_MODULES + TEST_TEMPLATES +
[['400', 'permissive_abrt_t'], ['100', 'compat']]):
semodule_command.extend(['-X', priority, '-r', module])
_run_cmd(semodule_command)
@pytest.mark.skipif(os.getenv('DESTRUCTIVE_TESTING', False) in [False, '0'],
reason='Test disabled by default because it would modify the system')
def test_SELinuxPrepare(current_actor_context, semodule_lfull_initial, semanage_export_initial,
destructive_selinux_env):
before_test = []
for cmd in (['semodule', '-lfull'], ['semanage', 'export']):
res = _run_cmd(cmd, 'Error listing SELinux customizations')
before_test.append(res)
# XXX still not sure about logging in tests
api.current_logger().info('Before test: {}'.format(res))
# Make sure that initial semodule/semanage commands don't match before tests ones
assert before_test != [semodule_lfull_initial, semanage_export_initial]
semodule_list = [SELinuxModule(name=module, priority=int(prio), content='', removed=[])
for (prio, module) in TEST_MODULES + [['400', 'permissive_abrt_t'], ['100', 'compat']]]
template_list = [SELinuxModule(name=module, priority=int(prio), content='', removed=[])
for (prio, module) in TEST_TEMPLATES]
current_actor_context.feed(SELinuxModules(modules=semodule_list, templates=template_list))
current_actor_context.run()
# check if all given modules and local customizations where removed
semodule_res = _run_cmd(['semodule', '-lfull'], 'Error listing SELinux modules')
assert semodule_lfull_initial == semodule_res
semanage_res = _run_cmd(['semanage', 'export'], 'Error listing SELinux customizations')
assert semanage_export_initial == semanage_res
| 38.579439 | 110 | 0.668362 | 494 | 4,128 | 5.370445 | 0.342105 | 0.027139 | 0.026385 | 0.049755 | 0.270637 | 0.234452 | 0.168865 | 0.137203 | 0.094233 | 0.094233 | 0 | 0.013901 | 0.180959 | 4,128 | 106 | 111 | 38.943396 | 0.770778 | 0.070736 | 0 | 0.08 | 0 | 0 | 0.221091 | 0.006004 | 0 | 0 | 0 | 0 | 0.04 | 1 | 0.066667 | false | 0 | 0.08 | 0 | 0.173333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bad1ccb092d8f5ff71c5c028aa84d24c26e25a42 | 20,919 | py | Python | run.py | BIDS-Apps/afni_proc_bids_app | b36d224b25fb023e3bffcf6a4fb96833a1ce18f4 | [
"Apache-2.0"
] | 1 | 2018-09-17T21:04:46.000Z | 2018-09-17T21:04:46.000Z | run.py | BIDS-Apps/afni_proc_bids_app | b36d224b25fb023e3bffcf6a4fb96833a1ce18f4 | [
"Apache-2.0"
] | 8 | 2017-12-05T17:02:53.000Z | 2022-02-17T16:04:50.000Z | run.py | BIDS-Apps/afni_proc_bids_app | b36d224b25fb023e3bffcf6a4fb96833a1ce18f4 | [
"Apache-2.0"
] | 3 | 2017-12-05T15:46:25.000Z | 2018-01-15T20:00:09.000Z | #!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import subprocess
from glob import glob
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import base64
import json
import numpy as np
import re
from io import open # pylint: disable=W0622
import jinja2
__version__ = open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'version')).read()
class Template(object):
"""
Utility class for generating a config file from a jinja template.
https://github.com/oesteban/endofday/blob/f2e79c625d648ef45b08cc1f11fd0bd84342d604/endofday/core/template.py
"""
def __init__(self, template_str):
self.template_str = template_str
self.env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath='/'),
trim_blocks=True, lstrip_blocks=True)
def compile(self, configs):
"""Generates a string with the replacements"""
template = self.env.get_template(self.template_str)
return template.render(configs)
def generate_conf(self, configs, path):
"""Saves the oucome after replacement on the template to file"""
output = self.compile(configs)
with open(path, 'w+') as output_file:
output_file.write(output)
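# Illustrative usage (a minimal sketch; the config keys and output path are
# hypothetical and depend on the jinja template being rendered):
#
#     tpl = Template('/code/reports/individual.html')
#     tpl.generate_conf({'subj_id': '01', 'task_label': 'rest'}, '/tmp/report.html')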
class IndividualTemplate(Template):
"""Specific template for the individual report"""
def __init__(self):
#super(IndividualTemplate, self).__init__(pkgrf('mriqc', 'data/reports/individual.html'))
super(IndividualTemplate, self).__init__('/code/reports/individual.html')
class GroupTemplate(Template):
"""Specific template for the individual report"""
def __init__(self):
#super(GroupTemplate, self).__init__(pkgrf('mriqc', 'data/reports/group.html'))
super(GroupTemplate, self).__init__('/code/reports/group.html')
def read_report_snippet(in_file):
"""Add a snippet into the report"""
import os.path as op
import re
from io import open # pylint: disable=W0622
is_svg = (op.splitext(op.basename(in_file))[1] == '.svg')
with open(in_file) as thisfile:
if not is_svg:
return thisfile.read()
svg_tag_line = 0
content = thisfile.read().split('\n')
corrected = []
for i, line in enumerate(content):
if "<svg " in line:
line = re.sub(' height="[0-9.]+[a-z]*"', '', line)
line = re.sub(' width="[0-9.]+[a-z]*"', '', line)
if svg_tag_line == 0:
svg_tag_line = i
corrected.append(line)
return '\n'.join(corrected[svg_tag_line:])
def make_montage(prefix, ulay=None, olay=None, cbar='FreeSurfer_Seg_i255',
opacity=4, montx=3, monty=1, blowup=1, delta_slices='-1 -1 -1',
func_range_perc=100):
if ulay is None and olay is None:
raise Exception("overlay and underlay can't both be undefined")
elif ulay is None and olay is not None:
ulay = olay
olay = None
cmd = '/code/@chauffeur_afni' + \
' -ulay ' + ulay
if olay is not None:
cmd += ' -olay ' + olay
cmd += ' -set_dicom_xyz `3dCM {i}`'.format(i=olay)
cmd += ' -cbar ' + cbar + \
' -opacity %d'%opacity
else:
cmd += ' -olay_off'
cmd += ' -set_dicom_xyz `3dCM {i}`'.format(i=ulay)
cmd += ' -prefix ' + prefix + \
' -do_clean' + \
' -delta_slices '+ delta_slices + \
' -montx %d'%montx + \
' -monty %d'%monty + \
' -blowup %d'%blowup + \
' -func_range_perc %f' %func_range_perc + \
' -save_ftype JPEG'
return cmd
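# Example of the command string this builder produces (a sketch; the paths are
# hypothetical). An underlay-only call such as
#     make_montage('/out/qc/img/anat', ulay='anat_final.01+tlrc.HEAD', montx=6, monty=6)
# returns roughly (shown wrapped here; the real string is a single line):
#     /code/@chauffeur_afni -ulay anat_final.01+tlrc.HEAD -olay_off
#         -set_dicom_xyz `3dCM anat_final.01+tlrc.HEAD` -prefix /out/qc/img/anat
#         -do_clean -delta_slices -1 -1 -1 -montx 6 -monty 1 -blowup 1
#         -func_range_perc 100.000000 -save_ftype JPEG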
def make_motion_plot(subj_dir, subj_id):
# Read the three files in
motion_file = os.path.join(subj_dir,'dfile_rall.1D')
    motion = pd.read_csv(motion_file, sep=r'\s*', engine='python', names=[r'$\Delta$A-P [mm]', r'$\Delta$L-R [mm]', r'$\Delta$I-S [mm]', r'Yaw [$^\circ$]', r'Pitch [$^\circ$]', r'Roll [$^\circ$]'])
enorm_file = os.path.join(subj_dir,'motion_{subj_id}_enorm.1D'.format(subj_id=subj_id))
    enorm = pd.read_csv(enorm_file, sep=r'\s*', engine='python', names=['enorm'])
outlier_file = os.path.join(subj_dir,'outcount_rall.1D')
    outliers = pd.read_csv(outlier_file, sep=r'\s*', engine='python', names=['outliers'])
# make a dataframe
mot_df = pd.concat([outliers,enorm,motion], axis = 1)
# Plot the dataframe
axs = mot_df.plot(subplots = True, figsize = (4,5))
ldgs = []
for ax in axs:
box = ax.get_position()
ax.legend()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ldgs.append(ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)))
plt.tight_layout()
# save the figure
qc_dir = os.path.join(subj_dir,'qc')
img_dir = os.path.join(qc_dir,'img')
if not os.path.exists(qc_dir):
os.mkdir(qc_dir)
if not os.path.exists(img_dir):
os.mkdir(img_dir)
out_path = os.path.join(img_dir,'motion_plot.svg')
plt.savefig(out_path, tight_layout = True, bbox_extra_artists=ldgs, bbox_inches='tight')
return out_path
def run(command, env=None, shell=False):
    # copy the environment so callers never mutate os.environ itself
    merged_env = os.environ.copy()
    merged_env.update(env or {})
process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=shell,
env=merged_env)
while True:
line = process.stdout.readline()
line = str(line, 'utf-8')[:-1]
print(line)
if line == '' and process.poll() is not None:
break
if process.returncode != 0:
raise Exception("Non zero return code: %d"%process.returncode)
task_re = re.compile('.*task-([^_]*)_.*')
parser = argparse.ArgumentParser(description='afni_proc BIDS App entrypoint script.')
parser.add_argument('bids_dir', help='The directory with the input dataset '
'formatted according to the BIDS standard.')
parser.add_argument('output_dir', help='The directory where the output files '
'should be stored. If you are running group level analysis '
                    'this folder should be prepopulated with the results of the '
'participant level analysis.')
parser.add_argument('analysis_level', help='Level of the analysis that will be performed. '
'Multiple participant level analyses can be run independently '
                    '(in parallel) using the same output_dir.',
choices=['participant', 'group'])
parser.add_argument('--participant_label', help='The label(s) of the participant(s) that should be analyzed. The label '
'corresponds to sub-<participant_label> from the BIDS spec '
'(so it does not include "sub-"). If this parameter is not '
'provided all subjects should be analyzed. Multiple '
'participants can be specified with a space separated list.',
nargs="+")
parser.add_argument('--session_label', help='The label(s) of the session(s) that should be analyzed. The label '
'corresponds to ses-<session_label> from the BIDS spec '
'(so it does not include "ses-"). If this parameter is not '
'provided all sessions should be analyzed. Multiple '
'sessions can be specified with a space separated list.',
nargs="+")
parser.add_argument('--task_label', help='The label(s) of the task(s) that should be analyzed. The label '
'corresponds to task-<task_label> from the BIDS spec '
'(so it does not include "task-"). If this parameter is not '
'provided all tasks will be analyzed. Multiple '
'tasks can be specified with a space separated list.',
nargs="+")
parser.add_argument('--afni_proc', help='Optional: command string for afni proc. '
'Parameters that vary by subject '
                    'should be encapsulated in curly braces, and all of '
                    '{{subj_id}}, {{out_dir}}, {{anat_path}}, and {{epi_paths}} must be included. '
                    'The first _T1w for each subject will currently be used as the anat. '
                    'All of the _bold will be used as the functionals. '
                    'Example: '
'-subj_id {subj_id} '
'-scr_overwrite -out_dir {{out_dir}} '
'-blocks tshift align tlrc volreg blur mask scale '
'-copy_anat {{anat_path}} -tcat_remove_first_trs 0 '
'-dsets {{epi_paths}} -volreg_align_to MIN_OUTLIER '
'-volreg_align_e2a -volreg_tlrc_warp -blur_size 4.0 -bash')
parser.add_argument('--report_only', dest='report_only', action='store_true')
parser.add_argument('-v', '--version', action='version',
version='afni_proc BIDS-App {}'.format(__version__))
args = parser.parse_args()
bad_chars = ['`', '|', '&', ';', '>', '<', '$', '?', '\\.', ':', '[', ']']
if args.afni_proc is not None:
cmd_skeleton = args.afni_proc
for bc in bad_chars:
if bc in cmd_skeleton:
raise Exception("Unsafe character '%s' found in command: %s"%(bc, cmd_skeleton))
cmd_skeleton = 'python /opt/afni/afni_proc.py -check_results_dir no -script {ses_dir}/proc.bids.{subj_id}.{ses_id}.{task_id} '+ cmd_skeleton
else:
cmd_skeleton = "python /opt/afni/afni_proc.py -check_results_dir no -subj_id {subj_id} \
-script {ses_dir}/proc.bids.{subj_id}.{ses_id}.{task_id} -scr_overwrite -out_dir {out_dir} \
-blocks tshift align tlrc volreg blur mask scale \
-copy_anat {anat_path} -tcat_remove_first_trs 0 \
-dsets {epi_paths} -align_opts_aea -cost lpc+ZZ -giant_move \
-tlrc_base MNI152_T1_2009c+tlrc -tlrc_NL_warp \
-volreg_align_to MIN_OUTLIER \
    -volreg_align_e2a -volreg_tlrc_warp -blur_size 4.0 -bash"
run(('bids-validator %s'%args.bids_dir).split(' '))
# Get path for report directory
reports_dir = os.path.join(args.output_dir,"reports")
subjects_to_analyze = []
# only for a subset of subjects
if args.participant_label:
subjects_to_analyze = args.participant_label[0].split(' ')
# for all subjects
else:
subject_dirs = glob(os.path.join(args.bids_dir, "sub-*"))
subjects_to_analyze = sorted([subject_dir.split("-")[-1] for subject_dir in subject_dirs])
# TODO: throw early error if they've specified participants, labels,
# and subjects in such a way that there is nothing to analyze
# make sessions to analyze
# make tasks to analyze
all_configs = []
report_num = 0
for subject_label in subjects_to_analyze:
# get anatomical path
anat_path = sorted(list(glob(os.path.join(args.bids_dir, "sub-%s"%subject_label,
"anat", "*_T1w.nii*")) + glob(os.path.join(args.bids_dir,"sub-%s"%subject_label,"ses-*","anat", "*_T1w.nii*"))))[0]
subj_out_dir = os.path.join(args.output_dir, "sub-%s"%subject_label)
# Do sessions exist
sessions_dirs = list(glob(os.path.join(args.bids_dir,"sub-%s"%subject_label,"ses-*")))
sessions_list = [session_dir.split("-")[-1] for session_dir in sessions_dirs]
if len(sessions_list) > 0:
sessions_exist = True
if args.session_label:
sessions_to_analyze = sorted(set(args.session_label[0].split(' ')).intersection(set(sessions_list)))
else:
sessions_to_analyze = sessions_list
else:
sessions_exist = False
sessions_to_analyze = ['']
for session_label in sessions_to_analyze:
if sessions_exist:
session_out_dir = os.path.join(subj_out_dir,"ses-%s"%session_label)
else:
session_out_dir = subj_out_dir
os.makedirs(session_out_dir, exist_ok = True)
all_epi_paths = sorted(set(glob(os.path.join(args.bids_dir, "sub-%s"%subject_label,
"func", "*bold.nii*")) + glob(os.path.join(args.bids_dir,"sub-%s"%subject_label,"ses-%s"%session_label,"func", "*bold.nii*"))))
# Which tasks to analyze
try:
tasks_in_session = set([task_re.findall(epi)[0] for epi in all_epi_paths])
        except IndexError:
print("Tasks: ",[epi for epi in all_epi_paths if len(task_re.findall(epi))==0])
raise Exception("A bold scan without a task label exists. Not permitted")
if args.task_label:
tasks_to_analyze = sorted(set(args.task_label[0].split(' ')).intersection(tasks_in_session))
else:
tasks_to_analyze = sorted(tasks_in_session)
for task_label in tasks_to_analyze:
epi_paths = ' '.join(sorted(set(glob(os.path.join(args.bids_dir, "sub-%s"%subject_label,
"func", "*%s*bold.nii*"%task_label)) + glob(os.path.join(args.bids_dir,"sub-%s"%subject_label,"ses-%s"%session_label,"func", "*%s*bold.nii*"%task_label)))))
task_out_dir = os.path.join(session_out_dir,task_label)
task_qc_dir = os.path.join(task_out_dir, 'qc')
task_qc_img_dir = os.path.join(task_qc_dir, 'img')
if args.analysis_level == 'participant':
config = {}
cmd = cmd_skeleton.format(subj_id=subject_label,ses_id = session_label, task_id = task_label, out_dir=task_out_dir,
anat_path=anat_path, epi_paths=epi_paths, ses_dir = session_out_dir)
if '{' in cmd:
raise Exception("Unsafe character '{' found in command: %s"%cmd.join(' '))
                cmd = ' '.join(cmd.split()).split(' ')  # collapse whitespace runs before building the argv list
if not args.report_only:
print(' '.join(cmd), flush = True)
run(cmd)
print('bash -c "$(set -o pipefail && tcsh -xef {ses_dir}/proc.bids.{subj_id}.{ses_id}.{task_id} 2>&1 | tee {ses_dir}/output.proc.bids.{subj_id}.{ses_id}.{task_id})"'.format(subj_id = subject_label,ses_id = session_label, task_id = task_label, ses_dir = session_out_dir), flush = True)
run('bash -c "set -o pipefail && tcsh -xef {ses_dir}/proc.bids.{subj_id}.{ses_id}.{task_id} 2>&1 > {ses_dir}/output.proc.bids.{subj_id}.{ses_id}.{task_id}"'.format(subj_id = subject_label,ses_id = session_label, task_id = task_label, ses_dir = session_out_dir), shell=True)
run("mv {ses_dir}/proc.bids.{subj_id}.{ses_id}.{task_id} {out_dir};mv {ses_dir}/output.proc.bids.{subj_id}.{ses_id}.{task_id} {out_dir}".format(subj_id = subject_label,ses_id = session_label, task_id = task_label, ses_dir = session_out_dir, out_dir = task_out_dir), shell=True)
pbs = glob(os.path.join(task_out_dir, 'pb*'))
if len(pbs) > 0:
pb_lod = []
for pb in pbs:
pbd = {}
pbn = pb.split('/')[-1].split('.')
pbd['path'] = pb
pbd['filename'] = pb.split('/')[-1]
pbd['pb'] = int(pbn[0][-2:])
pbd['subj'] = pbn[1]
pbd['run'] = int(pbn[2][-2:])
pbd['block'] = pbn[3].split('+')[0]
pbd['orientation'] = pbn[3].split('+')[-1]
pb_lod.append(pbd)
pb_df = pd.DataFrame(pb_lod)
config['subj_id'] = pb_df.subj.unique()[0]
config['task_label'] = task_label
config['num_runs'] = len(pb_df.run.unique())
config['blocks'] = ' '.join(pb_df.block.unique())
config['report_num'] = report_num
report_num += 1
if session_label != '':
config['session_label'] = session_label
try:
mot_path = make_motion_plot(task_out_dir, subject_label)
config['motion_report'] = read_report_snippet(mot_path)
except FileNotFoundError:
pass
warn_list = ['3dDeconvolve.err',
'out.pre_ss_warn.txt',
'out.cormat_warn.txt']
warns = {}
for wf in warn_list:
wf_path = os.path.join(task_out_dir, wf)
try:
if os.path.getsize(wf_path) > 0:
with open(wf_path, 'r') as h:
warns[wf] = h.readlines()
warns[wf] = [ww.replace('\n', '') for ww in warns[wf]]
except FileNotFoundError:
pass
if len(warns) > 0:
config['warnings'] = warns
if not os.path.exists(task_qc_dir):
os.mkdir(task_qc_dir)
if not os.path.exists(task_qc_img_dir):
os.mkdir(task_qc_img_dir)
if not os.path.exists(reports_dir):
os.mkdir(reports_dir)
try:
anat_out_path = os.path.join(task_out_dir, 'anat_final.%s+tlrc.HEAD'%subject_label)
anat_exts = np.array([float(ss) for ss in subprocess.check_output(["3dinfo", "-extent", anat_out_path]).decode().split('\t')])
anat_lrext = np.abs(anat_exts[0]) + np.abs(anat_exts[1])
anat_mont_dim = np.floor(np.sqrt(anat_lrext))
print("#######\n mont_dim = %f \n#########"%anat_mont_dim)
run(make_montage(os.path.join(task_qc_img_dir, 'anatomical_montage'),
ulay=anat_out_path,
montx=anat_mont_dim, monty=anat_mont_dim), shell=True)
func_path = pb_df.loc[pb_df['block'] == 'volreg', 'path'].values[0] + '[0]'
func_rext = float(subprocess.check_output(["3dinfo", "-Rextent", func_path]))
func_lext = float(subprocess.check_output(["3dinfo", "-Lextent", func_path]))
func_lrext = np.abs(func_lext) + np.abs(func_rext)
func_mont_dim = np.floor(np.sqrt(func_lrext))
run(make_montage(os.path.join(task_qc_img_dir, 'functional_montage'),
ulay=anat_out_path,
                                     olay=func_path, montx=func_mont_dim, monty=func_mont_dim,
cbar='gray_scale', opacity=9), shell=True)
with open(os.path.join(task_qc_img_dir, 'anatomical_montage.sag.jpg'), 'rb') as h:
anat_bs = base64.b64encode(h.read()).decode()
with open(os.path.join(task_qc_img_dir, 'functional_montage.sag.jpg'), 'rb') as h:
func_bs = base64.b64encode(h.read()).decode()
config['volreg_report_anat'] = anat_bs
config['volreg_report_func'] = func_bs
config['anat_ap_ext'] = np.abs(anat_exts[2]) + np.abs(anat_exts[3]) + 1
config['anat_is_ext'] = np.abs(anat_exts[4]) + np.abs(anat_exts[5]) + 1
print("#######\n anat_ap_ext = %f \n#########"%config['anat_ap_ext'])
except (FileNotFoundError, ValueError):
pass
tpl = IndividualTemplate()
if sessions_exist:
tpl.generate_conf(config, os.path.join(reports_dir, 'sub-%s_ses-%s_task-%s_individual.html'%(subject_label, session_label, task_label)))
else:
tpl.generate_conf(config, os.path.join(reports_dir, 'sub-%s_task-%s_individual.html'%(subject_label, task_label)))
with open(os.path.join(task_qc_dir, 'individual.json'), 'w') as h:
json.dump(config, h)
elif args.analysis_level == 'group':
with open(os.path.join(task_qc_dir, 'individual.json'), 'r') as h:
all_configs.append(json.load(h))
if args.analysis_level == 'group':
if not os.path.exists(reports_dir):
os.mkdir(reports_dir)
tpl = GroupTemplate()
#print(all_configs)
tpl.generate_conf({'configs':all_configs}, os.path.join(reports_dir, 'group.html'))
| 47.979358 | 304 | 0.571442 | 2,665 | 20,919 | 4.258912 | 0.187993 | 0.022731 | 0.029075 | 0.013568 | 0.3563 | 0.317269 | 0.275154 | 0.237093 | 0.230044 | 0.193921 | 0 | 0.010152 | 0.298389 | 20,919 | 435 | 305 | 48.089655 | 0.763167 | 0.049285 | 0 | 0.100295 | 0 | 0.014749 | 0.218775 | 0.035866 | 0 | 0 | 0 | 0.002299 | 0 | 1 | 0.026549 | false | 0.00885 | 0.050147 | 0 | 0.100295 | 0.020649 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bad3f16dab0d3a862f98f60e8103cc7bdfb888f7 | 1,547 | py | Python | web/app/views_admin.py | pierre-chaville/automlk | 61386beba62f72360e1f5f8d6bcce17df653e2e8 | [
"MIT"
] | 16 | 2017-09-05T12:26:11.000Z | 2019-10-26T22:55:41.000Z | web/app/views_admin.py | pierre-chaville/automlk | 61386beba62f72360e1f5f8d6bcce17df653e2e8 | [
"MIT"
] | 1 | 2018-02-07T11:16:43.000Z | 2018-02-07T11:16:43.000Z | web/app/views_admin.py | pierre-chaville/automlk | 61386beba62f72360e1f5f8d6bcce17df653e2e8 | [
"MIT"
] | 8 | 2017-09-21T01:20:52.000Z | 2021-01-21T10:03:34.000Z | from app import app
from flask import render_template, request, flash
from .form import *
from automlk.monitor import get_heart_beeps
from automlk.context import get_config, set_config
@app.route('/monitor', methods=['GET'])
def monitor():
# monitor workers
return render_template('monitor.html', controller=get_heart_beeps('controller'),
grapher=get_heart_beeps('grapher'), worker_text=get_heart_beeps('worker_text'),
workers=get_heart_beeps('worker'), config=get_config())
@app.route('/config', methods=['GET', 'POST'])
def config():
# view/edit configuration
form = ConfigForm()
if request.method == 'POST':
if form.validate():
try:
set_config(data=form.data.data,
theme=form.theme.data,
bootstrap=form.bootstrap.data,
graph_theme=form.graph_theme.data,
store=form.store.data,
store_url=form.store_url.data)
except Exception as e:
flash(str(e))
else:
config = get_config()
# copy data to form
form.data.data = config['data']
form.theme.data = config['theme']
form.bootstrap.data = config['bootstrap']
form.graph_theme.data = config['graph_theme']
form.store.data = config['store']
form.store_url.data = config['store_url']
return render_template('config.html', form=form, config=get_config())
| 35.976744 | 106 | 0.594053 | 178 | 1,547 | 5 | 0.280899 | 0.067416 | 0.073034 | 0.042697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.289593 | 1,547 | 42 | 107 | 36.833333 | 0.809827 | 0.036846 | 0 | 0 | 0 | 0 | 0.08681 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.151515 | 0.030303 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bad4cf3dc6de5ce2d0d4976051b05fed4b7194fc | 14,591 | py | Python | components/watcher_handle.py | Druzai/Bot_Mc_discord | 0ab210d201675db96fbb7ba527ab36aa67cddf90 | [
"MIT"
] | 2 | 2020-12-15T14:06:13.000Z | 2021-12-09T20:25:02.000Z | components/watcher_handle.py | Druzai/Bot_Mc_discord | 0ab210d201675db96fbb7ba527ab36aa67cddf90 | [
"MIT"
] | 21 | 2020-09-05T23:04:13.000Z | 2022-03-28T15:31:30.000Z | components/watcher_handle.py | Druzai/Bot_Mc_discord | 0ab210d201675db96fbb7ba527ab36aa67cddf90 | [
"MIT"
] | 1 | 2021-09-03T17:54:14.000Z | 2021-09-03T17:54:14.000Z | import socket
from contextlib import suppress
from os import SEEK_END, stat
from pathlib import Path
from re import search, split, findall
from sys import exc_info
from threading import Thread
from time import sleep
from traceback import format_exc
from colorama import Fore, Style
from discord import Webhook, RequestsWebhookAdapter
from components.localization import get_translation
from config.init_config import Config, BotVars
class Watcher:
_running = True
_thread = None
# Constructor
def __init__(self, watch_file: Path, call_func_on_change=None, *args, **kwargs):
self._cached_stamp = None
self._filename: Path = watch_file
self._call_func_on_change = call_func_on_change
self._refresh_delay_secs = Config.get_cross_platform_chat_settings().refresh_delay_of_console_log
self._args = args
self._kwargs = kwargs
# Look for changes
def look(self):
stamp = stat(self._filename).st_mtime
if stamp != self._cached_stamp:
temp = self._cached_stamp
self._cached_stamp = stamp
if self._call_func_on_change is not None and temp is not None:
BotVars.watcher_last_line = self._call_func_on_change(file=self._filename,
last_line=BotVars.watcher_last_line,
*self._args, **self._kwargs)
# Keep watching in a loop
def watch(self):
while self._running:
try:
# Look for changes
sleep(self._refresh_delay_secs)
self.look()
except FileNotFoundError:
print(get_translation("Watcher Error: File '{0}' wasn't found!").format(self._filename.as_posix()))
except UnicodeDecodeError:
print(get_translation("Watcher Error: Can't decode strings from file '{0}'"
", check that minecraft server saves it in utf-8 encoding!\n"
"(Ensure you have '-Dfile.encoding=UTF-8' as one of the arguments "
"to start the server in start script)").format(self._filename.as_posix()))
except BaseException:
exc = format_exc().rstrip("\n")
print(get_translation("Watcher Unhandled Error: {0}").format(exc_info()[0]) +
f"\n{Style.DIM}{Fore.RED}{exc}{Style.RESET_ALL}")
def start(self):
self._thread = Thread(target=self.watch, daemon=True)
self._thread.start()
def stop(self):
self._running = False
if self._thread is not None:
self._thread.join()
self._thread = None
def is_running(self):
return self._running
def create_watcher():
if BotVars.watcher_of_log_file is not None and BotVars.watcher_of_log_file.is_running():
BotVars.watcher_of_log_file.stop()
from components.additional_funcs import get_server_version
server_version = get_server_version()
if 7 <= server_version:
path_to_server_log = "logs/latest.log"
elif 0 <= server_version < 7:
path_to_server_log = "server.log"
else:
return
BotVars.watcher_of_log_file = Watcher(watch_file=Path(Config.get_selected_server_from_list().working_directory,
path_to_server_log),
call_func_on_change=_check_log_file)
def create_chat_webhook():
if Config.get_cross_platform_chat_settings().webhook_url:
BotVars.webhook_chat = Webhook.from_url(url=Config.get_cross_platform_chat_settings().webhook_url,
adapter=RequestsWebhookAdapter())
def _check_log_file(file: Path, last_line: str = None):
if Config.get_cross_platform_chat_settings().channel_id is None:
return
last_lines = _get_last_n_lines(file,
Config.get_cross_platform_chat_settings().number_of_lines_to_check_in_console_log,
last_line)
if len(last_lines) == 0:
return last_line
if last_line is None:
last_lines = last_lines[-1:]
mention_max_words = 5
mention_max_right_symbols = 5
for line in last_lines:
if search(r"INFO", line) and "*" not in split(r"<([^>]*)>", line, maxsplit=1)[0] and \
search(r"<([^>]*)> (.*)", line):
player_nick, player_message = search(r"<([^>]*)>", line)[0], \
split(r"<([^>]*)>", line, maxsplit=1)[-1].strip()
if search(r"@[^\s]+", player_message):
split_arr = split(r"@[^\s]+", player_message)
mentions = [[i[1:]] for i in findall(r"@[^\s]+", player_message)]
for i_mention in range(len(mentions)):
for words_number in range(mention_max_words + 1):
if len(split_arr[1 + i_mention]) < words_number:
break
found = False
add_string = " ".join(split_arr[1 + i_mention].lstrip(" ").split(" ")[:words_number]) \
if words_number > 0 else ""
for symbols_number in range(mention_max_right_symbols + 1):
mention = f"{mentions[i_mention][0]} {add_string}".lower() \
if len(add_string) > 0 else mentions[i_mention][0].lower()
cut_right_string = None
if symbols_number > 0:
cut_right_string = mention[-symbols_number:]
mention = mention[:-symbols_number]
found = False
# Check mention of everyone and here
for mention_pattern in ["a", "e", "everyone", "p", "here"]:
if mention_pattern == mention:
mentions[i_mention] = [mention_pattern]
if cut_right_string is not None:
mentions[i_mention].extend([None, cut_right_string])
found = True
break
                            # Check for a user mention
for member in BotVars.bot_for_webhooks.guilds[0].members:
if member.name.lower() == mention:
mentions[i_mention] = [member.name if len(add_string) == 0
else [member.name, add_string], member]
if cut_right_string is not None:
mentions[i_mention].append(cut_right_string)
found = True
break
elif member.display_name.lower() == mention:
mentions[i_mention] = [member.display_name if len(add_string) == 0
else [member.display_name, add_string], member]
if cut_right_string is not None:
mentions[i_mention].append(cut_right_string)
found = True
break
if found:
break
                            # Check for a role mention
for role in BotVars.bot_for_webhooks.guilds[0].roles:
if role.name.lower() == mention:
mentions[i_mention] = [role.name if len(add_string) == 0
else [role.name, add_string], role]
if cut_right_string is not None:
mentions[i_mention].append(cut_right_string)
found = True
break
if found:
break
                            # Check for a known Minecraft nick mention
for user in Config.get_settings().known_users:
if user.user_minecraft_nick.lower() == mention:
if len(mentions[i_mention]) == 1:
mentions[i_mention] = [user.user_minecraft_nick if len(add_string) == 0
else [user.user_minecraft_nick, add_string], []]
if cut_right_string is not None:
mentions[i_mention].append(cut_right_string)
if isinstance(mentions[i_mention][1], list):
mentions[i_mention][1] += [m for m in BotVars.bot_for_webhooks.guilds[0].members
if m.id == user.user_discord_id]
found = True
if found:
break
if found:
break
insert_numb = 1
mention_nicks = []
for mention in mentions:
if isinstance(mention[0], str):
is_list = False
elif isinstance(mention[0], list):
is_list = True
else:
raise ValueError("mention[0] is not string or list!")
if (mention[0] if not is_list else mention[0][0]) in ["a", "e", "everyone"]:
if len(mention) == 3:
split_arr[insert_numb] = f"{mention[2]}{split_arr[insert_numb]}"
split_arr.insert(insert_numb, f"@everyone")
if "@a" not in mention_nicks:
mention_nicks.append("@a")
elif (mention[0] if not is_list else mention[0][0]) in ["p", "here"]:
if len(mention) == 3:
split_arr[insert_numb] = f"{mention[2]}{split_arr[insert_numb]}"
split_arr.insert(insert_numb, f"@here")
if "@a" not in mention_nicks:
mention_nicks.append("@a")
elif len(mention) > 1 and isinstance(mention[1], list):
if not is_list:
if len(mention) == 3:
split_arr[insert_numb] = f"{mention[2]}{split_arr[insert_numb]}"
split_arr.insert(insert_numb,
f"@{mention[0]} ({', '.join([mn.mention for mn in mention[1]])})")
else:
split_arr[insert_numb] = split_arr[insert_numb][1:].lstrip(mention[0][1])
if len(mention) == 3:
split_arr[insert_numb] = f"{mention[2]}{split_arr[insert_numb]}"
split_arr.insert(insert_numb,
f"@{mention[0][0]} ({', '.join([mn.mention for mn in mention[1]])})")
if "@a" not in mention_nicks:
mention_nicks.append(mention[0] if not is_list else mention[0][0])
else:
if not is_list:
if len(mention) == 3:
split_arr[insert_numb] = f"{mention[2]}{split_arr[insert_numb]}"
split_arr.insert(insert_numb,
mention[1].mention if len(mention) > 1 and
mention[1] is not None else f"@{mention[0]}")
else:
split_arr[insert_numb] = split_arr[insert_numb][1:].lstrip(mention[0][1])
if len(mention) == 3:
split_arr[insert_numb] = f"{mention[2]}{split_arr[insert_numb]}"
split_arr.insert(insert_numb,
mention[1].mention if len(mention) > 1 and
mention[1] is not None else f"@{mention[0][0]}")
insert_numb += 2
player_message = "".join(split_arr)
if len(mention_nicks) > 0:
from components.additional_funcs import announce, connect_rcon, times
with suppress(ConnectionError, socket.error):
with connect_rcon() as cl_r:
with times(0, 60, 20, cl_r):
for nick in mention_nicks:
announce(nick, f"@{player_nick[1:-1]} -> @{nick if nick != '@a' else 'everyone'}",
cl_r)
BotVars.webhook_chat.send(f"**{player_nick}** {player_message}")
return last_lines[-1]
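# Worked example of the mention resolution above (a sketch; the names are
# hypothetical). For a server log line such as
#     ... INFO ... <Steve> hi @Alex B, restarting soon
# player_nick is '<Steve>' and player_message is 'hi @Alex B, restarting soon'.
# The '@Alex B,' token is tried against Discord member names/display names,
# roles, and known Minecraft nicks, first appending up to 5 following words and
# then stripping up to 5 trailing characters, so '@Alex B,' can still resolve
# to the member 'Alex B'; on a hit the token is replaced with that member's
# Discord mention before the webhook relays the message.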
def _get_last_n_lines(file, number_of_lines, last_line):
list_of_lines = []
with open(file, 'rb') as read_obj:
read_obj.seek(-2, SEEK_END)
buffer = bytearray()
pointer_location = read_obj.tell()
while pointer_location >= 0:
read_obj.seek(pointer_location)
pointer_location = pointer_location - 1
new_byte = read_obj.read(1)
if new_byte == b'\n':
decoded_line = buffer[::-1].decode().strip()
if decoded_line == last_line:
return list(reversed(list_of_lines))
list_of_lines.append(decoded_line)
if len(list_of_lines) == number_of_lines:
return list(reversed(list_of_lines))
buffer = bytearray()
else:
buffer.extend(new_byte)
if len(buffer) > 0:
list_of_lines.append(buffer[::-1].decode().strip())
return list(reversed(list_of_lines))
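# Illustrative usage (a sketch; the path is hypothetical): tail a log while
# skipping everything up to a previously seen line:
#
#     lines = _get_last_n_lines(Path('logs/latest.log'), 10, None)
#     # after the file grows, only lines newer than the last seen one are returned:
#     new_lines = _get_last_n_lines(Path('logs/latest.log'), 10, lines[-1])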
| 51.741135 | 120 | 0.47872 | 1,505 | 14,591 | 4.381395 | 0.154153 | 0.031544 | 0.046709 | 0.043676 | 0.403397 | 0.336973 | 0.28162 | 0.256294 | 0.234152 | 0.207916 | 0 | 0.012332 | 0.433144 | 14,591 | 281 | 121 | 51.925267 | 0.784911 | 0.013981 | 0 | 0.282787 | 0 | 0.004098 | 0.070733 | 0.021422 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040984 | false | 0 | 0.061475 | 0.004098 | 0.147541 | 0.012295 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bad8ded43ad99c5016d472583b43eb9b9f4122d5 | 745 | py | Python | utils/update_all_playlist_descriptions.py | stephanebruckert/resident-archive | 75c270faded445ac71065c1b6e5d587da925f379 | [
"MIT"
] | 16 | 2019-05-19T15:52:25.000Z | 2021-06-02T10:03:30.000Z | utils/update_all_playlist_descriptions.py | stephanebruckert/resident-archive | 75c270faded445ac71065c1b6e5d587da925f379 | [
"MIT"
] | 2 | 2019-06-16T10:22:40.000Z | 2019-11-21T22:00:07.000Z | utils/update_all_playlist_descriptions.py | resident-archive/resident-archive-lambdas | 75c270faded445ac71065c1b6e5d587da925f379 | [
"MIT"
] | 2 | 2019-08-19T12:27:05.000Z | 2019-10-31T08:27:19.000Z | #!/usr/bin/python3.7
"""
Set all playlist descriptions.
Example result:
Resident Advisor Archive www.residentarchive.com @residentarchive
"""
import boto3
import spotipy
from pprint import pprint
dynamodb = boto3.resource("dynamodb", region_name='eu-west-1')
ra_playlists = dynamodb.Table('ra_playlists')
scope = 'playlist-modify-public playlist-modify-private'
sp = spotipy.Spotify(auth_manager=spotipy.SpotifyOAuth(scope=scope))
# Fetch all stored playlists from DynamoDB
playlists = ra_playlists.scan()
pprint(len(playlists['Items']))
for p in playlists['Items']:
desc = "Resident Advisor Archive www.residentarchive.com @residentarchive"
print(p.get('spotify_playlist'), desc)
    sp.playlist_change_details(p.get('spotify_playlist'), description=desc)
bad9b540ba82400bb66c15a6e6b7c6b46db61e1a | 1,428 | py | Python | msflops/reporter.py | swagshaw/mindspore-flops | 364139865c47b6c80cfd0ba6cd5e6901db983144 | [
"Apache-2.0"
] | 2 | 2021-10-09T11:53:35.000Z | 2022-02-02T16:07:33.000Z | msflops/reporter.py | swagshaw/mindspore-flops | 364139865c47b6c80cfd0ba6cd5e6901db983144 | [
"Apache-2.0"
] | null | null | null | msflops/reporter.py | swagshaw/mindspore-flops | 364139865c47b6c80cfd0ba6cd5e6901db983144 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_columns', 10000)
def round_value(value, binary=False):
divisor = 1024. if binary else 1000.
if value // divisor**4 > 0:
return str(round(value / divisor**4, 2)) + 'T'
elif value // divisor**3 > 0:
return str(round(value / divisor**3, 2)) + 'G'
elif value // divisor**2 > 0:
return str(round(value / divisor**2, 2)) + 'M'
elif value // divisor > 0:
return str(round(value / divisor, 2)) + 'K'
return str(value)
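# Worked examples of the rounding (decimal vs. binary divisors):
#   round_value(1536)              -> '1.54K'  (1536 / 1000)
#   round_value(1536, binary=True) -> '1.5K'   (1536 / 1024)
#   round_value(999)               -> '999'    (below one divisor, returned verbatim)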
def report_format(collected_nodes):
data = list()
for node in collected_nodes:
name = node.name
Flops = node.Flops
data.append([name, Flops])
df = pd.DataFrame(data)
df.columns = ['module name', 'Flops']
total_flops = df['Flops'].sum()
# Add Total row
    total_df = pd.Series([total_flops], index=['Flops'], name='total')
    # DataFrame.append was removed in pandas 2.0; concat keeps the 'total' row label
    df = pd.concat([df, total_df.to_frame().T])
df = df.fillna(' ')
df['Flops'] = df['Flops'].apply(lambda x: '{:,}'.format(x))
summary = str(df) + '\n'
summary += "=" * len(str(df).split('\n')[0])
summary += '\n'
summary += "-" * len(str(df).split('\n')[0])
summary += '\n'
summary += "Total Flops: {}Flops\n".format(round_value(total_flops))
return summary
| 26.444444 | 72 | 0.553922 | 189 | 1,428 | 4.10582 | 0.328042 | 0.123711 | 0.051546 | 0.07732 | 0.283505 | 0.229381 | 0.159794 | 0.087629 | 0.087629 | 0.087629 | 0 | 0.036574 | 0.272409 | 1,428 | 53 | 73 | 26.943396 | 0.710298 | 0.009104 | 0 | 0.052632 | 0 | 0 | 0.093418 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.026316 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bae06136e10bb2daeb4725a0ae34365494e741f2 | 9,707 | py | Python | tests/xmrswap/common.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | [
"MIT"
] | 2 | 2020-09-21T17:33:23.000Z | 2020-10-03T08:54:01.000Z | tests/xmrswap/common.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | [
"MIT"
] | 2 | 2020-10-03T09:18:48.000Z | 2020-10-13T19:58:34.000Z | tests/xmrswap/common.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
import os
import sys
import time
import signal
import logging
import subprocess
from io import StringIO
from unittest.mock import patch
from xmrswap.rpc import callrpc, callrpc_xmr, callrpc_xmr_na
from xmrswap.util import dumpje
from xmrswap.contrib.rpcauth import generate_salt, password_to_hmac
import bin.xmrswaptool as swapTool
TEST_DATADIRS = os.path.expanduser(os.getenv('TEST_DATADIRS', '/tmp/xmrswap'))
NUM_NODES = 3
BASE_PORT = 14792
BASE_RPC_PORT = 19792
XMR_NUM_NODES = 3
XMR_BASE_P2P_PORT = 17792
XMR_BASE_RPC_PORT = 21792
XMR_BASE_ZMQ_PORT = 22792
XMR_BASE_WALLET_RPC_PORT = 23792
bin_suffix = ('.exe' if os.name == 'nt' else '')
PARTICL_BINDIR = os.path.expanduser(os.getenv('PARTICL_BINDIR', '.'))
PARTICLD = os.getenv('PARTICLD', 'particld' + bin_suffix)
PARTICL_CLI = os.getenv('PARTICL_CLI', 'particl-cli' + bin_suffix)
PARTICL_TX = os.getenv('PARTICL_TX', 'particl-tx' + bin_suffix)
BITCOIN_BINDIR = os.path.expanduser(os.getenv('BITCOIN_BINDIR', ''))
BITCOIND = os.getenv('BITCOIND', 'bitcoind' + bin_suffix)
BITCOIN_CLI = os.getenv('BITCOIN_CLI', 'bitcoin-cli' + bin_suffix)
BITCOIN_TX = os.getenv('BITCOIN_TX', 'bitcoin-tx' + bin_suffix)
XMR_BINDIR = os.path.expanduser(os.getenv('XMR_BINDIR', ''))
XMRD = os.getenv('XMRD', 'monerod' + bin_suffix)
XMR_WALLET_RPC = os.getenv('XMR_WALLET_RPC', 'monero-wallet-rpc' + bin_suffix)
def prepareXmrDataDir(datadir, node_id, conf_file):
node_dir = os.path.join(datadir, 'xmr' + str(node_id))
if not os.path.exists(node_dir):
os.makedirs(node_dir)
cfg_file_path = os.path.join(node_dir, conf_file)
if os.path.exists(cfg_file_path):
return
with open(cfg_file_path, 'w+') as fp:
fp.write('regtest=1\n')
fp.write('keep-fakechain=1\n')
fp.write('data-dir={}\n'.format(node_dir))
fp.write('fixed-difficulty=1\n')
# fp.write('offline=1\n')
fp.write('p2p-bind-port={}\n'.format(XMR_BASE_P2P_PORT + node_id))
fp.write('rpc-bind-port={}\n'.format(XMR_BASE_RPC_PORT + node_id))
fp.write('p2p-bind-ip=127.0.0.1\n')
fp.write('rpc-bind-ip=127.0.0.1\n')
fp.write('zmq-rpc-bind-port={}\n'.format(XMR_BASE_ZMQ_PORT + node_id))
fp.write('zmq-rpc-bind-ip=127.0.0.1\n')
for i in range(0, XMR_NUM_NODES):
if node_id == i:
continue
fp.write('add-exclusive-node=127.0.0.1:{}\n'.format(XMR_BASE_P2P_PORT + i))
def prepareDataDir(datadir, node_id, conf_file):
node_dir = os.path.join(datadir, str(node_id))
if not os.path.exists(node_dir):
os.makedirs(node_dir)
cfg_file_path = os.path.join(node_dir, conf_file)
if os.path.exists(cfg_file_path):
return
with open(cfg_file_path, 'w+') as fp:
fp.write('regtest=1\n')
fp.write('[regtest]\n')
fp.write('port=' + str(BASE_PORT + node_id) + '\n')
fp.write('rpcport=' + str(BASE_RPC_PORT + node_id) + '\n')
salt = generate_salt(16)
fp.write('rpcauth={}:{}${}\n'.format('test' + str(node_id), salt, password_to_hmac(salt, 'test_pass' + str(node_id))))
fp.write('daemon=0\n')
fp.write('printtoconsole=0\n')
fp.write('server=1\n')
fp.write('discover=0\n')
fp.write('listenonion=0\n')
fp.write('bind=127.0.0.1\n')
fp.write('debug=1\n')
fp.write('debugexclude=libevent\n')
fp.write('fallbackfee=0.01\n')
fp.write('acceptnonstdtxn=0\n')
fp.write('txindex=1\n')
fp.write('findpeers=0\n')
# minstakeinterval=5 # Using walletsettings stakelimit instead
for i in range(0, NUM_NODES):
if node_id == i:
continue
fp.write('addnode=127.0.0.1:{}\n'.format(BASE_PORT + i))
def startXmrDaemon(node_dir, bin_dir, daemon_bin, opts=[]):
daemon_bin = os.path.expanduser(os.path.join(bin_dir, daemon_bin))
args = [daemon_bin, '--config-file=' + os.path.join(os.path.expanduser(node_dir), 'monerod.conf')] + opts
logging.info('Starting node {} --data-dir={}'.format(daemon_bin, node_dir))
return subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def startXmrWalletRPC(node_dir, bin_dir, wallet_bin, node_id, opts=[]):
daemon_bin = os.path.expanduser(os.path.join(bin_dir, wallet_bin))
data_dir = os.path.expanduser(node_dir)
args = [daemon_bin]
args += ['--daemon-address=localhost:{}'.format(XMR_BASE_RPC_PORT + node_id)]
args += ['--no-dns']
args += ['--rpc-bind-port={}'.format(XMR_BASE_WALLET_RPC_PORT + node_id)]
args += ['--wallet-dir={}'.format(os.path.join(data_dir, 'wallets'))]
args += ['--log-file={}'.format(os.path.join(data_dir, 'wallet.log'))]
args += ['--rpc-login=test{0}:test_pass{0}'.format(node_id)]
args += ['--shared-ringdb-dir={}'.format(os.path.join(data_dir, 'shared-ringdb'))]
args += opts
logging.info('Starting daemon {} --wallet-dir={}'.format(daemon_bin, node_dir))
wallet_stdout = open(os.path.join(data_dir, 'wallet_stdout.log'), 'w')
wallet_stderr = open(os.path.join(data_dir, 'wallet_stderr.log'), 'w')
return subprocess.Popen(args, stdin=subprocess.PIPE, stdout=wallet_stdout, stderr=wallet_stderr, cwd=data_dir)
def startDaemon(node_dir, bin_dir, daemon_bin, opts=[]):
daemon_bin = os.path.expanduser(os.path.join(bin_dir, daemon_bin))
args = [daemon_bin, '-datadir=' + os.path.expanduser(node_dir)] + opts
logging.info('Starting node {} -datadir={}'.format(daemon_bin, node_dir))
return subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def callnoderpc(node_id, method, params=[], wallet=None):
auth = 'test{0}:test_pass{0}'.format(node_id)
return callrpc(BASE_RPC_PORT + node_id, auth, method, params, wallet)
def make_rpc_func(node_id):
node_id = node_id
auth = 'test{0}:test_pass{0}'.format(node_id)
def rpc_func(method, params=None, wallet=None):
nonlocal node_id, auth
return callrpc(BASE_RPC_PORT + node_id, auth, method, params, wallet)
return rpc_func
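# Illustrative usage (a sketch; the wallet name is hypothetical): bind node 0's
# RPC port and credentials once, then issue calls without repeating them:
#
#     rpc0 = make_rpc_func(0)
#     height = rpc0('getblockcount')
#     info = rpc0('getwalletinfo', wallet='wallet_0')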
def checkSoftForks(ro):
if 'bip9_softforks' in ro:
assert(ro['bip9_softforks']['csv']['status'] == 'active')
assert(ro['bip9_softforks']['segwit']['status'] == 'active')
else:
assert(ro['softforks']['csv']['active'])
assert(ro['softforks']['segwit']['active'])
def callSwapTool(swap_file, method=None, json_params=None, str_param=None):
testargs = ['xmrswaptool.py', swap_file]
if method:
testargs.append(method)
if json_params is not None:
testargs.append('"' + dumpje(json_params) + '"')
if str_param is not None:
testargs.append(str_param)
print('testargs', ' '.join(testargs))
with patch.object(sys, 'argv', testargs):
with patch('sys.stdout', new=StringIO()) as fake_out:
try:
swapTool.main()
except Exception as e:
logging.info('swapTool failed: stdout: %s', fake_out.getvalue())
raise e
return fake_out.getvalue()
def waitForXMRNode(rpc_offset, max_tries=7):
for i in range(max_tries + 1):
try:
callrpc_xmr_na(XMR_BASE_RPC_PORT + rpc_offset, 'get_block_count')
return
except Exception as ex:
if i < max_tries:
logging.warning('Can\'t connect to XMR RPC: %s. Retrying in %d second/s.', str(ex), (i + 1))
time.sleep(i + 1)
raise ValueError('waitForXMRNode failed')
def waitForXMRWallet(rpc_offset, auth, max_tries=7):
for i in range(max_tries + 1):
try:
callrpc_xmr(XMR_BASE_WALLET_RPC_PORT + rpc_offset, auth, 'get_languages')
return
except Exception as ex:
if i < max_tries:
logging.warning('Can\'t connect to XMR wallet RPC: %s. Retrying in %d second/s.', str(ex), (i + 1))
time.sleep(i + 1)
raise ValueError('waitForXMRWallet failed')
def stopNodes(self):
self.stop_nodes = True
if self.update_thread is not None:
try:
self.update_thread.join()
except Exception:
logging.info('Failed to join update_thread')
self.update_thread = None
for d in self.xmr_daemons:
logging.info('Interrupting %d', d.pid)
try:
d.send_signal(signal.SIGINT)
except Exception as e:
logging.info('Interrupting %d, error %s', d.pid, str(e))
for d in self.xmr_daemons:
try:
d.wait(timeout=20)
if d.stdout:
d.stdout.close()
if d.stderr:
d.stderr.close()
if d.stdin:
d.stdin.close()
except Exception as e:
logging.info('Closing %d, error %s', d.pid, str(e))
self.xmr_daemons = []
for d in self.daemons:
logging.info('Interrupting %d', d.pid)
try:
d.send_signal(signal.SIGINT)
except Exception as e:
logging.info('Interrupting %d, error %s', d.pid, str(e))
for d in self.daemons:
try:
d.wait(timeout=20)
if d.stdout:
d.stdout.close()
if d.stderr:
d.stderr.close()
if d.stdin:
d.stdin.close()
except Exception as e:
logging.info('Closing %d, error %s', d.pid, str(e))
self.daemons = []
| 35.6875 | 126 | 0.631709 | 1,392 | 9,707 | 4.235632 | 0.185345 | 0.035617 | 0.027137 | 0.016791 | 0.494064 | 0.432836 | 0.388569 | 0.353121 | 0.337008 | 0.309193 | 0 | 0.016082 | 0.218502 | 9,707 | 271 | 127 | 35.819188 | 0.761139 | 0.030287 | 0 | 0.358852 | 0 | 0 | 0.160021 | 0.02722 | 0 | 0 | 0 | 0 | 0.019139 | 1 | 0.062201 | false | 0.023923 | 0.057416 | 0 | 0.172249 | 0.009569 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bae2914a1bdd8e77239d3c806fce04b31c71f2e9 | 440 | py | Python | Projetos Python/pythonexercicios/des101.py | Moyses-Nunes/Projetos-Python | 71ae170fb0d7be6afea18608bca630b57b9f0dff | [
"MIT"
] | null | null | null | Projetos Python/pythonexercicios/des101.py | Moyses-Nunes/Projetos-Python | 71ae170fb0d7be6afea18608bca630b57b9f0dff | [
"MIT"
] | null | null | null | Projetos Python/pythonexercicios/des101.py | Moyses-Nunes/Projetos-Python | 71ae170fb0d7be6afea18608bca630b57b9f0dff | [
"MIT"
] | null | null | null | from random import randint
def sort(lista):  # fills the list with five random values
    print('DRAWING THE LIST VALUES: ', end='')
    for n in range(0, 5):
        v = randint(1, 10)
        lista.append(v)
    print(f'The drawn values are {lista}.')
    print('Done!')


def somapar(lista):
    s = 0
    for v in lista:
        if v % 2 == 0:
            s += v
    print(f'The sum of the even values in {lista} is {s}.')
numeros = list()
sort(numeros)
somapar(numeros)
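# Sample run (the drawn values vary, so the output does too):
#   DRAWING THE LIST VALUES: The drawn values are [3, 8, 1, 10, 5].
#   Done!
#   The sum of the even values in [3, 8, 1, 10, 5] is 18.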
| 19.130435 | 59 | 0.565909 | 67 | 440 | 3.716418 | 0.567164 | 0.072289 | 0.056225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025723 | 0.293182 | 440 | 22 | 60 | 20 | 0.77492 | 0 | 0 | 0 | 0 | 0 | 0.245455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0 | 0.176471 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bae8b374ddcb9acaae8cb0a578eb44560c94794f | 2,802 | py | Python | test/animate.py | colonelwatch/ESP32-fluid-simulation | 407811901b45e3eadb43924e4754688f62eb6b05 | [
"MIT"
] | 5 | 2021-08-22T18:13:31.000Z | 2022-02-20T22:42:38.000Z | test/animate.py | colonelwatch/ESP32-fluid-simulation | 407811901b45e3eadb43924e4754688f62eb6b05 | [
"MIT"
] | null | null | null | test/animate.py | colonelwatch/ESP32-fluid-simulation | 407811901b45e3eadb43924e4754688f62eb6b05 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
DT = 0.01
FRAMERATE = 60
N_ROWS = 64
SECONDS = 10
def read_field_file(file_path, type):
if type != 'scalar' and type != 'vector':
raise ValueError('type must be scalar or vector')
file_str = open(file_path, 'r').read()
frame_arr = file_str.split('\n\n')
frame_arr = [frame for frame in frame_arr if frame]
frame_arr = [frame.split('\n') for frame in frame_arr]
frame_arr = [[row.split(' ') for row in frame] for frame in frame_arr]
if type == 'scalar':
frame_arr = [[[float(item) for item in row] for row in frame] for frame in frame_arr]
elif type == 'vector':
def string_to_vector(string):
string = string.replace('(', '')
string = string.replace(')', '')
pair = tuple(string.split(','))
pair = (float(pair[0]), float(pair[1]))
return pair
frame_arr = [[[string_to_vector(item) for item in row] for row in frame] for frame in frame_arr]
frame_arr = np.array(frame_arr)
return frame_arr
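# Expected on-disk layout, inferred from the parsing above: frames separated by
# blank lines, rows by newlines, items by single spaces; scalar items are plain
# floats and vector items are '(u,v)' pairs. A 2x2 scalar field with two frames
# would look like:
#
#     0.0 1.0
#     2.0 3.0
#
#     0.5 1.5
#     2.5 3.5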
def read_velocity():
return read_field_file('velocity.txt', 'vector')
def read_pressure():
return read_field_file('pressure.txt', 'scalar')
def read_divergence(absolute=True):
    divergence = read_field_file('divergence.txt', 'scalar')
    if absolute:
        divergence = np.abs(divergence)
    return divergence
def read_color():
return read_field_file('color.txt', 'scalar')
velocity_frames = read_velocity()
pressure_frames = read_pressure()
color_frames = read_color()
divergence_frames = read_divergence()
frame_interval = 1000//FRAMERATE
frame_count = velocity_frames.shape[0]
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.set_title('Pressure and Velocity')
ax2.set_title('Color')
ax3.set_title('Absolute Divergence (Bad!)')
artists = []
foo = np.random.random(size=(64, 64))
artists.append(ax1.quiver(foo, foo, scale=100, scale_units='inches', color='blue'))
artists.append(ax1.imshow(foo, cmap='hot', interpolation='nearest', vmin=-2, vmax=2, animated=True))
artists.append(ax2.imshow(foo, interpolation='nearest', vmin=0, vmax=1, animated=True))
artists.append(ax3.imshow(foo, cmap='hot', interpolation='nearest', vmin=0, vmax=1, animated=True))
def update(i):
u = velocity_frames[i, :, :, 0]
v = velocity_frames[i, :, :, 1]
pressure_frame = pressure_frames[i, :, :]
color_frame = color_frames[i, :, :]
divergence_frame = divergence_frames[i, :, :]
artists[0].set_UVC(u, v)
artists[1].set_array(pressure_frame)
artists[2].set_array(color_frame)
artists[3].set_array(divergence_frame)
return artists
ani = animation.FuncAnimation(fig, update, frames=frame_count, interval=frame_interval, blit=True)
plt.show() | 34.170732 | 104 | 0.682013 | 400 | 2,802 | 4.605 | 0.26 | 0.060803 | 0.035288 | 0.040717 | 0.176982 | 0.176982 | 0.176982 | 0.113464 | 0.067861 | 0.051031 | 0 | 0.020338 | 0.175232 | 2,802 | 82 | 105 | 34.170732 | 0.77672 | 0 | 0 | 0 | 0 | 0 | 0.079914 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104478 | false | 0 | 0.044776 | 0.044776 | 0.253731 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baeaf373a9c8459092cec53c9defe9e23ec03c38 | 9,519 | py | Python | policy/RTS2_FITS_LUTs.py | tguillemLSST/eotest | c6f150984fa5dff85b9805028645bf46fc846f11 | [
"BSD-3-Clause-LBNL"
] | 3 | 2016-04-21T07:05:45.000Z | 2020-08-05T08:37:37.000Z | policy/RTS2_FITS_LUTs.py | tguillemLSST/eotest | c6f150984fa5dff85b9805028645bf46fc846f11 | [
"BSD-3-Clause-LBNL"
] | 70 | 2015-03-26T09:48:53.000Z | 2020-04-22T16:29:43.000Z | policy/RTS2_FITS_LUTs.py | tguillemLSST/eotest | c6f150984fa5dff85b9805028645bf46fc846f11 | [
"BSD-3-Clause-LBNL"
] | 5 | 2017-08-15T20:52:44.000Z | 2022-03-25T12:54:07.000Z | RTS2_FITS_LUTs = {}
RTS2_FITS_LUTs['BNL'] = {
0: {
# 'MJD' : 'JD',
# 'MONDIODE' : 'AMP0.CURRENT.MIN',
'MONOWL': 'MONOCH.WAVELENG',
'FILTER': 'MONOCH.FILT_1',
'CONTROLL': 'INSTRUME',
'CCDTEMP': 'CRYO.C.TEMP',
'IMGTYPE': 'TESTTYPE',
'TEMP_SET': 'CRYO.2.SETPT',
'CTLRCFG': 'CONFIG',
'TSTAND': 'TELESCOP',
'CCD_SERN': 'CCD_SER'
},
'TEST_COND': {
'MONOWL': 'MONOCH.WAVELENG',
'FILTER': 'MONOCH.FILT_1',
'CCDTEMP': 'CRYO.C.TEMP',
'TEMP_SET': 'CRYO.2.SETPT'
},
'CCD_COND': {
'V_OD1': 'BIAS_1.OD1_Vmeas',
'V_OD2': 'BIAS_1.OD2_Vmeas',
'V_OD3': 'BIAS_1.OD3_Vmeas',
'V_OD4': 'BIAS_1.OD4_Vmeas',
'V_OD5': 'BIAS_1.OD5_Vmeas',
'V_OD6': 'BIAS_1.OD6_Vmeas',
'V_OD7': 'BIAS_1.OD7_Vmeas',
'V_OD8': 'BIAS_1.OD8_Vmeas',
'V_OD9': 'BIAS_1.OD1_Vmeas',
'V_OD10': 'BIAS_1.OD2_Vmeas',
'V_OD11': 'BIAS_1.OD3_Vmeas',
'V_OD12': 'BIAS_1.OD4_Vmeas',
'V_OD13': 'BIAS_1.OD5_Vmeas',
'V_OD14': 'BIAS_1.OD6_Vmeas',
'V_OD15': 'BIAS_1.OD7_Vmeas',
'V_OD16': 'BIAS_1.OD8_Vmeas',
'V_RD1': 'BIAS_2.RD_Vmeas',
'V_RD2': 'BIAS_2.RD_Vmeas',
'V_RD3': 'BIAS_2.RD_Vmeas',
'V_RD4': 'BIAS_2.RD_Vmeas',
'V_RD5': 'BIAS_2.RD_Vmeas',
'V_RD6': 'BIAS_2.RD_Vmeas',
'V_RD7': 'BIAS_2.RD_Vmeas',
'V_RD8': 'BIAS_2.RD_Vmeas',
'V_RD9': 'BIAS_2.RD_Vmeas',
'V_RD10': 'BIAS_2.RD_Vmeas',
'V_RD11': 'BIAS_2.RD_Vmeas',
'V_RD12': 'BIAS_2.RD_Vmeas',
'V_RD13': 'BIAS_2.RD_Vmeas',
'V_RD14': 'BIAS_2.RD_Vmeas',
'V_RD15': 'BIAS_2.RD_Vmeas',
'V_RD16': 'BIAS_2.RD_Vmeas',
'V_OG1': 'BIAS_1.OG_Vmeas',
'V_OG2': 'BIAS_1.OG_Vmeas',
'V_OG3': 'BIAS_1.OG_Vmeas',
'V_OG4': 'BIAS_1.OG_Vmeas',
'V_OG5': 'BIAS_1.OG_Vmeas',
'V_OG6': 'BIAS_1.OG_Vmeas',
'V_OG7': 'BIAS_1.OG_Vmeas',
'V_OG8': 'BIAS_1.OG_Vmeas',
'V_OG9': 'BIAS_1.OG_Vmeas',
'V_OG10': 'BIAS_1.OG_Vmeas',
'V_OG11': 'BIAS_1.OG_Vmeas',
'V_OG12': 'BIAS_1.OG_Vmeas',
'V_OG13': 'BIAS_1.OG_Vmeas',
'V_OG14': 'BIAS_1.OG_Vmeas',
'V_OG15': 'BIAS_1.OG_Vmeas',
'V_OG16': 'BIAS_1.OG_Vmeas',
'V_S1L': 'DRV_1.S1_low',
'V_S1H': 'DRV_1.S1_high',
'V_S2L': 'DRV_1.S2_low',
'V_S2H': 'DRV_1.S2_high',
'V_S3L': 'DRV_1.S3_low',
'V_S3H': 'DRV_1.S3_high',
'V_RGL': 'DRV_1.RG_low',
'V_RGH': 'DRV_1.RG_high',
'V_P1L': 'DRV_1.P1_low',
'V_P1H': 'DRV_1.P1_high',
'V_P2L': 'DRV_1.P2_low',
'V_P2H': 'DRV_1.P2_high',
'V_P3L': 'DRV_1.P3_low',
'V_P3H': 'DRV_1.P3_high',
'V_P4L': 'DRV_1.P4_low',
'V_P4H': 'DRV_1.P4_high',
'I_OD1': 'BIAS_1.OD1_Cmeas',
'I_OD2': 'BIAS_1.OD2_Cmeas',
'I_OD3': 'BIAS_1.OD3_Cmeas',
'I_OD4': 'BIAS_1.OD4_Cmeas',
'I_OD5': 'BIAS_1.OD5_Cmeas',
'I_OD6': 'BIAS_1.OD6_Cmeas',
'I_OD7': 'BIAS_1.OD7_Cmeas',
'I_OD8': 'BIAS_1.OD8_Cmeas',
'I_OD9': 'BIAS_1.OD1_Cmeas',
'I_OD10': 'BIAS_1.OD2_Cmeas',
'I_OD11': 'BIAS_1.OD3_Cmeas',
'I_OD12': 'BIAS_1.OD4_Cmeas',
'I_OD13': 'BIAS_1.OD5_Cmeas',
'I_OD14': 'BIAS_1.OD6_Cmeas',
'I_OD15': 'BIAS_1.OD7_Cmeas',
'I_OD16': 'BIAS_1.OD8_Cmeas',
'I_RD1': 'BIAS_2.RD_Cmeas',
'I_RD2': 'BIAS_2.RD_Cmeas',
'I_RD3': 'BIAS_2.RD_Cmeas',
'I_RD4': 'BIAS_2.RD_Cmeas',
'I_RD5': 'BIAS_2.RD_Cmeas',
'I_RD6': 'BIAS_2.RD_Cmeas',
'I_RD7': 'BIAS_2.RD_Cmeas',
'I_RD8': 'BIAS_2.RD_Cmeas',
'I_RD9': 'BIAS_2.RD_Cmeas',
'I_RD10': 'BIAS_2.RD_Cmeas',
'I_RD11': 'BIAS_2.RD_Cmeas',
'I_RD12': 'BIAS_2.RD_Cmeas',
'I_RD13': 'BIAS_2.RD_Cmeas',
'I_RD14': 'BIAS_2.RD_Cmeas',
'I_RD15': 'BIAS_2.RD_Cmeas',
'I_RD16': 'BIAS_2.RD_Cmeas',
'I_OG1': 'BIAS_1.OG_Cmeas',
'I_OG2': 'BIAS_1.OG_Cmeas',
'I_OG3': 'BIAS_1.OG_Cmeas',
'I_OG4': 'BIAS_1.OG_Cmeas',
'I_OG5': 'BIAS_1.OG_Cmeas',
'I_OG6': 'BIAS_1.OG_Cmeas',
'I_OG7': 'BIAS_1.OG_Cmeas',
'I_OG8': 'BIAS_1.OG_Cmeas',
'I_OG9': 'BIAS_1.OG_Cmeas',
'I_OG10': 'BIAS_1.OG_Cmeas',
'I_OG11': 'BIAS_1.OG_Cmeas',
'I_OG12': 'BIAS_1.OG_Cmeas',
'I_OG13': 'BIAS_1.OG_Cmeas',
'I_OG14': 'BIAS_1.OG_Cmeas',
'I_OG15': 'BIAS_1.OG_Cmeas',
'I_OG16': 'BIAS_1.OG_Cmeas'
}
}
RTS2_FITS_LUTs['HARVARD'] = {
0: {
# 'MJD' : 'JD',
'MONDIODE': 'K_PHOT.CURRENT',
'MONOWL': 'MONO.WAVELENG',
'FILTER': 'MONO.FILT',
'CONTROLL': 'INSTRUME',
'CCDTEMP': 'LAKESHORE.A.TEMP',
'IMGTYPE': 'TESTTYPE',
'TEMP_SET': 'LAKESHORE.SETPOINT',
'CTLRCFG': 'SIGFILE', # don't know what you want here
'TSTAND': 'TELESCOP',
'CCD_SERN': 'CCD_SER'
},
'TEST_COND': {
'MONOWL': 'MONO.WAVELENG',
'FILTER': 'MONO.FILTER',
'CCDTEMP': 'LAKESHORE.A.TEMP',
'TEMP_SET': 'LAKESHORE.SETPOINT'
},
'CCD_COND': {
'V_OD1': 'OD1_R',
'V_OD2': 'OD1_R',
'V_OD3': 'OD1_R',
'V_OD4': 'OD1_R',
'V_OD5': 'OD1_R',
'V_OD6': 'OD1_R',
'V_OD7': 'OD1_R',
'V_OD8': 'OD1_R',
'V_OD9': 'OD1_R',
'V_OD10': 'OD1_R',
'V_OD11': 'OD1_R',
'V_OD12': 'OD1_R',
'V_OD13': 'OD1_R',
'V_OD14': 'OD1_R',
'V_OD15': 'OD1_R',
'V_OD16': 'OD1_R',
'V_RD1': 'RD',
'V_RD2': 'RD',
'V_RD3': 'RD',
'V_RD4': 'RD',
'V_RD5': 'RD',
'V_RD6': 'RD',
'V_RD7': 'RD',
'V_RD8': 'RD',
'V_RD9': 'RD',
'V_RD10': 'RD',
'V_RD11': 'RD',
'V_RD12': 'RD',
'V_RD13': 'RD',
'V_RD14': 'RD',
'V_RD15': 'RD',
'V_RD16': 'RD',
'V_OG1': 'OG1_R',
'V_OG2': 'OG1_R',
'V_OG3': 'OG1_R',
'V_OG4': 'OG1_R',
'V_OG5': 'OG1_R',
'V_OG6': 'OG1_R',
'V_OG7': 'OG1_R',
'V_OG8': 'OG1_R',
'V_OG9': 'OG1_R',
'V_OG10': 'OG1_R',
'V_OG11': 'OG1_R',
'V_OG12': 'OG1_R',
'V_OG13': 'OG1_R',
'V_OG14': 'OG1_R',
'V_OG15': 'OG1_R',
'V_OG16': 'OG1_R',
'V_S1L': 'SLO',
'V_S1H': 'SHI',
'V_S2L': 'SLO',
'V_S2H': 'SHI',
'V_S3L': 'SLO',
'V_S3H': 'SHI',
'V_RGL': 'RLO',
'V_RGH': 'RHI',
'V_P1L': 'PLO',
'V_P1H': 'PHI',
'V_P2L': 'PLO',
'V_P2H': 'PHI',
'V_P3L': 'PLO',
'V_P3H': 'PHI',
'V_P4L': 'PLO',
'V_P4H': 'PHI',
# 'I_OD1' : 'BIAS_1.OD1_Cmeas',
# 'I_OD2' : 'BIAS_1.OD2_Cmeas',
# 'I_OD3' : 'BIAS_1.OD3_Cmeas',
# 'I_OD4' : 'BIAS_1.OD4_Cmeas',
# 'I_OD5' : 'BIAS_1.OD5_Cmeas',
# 'I_OD6' : 'BIAS_1.OD6_Cmeas',
# 'I_OD7' : 'BIAS_1.OD7_Cmeas',
# 'I_OD8' : 'BIAS_1.OD8_Cmeas',
# 'I_OD9' : 'BIAS_1.OD1_Cmeas',
# 'I_OD10' : 'BIAS_1.OD2_Cmeas',
# 'I_OD11' : 'BIAS_1.OD3_Cmeas',
# 'I_OD12' : 'BIAS_1.OD4_Cmeas',
# 'I_OD13' : 'BIAS_1.OD5_Cmeas',
# 'I_OD14' : 'BIAS_1.OD6_Cmeas',
# 'I_OD15' : 'BIAS_1.OD7_Cmeas',
# 'I_OD16' : 'BIAS_1.OD8_Cmeas',
# 'I_RD1' : 'BIAS_2.RD_Cmeas',
# 'I_RD2' : 'BIAS_2.RD_Cmeas',
# 'I_RD3' : 'BIAS_2.RD_Cmeas',
# 'I_RD4' : 'BIAS_2.RD_Cmeas',
# 'I_RD5' : 'BIAS_2.RD_Cmeas',
# 'I_RD6' : 'BIAS_2.RD_Cmeas',
# 'I_RD7' : 'BIAS_2.RD_Cmeas',
# 'I_RD8' : 'BIAS_2.RD_Cmeas',
# 'I_RD9' : 'BIAS_2.RD_Cmeas',
# 'I_RD10' : 'BIAS_2.RD_Cmeas',
# 'I_RD11' : 'BIAS_2.RD_Cmeas',
# 'I_RD12' : 'BIAS_2.RD_Cmeas',
# 'I_RD13' : 'BIAS_2.RD_Cmeas',
# 'I_RD14' : 'BIAS_2.RD_Cmeas',
# 'I_RD15' : 'BIAS_2.RD_Cmeas',
# 'I_RD16' : 'BIAS_2.RD_Cmeas',
# 'I_OG1' : 'BIAS_1.OG_Cmeas',
# 'I_OG2' : 'BIAS_1.OG_Cmeas',
# 'I_OG3' : 'BIAS_1.OG_Cmeas',
# 'I_OG4' : 'BIAS_1.OG_Cmeas',
# 'I_OG5' : 'BIAS_1.OG_Cmeas',
# 'I_OG6' : 'BIAS_1.OG_Cmeas',
# 'I_OG7' : 'BIAS_1.OG_Cmeas',
# 'I_OG8' : 'BIAS_1.OG_Cmeas',
# 'I_OG9' : 'BIAS_1.OG_Cmeas',
# 'I_OG10' : 'BIAS_1.OG_Cmeas',
# 'I_OG11' : 'BIAS_1.OG_Cmeas',
# 'I_OG12' : 'BIAS_1.OG_Cmeas',
# 'I_OG13' : 'BIAS_1.OG_Cmeas',
# 'I_OG14' : 'BIAS_1.OG_Cmeas',
# 'I_OG15' : 'BIAS_1.OG_Cmeas',
# 'I_OG16' : 'BIAS_1.OG_Cmeas'
}
}
sensor_geom = {'ITL': {'nx': 509,
'ny': 2000,
'prescan': 3,
'vendor': 'ITL'},
'E2V': {'nx': 512,
'ny': 2002,
'prescan': 10,
'vendor': 'E2V'}
}
| 33.517606 | 62 | 0.460027 | 1,328 | 9,519 | 2.847139 | 0.116717 | 0.126951 | 0.088865 | 0.10156 | 0.668871 | 0.465485 | 0.465485 | 0.447501 | 0.447501 | 0.425813 | 0 | 0.094222 | 0.345519 | 9,519 | 283 | 63 | 33.636042 | 0.512681 | 0.197605 | 0 | 0.104803 | 0 | 0 | 0.453682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baebfbf068726c3c866bdd3aa0015d86ce8b2933 | 850 | py | Python | problem_#115.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | [
"MIT"
] | null | null | null | problem_#115.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | [
"MIT"
] | null | null | null | problem_#115.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | [
"MIT"
] | null | null | null | """
Given two non-empty binary trees s and t, check whether tree t has exactly the same structure and node values as a subtree of s. A subtree of s is a tree that consists of a node in s and all of this node's descendants. The tree s could also be considered as a subtree of itself.
"""
def isSubTree(self, s, t):
    from hashlib import sha256

    def hash_(x):
        S = sha256()
        S.update(x.encode('utf-8'))  # hash the serialised subtree (the original empty update() hashed nothing)
        return S.hexdigest()

    def merkle(node):
        if not node:
            return '#'
        m_left = merkle(node.left)
        m_right = merkle(node.right)
        node.merkle = hash_(m_left + str(node.val) + m_right)
        return node.merkle

    merkle(s)
    merkle(t)

    def dfs(node):
        if not node:
            return False
        return node.merkle == t.merkle or dfs(node.left) or dfs(node.right)

    return dfs(s)
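
# Usage sketch (hypothetical TreeNode class; isSubTree is written LeetCode-style,
# so it would normally live on a Solution class):
#   class TreeNode:
#       def __init__(self, val=0, left=None, right=None):
#           self.val, self.left, self.right = val, left, right
#
#   s = TreeNode(3, TreeNode(4, TreeNode(1), TreeNode(2)), TreeNode(5))
#   t = TreeNode(4, TreeNode(1), TreeNode(2))
#   isSubTree(None, s, t)  # -> True (None stands in for the unused self)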
| 27.419355 | 274 | 0.614118 | 135 | 850 | 3.822222 | 0.42963 | 0.046512 | 0.05814 | 0.042636 | 0.073643 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01005 | 0.297647 | 850 | 30 | 275 | 28.333333 | 0.854271 | 0.322353 | 0 | 0.1 | 0 | 0 | 0.001761 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.05 | 0 | 0.55 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baedb9f63ad29c867ce4f5a6689df6868f5d1f76 | 2,039 | py | Python | tests/test_python_literal_action.py | madman-bob/python-argparse-utils | e3a816596d1b374825a4b8d45b56fbce4758a4f4 | [
"MIT"
] | 7 | 2019-07-05T20:17:08.000Z | 2021-09-27T04:56:40.000Z | tests/test_python_literal_action.py | madman-bob/python-argparse-utils | e3a816596d1b374825a4b8d45b56fbce4758a4f4 | [
"MIT"
] | 2 | 2019-04-03T09:43:40.000Z | 2020-05-05T17:47:22.000Z | tests/test_python_literal_action.py | madman-bob/python-argparse-utils | e3a816596d1b374825a4b8d45b56fbce4758a4f4 | [
"MIT"
] | 1 | 2020-12-11T10:47:49.000Z | 2020-12-11T10:47:49.000Z | from argparse import ArgumentParser
from contextlib import redirect_stderr
from io import StringIO
from re import escape as re_escape
from unittest import TestCase
from argparse_utils import python_literal_action
class TestPythonLiteralAction(TestCase):
def test_basic_python_literal_action(self):
parser = ArgumentParser()
parser.add_argument('-a', action=python_literal_action())
tests = [
('[1, 2, 3]', [1, 2, 3]),
('{"a": 1, "b": 2}', {"a": 1, "b": 2}),
('None', None),
('{"nested": {"Python": ["objects"]}}', {"nested": {"Python": ["objects"]}}),
('("some", "tuple")', ("some", "tuple")),
("'Single quotes'", 'Single quotes'),
]
for literal_str, literal_obj in tests:
with self.subTest(literal_obj=literal_obj):
args = parser.parse_args(['-a', literal_str])
self.assertEqual(args.a, literal_obj)
def test_invalid_python_literals(self):
invalid_python_literals = [
'variable_name',
'not a literal',
'{"incomplete": "dict"',
'null',
'2 * 3'
]
parser = ArgumentParser()
parser.add_argument('-a', action=python_literal_action())
for invalid_python_literal in invalid_python_literals:
with self.subTest(invalid_python_literal=invalid_python_literal):
error_message = StringIO()
with redirect_stderr(error_message), self.assertRaises(SystemExit):
parser.parse_args(['-a', invalid_python_literal])
self.assertRegex(
error_message.getvalue(),
re_escape("invalid Python literal: '{}'".format(invalid_python_literal))
)
def test_python_literal_action_help(self):
parser = ArgumentParser()
parser.add_argument('-a', action=python_literal_action())
self.assertRegex(parser.format_help(), "Python literal")
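
# Standalone usage sketch (mirrors what the tests above exercise; assumes the
# argparse_utils package is installed):
#   parser = ArgumentParser()
#   parser.add_argument('-a', action=python_literal_action())
#   args = parser.parse_args(['-a', '{"nested": [1, 2]}'])
#   assert args.a == {"nested": [1, 2]}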
| 35.155172 | 92 | 0.590486 | 211 | 2,039 | 5.450237 | 0.308057 | 0.146957 | 0.09913 | 0.075652 | 0.171304 | 0.171304 | 0.171304 | 0.171304 | 0.171304 | 0.171304 | 0 | 0.008191 | 0.281511 | 2,039 | 57 | 93 | 35.77193 | 0.776792 | 0 | 0 | 0.133333 | 0 | 0 | 0.121138 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baedce755f416709ddf9ff38c919235dbc9775f4 | 3,911 | py | Python | emissor/processing/processing.py | cltl/GMRCAnnotation | cc4c7f0c9cbbce0eb6c7dee4d39d128f91b85839 | [
"MIT"
] | null | null | null | emissor/processing/processing.py | cltl/GMRCAnnotation | cc4c7f0c9cbbce0eb6c7dee4d39d128f91b85839 | [
"MIT"
] | 18 | 2021-01-12T15:18:07.000Z | 2021-03-23T12:30:57.000Z | emissor/processing/processing.py | cltl/GMRCAnnotation | cc4c7f0c9cbbce0eb6c7dee4d39d128f91b85839 | [
"MIT"
] | null | null | null | import logging
from joblib import Parallel, delayed
from typing import Iterable
from emissor.persistence import ScenarioStorage
from emissor.processing.api import DataPreprocessor, ScenarioInitializer, SignalProcessor
from emissor.representation.scenario import Modality
logger = logging.getLogger(__name__)
class DataProcessing:
def __init__(self, storage: ScenarioStorage, preprocessors: Iterable[DataPreprocessor],
scenario_initializer: ScenarioInitializer, signal_processors: Iterable[SignalProcessor],
num_jobs: int = 1):
self._storage = storage
self._preprocessors = preprocessors
self._scenario_initializer = scenario_initializer
self._signal_processors = signal_processors
self._num_jobs = num_jobs
def run(self):
self.run_preprocessing()
self.run_init()
self.run_process()
def run_preprocessing(self):
for preprocessor in self._preprocessors:
with preprocessor:
logger.info("Preprocessing dataset with %s to %s", preprocessor.name, self._storage.base_path)
preprocessor.preprocess()
logger.info("Finished preprocessing dataset with %s", preprocessor.name)
def run_init(self):
if not self._scenario_initializer:
return
logger.info("Initialize scenarios %s with %s", self._storage.base_path, self._scenario_initializer.name)
with self._scenario_initializer:
self.execute_for_scenarios(_initialize, self._scenario_initializer)
def run_process(self):
if not self._signal_processors:
return
logger.info("Processing scenarios with processors %s", [processor.name for processor in self._signal_processors])
for processor in self._signal_processors:
with processor:
self.execute_for_scenarios(_process, processor)
def execute_for_scenarios(self, function, task):
scenario_ids = tuple(sorted(self._storage.list_scenarios(), key=task.scenario_key(self._storage)))
if not task.parallel:
for scenario_id in scenario_ids:
function(self._storage.base_path, task, scenario_id)
else:
scenario_ids = tuple(scenario_ids)
num_jobs = min(self._num_jobs, len(scenario_ids))
Parallel(n_jobs=num_jobs)(
delayed(function)(self._storage.base_path, task, scenario_id)
for scenario_id in scenario_ids)
def _initialize(base_path, scenario_initializer, scenario_id):
storage = ScenarioStorage(base_path)
try:
storage.load_scenario(scenario_id)
logger.debug("Scenario %s already initialized", scenario_id)
return
except ValueError:
pass
scenario_initializer.initialize_scenario(scenario_id, storage)
logger.info("Initialized scenario %s", scenario_id)
scenario = storage.load_scenario(scenario_id)
for modality in Modality:
if modality in scenario.signals:
logger.debug("Modality %s for scenario %s already initialized", modality, scenario_id)
continue
scenario_initializer.initialize_modality(scenario, modality)
logger.info("Initialized modality %s for scenario %s", modality.name, scenario_id)
storage.save_scenario(scenario)
def _process(base_path, processor, scenario_id):
storage = ScenarioStorage(base_path)
logger.info("Processing scenario %s with processor %s", scenario_id, processor.name)
scenario = storage.load_scenario(scenario_id)
processor.process_scenario(scenario)
storage.save_scenario(scenario)
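
# Hypothetical wiring sketch (argument values assumed; see emissor.processing.api
# for the actual preprocessor/initializer/processor interfaces):
#   storage = ScenarioStorage("/data/scenarios")
#   DataProcessing(storage, preprocessors=[...], scenario_initializer=my_initializer,
#                  signal_processors=[...], num_jobs=4).run()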
# TODO
def _signal_generator(scenario_id, modality, processor, storage):
signals = storage.load_modality(scenario_id, Modality[modality.upper()])
for signal in sorted(signals, key=processor.signal_key(storage)):
yield signal | 38.343137 | 121 | 0.706469 | 435 | 3,911 | 6.094253 | 0.195402 | 0.064127 | 0.04338 | 0.028668 | 0.161071 | 0.134289 | 0.030932 | 0.030932 | 0 | 0 | 0 | 0.000326 | 0.216824 | 3,911 | 102 | 122 | 38.343137 | 0.865165 | 0.001023 | 0 | 0.115385 | 0 | 0 | 0.082693 | 0 | 0 | 0 | 0 | 0.009804 | 0 | 1 | 0.115385 | false | 0.012821 | 0.076923 | 0 | 0.24359 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baef32a63d7954d576b6da6841f0534bc30e3778 | 2,298 | py | Python | pySDC/projects/FastWaveSlowWave/plotgmrescounter_boussinesq.py | brownbaerchen/pySDC | 31293859d731646aa09cef4345669eac65501550 | [
"BSD-2-Clause"
] | 20 | 2015-03-21T09:02:55.000Z | 2022-02-26T20:22:21.000Z | pySDC/projects/FastWaveSlowWave/plotgmrescounter_boussinesq.py | brownbaerchen/pySDC | 31293859d731646aa09cef4345669eac65501550 | [
"BSD-2-Clause"
] | 61 | 2015-03-02T09:35:55.000Z | 2022-03-17T12:42:48.000Z | pySDC/projects/FastWaveSlowWave/plotgmrescounter_boussinesq.py | brownbaerchen/pySDC | 31293859d731646aa09cef4345669eac65501550 | [
"BSD-2-Clause"
] | 19 | 2015-02-20T11:52:33.000Z | 2022-02-02T10:46:27.000Z | import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
def plot_buoyancy(cwd=''):
"""
Plotting routine for the cross section of the buoyancy
Args:
cwd (string): current working directory
"""
xx = np.load(cwd + 'data/xaxis.npy')
uend = np.load(cwd + 'data/sdc.npy')
udirk = np.load(cwd + 'data/dirk.npy')
uimex = np.load(cwd + 'data/rkimex.npy')
uref = np.load(cwd + 'data/uref.npy')
usplit = np.load(cwd + 'data/split.npy')
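
    # Relative errors in the maximum norm: ||u - u_ref||_inf / ||u_ref||_inf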
err_split = np.linalg.norm(usplit.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_dirk = np.linalg.norm(udirk.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_imex = np.linalg.norm(uimex.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_sdc = np.linalg.norm(uend.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
    assert err_split < 4.821e-02, 'ERROR: split error is too high, got %s' % err_split
assert err_dirk < 1.495e-01, 'ERROR: dirk error is too high, got %s' % err_dirk
assert err_imex < 1.305e-01, 'ERROR: imex error is too high, got %s' % err_imex
assert err_sdc < 9.548e-02, 'ERROR: sdc error is too high, got %s' % err_sdc
print("Estimated discretisation error split explicit: %5.3e" % err_split)
print("Estimated discretisation error of DIRK: %5.3e" % err_dirk)
print("Estimated discretisation error of RK-IMEX: %5.3e" % err_imex)
print("Estimated discretisation error of SDC: %5.3e" % err_sdc)
fs = 8
rcParams['figure.figsize'] = 5.0, 2.5
plt.figure()
plt.plot(xx[:, 5], udirk[2, :, 5], '--', color='g', markersize=fs - 2, label='DIRK(4)', dashes=(3, 3))
plt.plot(xx[:, 5], uend[2, :, 5], '-', color='b', label='SDC(4)')
plt.plot(xx[:, 5], uimex[2, :, 5], '--', color='r', markersize=fs - 2, label='IMEX(4)', dashes=(3, 3))
plt.legend(loc='lower left', fontsize=fs, prop={'size': fs})
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
plt.xlabel('x [km]', fontsize=fs, labelpad=0)
plt.ylabel('Bouyancy', fontsize=fs, labelpad=1)
filename = 'data/boussinesq.png'
plt.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
plot_buoyancy()
| 43.358491 | 114 | 0.638381 | 359 | 2,298 | 4.011142 | 0.300836 | 0.044444 | 0.066667 | 0.088889 | 0.295833 | 0.20625 | 0.20625 | 0.147917 | 0.147917 | 0.147917 | 0 | 0.030319 | 0.181897 | 2,298 | 52 | 115 | 44.192308 | 0.735638 | 0.045692 | 0 | 0 | 0 | 0 | 0.241125 | 0 | 0 | 0 | 0 | 0 | 0.108108 | 1 | 0.027027 | false | 0 | 0.081081 | 0 | 0.108108 | 0.108108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf0c0d13ccd7ea08a4340fe5baa52080b784567 | 3,297 | py | Python | src/voiceRecognition/voice_recognition.py | PandaFood/M7012E-Autonom_Robot | e0bef049cc63071f060414ed0ce89001d363401a | [
"FSFAP"
] | null | null | null | src/voiceRecognition/voice_recognition.py | PandaFood/M7012E-Autonom_Robot | e0bef049cc63071f060414ed0ce89001d363401a | [
"FSFAP"
] | null | null | null | src/voiceRecognition/voice_recognition.py | PandaFood/M7012E-Autonom_Robot | e0bef049cc63071f060414ed0ce89001d363401a | [
"FSFAP"
] | null | null | null | #Requires the modules SpeechRecognition and pyaudio
import speech_recognition as sr
import sys
sys.path.insert(1, "..")
from camera.camera import Camera
from widefind.widefindScript import WidefindTracker
def recognizeSpeech(recognizer, microphone):
    # Check that the recognizer and microphone arguments are of the appropriate types
if not isinstance(recognizer, sr.Recognizer):
raise TypeError("'recognizer' must be 'Recognizer' instance")
if not isinstance(microphone, sr.Microphone):
raise TypeError("'microphone' must be 'Microphone' instance")
with microphone as source:
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
response = {
"success": True, #Boolean for success true/false
"error": None, #None if no errors, otherwise returns error message from speech recognition API
"transcription": None #None if speech recognition failed, otherwise returns a transcription of input speech
}
try:
print("Analysing voice sample...")
response["transcription"] = recognizer.recognize_google(audio)
except sr.RequestError:
response["success"] = False
response["error"] = "API unavailable"
except sr.UnknownValueError:
response["error"] = "Unable to recognize speech"
return response
def recordAudio(recognizer, microphone):
    # Loop instead of tail recursion: Python has no tail-call optimisation, so
    # recursing once per command would eventually hit the recursion limit.
    while True:
        print("\nListening for input, say something!")
        audio = recognizeSpeech(recognizer, microphone)

        success = audio["success"]
        error = audio["error"]
        transcription = audio["transcription"]

        print("Success: " + str(success))
        print("Error: " + str(error))
        print("Transcription: " + str(transcription))

        handleTranscription(transcription)
#Handle transcriptions here
def handleTranscription(transcription):
if(not transcription):
return
if("help" in transcription):
print("Helping")
sensor.help()
if ("follow" in transcription):
print("Follow command recognized!")
print("Following")
sensor.follow()
if ("stop" in transcription):
print("Stop command recognized!")
sensor.stop()
#Two examples of easily recognizing transcript commands
#This will trigger if the transcription contains the letters "example" in order, anywhere in the string
#This is useful as if your speech is interpreted as "examples" it will trigger "example"
#Might lead to unintended commands as some words can contain other words
if ("example" in transcription):
print("example command recognized! (partial match)")
#Call function
#This will only trigger if the transcription is exactly "example"
#Might lead to problems if a string contains more words than just the command word(s) and if "example" is interpreted as "examples"
if (transcription == "example"):
print("example command recognized! (exact match)")
#Call function
if __name__ == "__main__":
# create recognizer and mic instances
recognizer = sr.Recognizer()
microphone = sr.Microphone()
c = Camera()
sensor = WidefindTracker()
sensor.start()
recordAudio(recognizer, microphone)
| 33.30303 | 135 | 0.695784 | 362 | 3,297 | 6.301105 | 0.403315 | 0.052609 | 0.035072 | 0.02192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000386 | 0.215044 | 3,297 | 98 | 136 | 33.642857 | 0.880989 | 0.286321 | 0 | 0.032258 | 0 | 0 | 0.207959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.064516 | 0 | 0.145161 | 0.177419 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf0d493a9dadaea985eb5034b9242e9393961b4 | 6,432 | py | Python | src/wind_power_forecasting/nodes/utils.py | vchaparro/wind-power-forecasting | 81e3d361af72c30fbd195a5dd8c7bf3b4df3db66 | [
"CC-BY-4.0"
] | 9 | 2021-03-01T08:40:39.000Z | 2022-03-15T07:21:25.000Z | src/wind_power_forecasting/nodes/utils.py | vchaparro/MasterThesis-wind-power-forecasting | 81e3d361af72c30fbd195a5dd8c7bf3b4df3db66 | [
"CC-BY-4.0"
] | null | null | null | src/wind_power_forecasting/nodes/utils.py | vchaparro/MasterThesis-wind-power-forecasting | 81e3d361af72c30fbd195a5dd8c7bf3b4df3db66 | [
"CC-BY-4.0"
] | 3 | 2021-04-15T17:55:05.000Z | 2022-03-17T18:12:51.000Z | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
class BlockingTimeSeriesSplit:
def __init__(self, n_splits):
self.n_splits = n_splits
def get_n_splits(self, X, y, groups):
return self.n_splits
def split(self, X, y=None, groups=None):
n_samples = len(X)
k_fold_size = n_samples // self.n_splits
indices = np.arange(n_samples)
margin = 0
for i in range(self.n_splits):
start = i * k_fold_size
stop = start + k_fold_size
mid = int(0.8 * (stop - start)) + start
yield indices[start:mid], indices[mid + margin : stop]
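
# Usage sketch (synthetic data; any scikit-learn estimator works, Ridge is just an example):
#   import numpy as np
#   from sklearn.linear_model import Ridge
#   from sklearn.model_selection import cross_val_score
#   X, y = np.arange(100.0).reshape(-1, 1), np.arange(100.0)
#   scores = cross_val_score(Ridge(), X, y, cv=BlockingTimeSeriesSplit(n_splits=5))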
def _save_fig(
    fig_id: str,
folder: str,
WF: str,
tight_layout=True,
fig_extension="png",
resolution=300,
):
os.makedirs(folder + WF, exist_ok=True)
path = os.path.join(folder + WF, fig_id + "." + fig_extension)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
def export_reports(name, reports, loc):
""" Export each report in 'reports' to html in the location indicated by 'loc'
"""
for key in reports.keys():
try:
reports[key].to_file(output_file=loc + "{}_NWP{}.html".format(name, key))
except Exception:
print("WARN: Exportation failed for NWP{}".format(key))
continue
def plot_learning_curve(
estimator,
title,
X,
y,
axes=None,
ylim=None,
cv=None,
n_jobs=None,
train_sizes=np.linspace(0.1, 1.0, 5),
):
"""
Generate 3 plots: the test and training learning curve, the training
samples vs fit times curve, the fit times vs score curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
axes : array of 3 axes, optional (default=None)
Axes to use for plotting the curves.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
if axes is None:
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].set_title(title)
if ylim is not None:
axes[0].set_ylim(*ylim)
axes[0].set_xlabel("Training examples")
axes[0].set_ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ = learning_curve(
estimator,
X,
y,
cv=cv,
n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True,
)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
fit_times_mean = np.mean(fit_times, axis=1)
fit_times_std = np.std(fit_times, axis=1)
# Plot learning curve
axes[0].grid()
axes[0].fill_between(
train_sizes,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.1,
color="r",
)
axes[0].fill_between(
train_sizes,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color="g",
)
axes[0].plot(
train_sizes, train_scores_mean, "o-", color="r", label="Training score"
)
axes[0].plot(
train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score"
)
axes[0].legend(loc="best")
# Plot n_samples vs fit_times
axes[1].grid()
axes[1].plot(train_sizes, fit_times_mean, "o-")
axes[1].fill_between(
train_sizes,
fit_times_mean - fit_times_std,
fit_times_mean + fit_times_std,
alpha=0.1,
)
axes[1].set_xlabel("Training examples")
axes[1].set_ylabel("fit_times")
axes[1].set_title("Scalability of the model")
# Plot fit_time vs score
axes[2].grid()
axes[2].plot(fit_times_mean, test_scores_mean, "o-")
axes[2].fill_between(
fit_times_mean,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
)
axes[2].set_xlabel("fit_times")
axes[2].set_ylabel("Score")
axes[2].set_title("Performance of the model")
return plt
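
# Example invocation (sketch, following the scikit-learn learning-curve example;
# load_digits is already imported above):
#   from sklearn.model_selection import ShuffleSplit
#   from sklearn.naive_bayes import GaussianNB
#   X, y = load_digits(return_X_y=True)
#   cv = ShuffleSplit(n_splits=50, test_size=0.2, random_state=0)
#   plot_learning_curve(GaussianNB(), "Learning Curves (Naive Bayes)", X, y, cv=cv, n_jobs=4)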
| 32 | 86 | 0.640547 | 926 | 6,432 | 4.281857 | 0.287257 | 0.0343 | 0.024716 | 0.010088 | 0.15662 | 0.091551 | 0.038335 | 0.030769 | 0.030769 | 0.030769 | 0 | 0.013465 | 0.261039 | 6,432 | 200 | 87 | 32.16 | 0.820745 | 0.391791 | 0 | 0.193277 | 0 | 0 | 0.05732 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05042 | false | 0 | 0.05042 | 0.008403 | 0.12605 | 0.008403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf1b6f63140eb9c0e03761cfc6126eff7978de4 | 3,458 | py | Python | tools/archive_publisher.py | asiekierka/z2 | d926408423dc98d71d5e7fc2fda3202c03c309de | [
"MIT"
] | 1 | 2020-02-17T11:42:15.000Z | 2020-02-17T11:42:15.000Z | tools/archive_publisher.py | asiekierka/z2 | d926408423dc98d71d5e7fc2fda3202c03c309de | [
"MIT"
] | null | null | null | tools/archive_publisher.py | asiekierka/z2 | d926408423dc98d71d5e7fc2fda3202c03c309de | [
"MIT"
] | null | null | null | import glob
import os
import shutil
import sys
from zipfile import ZipFile
import django
from internetarchive import upload
sys.path.append("/var/projects/museum/")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "museum.settings")
django.setup()
from museum_site.models import File
ZGAMES_PATH = "/var/projects/museum"
BASE_PATH = "/var/projects/museum/museum_site/static/data/base/"
BASES = {
"A": {
"name": "ZZT v3.2 Registered",
"directory": "ZZT32-REG",
"use_cfg": True,
"registered": True,
"prefix": "zzt_",
"executable": "ZZT.EXE",
},
"B": {
"name": "ZZT v2.0 Shareware",
"directory": "ZZT20-SW",
"use_cfg": True,
"registered": False,
"prefix": "zzt_",
"executable": "ZZT.EXE",
}
}
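
# Each base bundles engine files that are zipped in alongside the uploaded world;
# the Internet Archive item identifier is base["prefix"] + the world's filename
# (minus its extension).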
def main():
print("Internet Archive Publisher")
while True:
file_id = input("File ID: ")
if not file_id:
break
# Load file
f = File.objects.get(pk=int(file_id))
print("Selected:", f, "(" + f.filename + ")")
for base in BASES.keys():
print("[" + base + "]", BASES[base]["name"])
selected_base = input("Select package base: ").upper()
base = BASES[selected_base]
# Copy the zip
zip_name = "zzt_" + f.filename
shutil.copy(
ZGAMES_PATH + f.download_url(),
zip_name
)
# Open the WIP zip
with ZipFile(zip_name, "a") as z:
# Insert the base files
to_add = glob.glob(
os.path.join(BASE_PATH, base["directory"], "*")
)
for a in to_add:
z.write(a, arcname=os.path.basename(a))
            # Create ZZT.CFG if needed
            launch_file = ""  # defined up front: "emulator_start" below uses it even when use_cfg is False
            if base["use_cfg"]:
# Find the relevant files to default to
file_list = z.namelist()
for idx, name in enumerate(file_list, start=1):
print(idx, name)
selected_idx = int(input("Launch which file? ")) - 1
launch_file = z.namelist()[selected_idx]
config_content = launch_file[:-4] # Remove .ZZT extension
if base["registered"]:
config_content += "\r\nREGISTERED"
z.writestr("ZZT.CFG", config_content)
# Zip file is completed, prepare the upload
meta = {
"title": f.title,
"mediatype": "software",
"collection": "open_source_software",
"emulator": "dosbox",
"emulator_ext": "zip",
"emulator_start": base["executable"] + " " + launch_file,
"year": str(f.release_date)[:4],
"subject": ["zzt"] + f.genre.split("/"),
"creator": f.author.split("/"),
"description": "World created using the ZZT engine."
}
print("Uploading to Internet Archive...")
r = upload(
base["prefix"] + f.filename[:-4],
files=[zip_name],
metadata=meta
)
if r[0].status_code == 200:
print("Upload successful!")
f.archive_name = base["prefix"] + f.filename[:-4]
f.save()
print("https://archive.org/details/" + f.archive_name)
os.remove(zip_name)
else:
print("Upload failed!")
print(r)
return True
if __name__ == "__main__":
main()
| 29.058824 | 74 | 0.520821 | 381 | 3,458 | 4.587927 | 0.404199 | 0.020023 | 0.029176 | 0.024027 | 0.051487 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007895 | 0.340659 | 3,458 | 118 | 75 | 29.305085 | 0.758772 | 0.054367 | 0 | 0.06383 | 0 | 0 | 0.223858 | 0.028519 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010638 | false | 0 | 0.085106 | 0 | 0.106383 | 0.095745 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf2515a70e81cae3144e3396637050e7d7c8ecd | 3,591 | py | Python | src/extractor.py | wciesialka/sillence-extractor | 34f188951b162280fa0473647ff83a21ddc3f04d | [
"MIT"
] | 4 | 2019-10-12T04:23:43.000Z | 2021-03-04T17:33:29.000Z | src/extractor.py | wciesialka/sillence-extractor | 34f188951b162280fa0473647ff83a21ddc3f04d | [
"MIT"
] | 4 | 2019-12-21T16:51:26.000Z | 2022-03-11T23:55:59.000Z | src/extractor.py | wciesialka/sillence-extractor | 34f188951b162280fa0473647ff83a21ddc3f04d | [
"MIT"
] | 1 | 2021-02-06T21:39:32.000Z | 2021-02-06T21:39:32.000Z | import ffmpeg
import os
import tempfile
import re
from pydub import AudioSegment
import math
FRAME_NAME_PATTERN = "frame-%08d.jpg"
def get_filename_from_path(path):
base = os.path.basename(path)
return os.path.splitext(base)[0]
FRACTION_PATTERN = r"(\d+)/(\d+)"
FRACTION_RE = re.compile(FRACTION_PATTERN)
def convert_fraction(frac):
match = FRACTION_RE.match(frac)
return float(match[1]) / float(match[2])
def get_video_duration(path):
probe = ffmpeg.probe(path)
video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
time_base = video_stream["time_base"]
duration_ts = video_stream["duration_ts"]
duration = convert_fraction(time_base) * float(duration_ts)
return duration
def extract_frames(path,frame_dir_name):
save_path = os.path.join(frame_dir_name,FRAME_NAME_PATTERN)
stream = ffmpeg.input(path)
stream = ffmpeg.output(stream, save_path)
stream.run()
def extract_audio(path,audio_dir_name):
save_path = os.path.join(audio_dir_name,"audio.mp3")
stream = ffmpeg.input(path)
stream = ffmpeg.output(stream, save_path, acodec="libmp3lame",f="mp3")
stream.run()
return save_path
def translate(value, from_min, from_max, to_min, to_max):
from_range = from_max - from_min
to_range = to_max - to_min
left_mapped = float(value - from_min) / float(from_range)
translated = to_min + (left_mapped * to_range)
if translated < 0.0001 or math.isinf(translated):
return 0
else:
return translated
SILENCE = -99.5
LOUDEST = 99.5
def to_db(amplitude):
    # Ad-hoc loudness scale (natural log rather than a true decibel conversion);
    # the SILENCE/LOUDEST bounds above are calibrated against this scale.
    try:
        db = 10 * math.log(amplitude)
    except ValueError:  # log of zero or negative amplitude
        return 0
    else:
        return db
def delete_file(filepath):
if os.path.exists(filepath):
os.remove(filepath)
return True
else:
return False
def remove_frame(frame_number,frame_dir_path):
filename = "frame-{:08d}.jpg".format(frame_number)
filepath = os.path.join(frame_dir_path,filename)
delete_file(filepath)
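
# extract() maps each video frame to its audio slice (duration_ms / framecount per frame),
# drops frames whose slice crosses the loudness threshold (or keeps only those, with
# invert=True), and re-muxes the survivors with the concatenated audio.
# Hypothetical call (paths assumed): extract("talk.mp4", "talk_cut.mp4", threshold_ratio=0.7)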
def extract(input_path,output_path,threshold_ratio=0.7,invert=False):
video_name = get_filename_from_path(input_path)
temp_dir = tempfile.TemporaryDirectory(suffix="_"+video_name)
temp_dir_name = temp_dir.name
duration = get_video_duration(input_path)
duration_millis = duration*1000
extract_frames(input_path,temp_dir_name)
framecount = len([name for name in os.listdir(temp_dir_name) if os.path.isfile(os.path.join(temp_dir_name, name))])
fps = framecount/duration
millis_per_frame = duration_millis/framecount
audio_path = extract_audio(input_path,temp_dir_name)
audio = AudioSegment.from_file(audio_path)
threshold = LOUDEST*threshold_ratio
new_audio = AudioSegment.empty()
for i in range(1,framecount):
start = (i-1) * millis_per_frame
end = i * millis_per_frame
clip = audio[start:end]
volume = to_db(clip.max)
if ((not invert) and volume >= threshold) or (invert and volume <= threshold):
remove_frame(i,temp_dir_name)
else:
new_audio += clip
new_audio_path = os.path.join(temp_dir_name,"new_audio.mp3")
new_audio.export(new_audio_path, format="mp3")
    frames_stream = ffmpeg.input(temp_dir_name + "/*.jpg", pattern_type='glob', framerate=fps)
audio_stream = ffmpeg.input(new_audio_path)
stream = ffmpeg.output(frames_stream,audio_stream,output_path)
    try:
        stream.run()
    except ffmpeg.Error:  # ffmpeg-python raises ffmpeg.Error on a non-zero ffmpeg exit code
        return False
else:
return os.path.exists(output_path) | 28.5 | 119 | 0.695071 | 506 | 3,591 | 4.671937 | 0.235178 | 0.038494 | 0.041878 | 0.017767 | 0.111675 | 0.083756 | 0.06599 | 0.044839 | 0.044839 | 0.044839 | 0 | 0.012161 | 0.198552 | 3,591 | 126 | 120 | 28.5 | 0.809243 | 0 | 0 | 0.189474 | 0 | 0 | 0.036748 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.063158 | 0 | 0.294737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf2b885d77799a6834819f80aff075d0ff7c9a4 | 1,159 | py | Python | pwm/i2c_ads1115_pwm.py | jordibinefa/raspberrypi-codes | a043cb4e5fc69a4d2f14d7224fc5378cc6d8d093 | [
"MIT"
] | null | null | null | pwm/i2c_ads1115_pwm.py | jordibinefa/raspberrypi-codes | a043cb4e5fc69a4d2f14d7224fc5378cc6d8d093 | [
"MIT"
] | null | null | null | pwm/i2c_ads1115_pwm.py | jordibinefa/raspberrypi-codes | a043cb4e5fc69a4d2f14d7224fc5378cc6d8d093 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
# 20180726 - wiki.binefa.cat
# Based on a code from Tony DiCola (AdaFruit)
# License: Public Domain
import time
import Adafruit_ADS1x15
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(32, GPIO.OUT)
GPIO.setup(33, GPIO.OUT)
GPIO.setup(12, GPIO.OUT)
GPIO.setup(35, GPIO.OUT)
p = [0]*4
p[0] = GPIO.PWM(32, 50) # channel=32 frequency=50Hz
p[1] = GPIO.PWM(33, 50) # channel=33 frequency=50Hz
p[2] = GPIO.PWM(12, 50) # channel=12 frequency=50Hz
p[3] = GPIO.PWM(35, 50) # channel=35 frequency=50Hz
p[0].start(0)
p[1].start(0)
p[2].start(0)
p[3].start(0)
adc = Adafruit_ADS1x15.ADS1115()
GAIN = 1
#VPS = 4.096 / 32768.0 #volts per step
VPS = 100.0 / 26600.0
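# Maps raw ADS1115 counts to a 0-100 % duty cycle; 26600 counts corresponds to roughly
# 3.3 V at GAIN=1 (4.096 V full scale over 32768 counts), presumably tuned empirically.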
print('-' * 46)
try:
values = [0]*4
while 1:
for i in range(4):
values[i] = adc.read_adc(i, gain=GAIN)
#print('ADC{:01d}: '.format(i)+'HEX 0x{:04x} '.format(values[i])+'DEC {:05d} '.format(values[i])+'reading {:2.3f} %'.format(values[i]*VPS))
p[i].ChangeDutyCycle(values[i]*VPS)
#print('-' * 46)
#time.sleep(0.5)
time.sleep(0.1)
except KeyboardInterrupt:
pass
p[0].stop()
p[1].stop()
p[2].stop()
p[3].stop()
GPIO.cleanup()
| 22.72549 | 146 | 0.646247 | 209 | 1,159 | 3.569378 | 0.392345 | 0.046917 | 0.075067 | 0.064343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12069 | 0.149267 | 1,159 | 50 | 147 | 23.18 | 0.635903 | 0.366695 | 0 | 0.055556 | 0 | 0 | 0.001387 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.027778 | 0.111111 | 0 | 0.111111 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf4a552e3a9b31a37863ee85ea0bd2b5650c4cc | 3,423 | py | Python | remeta/util.py | m-guggenmos/remeta | d074d87cb45ae83cd0213ffbecbb3d85036f8cd2 | [
"MIT"
] | 1 | 2022-01-03T22:46:02.000Z | 2022-01-03T22:46:02.000Z | remeta/util.py | m-guggenmos/remeta | d074d87cb45ae83cd0213ffbecbb3d85036f8cd2 | [
"MIT"
] | null | null | null | remeta/util.py | m-guggenmos/remeta | d074d87cb45ae83cd0213ffbecbb3d85036f8cd2 | [
"MIT"
] | null | null | null | import sys
import warnings
import numpy as np
from scipy.stats import rankdata
TAB = ' '
maxfloat = np.float128 if hasattr(np, 'float128') else np.longdouble
class ReprMixin:
def __repr__(self):
return f'{self.__class__.__name__}\n' + '\n'.join([f'\t{k}: {v}' for k, v in self.__dict__.items()])
def _check_param(x):
if hasattr(x, '__len__'):
if len(x) == 2:
return x
elif len(x) == 1:
return [x[0], x[0]]
else:
print(f'Something went wrong, parameter array has {len(x)} values')
else:
return [x, x]
def _check_criteria(x):
if hasattr(x[0], '__len__'):
return x
else:
return [x, x]
def pearson2d(x, y):
x, y = np.asarray(x), np.asarray(y)
mx, my = np.nanmean(x, axis=-1), np.nanmean(y, axis=-1)
xm, ym = x - mx[..., None], y - my[..., None]
r_num = np.nansum(xm * ym, axis=-1)
r_den = np.sqrt(np.nansum(xm ** 2, axis=-1) * np.nansum(ym ** 2, axis=-1))
r = r_num / r_den
return r
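
# Sanity-check sketch: the row-wise coefficients should match scipy.stats.pearsonr per row.
#   rng = np.random.default_rng(0)
#   x, y = rng.normal(size=(3, 50)), rng.normal(size=(3, 50))
#   r = pearson2d(x, y)  # shape (3,), one coefficient per row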
def spearman2d(x, y, axis=0):
x, y = np.asarray(x), np.asarray(y)
xr, yr = rankdata(x, axis=axis), rankdata(y, axis=axis)
mxr, myr = np.nanmean(xr, axis=-1), np.nanmean(yr, axis=-1)
xmr, ymr = xr - mxr[..., None], yr - myr[..., None]
r_num = np.nansum(xmr * ymr, axis=-1)
r_den = np.sqrt(np.nansum(xmr ** 2, axis=-1) * np.nansum(ymr ** 2, axis=-1))
r = r_num / r_den
return r
def weighted_pearson(x, y, w):
xf = np.asarray(x).flatten()
yf = np.asarray(y).flatten()
w = np.asarray(w).flatten() / np.nansum(w)
mx = np.nansum(w * xf)
my = np.nansum(w * yf)
r_num = np.nansum(w * (xf - mx) * (yf - my))
s_x = np.nansum(w * (xf - mx) ** 2)
s_y = np.nansum(w * (yf - my) ** 2)
r_den = np.sqrt(s_x * s_y)
r = r_num / r_den
return r
def print_warnings(w):
for el in set([w_.message.args[0] for w_ in w]):
if 'delta_grad == 0.0' not in el:
print('\tWarning: ' + el)
def raise_warning_in_catch_block(msg, category, w):
warnings.warn(msg, category=category)
if len(w):
sys.stderr.write(warnings.formatwarning(
w[-1].message, w[-1].category, w[-1].filename, w[-1].lineno, line=w[-1].line
))
def type2roc(correct, conf, nbins=5):
# Calculate area under type 2 ROC
#
# correct - vector of 1 x ntrials, 0 for error, 1 for correct
# conf - vector of continuous confidence ratings between 0 and 1
# nbins - how many bins to use for discretization
bs = 1 / nbins
h2, fa2 = np.full(nbins, np.nan), np.full(nbins, np.nan)
for c in range(nbins):
if c:
h2[nbins - c - 1] = np.sum((conf > c*bs) & (conf <= (c+1)*bs) & correct.astype(bool)) + 0.5
fa2[nbins - c - 1] = np.sum((conf > c*bs) & (conf <= (c+1)*bs) & ~correct.astype(bool)) + 0.5
else:
h2[nbins - c - 1] = np.sum((conf >= c * bs) & (conf <= (c + 1) * bs) & correct.astype(bool)) + 0.5
fa2[nbins - c - 1] = np.sum((conf >= c * bs) & (conf <= (c + 1) * bs) & ~correct.astype(bool)) + 0.5
h2 /= np.sum(h2)
fa2 /= np.sum(fa2)
cum_h2 = np.hstack((0, np.cumsum(h2)))
cum_fa2 = np.hstack((0, np.cumsum(fa2)))
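
    # The k[c] terms below appear to follow the standard type-2 ROC area formula
    # (as in Maniscalco & Lau's metacognition toolbox): the area under the cumulative
    # (false-alarm, hit) curve is recovered as 0.5 + 0.25 * sum(k).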
k = np.full(nbins, np.nan)
for c in range(nbins):
k[c] = (cum_h2[c+1] - cum_fa2[c])**2 - (cum_h2[c] - cum_fa2[c+1])**2
auroc2 = 0.5 + 0.25*np.sum(k)
return auroc2
| 29.765217 | 112 | 0.547181 | 579 | 3,423 | 3.132988 | 0.243523 | 0.052922 | 0.029768 | 0.019846 | 0.321389 | 0.229879 | 0.229879 | 0.229879 | 0.169791 | 0.169791 | 0 | 0.03751 | 0.267894 | 3,423 | 114 | 113 | 30.026316 | 0.686353 | 0.059013 | 0 | 0.219512 | 0 | 0 | 0.046656 | 0.008398 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109756 | false | 0 | 0.04878 | 0.012195 | 0.292683 | 0.036585 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf58a848aabaf8670ef81bc6b8d5aced5608d67 | 1,372 | py | Python | tonga/models/handlers/command/command_handler.py | Qotto/tonga | a6ae223ebf0fb7b317118b762102f1909435d1cf | [
"MIT"
] | 1 | 2019-12-17T10:06:03.000Z | 2019-12-17T10:06:03.000Z | tonga/models/handlers/command/command_handler.py | Qotto/tonga | a6ae223ebf0fb7b317118b762102f1909435d1cf | [
"MIT"
] | 1 | 2019-07-04T15:22:58.000Z | 2019-07-05T07:23:31.000Z | tonga/models/handlers/command/command_handler.py | Qotto/tonga | a6ae223ebf0fb7b317118b762102f1909435d1cf | [
"MIT"
] | 2 | 2019-06-05T15:40:49.000Z | 2019-12-10T09:24:23.000Z | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" BaseCommandHandler Module
All command handlers must inherit from this class. The execute function is called by the
consumer on each received command. To take part in a transaction, have execute return
'transaction' as a string once the transaction ends; otherwise return None.
"""
from typing import Union
from tonga.models.handlers.base import BaseHandler
from tonga.models.records.command.command import BaseCommand
__all__ = [
'BaseCommandHandler'
]
class BaseCommandHandler(BaseHandler):
""" Base of all command handler
"""
@classmethod
def handler_name(cls) -> str:
""" Return handler name, used by serializer
Raises:
NotImplementedError: Abstract def
Returns:
            The handler name as a string.
"""
raise NotImplementedError
async def execute(self, event: BaseCommand) -> Union[str, None]:
""" This function is automatically call by Tonga when an command with same name was receive by consumer
Args:
event (BaseCommand): Command event receive by consumer
Notes:
            If execute takes part in a transaction, return 'transaction' as a string when the transaction ends
Raises:
NotImplementedError: Abstract def
Returns:
            'transaction' if a transaction was made, otherwise None
"""
raise NotImplementedError
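
# Illustrative subclass (hypothetical, not shipped with tonga):
#   class CoffeeCommandHandler(BaseCommandHandler):
#       @classmethod
#       def handler_name(cls) -> str:
#           return 'make-coffee'
#
#       async def execute(self, event: BaseCommand) -> Union[str, None]:
#           ...  # handle the command
#           return 'transaction'  # or None when no transaction was made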
| 24.945455 | 119 | 0.673469 | 153 | 1,372 | 6.006536 | 0.522876 | 0.032644 | 0.036997 | 0.054407 | 0.154516 | 0.154516 | 0.154516 | 0.154516 | 0 | 0 | 0 | 0.004951 | 0.263848 | 1,372 | 54 | 120 | 25.407407 | 0.904951 | 0.33965 | 0 | 0.166667 | 0 | 0 | 0.041002 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf6bab456465922d740d1791db8aca93417eb58 | 1,373 | py | Python | neighbourhoodapp/urls.py | marknesh/neighbourhood-watch | 57e36c800b9e4898be9f4949c80c902f7627699a | [
"MIT"
] | null | null | null | neighbourhoodapp/urls.py | marknesh/neighbourhood-watch | 57e36c800b9e4898be9f4949c80c902f7627699a | [
"MIT"
] | 10 | 2020-03-24T10:47:53.000Z | 2021-04-08T19:51:44.000Z | neighbourhoodapp/urls.py | marknesh/Neighbourhood-Watch | 57e36c800b9e4898be9f4949c80c902f7627699a | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path,re_path
from . import views
from rest_framework.authtoken.views import obtain_auth_token
from rest_framework_simplejwt import views as jwt_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', views.home, name='index'),
path('c/', views.posted, name='sigxnup'),
path('signup/', views.signup, name='signup'),
re_path(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.activate, name='activate'),
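    # uidb64: the urlsafe-base64-encoded user id; token: the activation token
    # presumably emailed to the user on signup.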
path('profile/', views.myprofile, name='profile'),
re_path(r'^update/profile', views.updatemyprofile, name='update_profile'),
re_path(r'^api-token-auth/', obtain_auth_token),
path('api/token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),
re_path(r'^update/(\d+)', views.comment, name='comment'),
re_path(r'^updates/(\d+)', views.updates, name='updates'),
re_path(r'^business/(\d+)', views.business, name='updatesds'),
re_path(r'^g/(\d+)', views.get_business, name='updatesds'),
path('search/', views.search_business, name='search_results'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 44.290323 | 102 | 0.705754 | 198 | 1,373 | 4.737374 | 0.328283 | 0.051173 | 0.052239 | 0.01919 | 0.014925 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011419 | 0.107065 | 1,373 | 30 | 103 | 45.766667 | 0.75367 | 0 | 0 | 0 | 0 | 0.038462 | 0.24909 | 0.062637 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.269231 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baf6f7cf8752fa09d78da501f3bc6b60b55ed4dd | 1,517 | py | Python | 2015/day8/day8.py | naitmare01/Adventofcode | 34f2832fa7a18b76cf9827890632740c6f60679c | [
"MIT"
] | null | null | null | 2015/day8/day8.py | naitmare01/Adventofcode | 34f2832fa7a18b76cf9827890632740c6f60679c | [
"MIT"
] | null | null | null | 2015/day8/day8.py | naitmare01/Adventofcode | 34f2832fa7a18b76cf9827890632740c6f60679c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import codecs
def arguments():
# Handle command line arguments
parser = argparse.ArgumentParser(description='Adventofcode.')
parser.add_argument('-f', '--file', required=True)
args = parser.parse_args()
return args
class Matchsticks:
def __init__(self, whole_string):
self.whole_string = whole_string
self.converted_string = None
self.length_whole_string = None
self.length_converted_string = None
def calc_length_whole_string(self):
self.length_whole_string = len(self.whole_string)
def calc_length_converted_string(self):
escaped_str = self.whole_string
escaped_str = escaped_str[1:]
escaped_str = escaped_str[:-1]
self.converted_string = codecs.getdecoder("unicode_escape")(escaped_str)[0]
self.length_converted_string = len(self.converted_string)
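
# Example (AoC 2015 day 8): the literal "aaa\"aaa" is 10 characters of code but
# decodes to the 7-character string aaa"aaa, so it contributes 10 - 7 = 3 to part 1.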
def main():
args = arguments()
with open(args.file) as file:
input_file = file.read().strip()
input_file = input_file.splitlines()
result = []
for row in input_file:
part1 = Matchsticks(row)
part1.calc_length_whole_string()
part1.calc_length_converted_string()
result.append(part1)
print("Part1:", (sum([x.length_whole_string for x in result])) - (sum([x.length_converted_string for x in result])))
print("Part2:", sum(2+s.count('\\')+s.count('"') for s in open('input')))
if __name__ == '__main__':
main()
| 29.173077 | 120 | 0.667765 | 192 | 1,517 | 4.979167 | 0.354167 | 0.115063 | 0.088912 | 0.041841 | 0.08159 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010008 | 0.209624 | 1,517 | 51 | 121 | 29.745098 | 0.787323 | 0.045485 | 0 | 0 | 0 | 0 | 0.043599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.055556 | 0 | 0.25 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bafc9badab6fa8688b6c75518218495c76855035 | 6,938 | py | Python | backend/tests/baserow/contrib/database/db/test_db_schema.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | backend/tests/baserow/contrib/database/db/test_db_schema.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | backend/tests/baserow/contrib/database/db/test_db_schema.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | import pytest
from django.db import connection, transaction, ProgrammingError
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.dummy.base import DatabaseWrapper as DummyDatabaseWrapper
from django.db.backends.postgresql.schema import (
DatabaseSchemaEditor as PostgresqlDatabaseSchemaEditor,
)
from baserow.contrib.database.db.schema import (
lenient_schema_editor,
PostgresqlLenientDatabaseSchemaEditor,
safe_django_schema_editor,
)
from baserow.contrib.database.table.models import Table
@pytest.mark.django_db
def test_lenient_schema_editor():
dummy = DummyDatabaseWrapper({})
with pytest.raises(ValueError):
with lenient_schema_editor(dummy):
pass
assert connection.SchemaEditorClass == PostgresqlDatabaseSchemaEditor
with lenient_schema_editor(connection) as schema_editor:
assert isinstance(schema_editor, PostgresqlLenientDatabaseSchemaEditor)
assert isinstance(schema_editor, BaseDatabaseSchemaEditor)
assert schema_editor.alter_column_prepare_old_value == ""
assert schema_editor.alter_column_prepare_new_value == ""
assert not schema_editor.force_alter_column
assert connection.SchemaEditorClass != PostgresqlDatabaseSchemaEditor
assert connection.SchemaEditorClass == PostgresqlDatabaseSchemaEditor
with lenient_schema_editor(
connection,
"p_in = REGEXP_REPLACE(p_in, '', 'test', 'g');",
"p_in = REGEXP_REPLACE(p_in, 'test', '', 'g');",
True,
) as schema_editor:
assert schema_editor.alter_column_prepare_old_value == (
"p_in = REGEXP_REPLACE(p_in, '', 'test', 'g');"
)
assert schema_editor.alter_column_prepare_new_value == (
"p_in = REGEXP_REPLACE(p_in, 'test', '', 'g');"
)
assert schema_editor.force_alter_column
# Test provided as an example of how to trigger the django bug. It is, however, excluded
# from CI as it will break the connection!
@pytest.mark.django_db
@pytest.mark.slow
# You must add --runslow -s to pytest to run this test, you can do this in intellij by
# editing the run config for this test and adding --runslow -s to additional args.
def test_showing_how_djangos_schema_editor_is_broken(data_fixture):
cxn = transaction.get_connection()
starting_savepoints = list(cxn.savepoint_ids)
user = data_fixture.create_user()
database = data_fixture.create_database_application(user=user)
other_table = data_fixture.create_database_table(database=database)
table = Table.objects.create(database=database, order=0)
# Setup an existing index which will collide with the one that we will make later
# to ensure the `schema_editor.create_model` will fail in the deferred sql section.
with connection.cursor() as cursor:
cursor.execute(
f"CREATE index {table.get_collision_safe_order_id_idx_name()} on "
f'"database_table_{other_table.id}"("id", "order")'
)
cxn = transaction.get_connection()
assert cxn.savepoint_ids == starting_savepoints
    # Create the table schema in the database.
with pytest.raises(
ProgrammingError, match='relation "tbl_order_id_2_idx" already exists'
):
with connection.schema_editor() as schema_editor:
# Django only creates indexes when the model is managed.
model = table.get_model(managed=True)
schema_editor.create_model(model)
# Due to the bug in django.db.backends.base.schema.BaseDatabaseSchemaEditor.__exit__
# we are still in an atomic block even though we weren't in one before!!
cxn = transaction.get_connection()
assert cxn.savepoint_ids[0] == starting_savepoints[0]
# There is still an inner atomic transaction that has not been rolled back!
assert len(cxn.savepoint_ids) == 2
@pytest.mark.django_db
def test_safe_schema_editor(data_fixture):
cxn = transaction.get_connection()
starting_savepoints = list(cxn.savepoint_ids)
user = data_fixture.create_user()
database = data_fixture.create_database_application(user=user)
other_table = data_fixture.create_database_table(database=database)
table = Table.objects.create(database=database, order=0)
# Setup an existing index which will collide with the one that we will make later
# to ensure the `schema_editor.create_model` will fail in the deferred sql section.
with connection.cursor() as cursor:
cursor.execute(
f"CREATE index {table.get_collision_safe_order_id_idx_name()} on "
f'"database_table_{other_table.id}"("id", "order")'
)
cxn = transaction.get_connection()
assert cxn.savepoint_ids == starting_savepoints
    # Create the table schema in the database.
with pytest.raises(
ProgrammingError, match=f'relation "tbl_order_id_{table.id}_idx" already exists'
):
with safe_django_schema_editor() as schema_editor:
# Django only creates indexes when the model is managed.
model = table.get_model(managed=True)
schema_editor.create_model(model)
# Assert because we are using the safe schema editor the transaction was rolled back
# successfully!
cxn = transaction.get_connection()
assert cxn.savepoint_ids == starting_savepoints
@pytest.mark.django_db
def test_lenient_schema_editor_is_also_safe(data_fixture):
cxn = transaction.get_connection()
starting_savepoints = list(cxn.savepoint_ids)
user = data_fixture.create_user()
database = data_fixture.create_database_application(user=user)
other_table = data_fixture.create_database_table(database=database)
table = Table.objects.create(database=database, order=0)
# Setup an existing index which will collide with the one that we will make later
# to ensure the `schema_editor.create_model` will fail in the deferred sql section.
with connection.cursor() as cursor:
cursor.execute(
f"CREATE index {table.get_collision_safe_order_id_idx_name()} on "
f'"database_table_{other_table.id}"("id", "order")'
)
cxn = transaction.get_connection()
assert cxn.savepoint_ids == starting_savepoints
    # Create the table schema in the database.
with pytest.raises(
ProgrammingError, match=f'relation "tbl_order_id_{table.id}_idx" already exists'
):
with lenient_schema_editor(
connection,
None,
None,
False,
) as schema_editor:
# Django only creates indexes when the model is managed.
model = table.get_model(managed=True)
schema_editor.create_model(model)
# Assert because we are using the safe schema editor the transaction was rolled back
# successfully!
cxn = transaction.get_connection()
assert cxn.savepoint_ids == starting_savepoints
| 41.54491 | 88 | 0.72211 | 873 | 6,938 | 5.508591 | 0.190149 | 0.082346 | 0.031192 | 0.05053 | 0.722811 | 0.696818 | 0.69162 | 0.69162 | 0.640882 | 0.582658 | 0 | 0.001261 | 0.200202 | 6,938 | 166 | 89 | 41.795181 | 0.865381 | 0.217354 | 0 | 0.589744 | 0 | 0 | 0.122642 | 0.057899 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.034188 | false | 0.008547 | 0.059829 | 0 | 0.094017 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
baff1d955bc292ecbb9120a30bb080c42324de1c | 3,925 | py | Python | umbrella/api/middleware/context.py | xww/umbrella | c54ed576477602b5bc1ecfe23ae1f59cc46a76e5 | [
"Apache-2.0"
] | null | null | null | umbrella/api/middleware/context.py | xww/umbrella | c54ed576477602b5bc1ecfe23ae1f59cc46a76e5 | [
"Apache-2.0"
] | null | null | null | umbrella/api/middleware/context.py | xww/umbrella | c54ed576477602b5bc1ecfe23ae1f59cc46a76e5 | [
"Apache-2.0"
] | null | null | null | '''
Created on 2012-10-23
@author: hzzhoushaoyu
'''
import webob.exc
import json
from umbrella.common import wsgi
import umbrella.common.log as logging
from umbrella.common import cfg
import umbrella.context
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
context_opts = [
cfg.BoolOpt('owner_is_tenant', default=True),
cfg.StrOpt('admin_role', default='admin'),
cfg.BoolOpt('allow_anonymous_access', default=False),
]
CONF.register_opts(context_opts)
class ContextMiddleware(wsgi.Middleware):
def process_response(self, resp):
try:
request_id = resp.request.context.request_id
LOG.debug(_("req-%s is responsing") % request_id)
except AttributeError:
LOG.warn(_('Unable to retrieve request id from context'))
else:
resp.headers['x-openstack-request-id'] = 'req-%s' % request_id
return resp
def process_request(self, req):
if req.headers.get('X-Auth-Token') is not None:
kwargs = {'auth_tok': req.headers.get('X-Auth-Token')}
else:
kwargs = {}
req.context = umbrella.context.RequestContext(**kwargs)
class AuthContextMiddleware(ContextMiddleware):
def process_request(self, req):
"""Convert authentication information into a request context
        Generate an umbrella.context.RequestContext object from the available
authentication headers and store on the 'context' attribute
of the req object.
:param req: wsgi request object that will be given the context object
:raises webob.exc.HTTPUnauthorized: when value of the X-Identity-Status
header is not 'Confirmed' and
anonymous access is disallowed
"""
if req.headers.get('X-Identity-Status') == 'Confirmed':
req.context = self._get_authenticated_context(req)
elif req.headers.get('X-Auth-Token') is not None:
req.context = self._get_auth_token_context(req)
elif CONF.allow_anonymous_access:
req.context = self._get_anonymous_context()
else:
raise webob.exc.HTTPUnauthorized()
def _get_anonymous_context(self):
kwargs = {
'user': None,
'tenant': None,
'roles': [],
'is_admin': False,
'read_only': True,
}
return umbrella.context.RequestContext(**kwargs)
def _get_auth_token_context(self, req):
return umbrella.context.RequestContext(
auth_tok=req.headers.get('X-Auth-Token'))
def _get_authenticated_context(self, req):
#NOTE(bcwaldon): X-Roles is a csv string, but we need to parse
# it into a list to be useful
roles_header = req.headers.get('X-Roles', '')
roles = [r.strip().lower() for r in roles_header.split(',')]
#NOTE(bcwaldon): This header is deprecated in favor of X-Auth-Token
deprecated_token = req.headers.get('X-Storage-Token')
service_catalog = None
if req.headers.get('X-Service-Catalog') is not None:
try:
catalog_header = req.headers.get('X-Service-Catalog')
service_catalog = json.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
_('Invalid service catalog json.'))
kwargs = {
'user': req.headers.get('X-User-Id'),
'tenant': req.headers.get('X-Tenant-Id'),
'roles': roles,
'is_admin': CONF.admin_role.strip().lower() in roles,
'auth_tok': req.headers.get('X-Auth-Token', deprecated_token),
'owner_is_tenant': CONF.owner_is_tenant,
'service_catalog': service_catalog,
}
return umbrella.context.RequestContext(**kwargs)
| 35.681818 | 79 | 0.611975 | 457 | 3,925 | 5.113786 | 0.297593 | 0.051348 | 0.066752 | 0.071887 | 0.182285 | 0.089859 | 0.065896 | 0.065896 | 0.027386 | 0 | 0 | 0.002837 | 0.281529 | 3,925 | 109 | 80 | 36.009174 | 0.825887 | 0.173503 | 0 | 0.148649 | 0 | 0 | 0.137484 | 0.013906 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.081081 | 0.013514 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24075c23fd6b2621d98a964f8bc89e606781c687 | 8,017 | py | Python | Project1-MinNE-python/src/interface/cmd.py | MrCaiDev/uestc-CNTProject | ea22325f749b48179a294e73390608491618683a | [
"MIT"
] | 1 | 2022-03-06T04:21:26.000Z | 2022-03-06T04:21:26.000Z | Project1-MinNE-python/src/interface/cmd.py | MrCaiDev/cnt | ea22325f749b48179a294e73390608491618683a | [
"MIT"
] | null | null | null | Project1-MinNE-python/src/interface/cmd.py | MrCaiDev/cnt | ea22325f749b48179a294e73390608491618683a | [
"MIT"
] | 1 | 2022-03-22T01:00:17.000Z | 2022-03-22T01:00:17.000Z | from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from utils.io import get_host_config
from utils.params import MessageType, Mode, Topology
class CommandUI(QMainWindow):
"""控制台主界面。"""
def __init__(self) -> None:
super().__init__()
self.__mode = Mode.UNICAST
self.__src = ""
self.__dst = ""
self.__msgtype = MessageType.TEXT
self.__text = ""
self.__filepath = ""
self.__hosts = get_host_config()
self.__init_ui()
def __init_ui(self):
"""初始化UI。"""
        # Window appearance.
self.setFixedSize(300, 200)
self.setWindowTitle(" ")
self.setFont(QFont("Microsoft YaHei UI", pointSize=11))
        # Window position (centred on the screen).
screen = QDesktopWidget().screenGeometry()
size = self.frameGeometry()
size.moveCenter(screen.center())
self.move(size.topLeft())
        # Window layout.
self.__central = QWidget()
self.setCentralWidget(self.__central)
self.__Hwidget_1 = QWidget(self.__central)
self.__Hwidget_1.setGeometry(QRect(140, 0, 150, 40))
self.__Hlayout_1 = QHBoxLayout(self.__Hwidget_1)
self.__Hlayout_1.setContentsMargins(0, 0, 0, 0)
self.__Hwidget_2 = QWidget(self.__central)
self.__Hwidget_2.setGeometry(QRect(10, 40, 280, 40))
self.__Hlayout_2 = QHBoxLayout(self.__Hwidget_2)
self.__Hlayout_2.setContentsMargins(0, 0, 0, 0)
self.__Vwidget = QWidget(self.__central)
self.__Vwidget.setGeometry(QRect(10, 80, 60, 80))
self.__Vlayout = QVBoxLayout(self.__Vwidget)
self.__Vlayout.setContentsMargins(0, 0, 0, 0)
        # Title label.
self.__title = QLabel(self.__central)
self.__title.setGeometry(QRect(10, 0, 130, 40))
self.__title.setFont(QFont("Microsoft YaHei UI", pointSize=12, weight=75))
self.__title.setText("💻 控制台")
        # Unicast radio button.
self.__unicast_radio = QRadioButton(self.__Hwidget_1)
self.__unicast_radio.setText("单播")
self.__unicast_radio.setChecked(True)
self.__unicast_radio.clicked.connect(self.__onclick_unicast_radio)
        # Broadcast radio button.
self.__broadcast_radio = QRadioButton(self.__Hwidget_1)
self.__broadcast_radio.setText("广播")
self.__broadcast_radio.clicked.connect(self.__onclick_broadcast_radio)
        # Source label.
self.__src_label = QLabel(self.__Hwidget_2)
self.__src_label.setAlignment(Qt.AlignCenter)
self.__src_label.setText("源")
# 源下拉框。
self.__src_combo = QComboBox(self.__Hwidget_2)
self.__src_combo.addItems(self.__hosts)
self.__src_combo.setCurrentIndex(-1)
self.__src_combo.activated.connect(self.__onactivate_src_combo)
# 目的标签。
self.__dst_label = QLabel(self.__Hwidget_2)
self.__dst_label.setAlignment(Qt.AlignCenter)
self.__dst_label.setText("目标")
# 目的下拉框。
self.__dst_combo = QComboBox(self.__Hwidget_2)
self.__dst_combo.addItems(self.__hosts)
self.__dst_combo.setCurrentIndex(-1)
self.__dst_combo.activated.connect(self.__onactivate_dst_combo)

        # Text radio button.
        self.__text_radio = QRadioButton(self.__Vwidget)
        self.__text_radio.setText("Text")
        self.__text_radio.setChecked(True)
        self.__text_radio.clicked.connect(self.__onclick_text_radio)

        # Text edit box.
        self.__text_edit = QLineEdit(self.__central)
        self.__text_edit.setGeometry(QRect(80, 85, 210, 30))
        self.__text_edit.textChanged.connect(self.__onedit_text_edit)

        # File radio button.
        self.__file_radio = QRadioButton(self.__Vwidget)
        self.__file_radio.setText("Image")
        self.__file_radio.clicked.connect(self.__onclick_file_radio)

        # File picker button.
        self.__file_btn = QPushButton(self.__central)
        self.__file_btn.setGeometry(QRect(80, 125, 210, 30))
        self.__file_btn.setText("Choose file")
        self.__file_btn.clicked.connect(self.__onclick_file_btn)

        # Send button.
        self.__send_btn = QPushButton(self.__central)
        self.__send_btn.setGeometry(QRect(10, 160, 280, 35))
        self.__send_btn.setText("Send")
        self.__send_btn.clicked.connect(self._onclick_send_btn)

        # Add the widgets to their layouts.
        self.__Hlayout_1.addWidget(self.__unicast_radio)
        self.__Hlayout_1.addWidget(self.__broadcast_radio)
        self.__Hlayout_2.addWidget(self.__src_label)
        self.__Hlayout_2.addWidget(self.__src_combo)
        self.__Hlayout_2.addWidget(self.__dst_label)
        self.__Hlayout_2.addWidget(self.__dst_combo)
        self.__Vlayout.addWidget(self.__text_radio)
        self.__Vlayout.addWidget(self.__file_radio)

    def __onclick_unicast_radio(self) -> None:
        """Unicast radio button click handler."""
        self.__mode = Mode.UNICAST
        if not self.__dst_combo.isEnabled():
            self.__dst_combo.setEnabled(True)

    def __onclick_broadcast_radio(self) -> None:
        """Broadcast radio button click handler."""
        self.__mode = Mode.BROADCAST
        if self.__dst_combo.isEnabled():
            self.__dst_combo.setEnabled(False)

    def __onactivate_src_combo(self) -> None:
        """Source combo box activation handler."""
        self.__src = self.__src_combo.currentText()

    def __onactivate_dst_combo(self) -> None:
        """Destination combo box activation handler."""
        self.__dst = self.__dst_combo.currentText()

    def __onclick_text_radio(self) -> None:
        """Text radio button click handler."""
        self.__msgtype = MessageType.TEXT

    def __onclick_file_radio(self) -> None:
        """File radio button click handler."""
        self.__msgtype = MessageType.FILE

    def __onedit_text_edit(self) -> None:
        """Text edit change handler."""
        self.__text = self.__text_edit.text()
        if not self.__text_radio.isChecked():
            self.__text_radio.setChecked(True)
            self.__msgtype = MessageType.TEXT

    def __onclick_file_btn(self) -> None:
        """File picker button click handler."""
        filename = QFileDialog.getOpenFileName(
            self, "Open", "", "Image files (*.jpg *.png)"
        )
        imgname = filename[0].split("/")[-1]
        if imgname:
            self.__filepath = filename[0]
            self.__file_btn.setText(imgname)
            self.__file_radio.setChecked(True)
            self.__msgtype = MessageType.FILE

    def __is_valid(self) -> bool:
        """Validate the current input.

        Returns:
            `True` if the input is valid, `False` otherwise.
        """
        if not self.__mode:
            CommandUI.__raise_critical("Please select a send mode!")
        elif self.__src_combo.currentIndex() == -1:
            CommandUI.__raise_critical("Please select a source device!")
        elif self.__mode == Mode.UNICAST and self.__dst_combo.currentIndex() == -1:
            CommandUI.__raise_critical("Please select a destination device!")
        elif (
            self.__mode == Mode.UNICAST
            and self.__src_combo.currentText() == self.__dst_combo.currentText()
        ):
            CommandUI.__raise_critical("Source and destination must differ!")
        elif not self.__msgtype:
            CommandUI.__raise_critical("Please select a message type!")
        elif self.__msgtype == MessageType.TEXT and not self.__text:
            CommandUI.__raise_critical("Please enter the text to send!")
        elif self.__msgtype == MessageType.FILE and not self.__filepath:
            CommandUI.__raise_critical("Please choose a file!")
        else:
            return True
        return False

    def _onclick_send_btn(self) -> None:
        """Send button click handler."""
        if not self.__is_valid():
            return
        # App-layer port numbers take the form "1<device>300".
        self._user_data = {
            "src": f"1{self.__src}300",
            "dst": f"1{self.__dst}300"
            if self.__mode == Mode.UNICAST
            else Topology.BROADCAST_PORT,
            "msgtype": self.__msgtype,
            "text": self.__text,
            "file": self.__filepath,
        }
        print(self._user_data)

    @staticmethod
    def __raise_critical(message: str):
        """Pop up a critical error dialog.

        Args:
            message: The error message to display.
        """
        box = QMessageBox(QMessageBox.Critical, "Error", message)
        box.addButton("OK", QMessageBox.ButtonRole.YesRole)
        box.exec_()
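
# A minimal launch sketch (an illustration, not part of the original file):
# CommandUI is an ordinary QMainWindow, so it runs under a standard Qt event
# loop. The __main__ guard below is hypothetical.
if __name__ == "__main__":
    import sys

    app = QApplication(sys.argv)  # exactly one QApplication per process
    ui = CommandUI()
    ui.show()
    sys.exit(app.exec_())  # enter the Qt event loop until the window closes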
| 35.162281 | 83 | 0.627292 | 884 | 8,017 | 5.140271 | 0.234163 | 0.029269 | 0.03169 | 0.033011 | 0.336928 | 0.183319 | 0.049736 | 0.018926 | 0 | 0 | 0 | 0.021287 | 0.255831 | 8,017 | 227 | 84 | 35.317181 | 0.740194 | 0.038543 | 0 | 0.056604 | 0 | 0 | 0.026135 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081761 | false | 0 | 0.031447 | 0 | 0.138365 | 0.006289 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
240941f8a40679f34211679593aa3ebe92d612d4 | 1,166 | py | Python | app/api/scans.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | 1 | 2020-09-23T21:40:16.000Z | 2020-09-23T21:40:16.000Z | app/api/scans.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | null | null | null | app/api/scans.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | 3 | 2020-07-11T19:18:12.000Z | 2021-08-14T17:43:06.000Z | """
Implements GET /scans.

- Returns as many scans as possible, starting from the newest.
- The response size must stay under the 6 MB cap; BYTE_LIMIT leaves headroom.
"""
from boto3.dynamodb.conditions import Key

from lib.dynamodb import scans_table
from lib.lambda_decorator.decorator import api_decorator, format_result

BYTE_LIMIT = 5000000


def determine_bytes(target: dict) -> int:
    """Size in bytes of the API-formatted result."""
    target_with_formatting = format_result(target)
    return len(target_with_formatting.encode('utf-8'))

def make_result(records: list) -> dict:
    for record in records:
        record.pop('scan', None)  # omit 'scan' from the result, if the key is present
    return {'scans': records}


def make_max_return(records: list, byte_limit: int) -> dict:
    """Drop records from the tail until the formatted result fits byte_limit."""
    count_bytes = determine_bytes(make_result(records))
    while count_bytes > byte_limit:
        records.pop()
        count_bytes = determine_bytes(make_result(records))
    return make_result(records)

@api_decorator
def scans_handler(event, context):
    # Query all scan records, newest first (descending range-key order).
    records = scans_table.query_all(
        KeyConditionExpression=Key('scan').eq(scans_table.SCAN),
        ScanIndexForward=False
    )
    return make_max_return(records, BYTE_LIMIT)
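
# A self-contained sketch of the same trim-until-it-fits pattern (illustration
# only, not part of the original module): json.dumps stands in for the
# project's format_result, and every name below is invented for the example.
import json


def fit_to_budget(items: list, byte_limit: int) -> list:
    """Drop trailing items until the serialized payload fits the budget."""
    while len(json.dumps({'scans': items}).encode('utf-8')) > byte_limit:
        items.pop()  # the query is newest-first, so the oldest items go first
    return items

# Example call: fit_to_budget([{'id': i} for i in range(1000)], byte_limit=2048)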
| 29.15 | 79 | 0.73928 | 157 | 1,166 | 5.286624 | 0.458599 | 0.043373 | 0.081928 | 0.048193 | 0.098795 | 0.098795 | 0.098795 | 0 | 0 | 0 | 0 | 0.010341 | 0.170669 | 1,166 | 39 | 80 | 29.897436 | 0.847983 | 0.163808 | 0 | 0.083333 | 0 | 0 | 0.018614 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.125 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
24097dde5cb65efb85c75250fd77c8b47f58abf5 | 1,119 | py | Python | sparrow_cloud/access_control/access_verify.py | waro163/sparrow_cloud | 16560fb93e1ba618607acf0c7ea40440708938ed | [
"MIT"
] | 15 | 2019-09-24T09:32:32.000Z | 2021-12-30T08:07:41.000Z | sparrow_cloud/access_control/access_verify.py | waro163/sparrow_cloud | 16560fb93e1ba618607acf0c7ea40440708938ed | [
"MIT"
] | 13 | 2019-09-06T03:20:02.000Z | 2021-09-27T03:37:25.000Z | sparrow_cloud/access_control/access_verify.py | waro163/sparrow_cloud | 16560fb93e1ba618607acf0c7ea40440708938ed | [
"MIT"
] | 17 | 2019-09-02T06:31:05.000Z | 2021-10-08T04:23:23.000Z | import logging

from sparrow_cloud.restclient import rest_client
from sparrow_cloud.restclient.exception import HTTPException
from sparrow_cloud.utils.get_cm_value import get_cm_value

logger = logging.getLogger(__name__)


def access_verify(user_id, app_name, resource_code):
    """Verify the user's permission on a resource via the access-control service."""
    if all([user_id, app_name, resource_code]):
        sc_access_control_svc = get_cm_value("SC_ACCESS_CONTROL_SVC")
        sc_access_control_api = get_cm_value("SC_ACCESS_CONTROL_API")
        params = {
            "user_id": user_id,
            "app_name": app_name,
            "resource_code": resource_code
        }
        try:
            response = rest_client.get(sc_access_control_svc, api_path=sc_access_control_api, params=params)
            if response['has_perm']:
                return True
        except HTTPException as ex:
            # An explicit 400/403 from the access-control service is a denial.
            if ex.status_code == 400 or ex.status_code == 403:
                logger.info("sparrow_cloud log : access verify failed. user:{}, message:{}".format(user_id, ex.detail))
                return False
        # Fail open: any other outcome (including service errors) allows access.
        return True
    return False
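
# Hedged usage sketch (not part of the original module; the caller and the
# app/resource values below are invented for illustration):
def _example_gate(user_id: str) -> None:
    """Raise if the access-control service denies 'order:read' on app 'shop'."""
    if not access_verify(user_id, "shop", "order:read"):
        raise PermissionError("access denied by access-control service")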
| 37.3 | 119 | 0.663092 | 144 | 1,119 | 4.777778 | 0.368056 | 0.132267 | 0.130814 | 0.056686 | 0.193314 | 0.145349 | 0 | 0 | 0 | 0 | 0 | 0.007194 | 0.254692 | 1,119 | 29 | 120 | 38.586207 | 0.817746 | 0.018767 | 0 | 0.166667 | 0 | 0 | 0.128466 | 0.038817 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.166667 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |