hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f0027d9f202cb95a6d7e1419f897365ea8870efa | 6,402 | py | Python | budget.py | NicoleHoppy/Budget | 40909bb7675e07e9b25b9309cbb943fd516dc3e5 | [
"MIT"
] | null | null | null | budget.py | NicoleHoppy/Budget | 40909bb7675e07e9b25b9309cbb943fd516dc3e5 | [
"MIT"
] | null | null | null | budget.py | NicoleHoppy/Budget | 40909bb7675e07e9b25b9309cbb943fd516dc3e5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import datetime
accounts = dict()
categories = dict()
history = list()
read_header = True
def ASSERT(condition, msg):
if not condition:
print("ERROR: ASSERTION FAILED:", msg)
exit(1)
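# budget_db layout (inferred from the parser below): header records first
# ("A_<name> <balance>" for accounts, "C_<name>" for categories, "S_<name>" for
# subcategories of the current category, "H" closes the header), then one
# history line per transaction:
# "<date> <amount> in|out <account> <category> [<subcategory>] [comment]"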
with open("budget_db", "r") as file:
current = None
for line in file:
if len(line) < 2 or " " == line[0] or "#" == line[0]:
continue
if read_header:
if "A" == line[0]:
temp = line.split(" ", 2)
accounts[temp[0]] = int(temp[1])
elif "C" == line[0]:
current = line[:-1].split(" ", 1)[0]
categories[current] = set()
elif "S" == line[0]:
ASSERT(current is not None, "current is not None")
categories[current].add(line[:-1].split(" ", 1)[0])
elif "H" == line[0]:
read_header = False
else:
temp = line[:-1].split(" ", 5)
data = int(temp[0])
amount = int(temp[1])
inout = temp[2] == "in"
ASSERT(temp[3] in accounts, "temp[3] in accounts")
account = temp[3]
ASSERT(temp[4] in categories, "temp[4] in categories")
category = temp[4]
subcategory = ""
comment = ""
if len(temp) == 6:
temp = temp[5]
if temp[0] == "S":
temp = temp.split(" ", 1)
ASSERT(temp[0] in categories[category], "temp[0] in categories[category]")
subcategory = temp[0]
if len(temp) == 2:
comment = temp[1].lstrip()
else:
comment = temp.strip()
history.append({"data": data, "amount": amount, "inout": inout, "account": account, "category": category, "subcategory": subcategory, "comment": comment})
ASSERT(not read_header, "not read_header")
#print(accounts)
#print(categories)
#print(history)
#print(sys.argv)
if len(sys.argv) == 1:
exit()
ASSERT(len(sys.argv) >= 2, "len(sys.argv) >= 2")
#budget new_account account
if sys.argv[1] == "new_account":
ASSERT(len(sys.argv) == 3, "len(sys.argv) = 3")
account = "A_" + sys.argv[2]
ASSERT(account not in accounts, "This account already exist, dumbass :p")
accounts[account] = 0
#budget new_category category
elif sys.argv[1] == "new_category":
ASSERT(len(sys.argv) == 3, "len(sys.argv) == 3")
category = "C_" + sys.argv[2]
ASSERT(category not in categories, "This category already exist, dumbass :p")
categories[category] = set()
#budget new_subcategory subcategory in category
elif sys.argv[1] == "new_subcategory":
ASSERT(len(sys.argv) == 5, "len(sys.argv) == 5")
subcategory = "S_" + sys.argv[2]
category = "C_" + sys.argv[4]
ASSERT(sys.argv[3] == "in", "sys.argv[3] == 'in'")
ASSERT(category in categories, "category in categories")
ASSERT(subcategory not in categories[category], "This subcategory already exist, dumbass :p")
categories[category].add(subcategory)
#budget (in|out) account amount category [subcategory] [# comment] #income|outcome
elif sys.argv[1] in ("in", "out"):
ASSERT(len(sys.argv) in range(5,8), "len(sys.argv) in range(5,8)")
inout = sys.argv[1] == "in"
account = "A_" + sys.argv[2]
ASSERT(account in accounts, "Sorry, this account doesn't exist, idiot :P")
category = "C_" + sys.argv[4]
ASSERT(category in categories, "Sorry, this category doesn't exist, idiot :P")
subcategory = ""
comment = ""
if len(sys.argv) == 5:
pass
elif len(sys.argv) == 6:
if sys.argv[5][0] == "#":
comment = sys.argv[5]
else:
subcategory = "S_" + sys.argv[5]
ASSERT(subcategory in categories[category], "subcategory in categories[category]")
else:
subcategory = "S_" + sys.argv[5]
        ASSERT(subcategory in categories[category], "subcategory in categories[category]")
comment = sys.argv[6]
accounts[account] += (1 if inout else -1) * amount
date = datetime.datetime.now()
date = date.year * 10000 + date.month * 100 + date.day
history.append({"data": date, "inout": inout, "account": account, "amount": amount, "category": category, "subcategory": subcategory, "comment": comment})
#budget transfer amount from account_1 to account_2 [# comment]
elif sys.argv[1] == "transfer":
ASSERT(len(sys.argv) in range(7,9), "len(sys.argv) in range(7,9)")
ASSERT(sys.argv[3] == "from", "sys.argv[3] == 'from'")
ASSERT(sys.argv[5] == "to", "sys.argv[5] == 'to'")
amount = int(sys.argv[2])
account_1 = "A_" + sys.argv[4]
account_2 = "A_" + sys.argv[6]
ASSERT(account_1 in accounts, "Sorry, this account doesn't exist, stupid")
ASSERT(account_2 in accounts, "Sorry, this account doesn't exist, stupid")
comment = ""
if len(sys.argv) == 7:
pass
elif len(sys.argv) == 8:
ASSERT(sys.argv[7][0] == "#", "sys.argv[7][0] == '#'")
comment = sys.argv[7]
accounts[account_1] += -amount
accounts[account_2] += amount
date = datetime.datetime.now()
date = date.year * 10000 + date.month * 100 + date.day
history.append({"data": date, "inout": False, "account": account_1, "amount": amount, "category": "C_transfer", "subcategory": "", "comment": comment})
history.append({"data": date, "inout": True, "account": account_2, "amount": amount, "category": "C_transfer", "subcategory": "", "comment": comment})
#budget summary
elif sys.argv[1] == "summary":
ASSERT(len(sys.argv) == 2, "len(sys.argv) == 2")
sum = 0
for account in accounts:
sum += abs(accounts[account])
for account in accounts:
accounts[account]/sum
print(f"{accounts[account]/100}zl {account[2:]} {round(accounts[account]/sum * 100, 2)}%")
#budget history number
elif sys.argv[1] == "history":
ASSERT(len(sys.argv) == 3, "len(sys.argv) == 3")
number = int(sys.argv[2])
for element in reversed(history[-number:]):
data = element['data']
day = data % 100
month = (data % 10000) // 100
year = data // 10000
month = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"][month - 1]
data = f"{day} {month} {year}"
amount = f"\x1b[{'38;5;10m+' if element['inout'] else '38;5;9m-'}{element['amount']}\x1b[0m"
print(f"{data} {element['account'][2:]} {amount} {element['category'][2:]} {element['subcategory'][2:]} {element['comment']}")
with open("budget_db", "w") as file:
for account in accounts:
file.write(f"{account} {accounts[account]}\n")
for category in categories:
file.write(f"{category}\n")
for subcategory in categories[category]:
file.write(f"{subcategory}\n")
file.write("H\n")
for element in history:
file.write(f"{element['data']} {element['amount']} {'in' if element['inout'] else 'out'} {element['account']} {element['category']} {element['subcategory']} {element['comment']}\n")
| 35.566667 | 183 | 0.644017 | 925 | 6,402 | 4.419459 | 0.139459 | 0.092466 | 0.05137 | 0.031311 | 0.329501 | 0.272994 | 0.188112 | 0.155333 | 0.099804 | 0.044521 | 0 | 0.031186 | 0.158544 | 6,402 | 179 | 184 | 35.765363 | 0.727678 | 0.055139 | 0 | 0.176871 | 0 | 0.027211 | 0.281405 | 0.042261 | 0 | 0 | 0 | 0 | 0.197279 | 1 | 0.006803 | false | 0.013605 | 0.013605 | 0 | 0.020408 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f002e0327a66f22542c7bf5155ea4b50ed6e173f | 2,135 | py | Python | NHentaidesu/types/object.py | rushkii/NHentaidesu | c1ee1ced37fa5dbed6feb53c89349cda913e7f06 | [
"MIT"
] | 4 | 2021-09-27T07:53:09.000Z | 2022-03-15T00:53:18.000Z | NHentaidesu/types/object.py | rushkii/NHentaidesu | c1ee1ced37fa5dbed6feb53c89349cda913e7f06 | [
"MIT"
] | null | null | null | NHentaidesu/types/object.py | rushkii/NHentaidesu | c1ee1ced37fa5dbed6feb53c89349cda913e7f06 | [
"MIT"
] | null | null | null | import NHentaidesu
from typing import Match
from datetime import datetime
import json
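# Meta is a metaclass: classes created from it get the custom __str__ defined below,
# while Meta's own anonymous metaclass makes str(Meta) itself return "~doujinshi".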
class Meta(type, metaclass=type("", (type,), {"__str__": lambda _: "~doujinshi"})):
def __str__(self):
return f'<class "NHentaidesu.types.{self.__name__}">'
class Object(metaclass=Meta):
def __init__(self, nhentai: "NHentaidesu.DoujinClient" = None):
self._nhentai = nhentai
def bind(self, nhentai: "NHentaidesu.DoujinClient"):
self._nhentai = nhentai
@staticmethod
def default(obj: "Object"):
if isinstance(obj, bytes):
return repr(obj)
if isinstance(obj, Match):
return repr(obj)
if isinstance(obj, datetime):
return str(obj)
return {
"_": obj.__class__.__name__,
**{
attr: (
getattr(obj, attr)
)
for attr in filter(lambda x: not x.startswith("_"), obj.__dict__)
if getattr(obj, attr) is not None
}
}
def __str__(self) -> str:
return json.dumps(self, indent=4, default=Object.default, ensure_ascii=False)
def __repr__(self) -> str:
return "NHentaidesu.types.{}({})".format(
self.__class__.__name__,
", ".join(
f"{attr}={repr(getattr(self, attr))}"
for attr in filter(lambda x: not x.startswith("_"), self.__dict__)
if getattr(self, attr) is not None
)
)
def __eq__(self, other: "Object") -> bool:
for attr in self.__dict__:
try:
if getattr(self, attr) != getattr(other, attr):
return False
except AttributeError:
return False
return True
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getstate__(self):
new_dict = self.__dict__.copy()
new_dict.pop("_nhentai", None)
return new_dict | 29.652778 | 86 | 0.533021 | 218 | 2,135 | 4.83945 | 0.311927 | 0.041706 | 0.042654 | 0.064455 | 0.159242 | 0.12891 | 0.075829 | 0.075829 | 0.075829 | 0.075829 | 0 | 0.000727 | 0.355972 | 2,135 | 72 | 87 | 29.652778 | 0.766545 | 0 | 0 | 0.105263 | 0 | 0 | 0.092494 | 0.064891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.175439 | false | 0 | 0.070175 | 0.070175 | 0.491228 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f002fddd7f5dc7afd6a19a99dc0db701244214d7 | 2,112 | py | Python | setup.py | kulasinski/multitrader | 1d375df723389e00caa6e18c09d46bf91fe9d0d3 | [
"Apache-2.0"
] | null | null | null | setup.py | kulasinski/multitrader | 1d375df723389e00caa6e18c09d46bf91fe9d0d3 | [
"Apache-2.0"
] | null | null | null | setup.py | kulasinski/multitrader | 1d375df723389e00caa6e18c09d46bf91fe9d0d3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Multitrader - backtesting engine for multiple tickers at once
# https://github.com/kulasinski/multitrader
from setuptools import setup
import io
from os import path
# --- get version ---
version = "unknown"
with open("multitrader/version.py") as f:
line = f.read().strip()
version = line.replace("version = ", "").replace('"', '')
# --- /get version ---
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with io.open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
    name='multitrader',
version=version,
description='Backtesting engine for multiple tickers at once',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/kulasinski/multitrader',
author='Karol Kulasinski',
author_email='physica.solutions@gmail.com',
license='Apache',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: Office/Business :: Financial',
'Topic :: Office/Business :: Financial :: Investment',
'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
platforms=['any'],
keywords='pandas, yahoo finance',
# packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
) | 35.2 | 83 | 0.621686 | 220 | 2,112 | 5.913636 | 0.513636 | 0.087625 | 0.115296 | 0.119908 | 0.116833 | 0.063028 | 0.063028 | 0 | 0 | 0 | 0 | 0.010474 | 0.231534 | 2,112 | 60 | 84 | 35.2 | 0.791128 | 0.183712 | 0 | 0 | 0 | 0 | 0.522659 | 0.0429 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.075 | 0 | 0.075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f00317eb8119f0aa2bef1eb1d0dc2da43e342087 | 1,661 | py | Python | examples/03-remote-system/main.py | grelleum/diffsync | 00891f5942ca3c4fef8e76ce47d3c226e2375cb8 | [
"Apache-2.0"
] | 67 | 2020-10-26T14:57:53.000Z | 2022-03-28T20:38:03.000Z | examples/03-remote-system/main.py | grelleum/diffsync | 00891f5942ca3c4fef8e76ce47d3c226e2375cb8 | [
"Apache-2.0"
] | 47 | 2020-10-26T14:49:37.000Z | 2022-03-04T11:32:10.000Z | examples/03-remote-system/main.py | grelleum/diffsync | 00891f5942ca3c4fef8e76ce47d3c226e2375cb8 | [
"Apache-2.0"
] | 13 | 2020-12-06T02:32:34.000Z | 2022-03-28T16:10:02.000Z | #!/usr/bin/env python
"""Main executable for DiffSync "example3"."""
import sys
import argparse
from local_adapter import LocalAdapter
from nautobot_adapter import NautobotAdapter
from diff import AlphabeticalOrderDiff
from diffsync.enum import DiffSyncFlags
from diffsync.logging import enable_console_logging
def main():
"""Demonstrate DiffSync behavior using the example backends provided."""
parser = argparse.ArgumentParser("example3")
parser.add_argument("--verbosity", "-v", default=0, action="count")
parser.add_argument("--diff", action="store_true")
parser.add_argument("--sync", action="store_true")
args = parser.parse_args()
enable_console_logging(verbosity=args.verbosity)
if not args.sync and not args.diff:
sys.exit("please select --diff or --sync")
print("Initializing and loading Local Data ...")
local = LocalAdapter()
local.load()
print("Initializing and loading Nautobot Data ...")
nautobot = NautobotAdapter()
nautobot.load()
# If a Region exists in Nautobot (the "destination") but not in the local data, skip it, rather than deleting it
flags = DiffSyncFlags.SKIP_UNMATCHED_DST
if args.diff:
print("Calculating the Diff between the local adapter and Nautobot ...")
diff = nautobot.diff_from(local, flags=flags, diff_class=AlphabeticalOrderDiff)
print(diff.str())
if args.sync:
if not args.diff:
diff = None
print("Updating the list of countries in Nautobot ...")
nautobot.sync_from(local, flags=flags, diff_class=AlphabeticalOrderDiff, diff=diff)
if __name__ == "__main__":
main()
| 32.568627 | 116 | 0.707405 | 204 | 1,661 | 5.632353 | 0.411765 | 0.023499 | 0.044386 | 0.046997 | 0.085292 | 0.085292 | 0.085292 | 0 | 0 | 0 | 0 | 0.002221 | 0.186635 | 1,661 | 50 | 117 | 33.22 | 0.848261 | 0.143889 | 0 | 0 | 0 | 0 | 0.202837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.205882 | 0 | 0.235294 | 0.147059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f004250bb1daee9f282cbee076b553141192e511 | 5,452 | py | Python | ord_interface/visualization/drawing.py | Open-Reaction-Database/ord-interface | 5a9c20e72a1d6fddf71dc79d8e73071d029ae2cd | [
"Apache-2.0"
] | 2 | 2020-12-04T15:41:18.000Z | 2020-12-04T15:41:24.000Z | ord_interface/visualization/drawing.py | Open-Reaction-Database/ord-interface | 5a9c20e72a1d6fddf71dc79d8e73071d029ae2cd | [
"Apache-2.0"
] | 2 | 2020-12-03T23:07:46.000Z | 2020-12-04T16:44:43.000Z | ord_interface/visualization/drawing.py | Open-Reaction-Database/ord-interface | 5a9c20e72a1d6fddf71dc79d8e73071d029ae2cd | [
"Apache-2.0"
] | 1 | 2020-12-03T19:13:10.000Z | 2020-12-03T19:13:10.000Z | # Copyright 2020 Open Reaction Database Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drawing functions.
This module contains two molecular drawing functions to render SVGs or PNGs
given an RDKit molecule object: mol_to_svg and mol_to_png.
"""
import io
import base64
import re
from typing import Optional
import numpy as np
from PIL import Image, ImageOps
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import rdDepictor
rdDepictor.SetPreferCoordGen(True)
# pylint: disable=unsubscriptable-object
def trim_image_whitespace(image: Image.Image, padding: int = 0) -> Image.Image:
"""Crops and image to a minimal rectangle.
This function takes a PIL image and crops it to the minimum rectangle based
on its whiteness/transparency.
Args:
image: PIL image.
padding: Integer number of pixels to use for padding.
Returns:
A new PIL image.
"""
# Convert to array
as_array = np.array(image) # N x N x (r,g,b,a)
# Set previously-transparent pixels to white
if as_array.shape[2] == 4:
as_array[as_array[:, :, 3] == 0] = [255, 255, 255, 0]
as_array = as_array[:, :, :3]
# Content defined as non-white and non-transparent pixel
has_content = np.sum(as_array, axis=2, dtype=np.uint32) != 255 * 3
xs_nonzero, ys_nonzero = np.nonzero(has_content)
# Crop down
margin = 5
x_range = (
max([min(xs_nonzero) - margin, 0]),
min([max(xs_nonzero) + margin, as_array.shape[0]]),
)
y_range = (
max([min(ys_nonzero) - margin, 0]),
min([max(ys_nonzero) + margin, as_array.shape[1]]),
)
as_array_cropped = as_array[x_range[0] : x_range[1], y_range[0] : y_range[1], 0:3]
image = Image.fromarray(as_array_cropped, mode="RGB")
return ImageOps.expand(image, border=padding, fill=255)
def mol_to_svg( # pylint: disable=inconsistent-return-statements
mol: Chem.Mol,
min_size: int = 50,
max_size: int = 300,
bond_length: int = 25,
padding: int = 10,
) -> Optional[str]:
"""Creates a (cropped) SVG molecule drawing as a string.
Args:
mol: RDKit Mol.
min_size: Integer minimum image size (in pixels).
max_size: Integer maximum image size (in pixels).
bond_length: Integer bond length (in pixels).
padding: Integer number of padding pixels in each dimension.
Returns:
String SVG image.
"""
Chem.Kekulize(mol)
rdDepictor.Compute2DCoords(mol)
drawer = _draw_svg(mol, size_x=max_size, size_y=max_size, bond_length=bond_length)
# Find the extent of the drawn image so we can crop the canvas.
min_x, max_x, min_y, max_y = np.inf, -np.inf, np.inf, -np.inf
for atom in mol.GetAtoms():
canvas_x, canvas_y = drawer.GetDrawCoords(atom.GetIdx())
min_x = min(canvas_x, min_x)
max_x = max(canvas_x, max_x)
min_y = min(canvas_y, min_y)
max_y = max(canvas_y, max_y)
drawer = _draw_svg(
mol,
size_x=max(min_size, int(max_x - min_x + 2 * padding)),
size_y=max(min_size, int(max_y - min_y + 2 * padding)),
bond_length=bond_length,
)
match = re.search(r"(<svg\s+.*</svg>)", drawer.GetDrawingText(), flags=re.DOTALL)
if match:
return match.group(1)
def _draw_svg(mol: Chem.Mol, size_x: int, size_y: int, bond_length: int) -> Draw.MolDraw2DSVG:
"""Creates a canvas and draws a SVG.
Args:
mol: RDKit Mol.
size_x: Integer image size in the x-dimension (in pixels).
size_y: Integer image size in the y-dimension (in pixels).
bond_length: Integer bond length (in pixels).
Returns:
MolDraw2DSVG.
"""
drawer = Draw.MolDraw2DSVG(size_x, size_y)
drawer.drawOptions().fixedBondLength = bond_length
drawer.drawOptions().padding = 0.0
drawer.DrawMolecule(mol)
drawer.FinishDrawing()
return drawer
def mol_to_png(mol: Chem.Mol, max_size: int = 1000, bond_length: int = 25, png_quality: int = 70) -> str:
"""Creates a (cropped) PNG molecule drawing as a string.
Args:
mol: RDKit Mol.
max_size: Integer maximum image size (in pixels).
bond_length: Integer bond length (in pixels).
png_quality: Integer PNG quality.
Returns:
String PNG image.
"""
drawer = Draw.MolDraw2DCairo(max_size, max_size)
drawer.drawOptions().fixedBondLength = bond_length
try:
drawer.DrawMolecule(mol)
except ValueError as value_error:
raise ValueError(Chem.MolToSmiles(mol)) from value_error
drawer.FinishDrawing()
temp = io.BytesIO()
temp.write(drawer.GetDrawingText())
temp.seek(0)
img = Image.open(temp)
img = trim_image_whitespace(img, padding=10)
output = io.BytesIO()
img.save(output, format="png", quality=png_quality)
output.seek(0)
b64 = base64.b64encode(output.getvalue())
return b64.decode("UTF-8")
| 32.452381 | 105 | 0.668929 | 796 | 5,452 | 4.447236 | 0.296482 | 0.042373 | 0.015537 | 0.012712 | 0.177401 | 0.094633 | 0.088983 | 0.075424 | 0.075424 | 0.041243 | 0 | 0.020636 | 0.226706 | 5,452 | 167 | 106 | 32.646707 | 0.819023 | 0.393434 | 0 | 0.075 | 0 | 0 | 0.008963 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1125 | 0 | 0.2125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f006b4b0af1d9138de164f109ebcd7a99bdd93cd | 1,623 | py | Python | analysis/combine-results.py | IDLabResearch/Montolo | 6d49db154b7b43cfdae70e8e0eb0cba6efbd638f | [
"CC0-1.0"
] | null | null | null | analysis/combine-results.py | IDLabResearch/Montolo | 6d49db154b7b43cfdae70e8e0eb0cba6efbd638f | [
"CC0-1.0"
] | null | null | null | analysis/combine-results.py | IDLabResearch/Montolo | 6d49db154b7b43cfdae70e8e0eb0cba6efbd638f | [
"CC0-1.0"
] | null | null | null | from rdflib import Graph
import csv
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from pprint import pprint
from optparse import OptionParser
import os
def main():
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option('-i', '--input-dir', action='append', help='The input directory containing the RDF data cube statistics')
parser.add_option('-o', '--output-dir', action='store', help='The output directory where the combined RDF file should be stored')
parser.add_option('-f', '--output-format', action='store', help='The output format, possible values are "xml", "n3", "turtle", "nt", "pretty-xml", "trix", "trig" and "nquads"')
(options, args) = parser.parse_args()
if not options.input_dir or not options.output_dir \
or options.output_format not in ('xml', 'n3', 'turtle', 'nt', 'pretty-xml', 'trix', 'trig', 'nquads'):
parser.print_help();
exit(1)
g = Graph()
all_found_stat_files = 0
for d in options.input_dir:
found_stat_files=0
print("Analyzing directory '" + d + "'")
for stats_file in os.listdir(d):
if stats_file.endswith('ttl'):
g.parse(os.path.join(d, stats_file), format='turtle')
found_stat_files += 1
if stats_file.endswith('nt'):
g.parse(os.path.join(d, stats_file), format='nt')
found_stat_files += 1
all_found_stat_files += found_stat_files
print(str(found_stat_files) + " stat files found for '" + d)
print(str(all_found_stat_files) + " stat files found in total")
g.serialize(destination=options.output_dir, format=options.output_format)
main()
| 36.066667 | 178 | 0.689464 | 240 | 1,623 | 4.516667 | 0.366667 | 0.083026 | 0.103321 | 0.047048 | 0.210332 | 0.166052 | 0.114391 | 0.114391 | 0.059041 | 0 | 0 | 0.005185 | 0.168207 | 1,623 | 44 | 179 | 36.886364 | 0.797778 | 0 | 0 | 0.057143 | 0 | 0.028571 | 0.268638 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.228571 | 0 | 0.257143 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0072fd46cc0e276b92ac29cd4c41ae0a505f4a1 | 404 | py | Python | flatland/database/population/drawing/dash_pattern_instances.py | lelandstarr/flatland-model-diagram-editor | dfbd10d80542359c6951d7b039a5a4e3da2a0f50 | [
"MIT"
] | 10 | 2021-01-03T16:47:34.000Z | 2022-03-30T18:47:07.000Z | flatland/database/population/drawing/dash_pattern_instances.py | lelandstarr/flatland-model-diagram-editor | dfbd10d80542359c6951d7b039a5a4e3da2a0f50 | [
"MIT"
] | 91 | 2021-01-09T02:14:13.000Z | 2022-02-24T10:24:10.000Z | flatland/database/population/drawing/dash_pattern_instances.py | lelandstarr/flatland-model-diagram-editor | dfbd10d80542359c6951d7b039a5a4e3da2a0f50 | [
"MIT"
] | 1 | 2021-01-13T22:13:19.000Z | 2021-01-13T22:13:19.000Z | """
dash_pattern_instances.py
"""
population = [
# To simplify the database, a non-dashed line has the meaningless 0,0 value.
# Otherwise, we would have to subclass line styles into dashed and non-dashed
{'Name': 'no dash', 'Solid': 0, 'Blank': 0},
# only one real dash pattern for now, used for UML assoc classes and imported classes
{'Name': 'even dash', 'Solid': 9, 'Blank': 9}
] | 33.666667 | 89 | 0.665842 | 61 | 404 | 4.377049 | 0.672131 | 0.082397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01875 | 0.207921 | 404 | 12 | 90 | 33.666667 | 0.815625 | 0.64604 | 0 | 0 | 0 | 0 | 0.328358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f00d80f4dcb39a5166685681cc16380ca903006b | 563 | py | Python | Examples/camera effects.py | JaledMC/Learning-Raspi | d22692e98156341fca533de12b3c3fc2d45ddf86 | [
"MIT"
] | 1 | 2019-03-18T16:52:33.000Z | 2019-03-18T16:52:33.000Z | Examples/camera effects.py | JaledMC/Learning-Raspi | d22692e98156341fca533de12b3c3fc2d45ddf86 | [
"MIT"
] | null | null | null | Examples/camera effects.py | JaledMC/Learning-Raspi | d22692e98156341fca533de12b3c3fc2d45ddf86 | [
"MIT"
] | null | null | null | from picamera import PiCamera
from time import sleep
camera = PiCamera()
# We can modify many effects with camera.image_effect = 'colorswap'
"""Options: none, negative, solarize, sketch, denoise, emboss, oilpaint,
hatch, gpen, pastel, watercolor, film, blur, saturation, colorswap,
washedout, posterise, colorpoint, colorbalance, cartoon, deinterlace1,
and deinterlace2"""
camera.start_preview()
for effect in camera.IMAGE_EFFECTS:
camera.image_effect = effect
camera.annotate_text = "Effect: %s" % effect
sleep(5)
camera.stop_preview() | 33.117647 | 74 | 0.74778 | 68 | 563 | 6.102941 | 0.691176 | 0.079518 | 0.081928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006303 | 0.154529 | 563 | 17 | 75 | 33.117647 | 0.865546 | 0.115453 | 0 | 0 | 0 | 0 | 0.03937 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f00fe3056e88d4e5f3e90d7ebab85f9ec9766a08 | 1,536 | py | Python | analyticlab/measure/BCategory.py | xingrongtech/analyticlab | 2827591db9b31ff38299712ed6c404ff30583f6f | [
"MIT"
] | 13 | 2018-05-11T02:45:11.000Z | 2021-07-17T22:19:04.000Z | analyticlab/measure/BCategory.py | xingrongtech/analyticlab | 2827591db9b31ff38299712ed6c404ff30583f6f | [
"MIT"
] | null | null | null | analyticlab/measure/BCategory.py | xingrongtech/analyticlab | 2827591db9b31ff38299712ed6c404ff30583f6f | [
"MIT"
] | 2 | 2019-10-17T11:43:11.000Z | 2019-11-27T10:54:28.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 18:22:57 2018
@author: xingrongtech
"""
from ..num import Num
from ..latexoutput import LaTeX
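# Coverage factors k for each supported error distribution (indexed by
# instrument.distribution), used as u_B = a / k; kExpr holds the matching
# LaTeX forms, e.g. sqrt(3) for a uniform and sqrt(6) for a triangular distribution.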
kValue = (1, 3**0.5, 6**0.5, 2**0.5, 1)
kExpr = ('1', r'\sqrt{3}', r'\sqrt{6}', r'\sqrt{2}', '1')
def b(instrument, sym=None, process=False, needValue=False, remainOneMoreDigit=True):
    '''Compute the Type B uncertainty.
    [Parameters]
    1. instrument (Ins): the measuring instrument. Use one of the presets in the
       analyticlab.uncertainty.ins module, or create a new instrument via the Ins class.
    2. sym (optional, str): the symbol. Defaults to sym=None.
    3. process (optional, bool): whether to show the calculation process. Defaults to process=False.
    4. needValue (optional, bool): when the process is requested, whether to also return the value. Defaults to needValue=False.
    5. remainOneMoreDigit (optional, bool): whether to keep one extra significant digit in the result. Defaults to remainOneMoreDigit=True.
    [Returns]
    (1) If process is False: the Type B uncertainty as a Num.
    (2) If process is True and needValue is False: the calculation process as a LaTeX object.
    (3) If process is True and needValue is True: a tuple of the Num uncertainty and the LaTeX calculation process.'''
a = instrument.a
a.setSciBound(9)
if instrument.distribution <= 4:
uB = a / kValue[instrument.distribution]
else:
uB = a / (6/(1+instrument.beta**2))**0.5
if remainOneMoreDigit:
uB.remainOneMoreDigit()
if process:
if instrument.distribution <= 4:
latex = LaTeX(r'u_{%s B}=\cfrac{a}{k}=\cfrac{%s}{%s}=%s' % (sym, a.dlatex(), kExpr[instrument.distribution], uB.latex()))
else:
latex = LaTeX(r'u_{%s B}=\cfrac{a}{k}=\cfrac{%s}{\sqrt{6/(1+%g^{2})}}=%s' % (sym, a.dlatex(), instrument.beta, uB.latex()))
if needValue:
return uB, latex
else:
return latex
return uB
| 35.72093 | 135 | 0.636719 | 194 | 1,536 | 5.030928 | 0.417526 | 0.008197 | 0.006148 | 0.05123 | 0.055328 | 0.055328 | 0.055328 | 0.055328 | 0.055328 | 0.055328 | 0 | 0.037903 | 0.192708 | 1,536 | 42 | 136 | 36.571429 | 0.749194 | 0.352214 | 0 | 0.217391 | 0 | 0.043478 | 0.12845 | 0.08811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.086957 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f01044633b30e4202c3117cd2050c7bf157bf722 | 2,682 | py | Python | tests/test_modules.py | sflicht/pyflyby | b176462e243d7787e60c7604704703ebc161bf05 | [
"BSD-3-Clause"
] | 33 | 2018-10-05T21:21:45.000Z | 2020-03-08T19:53:43.000Z | tests/test_modules.py | sflicht/pyflyby | b176462e243d7787e60c7604704703ebc161bf05 | [
"BSD-3-Clause"
] | 60 | 2018-10-18T03:05:42.000Z | 2020-03-31T20:10:51.000Z | tests/test_modules.py | sflicht/pyflyby | b176462e243d7787e60c7604704703ebc161bf05 | [
"BSD-3-Clause"
] | 12 | 2018-10-06T16:37:20.000Z | 2020-01-17T11:37:05.000Z | # pyflyby/test_modules.py
# License for THIS FILE ONLY: CC0 Public Domain Dedication
# http://creativecommons.org/publicdomain/zero/1.0/
from __future__ import (absolute_import, division, print_function,
with_statement)
import logging.handlers
from pyflyby._file import Filename
from pyflyby._idents import DottedIdentifier
from pyflyby._modules import ModuleHandle
import re
import subprocess
import sys
from textwrap import dedent
import pytest
def test_ModuleHandle_1():
m = ModuleHandle("sys")
assert m.name == DottedIdentifier("sys")
def test_ModuleHandle_dotted_1():
m = ModuleHandle("logging.handlers")
assert m.name == DottedIdentifier("logging.handlers")
def test_ModuleHandle_from_module_1():
m = ModuleHandle(logging.handlers)
assert m == ModuleHandle("logging.handlers")
assert m.name == DottedIdentifier("logging.handlers")
def test_eqne_1():
m1a = ModuleHandle("foo.bar")
m1b = ModuleHandle("foo.bar")
m2 = ModuleHandle("foo.baz")
assert (m1a == m1b)
assert not (m1a != m1b)
assert not (m1a == m2)
assert (m1a != m2)
def test_filename_1():
fn = logging.handlers.__file__
fn = Filename(re.sub("[.]pyc$", ".py", fn)).real
m = ModuleHandle("logging.handlers")
assert m.filename.real == fn
assert m.filename.base == "handlers.py"
def test_filename_init_1():
fn = logging.__file__
fn = Filename(re.sub("[.]pyc$", ".py", fn)).real
m = ModuleHandle("logging")
assert m.filename.real == fn
assert m.filename.base == "__init__.py"
def test_module_1():
m = ModuleHandle("logging")
assert m.module is logging
@pytest.mark.xfail(reason="Fails on CI not locally")
def test_filename_noload_1():
# ensure there is no problem with sys.exit itself.
retcode = subprocess.call([sys.executable, '-c', dedent('''
import sys
sys.exit(0)
''')])
assert retcode == 0
# Ensure there is no error with byflyby itself
retcode = subprocess.call([sys.executable, '-c', dedent('''
from pyflyby._modules import ModuleHandle
import sys
ModuleHandle("multiprocessing").filename
sys.exit(0)
''')])
assert retcode == 0
# don't exit with 1, as something else may exit with 1.
retcode = subprocess.call([sys.executable, '-c', dedent('''
from pyflyby._modules import ModuleHandle
import sys
ModuleHandle("multiprocessing").filename
if "multiprocessing" in sys.modules:
sys.exit(123)
else:
sys.exit(0)
''')])
assert retcode == 0
| 27.367347 | 66 | 0.646532 | 324 | 2,682 | 5.209877 | 0.283951 | 0.037322 | 0.07109 | 0.066351 | 0.505924 | 0.462678 | 0.393957 | 0.37263 | 0.341232 | 0.293839 | 0 | 0.016082 | 0.234899 | 2,682 | 97 | 67 | 27.649485 | 0.80653 | 0.103654 | 0 | 0.455882 | 0 | 0 | 0.258239 | 0.033375 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.117647 | false | 0 | 0.220588 | 0 | 0.338235 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f013da5fdd025cc45a2186ffe9148128c55cef7f | 719 | py | Python | ex112.py | lhardt/PythonCourse | c0654bfc589f5faf1c26f419917683a0a2d6a0de | [
"MIT"
] | null | null | null | ex112.py | lhardt/PythonCourse | c0654bfc589f5faf1c26f419917683a0a2d6a0de | [
"MIT"
] | null | null | null | ex112.py | lhardt/PythonCourse | c0654bfc589f5faf1c26f419917683a0a2d6a0de | [
"MIT"
] | null | null | null |
def leiaInt(prompt):
aceito = False
while not aceito:
try:
return int(input(prompt))
except KeyboardInterrupt:
print('O usuário não preencheu o número')
return 0
except:
print('ERRO! Digite um valor inteiro')
def leiaFloat(prompt):
aceito = False
while not aceito:
try:
tmp = input(prompt)
if ',' in tmp:
tmp = tmp.replace(',','.')
valor = float(tmp)
return valor
except KeyboardInterrupt:
print('O usuário não preencheu o número')
return 0
except:
print('ERRO! Digite um valor no formato XXX.XX')
inteiro = leiaInt('Digite um valor inteiro:\n>>>\t')
numreal = leiaFloat('Digite um valor real:\n>>>\t')
print(f'O inteiro foi {inteiro} e o real foi {numreal}')
| 21.147059 | 56 | 0.667594 | 102 | 719 | 4.705882 | 0.411765 | 0.066667 | 0.108333 | 0.091667 | 0.516667 | 0.516667 | 0.516667 | 0.375 | 0.375 | 0.375 | 0 | 0.003509 | 0.207232 | 719 | 33 | 57 | 21.787879 | 0.838596 | 0 | 0 | 0.518519 | 0 | 0 | 0.334262 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0 | 0 | 0.222222 | 0.185185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f013ee43c32e1f052ece1160000faab71df98b6d | 3,070 | py | Python | processDocument.py | amberkiser/RSV-NLP | 7c5f562eff8593cd5af7666e649927759e79fc7e | [
"MIT"
] | null | null | null | processDocument.py | amberkiser/RSV-NLP | 7c5f562eff8593cd5af7666e649927759e79fc7e | [
"MIT"
] | null | null | null | processDocument.py | amberkiser/RSV-NLP | 7c5f562eff8593cd5af7666e649927759e79fc7e | [
"MIT"
] | null | null | null | from nlpPipe import ClassificationPipe, ExtractionPipe
import pandas as pd
import re
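# Turns one clinical document into a DataFrame of RSV mention annotations
# (span position/text plus affirmed/negated/hypothetical status) produced by
# the classification pipeline.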
class ProcessClassificationDoc(object):
def __init__(self, document, sentence_rules, target_rules, context_rules):
self.document = document
self.sentence_rules = sentence_rules
self.target_rules = target_rules
self.context_rules = context_rules
def make_dataframe(self):
doc_annotations = self.make_annotations()
class_columns = ['PatientId', 'DocumentId', 'SpanStart', 'SpanEnd', 'SpanText', 'Status']
classification_df = pd.DataFrame(columns=class_columns)
for ann in doc_annotations:
if ann.type == 'rsv':
patient_id = self.document.document_id[:3]
doc_id = self.document.document_id
span_start = ann.start_index
span_end = ann.end_index
span_text = ann.spanned_text
status = 'affirmed'
for k, v in ann.attributes.items():
if k == 'hypothetical' or k == 'negated':
status = k
ann_dict = {'PatientId': patient_id, 'DocumentId': doc_id, 'SpanStart': span_start,
'SpanEnd': span_end, 'SpanText': span_text, 'Status': status}
classification_df = classification_df.append(ann_dict, ignore_index=True)
return classification_df
def make_annotations(self):
classification_pipe = ClassificationPipe(self.sentence_rules, self.target_rules, self.context_rules)
doc_annotations = classification_pipe.process(self.document.text)
return doc_annotations
class ProcessExtractionDoc(object):
def __init__(self, document, sentence_rules, target_rules, between_rules):
self.document = document
self.sentence_rules = sentence_rules
self.target_rules = target_rules
self.between_rules = between_rules
def make_dataframe(self):
doc_annotations = self.make_annotations()
extract_columns = ['PatientId', 'DocumentId', 'SpanStart', 'SpanEnd', 'SpanText', 'OxygenSaturation']
extraction_df = pd.DataFrame(columns=extract_columns)
for ann in doc_annotations:
patient_id = self.document.document_id[:3]
doc_id = self.document.document_id
span_start = ann.start_index
span_end = ann.end_index
span_text = ann.spanned_text
o2sat = [int(s) for s in re.findall(r'\b\d+\b', span_text)][0]
ann_dict = {'PatientId': patient_id, 'DocumentId': doc_id, 'SpanStart': span_start,
'SpanEnd': span_end, 'SpanText': span_text, 'OxygenSaturation': o2sat}
extraction_df = extraction_df.append(ann_dict, ignore_index=True)
return extraction_df
def make_annotations(self):
extraction_pipe = ExtractionPipe(self.sentence_rules, self.target_rules, self.between_rules)
doc_annotations = extraction_pipe.process(self.document.text)
return doc_annotations
| 42.638889 | 109 | 0.652443 | 341 | 3,070 | 5.583578 | 0.222874 | 0.063025 | 0.063025 | 0.048319 | 0.679622 | 0.629202 | 0.546218 | 0.508403 | 0.421218 | 0.369748 | 0 | 0.002197 | 0.258632 | 3,070 | 71 | 110 | 43.239437 | 0.834359 | 0 | 0 | 0.491228 | 0 | 0 | 0.08241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f015522f57ccd143a026e7a05d3125ca1b70e221 | 2,781 | py | Python | src/flotilla/db/lock.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 5 | 2016-01-01T15:50:21.000Z | 2018-11-27T17:38:15.000Z | src/flotilla/db/lock.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 27 | 2015-12-17T07:49:56.000Z | 2018-07-13T15:06:33.000Z | src/flotilla/db/lock.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 7 | 2015-12-01T22:04:24.000Z | 2021-11-28T13:21:35.000Z | import logging
logger = logging.getLogger('flotilla')
import time
from boto.dynamodb2.exceptions import ConditionalCheckFailedException, \
ItemNotFound
class DynamoDbLocks(object):
def __init__(self, instance_id, lock_table):
self._id = instance_id
self._locks = lock_table
def try_lock(self, name, ttl=60, refresh=False):
acquire_time = time.time()
try:
lock_item = self._locks.get_item(lock_name=name, consistent=True)
except ItemNotFound:
logger.debug('Lock %s not found, creating.', name)
try:
self._locks.put_item({
'lock_name': name,
'acquire_time': acquire_time,
'owner': self._id,
})
return True
except Exception as e:
logger.exception(e)
return False
# Lock found, check ttl:
acquired_time = float(lock_item['acquire_time'])
if (acquire_time - acquired_time) > ttl:
logger.debug('Lock %s has expired, attempting to acquire.', name)
lock_item['owner'] = self._id
lock_item['acquire_time'] = acquire_time
try:
lock_item.save()
logger.debug('Acquired expired lock %s.', name)
return True
except ConditionalCheckFailedException:
logger.debug('Did not acquire expired lock %s.', name)
return False
owner = lock_item['owner']
if owner == self._id:
logger.debug('Lock %s is held by me (since %s).', name,
acquired_time)
if refresh:
lock_item['acquire_time'] = acquire_time
lock_item.save()
return True
else:
logger.debug('Lock %s is held by %s (since %s).', name, owner,
acquired_time)
return False
def release_lock(self, name):
try:
logger.debug('Looking up lock %s to release', name)
lock_item = self._locks.get_item(lock_name=name, consistent=True)
owner = lock_item['owner']
if owner == self._id:
logger.debug('Lock belongs to me, releasing')
lock_item.delete()
else:
logger.debug('Lock belongs to %s, unable to release', owner)
except ItemNotFound:
logger.debug('Lock %s not found to release', name)
def get_owner(self, name):
try:
lock_item = self._locks.get_item(lock_name=name, consistent=True)
return lock_item['owner'], float(lock_item['acquire_time'])
except ItemNotFound:
return None, None
| 34.7625 | 77 | 0.550881 | 306 | 2,781 | 4.820261 | 0.228758 | 0.075932 | 0.071186 | 0.054237 | 0.372881 | 0.288136 | 0.247458 | 0.225085 | 0.168136 | 0.168136 | 0 | 0.001675 | 0.355987 | 2,781 | 79 | 78 | 35.202532 | 0.821887 | 0.007911 | 0 | 0.432836 | 0 | 0 | 0.151977 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.044776 | 0 | 0.238806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f01567e34148714623e278d87fcf480ec2de8bde | 6,924 | py | Python | python/tests/test_parse.py | zengxs/highlight | 411d3765af02312e86b8f09ba0077ee7f02c751d | [
"Apache-2.0"
] | null | null | null | python/tests/test_parse.py | zengxs/highlight | 411d3765af02312e86b8f09ba0077ee7f02c751d | [
"Apache-2.0"
] | null | null | null | python/tests/test_parse.py | zengxs/highlight | 411d3765af02312e86b8f09ba0077ee7f02c751d | [
"Apache-2.0"
] | null | null | null | import os
from pathlib import Path
import yaml
from hlkit.syntax import MatchPattern, SyntaxDefinition
from hlkit.parse import ParseResult, ParseState
BASE_DIR = os.path.join(os.path.dirname(__file__), "..", "..")
ASSETS_DIR = os.path.join(BASE_DIR, "assets")
ASSETS_DIR = os.path.abspath(ASSETS_DIR)
class TestParseState(object):
syndef: SyntaxDefinition
def setup_method(self, method):
synfile = "Packages/JSON/JSON.sublime-syntax"
full_path = Path(os.path.join(ASSETS_DIR, synfile))
data = yaml.load(full_path.read_text(), yaml.FullLoader)
self.syndef = SyntaxDefinition.load(data)
def test_flatten(self):
state = ParseState(self.syndef)
# MatchPatterns in main context:
# - comments[*] | prototype | 3
# - constant[*] | main -> value -> constant | 1
# - number[*] | main -> value -> number | 2
# - string[*] | main -> value -> string | 1
# - array[*] | main -> value -> array | 1
# - object[*] | main -> value -> object | 1
matches = state.current_level.matches
assert len(matches) == 9
# comments[0]
assert matches[0].match._regex == r"/\*\*(?!/)"
# constant[0]
assert matches[3].match._regex == r"\b(?:true|false|null)\b"
# number[1]
assert matches[5].match._regex == r"(-?)(0|[1-9]\d*)"
# string[0]
assert matches[6].match._regex == r'"'
# array[0]
assert matches[7].match._regex == r"\["
# object[0]
assert matches[8].match._regex == r"\{"
def test_best_match(self):
state = ParseState(self.syndef)
pattern, match = state.find_best_match(" [ \n")
assert isinstance(pattern, MatchPattern)
assert pattern.scope == "punctuation.section.sequence.begin.json"
assert match.start() == 1
pattern, match = state.find_best_match(" \n")
assert pattern is None
assert match is None
pattern, match = state.find_best_match(r'"\n"')
assert pattern.match._regex == r'"' # string[0]
assert pattern.scope == "punctuation.definition.string.begin.json"
state.push_context(self.syndef["inside-string"])
pattern, match = state.find_best_match(r"a\tc\n")
assert pattern.scope == "constant.character.escape.json"
assert match.group() == r"\t"
def test_next_token(self):
state = ParseState(self.syndef)
line = " // comment\n"
result = state.parse_next_token(line)
assert isinstance(result, ParseResult)
assert len(line) == result.chars_count
assert len(result.tokens) == 3
assert result.tokens[0].text == " "
assert result.tokens[0].scopes == ["source.json"]
assert result.tokens[1].text == "//"
assert result.tokens[1].scopes == [
"source.json",
"comment.line.double-slash.js",
"punctuation.definition.comment.json",
]
assert result.tokens[2].text == " comment\n"
assert result.tokens[2].scopes == [
"source.json",
"comment.line.double-slash.js",
]
def test_next_token2(self):
state = ParseState(self.syndef)
line, pos = "[12,// comment\n", 0
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == 1 and result.tokens[0].text == "["
assert result.tokens[0].scopes == [
"source.json",
"meta.sequence.json",
"punctuation.section.sequence.begin.json",
]
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == 3 and result.tokens[0].text == "12"
assert result.tokens[0].scopes == [
"source.json",
"meta.sequence.json",
"meta.number.integer.decimal.json",
"constant.numeric.value.json",
]
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == 4 and result.tokens[0].text == ","
assert result.tokens[0].scopes == [
"source.json",
"meta.sequence.json",
"punctuation.separator.sequence.json",
]
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == len(line)
assert result.tokens[0].text == "//"
assert result.tokens[0].scopes == [
"source.json",
"meta.sequence.json",
"comment.line.double-slash.js",
"punctuation.definition.comment.json",
]
assert result.tokens[1].text == " comment\n"
assert result.tokens[1].scopes == [
"source.json",
"meta.sequence.json",
"comment.line.double-slash.js",
]
line, pos = ' "a\\tb", ab\n', 0
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == 3
assert result.tokens[0].text == " "
assert result.tokens[1].text == '"'
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == 6
assert result.tokens[0].text == "a"
assert result.tokens[1].text == r"\t"
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == 8
assert result.tokens[0].text == "b"
assert result.tokens[1].text == '"'
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == 9
assert result.tokens[0].text == ","
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == 11
assert result.tokens[0].text == " "
assert result.tokens[1].text == "a"
assert result.tokens[1].scopes == [
"source.json",
"meta.sequence.json",
"invalid.illegal.expected-sequence-separator.json",
]
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert result.tokens[0].text == "b"
assert result.tokens[0].scopes == [
"source.json",
"meta.sequence.json",
"invalid.illegal.expected-sequence-separator.json",
]
result = state.parse_next_token(line, start=pos)
pos += result.chars_count
assert pos == len(line)
assert result.tokens[0].text == "\n"
assert result.tokens[0].scopes == [
"source.json",
"meta.sequence.json",
]
def test_push_context(self):
state = ParseState(self.syndef)
line = "["
result = state.parse_next_token(line)
assert result.tokens[0].text == "["
assert state.level_stack[-1].current_ctx.meta_scope == "meta.sequence.json"
| 36.0625 | 83 | 0.571635 | 806 | 6,924 | 4.810174 | 0.157568 | 0.099046 | 0.129997 | 0.083312 | 0.620841 | 0.575703 | 0.526696 | 0.481816 | 0.449316 | 0.438225 | 0 | 0.01475 | 0.28524 | 6,924 | 191 | 84 | 36.251309 | 0.76864 | 0.049827 | 0 | 0.464968 | 0 | 0 | 0.15585 | 0.087751 | 0 | 0 | 0 | 0 | 0.369427 | 1 | 0.038217 | false | 0 | 0.031847 | 0 | 0.082803 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0188e29936ed098070af21699bc9ea11a74747a | 970 | py | Python | tracpro/orgs_ext/migrations/0003_show_spoof_data_prefill.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 5 | 2015-07-21T15:58:31.000Z | 2019-09-14T22:34:00.000Z | tracpro/orgs_ext/migrations/0003_show_spoof_data_prefill.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 197 | 2015-03-24T15:26:04.000Z | 2017-11-28T19:24:37.000Z | tracpro/orgs_ext/migrations/0003_show_spoof_data_prefill.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 10 | 2015-03-24T12:26:36.000Z | 2017-02-21T13:08:57.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.conf import settings
from django.db import models, migrations
def add_show_spoof_data(apps, schema_editor):
"""Set default for show spoof data to False"""
for org in apps.get_model('orgs', 'Org').objects.all():
config = json.loads(org.config) if org.config else {}
# Default Caktus to show spoof data = True for testing
if org.name.lower() == 'caktus':
config['show_spoof_data'] = True
org.config = json.dumps(config)
org.save()
elif not config.get('show_spoof_data'):
config['show_spoof_data'] = False
org.config = json.dumps(config)
org.save()
class Migration(migrations.Migration):
dependencies = [
('orgs_ext', '0002_auto_20150724_1609'),
]
operations = [
migrations.RunPython(add_show_spoof_data, migrations.RunPython.noop),
]
| 30.3125 | 77 | 0.639175 | 122 | 970 | 4.893443 | 0.483607 | 0.105528 | 0.152429 | 0.053601 | 0.103853 | 0.103853 | 0.103853 | 0 | 0 | 0 | 0 | 0.023256 | 0.246392 | 970 | 31 | 78 | 31.290323 | 0.793434 | 0.119588 | 0 | 0.181818 | 0 | 0 | 0.105077 | 0.027155 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f01998793ef1afc29f8043db4c0baad686627c04 | 1,708 | py | Python | src/msrp_parser/cpim_message.py | alxgb/msrp-parser | 6917715a5b5431d02b55780d702295581d6d1400 | [
"MIT"
] | null | null | null | src/msrp_parser/cpim_message.py | alxgb/msrp-parser | 6917715a5b5431d02b55780d702295581d6d1400 | [
"MIT"
] | null | null | null | src/msrp_parser/cpim_message.py | alxgb/msrp-parser | 6917715a5b5431d02b55780d702295581d6d1400 | [
"MIT"
] | null | null | null | import re
HEADER_REGEX = r"([\w\-\.]+):\s*(.*)"
class CpimParseError(Exception):
pass
class CpimMessage:
""" Parse a CPIM content-type message (RFC3862) """
def __init__(self):
self.headers = {}
self.content = {"headers": {}, "body": []}
@classmethod
def from_string(cls, message: str):
""" Generate a CpimMessage structure based on a given message string """
lines = [line.strip() for line in message.split("\n")]
if not lines or len(lines) == 1 and not lines[0]:
raise CpimParseError("Empty CPIM message")
cpim_m = cls()
for h_line_num, line in enumerate(lines):
# First come the headers until a linebreak is found
if line == "":
break
m = re.match(HEADER_REGEX, line)
if not m:
raise CpimParseError(
f"Invalid header found while parsing CPIM message (line {h_line_num+1}: {line})"
)
cpim_m.headers[m.group(1)] = m.group(2)
# Next is the encapsulated MIME message-body
content_start_line = h_line_num + 1
for c_line_num, line in enumerate(lines[content_start_line:], start=content_start_line):
if line == "":
break
m = re.match(HEADER_REGEX, line)
if not m:
raise CpimParseError(
f"Expected header while parsing CPIM's message body (line {c_line_num+1}, {line})"
)
cpim_m.content["headers"][m.group(1)] = m.group(2)
if len(lines) > c_line_num:
cpim_m.content["body"] = lines[c_line_num + 1 :]
return cpim_m
| 30.5 | 102 | 0.551522 | 214 | 1,708 | 4.247664 | 0.35514 | 0.053905 | 0.035204 | 0.028603 | 0.294829 | 0.275028 | 0.178218 | 0.132013 | 0.132013 | 0.132013 | 0 | 0.012302 | 0.333724 | 1,708 | 55 | 103 | 31.054545 | 0.786467 | 0.119438 | 0 | 0.277778 | 0 | 0 | 0.145638 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0.027778 | 0.027778 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f019cc1665261ba956d1e8f35b42e95c17bb252a | 162 | py | Python | 11. ForLoops/nested_loops.py | VasuGoel/python-personal-notes | 752c84533677e30e2abdaaf288ed7cf43220bd42 | [
"MIT"
] | 1 | 2019-09-04T12:08:29.000Z | 2019-09-04T12:08:29.000Z | 11. ForLoops/nested_loops.py | VasuGoel/python-personal-notes | 752c84533677e30e2abdaaf288ed7cf43220bd42 | [
"MIT"
] | null | null | null | 11. ForLoops/nested_loops.py | VasuGoel/python-personal-notes | 752c84533677e30e2abdaaf288ed7cf43220bd42 | [
"MIT"
] | 2 | 2019-09-04T12:08:30.000Z | 2020-10-13T16:18:58.000Z | for i in range(3):
for j in range(2):
print(f'P({i}, {j})')
# Prints out the coordinates
# P(0, 0)
# P(0, 1)
# P(1, 0)
# P(1, 1)
# P(2, 0)
# P(2, 1)
| 13.5 | 29 | 0.45679 | 37 | 162 | 2 | 0.432432 | 0.081081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12069 | 0.283951 | 162 | 11 | 30 | 14.727273 | 0.517241 | 0.45679 | 0 | 0 | 0 | 0 | 0.1375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f01d71825f3603f9d7c61729497dab48a89daed9 | 14,107 | py | Python | pysm4/sm4.py | xiaoyinchong/pysm4 | ac751abbeed0ecb3157a703ff206eb82745bfae6 | [
"MIT"
] | 94 | 2017-03-13T15:39:37.000Z | 2022-02-23T06:49:03.000Z | pysm4/sm4.py | chenpei97/pysm4 | ac751abbeed0ecb3157a703ff206eb82745bfae6 | [
"MIT"
] | 4 | 2017-08-11T02:56:38.000Z | 2020-10-11T13:06:11.000Z | pysm4/sm4.py | chenpei97/pysm4 | ac751abbeed0ecb3157a703ff206eb82745bfae6 | [
"MIT"
] | 43 | 2017-09-09T10:42:41.000Z | 2021-10-07T13:34:06.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import version_info
from base64 import b64encode, b64decode
from binascii import hexlify, unhexlify
__all__ = ['encrypt_ecb', 'decrypt_ecb',
'encrypt_cbc', 'decrypt_cbc',
'encrypt', 'decrypt']
if version_info[0] == 2:
# python2
PY2 = True
PY3 = False
else:
# python3
PY2 = False
PY3 = True
if PY2:
_range = xrange
string_types = (basestring,)
text_type = unicode
binary_type = str
else:
_range = range
string_types = (str,)
text_type = str
binary_type = bytes
E_FMT = 'UTF8'
# S-box
S_BOX = {
0X00: 0XD6, 0X01: 0X90, 0X02: 0XE9, 0X03: 0XFE, 0X04: 0XCC, 0X05: 0XE1, 0X06: 0X3D, 0X07: 0XB7,
0X08: 0X16, 0X09: 0XB6, 0X0A: 0X14, 0X0B: 0XC2, 0X0C: 0X28, 0X0D: 0XFB, 0X0E: 0X2C, 0X0F: 0X05,
0X10: 0X2B, 0X11: 0X67, 0X12: 0X9A, 0X13: 0X76, 0X14: 0X2A, 0X15: 0XBE, 0X16: 0X04, 0X17: 0XC3,
0X18: 0XAA, 0X19: 0X44, 0X1A: 0X13, 0X1B: 0X26, 0X1C: 0X49, 0X1D: 0X86, 0X1E: 0X06, 0X1F: 0X99,
0X20: 0X9C, 0X21: 0X42, 0X22: 0X50, 0X23: 0XF4, 0X24: 0X91, 0X25: 0XEF, 0X26: 0X98, 0X27: 0X7A,
0X28: 0X33, 0X29: 0X54, 0X2A: 0X0B, 0X2B: 0X43, 0X2C: 0XED, 0X2D: 0XCF, 0X2E: 0XAC, 0X2F: 0X62,
0X30: 0XE4, 0X31: 0XB3, 0X32: 0X1C, 0X33: 0XA9, 0X34: 0XC9, 0X35: 0X08, 0X36: 0XE8, 0X37: 0X95,
0X38: 0X80, 0X39: 0XDF, 0X3A: 0X94, 0X3B: 0XFA, 0X3C: 0X75, 0X3D: 0X8F, 0X3E: 0X3F, 0X3F: 0XA6,
0X40: 0X47, 0X41: 0X07, 0X42: 0XA7, 0X43: 0XFC, 0X44: 0XF3, 0X45: 0X73, 0X46: 0X17, 0X47: 0XBA,
0X48: 0X83, 0X49: 0X59, 0X4A: 0X3C, 0X4B: 0X19, 0X4C: 0XE6, 0X4D: 0X85, 0X4E: 0X4F, 0X4F: 0XA8,
0X50: 0X68, 0X51: 0X6B, 0X52: 0X81, 0X53: 0XB2, 0X54: 0X71, 0X55: 0X64, 0X56: 0XDA, 0X57: 0X8B,
0X58: 0XF8, 0X59: 0XEB, 0X5A: 0X0F, 0X5B: 0X4B, 0X5C: 0X70, 0X5D: 0X56, 0X5E: 0X9D, 0X5F: 0X35,
0X60: 0X1E, 0X61: 0X24, 0X62: 0X0E, 0X63: 0X5E, 0X64: 0X63, 0X65: 0X58, 0X66: 0XD1, 0X67: 0XA2,
0X68: 0X25, 0X69: 0X22, 0X6A: 0X7C, 0X6B: 0X3B, 0X6C: 0X01, 0X6D: 0X21, 0X6E: 0X78, 0X6F: 0X87,
0X70: 0XD4, 0X71: 0X00, 0X72: 0X46, 0X73: 0X57, 0X74: 0X9F, 0X75: 0XD3, 0X76: 0X27, 0X77: 0X52,
0X78: 0X4C, 0X79: 0X36, 0X7A: 0X02, 0X7B: 0XE7, 0X7C: 0XA0, 0X7D: 0XC4, 0X7E: 0XC8, 0X7F: 0X9E,
0X80: 0XEA, 0X81: 0XBF, 0X82: 0X8A, 0X83: 0XD2, 0X84: 0X40, 0X85: 0XC7, 0X86: 0X38, 0X87: 0XB5,
0X88: 0XA3, 0X89: 0XF7, 0X8A: 0XF2, 0X8B: 0XCE, 0X8C: 0XF9, 0X8D: 0X61, 0X8E: 0X15, 0X8F: 0XA1,
0X90: 0XE0, 0X91: 0XAE, 0X92: 0X5D, 0X93: 0XA4, 0X94: 0X9B, 0X95: 0X34, 0X96: 0X1A, 0X97: 0X55,
0X98: 0XAD, 0X99: 0X93, 0X9A: 0X32, 0X9B: 0X30, 0X9C: 0XF5, 0X9D: 0X8C, 0X9E: 0XB1, 0X9F: 0XE3,
0XA0: 0X1D, 0XA1: 0XF6, 0XA2: 0XE2, 0XA3: 0X2E, 0XA4: 0X82, 0XA5: 0X66, 0XA6: 0XCA, 0XA7: 0X60,
0XA8: 0XC0, 0XA9: 0X29, 0XAA: 0X23, 0XAB: 0XAB, 0XAC: 0X0D, 0XAD: 0X53, 0XAE: 0X4E, 0XAF: 0X6F,
0XB0: 0XD5, 0XB1: 0XDB, 0XB2: 0X37, 0XB3: 0X45, 0XB4: 0XDE, 0XB5: 0XFD, 0XB6: 0X8E, 0XB7: 0X2F,
0XB8: 0X03, 0XB9: 0XFF, 0XBA: 0X6A, 0XBB: 0X72, 0XBC: 0X6D, 0XBD: 0X6C, 0XBE: 0X5B, 0XBF: 0X51,
0XC0: 0X8D, 0XC1: 0X1B, 0XC2: 0XAF, 0XC3: 0X92, 0XC4: 0XBB, 0XC5: 0XDD, 0XC6: 0XBC, 0XC7: 0X7F,
0XC8: 0X11, 0XC9: 0XD9, 0XCA: 0X5C, 0XCB: 0X41, 0XCC: 0X1F, 0XCD: 0X10, 0XCE: 0X5A, 0XCF: 0XD8,
0XD0: 0X0A, 0XD1: 0XC1, 0XD2: 0X31, 0XD3: 0X88, 0XD4: 0XA5, 0XD5: 0XCD, 0XD6: 0X7B, 0XD7: 0XBD,
0XD8: 0X2D, 0XD9: 0X74, 0XDA: 0XD0, 0XDB: 0X12, 0XDC: 0XB8, 0XDD: 0XE5, 0XDE: 0XB4, 0XDF: 0XB0,
0XE0: 0X89, 0XE1: 0X69, 0XE2: 0X97, 0XE3: 0X4A, 0XE4: 0X0C, 0XE5: 0X96, 0XE6: 0X77, 0XE7: 0X7E,
0XE8: 0X65, 0XE9: 0XB9, 0XEA: 0XF1, 0XEB: 0X09, 0XEC: 0XC5, 0XED: 0X6E, 0XEE: 0XC6, 0XEF: 0X84,
0XF0: 0X18, 0XF1: 0XF0, 0XF2: 0X7D, 0XF3: 0XEC, 0XF4: 0X3A, 0XF5: 0XDC, 0XF6: 0X4D, 0XF7: 0X20,
0XF8: 0X79, 0XF9: 0XEE, 0XFA: 0X5F, 0XFB: 0X3E, 0XFC: 0XD7, 0XFD: 0XCB, 0XFE: 0X39, 0XFF: 0X48
}
# System parameter FK
FK = (0XA3B1BAC6, 0X56AA3350, 0X677D9197, 0XB27022DC)
# Fixed parameter CK
CK = (0X00070E15, 0X1C232A31, 0X383F464D, 0X545B6269,
0X70777E85, 0X8C939AA1, 0XA8AFB6BD, 0XC4CBD2D9,
0XE0E7EEF5, 0XFC030A11, 0X181F262D, 0X343B4249,
0X50575E65, 0X6C737A81, 0X888F969D, 0XA4ABB2B9,
0XC0C7CED5, 0XDCE3EAF1, 0XF8FF060D, 0X141B2229,
0X30373E45, 0X4C535A61, 0X686F767D, 0X848B9299,
0XA0A7AEB5, 0XBCC3CAD1, 0XD8DFE6ED, 0XF4FB0209,
0X10171E25, 0X2C333A41, 0X484F565D, 0X646B7279)
# Round-key cache
_rk_cache = {}
# Encryption mode
SM4_ENCRYPT = 1
# Decryption mode
SM4_DECRYPT = 0
# Block size in bytes
BLOCK_BYTE = 16
BLOCK_HEX = BLOCK_BYTE * 2
def num2hex(num, width=1):
"""
    Convert an integer to a fixed-width hexadecimal string, left-padded with zeros.
    >>> num2hex(1000, width=4)
    '03e8'
    :param num: integer to convert
    :param width: length of the hex string, defaults to 1
    :return str
"""
return '{:0>{width}}'.format(hex(num)[2:].replace('L', ''),
width=width)
def _byte_unpack(num, byte_n=4):
    # length of the unpacked tuple
    _len = 4
    # step size: hex characters per element
step = (byte_n // _len) * 2
hex_str = num2hex(num=num, width=byte_n * 2)
split_v = list(_range(len(hex_str)))[::step] + [len(hex_str)]
return tuple([int(hex_str[s:e], base=16) for s, e in
zip(split_v[:-1], split_v[1:])])
def _byte_pack(byte_array, byte_n=4):
_len = 4
    # hex-string width of each item in byte_array
width = (byte_n // _len) * 2
if len(byte_array) != _len:
raise ValueError('byte_array length must be 4.')
return int(''.join([num2hex(num=v, width=width)
for v in byte_array]), 16)
def _s_box(byte):
return S_BOX.get(byte)
def _non_linear_map(byte_array):
"""
    Non-linear transform; input A = (a0, a1, a2, a3)
(b0, b1, b2, b3) = (Sbox(a0), Sbox(a1), Sbox(a2), Sbox(a3))
"""
return (_s_box(byte_array[0]), _s_box(byte_array[1]),
_s_box(byte_array[2]), _s_box(byte_array[3]))
def _linear_map(byte4):
"""
    Linear transform L
L(B) = B ⊕ (B <<< 2) ⊕ (B <<< 10) ⊕ (B <<< 18) ⊕ (B <<< 24)
"""
_left = loop_left_shift
return byte4 ^ _left(byte4, 2) ^ _left(byte4, 10) ^ _left(byte4, 18) ^ _left(byte4, 24)
def _linear_map_s(byte4):
"""
    Linear transform L'
L'(B) = B ⊕ (B <<< 13) ⊕ (B <<< 23)
"""
_left = loop_left_shift
return byte4 ^ _left(byte4, 13) ^ _left(byte4, 23)
def loop_left_shift(num, offset, base=32):
"""
    Circular left shift.
    >>> loop_left_shift(0b11010000, 3, base=8) == 0b10000110
    True
"""
bin_str = '{:0>{width}}'.format(bin(num)[2:], width=base)
rem = offset % base
return int(bin_str[rem:] + bin_str[:rem], 2)
def _rep_t(byte4):
"""合成置换T, 由非线性变换和线性变换L复合而成"""
# 非线性变换
b_array = _non_linear_map(_byte_unpack(byte4))
# 线性变换L
return _linear_map(_byte_pack(b_array))
def _rep_t_s(byte4):
"""
    Composite permutation T': the non-linear transform followed by the linear transform L'
    """
    # non-linear transform
    b_array = _non_linear_map(_byte_unpack(byte4))
    # linear transform L'
return _linear_map_s(_byte_pack(b_array))
def _round_keys(mk):
"""
    The round keys are derived from the encryption key via the key-expansion algorithm.
    Encryption key MK = (MK0, MK1, MK2, MK3)
    Key expansion:
    (K0, K1, K2, K3) = (MK0 ⊕ FK0, MK1 ⊕ FK1, MK2 ⊕ FK2, MK3 ⊕ FK3)
    rki = Ki+4 = Ki ⊕ T'(Ki+1 ⊕ Ki+2 ⊕ Ki+3 ⊕ CKi), i = 0, 1, ..., 31
    :param mk: encryption key, 16 bytes (128 bits)
:return list
"""
    # try to fetch the round keys from the cache;
    # if they are not cached, derive them with the key-expansion algorithm
_rk_keys = _rk_cache.get(mk)
if _rk_keys is None:
mk0, mk1, mk2, mk3 = _byte_unpack(mk, byte_n=16)
keys = [mk0 ^ FK[0], mk1 ^ FK[1], mk2 ^ FK[2], mk3 ^ FK[3]]
for i in _range(32):
rk = keys[i] ^ _rep_t_s(keys[i + 1] ^ keys[i + 2] ^ keys[i + 3] ^ CK[i])
keys.append(rk)
_rk_keys = keys[4:]
        # store the derived round keys in the cache
_rk_cache[mk] = _rk_keys
return _rk_keys
def _round_f(byte4_array, rk):
"""
    Round function: F(X0, X1, X2, X3, rk) = X0 ⊕ T(X1 ⊕ X2 ⊕ X3 ⊕ rk)
    :param byte4_array: (X0, X1, X2, X3), each item 4 bytes (32 bits)
    :param rk: round key, 4 bytes (32 bits)
"""
x0, x1, x2, x3 = byte4_array
return x0 ^ _rep_t(x1 ^ x2 ^ x3 ^ rk)
def _crypt(num, mk, mode=SM4_ENCRYPT):
"""
    SM4 encryption / decryption core
    :param num: plaintext or ciphertext block, 16 bytes
    :param mk: key, 16 bytes
    :param mode: round-key order (encryption or decryption)
"""
x_keys = list(_byte_unpack(num, byte_n=16))
round_keys = _round_keys(mk)
if mode == SM4_DECRYPT:
round_keys = round_keys[::-1]
for i in _range(32):
x_keys.append(_round_f(x_keys[i:i+4], round_keys[i]))
return _byte_pack(x_keys[-4:][::-1], byte_n=16)
def encrypt(clear_num, mk):
"""
    The SM4 encryption algorithm consists of 32 rounds of iteration and one reverse transform R.
    The plaintext input is (X0, X1, X2, X3) and the ciphertext output is (Y0, Y1, Y2, Y3), each item 4 bytes.
    With round keys rki (4 bytes each), i = 0, 1, ..., 31, the computation is:
    1). 32 rounds of iteration: Xi+4 = F(Xi, Xi+1, Xi+2, Xi+3, rki), i = 0, 1, ..., 31
    2). reverse transform: (Y0, Y1, Y2, Y3) = (X35, X34, X33, X32)
    :param clear_num: plaintext block, 16 bytes
    :param mk: key, 16 bytes
"""
return _crypt(num=clear_num, mk=mk)
def decrypt(cipher_num, mk):
"""
    SM4 decryption: the structure is identical to encryption, only the round-key order differs.
    During decryption the round keys are used in the order (rk31, rk30, ..., rk0).
    :param cipher_num: ciphertext block, 16 bytes
    :param mk: key, 16 bytes
"""
return _crypt(num=cipher_num, mk=mk, mode=SM4_DECRYPT)
def _padding(text, mode=SM4_ENCRYPT):
"""
    Add padding before encryption / strip padding after decryption
"""
# python2 is (basestring, )
# python3 is (str, bytes)
_str_or_bytes = string_types if PY2 else (string_types + (binary_type,))
if text is None or not isinstance(text, _str_or_bytes):
return
# unicode
if isinstance(text, text_type):
text = text.encode(encoding=E_FMT)
if mode == SM4_ENCRYPT:
        # add padding
p_num = BLOCK_BYTE - (len(text) % BLOCK_BYTE)
space = '' if PY2 else b''
pad_s = (chr(p_num) * p_num) if PY2 else (chr(p_num).encode(E_FMT) * p_num)
res = space.join([text, pad_s])
else:
        # strip padding
p_num = ord(text[-1]) if PY2 else text[-1]
res = text[:-p_num]
return res
def _key_iv_check(key_iv):
"""
    Validate the key or the initialization vector
    """
    # key / iv must be a string type
if key_iv is None or not isinstance(key_iv, string_types):
raise TypeError('Parameter key or iv:{} not a basestring'.format(key_iv))
if isinstance(key_iv, text_type):
key_iv = key_iv.encode(encoding=E_FMT)
if len(key_iv) > BLOCK_BYTE:
raise ValueError('Parameter key or iv:{} byte greater than {}'.format(key_iv.decode(E_FMT),
BLOCK_BYTE))
return key_iv
def _hex(str_or_bytes):
# PY2: _hex('北京') --> 'e58c97e4baac'
# PY3: _hex('北京') --> b'e58c97e4baac'
if PY2:
hex_str = hexlify(str_or_bytes)
else:
# python3
if isinstance(str_or_bytes, text_type):
byte = str_or_bytes.encode(encoding=E_FMT)
elif isinstance(str_or_bytes, binary_type):
byte = str_or_bytes
else:
byte = b''
hex_str = hexlify(byte)
return hex_str
def _unhex(hex_str):
# PY2: _unhex('e58c97e4baac') --> '\xe5\x8c\x97\xe4\xba\xac'
# PY3: _unhex('e58c97e4baac') --> b'\xe5\x8c\x97\xe4\xba\xac'
return unhexlify(hex_str)
# Electronic Codebook (ECB) mode
def encrypt_ecb(plain_text, key):
"""
    SM4 encryption in ECB mode
    :param plain_text: plaintext
    :param key: key, at most 16 bytes
"""
plain_text = _padding(plain_text, mode=SM4_ENCRYPT)
if plain_text is None:
return
    # validate the key
key = _key_iv_check(key_iv=key)
plain_hex = _hex(plain_text)
cipher_hex_list = []
for i in _range(len(plain_text) // BLOCK_BYTE):
sub_hex = plain_hex[i * BLOCK_HEX:(i + 1) * BLOCK_HEX]
cipher = encrypt(clear_num=int(sub_hex, 16),
mk=int(_hex(key), 16))
cipher_hex_list.append(num2hex(num=cipher, width=BLOCK_HEX))
cipher_text = b64encode(_unhex(''.join(cipher_hex_list)))
return cipher_text if PY2 else cipher_text.decode(E_FMT)
def decrypt_ecb(cipher_text, key):
"""
    SM4 decryption in ECB mode
    :param cipher_text: ciphertext
    :param key: key, at most 16 bytes
"""
cipher_text = b64decode(cipher_text)
cipher_hex = _hex(cipher_text)
    # validate the key
key = _key_iv_check(key_iv=key)
plain_hex_list = []
for i in _range(len(cipher_text) // BLOCK_BYTE):
sub_hex = cipher_hex[i * BLOCK_HEX:(i + 1) * BLOCK_HEX]
plain = decrypt(cipher_num=int(sub_hex, 16),
mk=int(_hex(key), 16))
plain_hex_list.append(num2hex(num=plain, width=BLOCK_HEX))
plain_text = _padding(_unhex(''.join(plain_hex_list)),
mode=SM4_DECRYPT)
return plain_text if PY2 else plain_text.decode(E_FMT)
# Cipher Block Chaining (CBC) mode
def encrypt_cbc(plain_text, key, iv):
"""
    SM4 encryption in CBC mode
    :param plain_text: plaintext
    :param key: key, at most 16 bytes
    :param iv: initialization vector, at most 16 bytes
"""
plain_text = _padding(plain_text, mode=SM4_ENCRYPT)
if plain_text is None:
return
    # validate the key
    key = _key_iv_check(key_iv=key)
    # validate the initialization vector
iv = _key_iv_check(key_iv=iv)
plain_hex = _hex(plain_text)
ivs = [int(_hex(iv), 16)]
for i in _range(len(plain_text) // BLOCK_BYTE):
sub_hex = plain_hex[i * BLOCK_HEX:(i + 1) * BLOCK_HEX]
cipher = encrypt(clear_num=(int(sub_hex, 16) ^ ivs[i]),
mk=int(_hex(key), 16))
ivs.append(cipher)
cipher_text = b64encode(_unhex(''.join([num2hex(num=c, width=BLOCK_HEX)
for c in ivs[1:]])))
return cipher_text if PY2 else cipher_text.decode(E_FMT)
def decrypt_cbc(cipher_text, key, iv):
"""
    SM4 decryption in CBC mode
    :param cipher_text: ciphertext
    :param key: key, at most 16 bytes
    :param iv: initialization vector, at most 16 bytes
"""
cipher_text = b64decode(cipher_text)
cipher_hex = _hex(cipher_text)
    # validate the key
    key = _key_iv_check(key_iv=key)
    # validate the initialization vector
iv = _key_iv_check(key_iv=iv)
ivs = [int(_hex(iv), 16)]
plain_hex_list = []
for i in _range(len(cipher_text) // BLOCK_BYTE):
sub_hex = cipher_hex[i * BLOCK_HEX:(i + 1) * BLOCK_HEX]
cipher = int(sub_hex, 16)
plain = (ivs[i] ^ decrypt(cipher_num=cipher,
mk=int(_hex(key), 16)))
ivs.append(cipher)
plain_hex_list.append(num2hex(num=plain, width=BLOCK_HEX))
plain_text = _padding(_unhex(''.join(plain_hex_list)),
mode=SM4_DECRYPT)
return plain_text if PY2 else plain_text.decode(E_FMT)
if __name__ == '__main__':
pass
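    # A minimal round-trip sketch (illustrative only; the key, iv and message
    # below are assumptions, not values shipped with the module).
    demo_key = '0123456789abcdef'    # 16-byte key
    demo_iv = 'fedcba9876543210'     # 16-byte initialization vector
    demo_msg = 'hello, SM4!'
    ecb_cipher = encrypt_ecb(demo_msg, demo_key)
    print(decrypt_ecb(ecb_cipher, demo_key))           # expected: hello, SM4!
    cbc_cipher = encrypt_cbc(demo_msg, demo_key, demo_iv)
    print(decrypt_cbc(cbc_cipher, demo_key, demo_iv))  # expected: hello, SM4!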
| 31.772523 | 99 | 0.603388 | 2,084 | 14,107 | 3.893954 | 0.272073 | 0.015404 | 0.009858 | 0.011214 | 0.268145 | 0.212816 | 0.20838 | 0.19655 | 0.168946 | 0.148614 | 0 | 0.168138 | 0.253349 | 14,107 | 443 | 100 | 31.844244 | 0.600589 | 0.1495 | 0 | 0.259574 | 0 | 0 | 0.017767 | 0 | 0 | 0 | 0.208702 | 0 | 0 | 1 | 0.097872 | false | 0.004255 | 0.012766 | 0.008511 | 0.221277 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f020aaedf9d869e388ca9586c0da95a2ba99621b | 13,383 | py | Python | scripts/NR_plotting2.py | arm61/thesis | 4c76e837b1041472a5522427de0069a5a28d40c9 | [
"CC-BY-4.0"
] | 3 | 2019-06-04T20:53:19.000Z | 2020-06-01T06:25:20.000Z | scripts/NR_plotting2.py | arm61/thesis | 4c76e837b1041472a5522427de0069a5a28d40c9 | [
"CC-BY-4.0"
] | 1 | 2019-06-04T17:11:33.000Z | 2019-06-04T17:11:33.000Z | scripts/NR_plotting2.py | arm61/thesis | 4c76e837b1041472a5522427de0069a5a28d40c9 | [
"CC-BY-4.0"
] | null | null | null | from __future__ import division
import refnx
from refnx.reflect import structure, ReflectModel, SLD
from refnx.dataset import ReflectDataset
from refnx.analysis import Transform, CurveFitter, Objective, GlobalObjective, Parameter
import numpy as np
import scipy
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
from scipy.stats import pearsonr
sns.set(palette="colorblind")
import corner
import sys
sys.path.insert(0, "../reports/code_blocks")
import mol_vol as mv
import ref_help as rh
import matplotlib as mpl
mpl.rcParams["xtick.labelsize"] = 10
mpl.rcParams["ytick.labelsize"] = 10
mpl.rcParams["axes.facecolor"] = "w"
mpl.rcParams["lines.linewidth"] = 2
mpl.rcParams["xtick.top"] = False
mpl.rcParams["xtick.bottom"] = True
mpl.rcParams["ytick.left"] = True
mpl.rcParams["grid.linestyle"] = "--"
mpl.rcParams["legend.fontsize"] = 10
mpl.rcParams["legend.facecolor"] = [1, 1, 1]
mpl.rcParams["legend.framealpha"] = 0.75
mpl.rcParams["axes.labelsize"] = 10
mpl.rcParams["axes.linewidth"] = 1
mpl.rcParams["axes.edgecolor"] = "k"
mpl.rcParams["axes.titlesize"] = 10
# The lipid to be investigated
lipid = sys.argv[1]
# Number of carbon atoms in tail group
t_length = int(sys.argv[2])
# The surface pressures to probe
sp = sys.argv[3]
cont = ['d13acmw', 'd13d2o', 'hd2o', 'd70acmw',
        'd70d2o', 'd83acmw', 'd83d2o']
apm = sys.argv[4]
# The type of radiation that is used
neutron = 1
# A label for the output files
label = "{}_{}".format(lipid, sp)
# Relative directory locations
data_dir = "../data/reflectometry2/{}".format(label)
fig_dir = "../reports/figures/reflectometry2/"
anal_dir = "../output/reflectometry2/{}".format(label)
# For the analysis to be reproduced exactly, the same versions of `refnx`, `scipy`, and `numpy` must be present.
# The versions used in the original analysis are:
#
# refnx.version.full_version == 0.1.2
# scipy.version.version == 1.1.0
# np.__version__ == 1.15.4
print(refnx.version.full_version, scipy.version.version, np.__version__)
datasets = []
for i in range(len(cont)):
datasets.append(
ReflectDataset(
"{}/{}{}.dat".format(
data_dir, cont[i], sp
)
)
)
b_head = []
b_tail = []
for i in range(len(cont)):
if cont[i][:3] == 'd13':
head = {"C": 10, "D": 13, "H": 5, "O": 8, "N": 1, "P": 1}
tail = {"C": t_length * 2, "H": t_length * 4 + 2}
elif cont[i][:3] == 'd70':
head = {"C": 10, "D": 5, "H": 13, "O": 8, "N": 1, "P": 1}
tail = {"C": t_length * 2, "D": t_length * 4 + 2}
elif cont[i][:3] == 'd83':
head = {"C": 10, "D": 18, "O": 8, "N": 1, "P": 1}
tail = {"C": t_length * 2, "D": t_length * 4 + 2}
elif cont[i][:1] == 'h':
head = {"C": 10, "H": 18, "O": 8, "N": 1, "P": 1}
tail = {"C": t_length * 2, "H": t_length * 4 + 2}
b_head.append(rh.get_scattering_length(head, 1))
b_tail.append(rh.get_scattering_length(tail, 1))
d_h = 8.5
V_h = 330.0
V_t = 1100.0
min_d_t = 9
lipids = []
for i in range(len(cont)):
lipids.append(
mv.VolMono(
[V_h, V_t], [b_head[i], b_tail[i]], d_h, t_length, name=label
)
)
air = SLD(0, "air")
structures = []
for i in range(len(cont)):
if cont[i][-3:] == 'd2o':
water = SLD(6.35, "d2o")
elif cont[i][-4:] == 'acmw':
water = SLD(0.0, "acmw")
structures.append(air(0, 0) | lipids[i] | water(0, 3.3))
for i in range(len(cont)):
lipids[i].vol[0].setp(
vary=True, bounds=(V_h * 0.8, V_h * 1.2)
)
lipids[i].vol[1].setp(
vary=True, bounds=(V_t * 0.8, V_t * 1.2)
)
lipids[i].d[0].setp(vary=True, bounds=(5, 12))
max_d_t = 1.54 + 1.265 * t_length
lipids[i].d[1].setp(vary=True, bounds=(min_d_t, max_d_t))
lipids[i].vol[1].constraint = lipids[i].d[1] * float(apm)
structures[i][-1].rough.setp(vary=True, bounds=(3.3, 6))
lipids = rh.set_constraints(
lipids,
structures,
hold_tails=True,
hold_rough=True,
hold_phih=True,
)
models = []
t = len(cont)
for i in range(t):
models.append(ReflectModel(structures[i]))
models[i].scale.setp(vary=True, bounds=(0.005, 10))
models[i].bkg.setp(datasets[i].y[-1], vary=False)
objectives = []
t = len(cont)
for i in range(t):
objectives.append(
Objective(models[i], datasets[i], transform=Transform("YX4"))
)
global_objective = GlobalObjective(objectives)
chain = refnx.analysis.load_chain("{}/{}_chain.txt".format(anal_dir, label))
pchain = refnx.analysis.process_chain(global_objective, chain)
para_labels = ['{}_scale_{}_{}'.format(label, sp, cont[0]),
'{}-V_h_{}'.format(label, sp),
'{}-d_h_{}'.format(label, sp),
'{}-d_t_{}'.format(label, sp),
'{}_rough_{}'.format(label, sp),
'{}_scale_{}_{}'.format(label, sp, cont[1]),
'{}_scale_{}_{}'.format(label, sp, cont[2]),
'{}_scale_{}_{}'.format(label, sp, cont[3]),
'{}_scale_{}_{}'.format(label, sp, cont[4]),
'{}_scale_{}_{}'.format(label, sp, cont[5]),
'{}_scale_{}_{}'.format(label, sp, cont[6])]
units = ['', '\\angstrom3', '\\angstrom', '\\angstrom', '\\angstrom',
'', '', '', '', '', '']
from scipy.stats.mstats import mquantiles
alpha = 0.05
for i in range(len(pchain)):
file_open = open('{}/{}.tex'.format(anal_dir, para_labels[i]), 'w')
stat, p = scipy.stats.shapiro(pchain[i].chain[::5000])
if p > alpha:
quats = mquantiles(pchain[i].chain.flatten(), prob=[0.025, 0.5, 0.975])
a = '{:.2f}'.format(quats[1])
b = '{:.2f}'.format(quats[1]-quats[0])
string = '$' + str(a) + '\pm{' + str(b) + '}$'
file_open.write(string)
file_open.close()
else:
quats = mquantiles(pchain[i].chain.flatten(), prob=[0.025, 0.5, 0.975])
a = '{:.2f}'.format(quats[1])
b = '{:.2f}'.format(quats[2]-quats[1])
c = '{:.2f}'.format(quats[1]-quats[0])
string = '$' + str(a) + '^{+' + str(b) + '}_{-' + str(c) + '}$'
file_open.write(string)
file_open.close()
def get_value(file):
f = open(file, "r")
for line in f:
k = line
if "^" in k:
l = k.split("$")[1].split("^")[0]
else:
l = k.split("$")[1].split("\\pm")[0]
return float(l)
import corner
from scipy.misc import factorial
per_sp = np.zeros((pchain[0].chain.size, 6, 1))
names=['$V_t$/Å$^3$', '$V_h$/Å$^3$', '$d_t$/Å', '$d_h$/Å', r'$\phi_h/\times10^{-2}$', r'$\sigma_{t,h,s}$/Å']
abc = {'dspc_20': '(a)', 'dspc_30': '(b)', 'dspc_40': '(c)', 'dspc_50': '(d)'}
solh_store = np.zeros((1, pchain[0].chain.size))
vt_store = np.zeros((1, pchain[0].chain.size))
p_lab=['V_t', 'V_h', 'd_t', 'd_h', 'phi_h', 'sigma']
p_all = np.array([])
i=0
per_sp[:, 1, i] = list(pchain[1].chain.flatten())
per_sp[:, 2, i] = list(pchain[3].chain.flatten())
per_sp[:, 3, i] = list(pchain[2].chain.flatten())
per_sp[:, 0, i] = per_sp[:, 2, i] * float(apm)
vt_store[0, :] = list(per_sp[:, 0, i])
solh = 1 - (per_sp[:, 1, i] * per_sp[:, 2, i] / (per_sp[:, 0, i] * per_sp[:, 3, i]))
per_sp[:, 4, i] = list(solh)
solh_store[0, :] = list(solh)
per_sp[:, 5, i] = list(pchain[4].chain.flatten())
figure = corner.corner(per_sp[:, :, i],
max_n_ticks=3, show_titles=True,
color=sns.color_palette()[i], smooth1d=True)
figure.set_size_inches(4.13, 3.51)
axes = np.array(figure.axes).reshape((per_sp.shape[1],
per_sp.shape[1]))
for j, n in enumerate(names):
axes[j, j].set_title(n)
axes[0, 0].text(
0.02, 0.75, abc[label], transform=axes[0, 0].transAxes
)
p_mag = np.array([])
for j in range(0, axes.shape[0]-1):
for k in range(j+1, axes.shape[1]):
pear = pearsonr(per_sp[:, j, i], per_sp[:, k, i])[0]
file_open = open('{}/{}_p_{}_{}_sp{}.txt'.format(anal_dir, lipid, p_lab[j], p_lab[k], sp), 'w')
file_open.write('{:.2f}'.format(pear))
file_open.close()
p_mag = np.append(p_mag, pear)
p_all = np.append(p_all, pear)
if pear < 0:
axes[k, j].text(0.95, 0.95, '{:.2f}'.format(pear), ha='right',
transform=axes[k, j].transAxes, size=8,
va='top', zorder=10)
else:
axes[k, j].text(0.05, 0.95, '{:.2f}'.format(pear), ha='left',
transform=axes[k, j].transAxes, size=8,
va='top', zorder=10)
file_open = open('{}/{}_p_sum_sp{}.txt'.format(anal_dir, lipid, sp), 'w')
file_open.write('{:.2f}'.format(np.sum(np.abs(p_mag))))
file_open.close()
plt.savefig('{}/{}_pdf.pdf'.format(fig_dir, label),
bbox_inches='tight', pad_inches=0.1)
plt.close()
file_open = open('{}/{}_p_all_sp{}.txt'.format(anal_dir, lipid, sp), 'w')
file_open.write('{:.2f}'.format(np.sum(np.abs(p_all))))
file_open.close()
para_labels = ['{}-wph_{}'.format(label, sp)]
wph = pchain[1].chain.flatten() * solh / (29.9 - 29.9 * solh)
alpha = 0.05
file_open = open('{}/{}.tex'.format(anal_dir, para_labels[i]), 'w')
stat, p = scipy.stats.shapiro(wph[::5000])
if p > alpha:
quats = mquantiles(wph, prob=[0.025, 0.5, 0.975])
a = '{:.2f}'.format(quats[1])
b = '{:.2f}'.format(quats[1]-quats[0])
string = '$' + str(a) + '\pm{' + str(b) + '}$'
file_open.write(string)
file_open.close()
else:
quats = mquantiles(wph, prob=[0.025, 0.5, 0.975])
a = '{:.2f}'.format(quats[1])
b = '{:.2f}'.format(quats[2]-quats[1])
c = '{:.2f}'.format(quats[1]-quats[0])
string = '$' + str(a) + '^{+' + str(b) + '}_{-' + str(c) + '}$'
file_open.write(string)
file_open.close()
para_labels = ['{}-V_t_{}'.format(label, sp)]
for i in range(len(para_labels)):
alpha = 0.05
file_open = open('{}/{}.tex'.format(anal_dir, para_labels[i]), 'w')
stat, p = scipy.stats.shapiro(vt_store[i][::5000])
if p > alpha:
quats = mquantiles(vt_store[i], prob=[0.025, 0.5, 0.975])
a = '{:.2f}'.format(quats[1])
b = '{:.2f}'.format(quats[1]-quats[0])
string = '$' + str(a) + '\pm{' + str(b) + '}$'
file_open.write(string)
file_open.close()
else:
quats = mquantiles(vt_store[i], prob=[0.025, 0.5, 0.975])
a = '{:.2f}'.format(quats[1])
b = '{:.2f}'.format(quats[2]-quats[1])
c = '{:.2f}'.format(quats[1]-quats[0])
string = '$' + str(a) + '^{+' + str(b) + '}_{-' + str(c) + '}$'
file_open.write(string)
file_open.close()
para_labels = ['{}-phih_{}'.format(label, sp)]
for i in range(len(para_labels)):
alpha = 0.05
file_open = open('{}/{}.tex'.format(anal_dir, para_labels[i]), 'w')
stat, p = scipy.stats.shapiro(solh_store[i][::5000])
if p > alpha:
quats = mquantiles(solh_store[i] * 100, prob=[0.025, 0.5, 0.975])
a = '{:.2f}'.format(quats[1])
b = '{:.2f}'.format(quats[1]-quats[0])
string = '$' + str(a) + '\pm{' + str(b) + '}$'
file_open.write(string)
file_open.close()
else:
quats = mquantiles(solh_store[i] * 100, prob=[0.025, 0.5, 0.975])
a = '{:.2f}'.format(quats[1])
b = '{:.2f}'.format(quats[2]-quats[1])
c = '{:.2f}'.format(quats[1]-quats[0])
string = '$' + str(a) + '^{+' + str(b) + '}_{-' + str(c) + '}$'
file_open.write(string)
file_open.close()
fig = plt.figure(figsize=(4.13, 3.51*1.3))
gs = mpl.gridspec.GridSpec(1, 3)
ax1 = plt.subplot(gs[0, 0:2])
ax2 = plt.subplot(gs[0, 2])
abc = {'dspc_20': '(a)', 'dspc_30': '(b)', 'dspc_40': '(c)', 'dspc_50': '(d)'}
chi = np.zeros((7))
for i in range(len(cont)):
choose = global_objective.pgen(ngen=100)
ax1.errorbar(datasets[i].x,
datasets[i].y*(datasets[i].x)**4 * 10**(i-1),
yerr=datasets[i].y_err*(
datasets[i].x)**4 * 10**(i-1),
linestyle='', marker='o',
color=sns.color_palette()[i])
for pvec in choose:
global_objective.setp(pvec)
ax1.plot(datasets[i].x,
models[i](datasets[i].x,
x_err=datasets[i].x_err)*(
datasets[i].x)**4 * 10**(i-1),
color=sns.color_palette()[i], alpha=0.1)
zs, sld = structures[i].sld_profile()
ax2.plot(zs, sld + i*10, color=sns.color_palette()[i],
alpha=0.1)
ax1.plot(datasets[i].x,
models[i](datasets[i].x,
x_err=datasets[i].x_err)*(
datasets[i].x)**4 * 10**(i-1),
color='k', zorder=10)
chi[i] = global_objective.objectives[i].chisqr()
file_open = open('{}/dspc_{}_{}_chi.txt'.format(anal_dir, sp, cont[i]), 'w')
file_open.write('{:.2f}'.format(chi[i]))
file_open.close()
file_open = open('{}/dspc_{}_all_chi.txt'.format(anal_dir, sp), 'w')
file_open.write('${:.2f}\\pm{:.2f}$'.format(np.average(chi), np.std(chi)))
file_open.close()
ax1.set_ylabel(r'$Rq^4$/Å$^{-4}$')
ax1.set_yscale('log')
ax1.set_xlabel(r'$q$/Å$^{-1}$')
ax2.set_xlabel(r'$z$/Å')
ax2.set_ylabel(r'SLD/$10^{-6}$Å$^{-2}$')
ax1.text(0.02, 0.98, '(a)', transform=ax1.transAxes, va='top', ha='left', size=8)
plt.tight_layout()
plt.savefig('{}{}_ref_sld.pdf'.format(fig_dir, label), bbox_inches='tight', pad_inches=0.1)
plt.close()
| 34.403599 | 113 | 0.555331 | 2,057 | 13,383 | 3.481283 | 0.158969 | 0.039101 | 0.036308 | 0.031281 | 0.445887 | 0.375367 | 0.344505 | 0.332356 | 0.302472 | 0.302472 | 0 | 0.050582 | 0.217066 | 13,383 | 388 | 114 | 34.492268 | 0.63285 | 0.032878 | 0 | 0.336391 | 0 | 0 | 0.105955 | 0.016705 | 0 | 0 | 0 | 0 | 0 | 1 | 0.003058 | false | 0 | 0.058104 | 0 | 0.06422 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0236aff9fbeb6910b32744324bc3f8e3ee55a82 | 2,222 | py | Python | src/sentry/utils/pytest/kafka.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/utils/pytest/kafka.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/utils/pytest/kafka.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import pytest
import six
from confluent_kafka.admin import AdminClient
from confluent_kafka import Producer
_EVENTS_TOPIC_NAME = "test-ingest-events"
_ATTACHMENTS_TOPIC_NAME = "test-ingest-attachments"
_TRANSACTIONS_TOPIC_NAME = "test-ingest-transactions"
def _get_topic_name(base_topic_name, test_name):
if test_name is None:
return base_topic_name
else:
return "{}--{}".format(_EVENTS_TOPIC_NAME, test_name)
@pytest.fixture
def kafka_producer():
def inner(settings):
producer = Producer(
{"bootstrap.servers": settings.KAFKA_CLUSTERS["default"]["bootstrap.servers"]}
)
return producer
return inner
class _KafkaAdminWrapper:
def __init__(self, request, settings):
self.test_name = request.node.name
kafka_config = {}
for key, val in six.iteritems(settings.KAFKA_CLUSTERS["default"]):
kafka_config[key] = val
self.admin_client = AdminClient(kafka_config)
def delete_events_topic(self):
self._delete_topic(_EVENTS_TOPIC_NAME)
def _delete_topic(self, base_topic_name):
topic_name = _get_topic_name(base_topic_name, self.test_name)
try:
futures_dict = self.admin_client.delete_topics([topic_name])
self._sync_wait_on_result(futures_dict)
except Exception: # noqa
pass # noqa nothing to do (probably there was no topic to start with)
def _sync_wait_on_result(self, futures_dict):
"""
Synchronously waits on all futures returned by the admin_client api.
:param futures_dict: the api returns a dict of futures that can be awaited
"""
# just wait on all futures returned by the async operations of the admin_client
for f in futures_dict.values():
f.result(5) # wait up to 5 seconds for the admin operation to finish
@pytest.fixture
def kafka_admin(request):
"""
A fixture representing a simple wrapper over the admin interface
:param request: the pytest request
:return: a Kafka admin wrapper
"""
def inner(settings):
return _KafkaAdminWrapper(request, settings)
return inner
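# Illustrative use inside a test (a hypothetical test body, not part of this
# module; `settings` is assumed to be the Django settings fixture):
#
#     def test_produce_event(kafka_admin, kafka_producer, settings):
#         admin = kafka_admin(settings)
#         admin.delete_events_topic()
#         producer = kafka_producer(settings)
#         producer.produce("test-ingest-events", b"hello")
#         producer.flush()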
| 29.626667 | 90 | 0.694419 | 285 | 2,222 | 5.133333 | 0.347368 | 0.079973 | 0.044429 | 0.038961 | 0.068353 | 0.068353 | 0 | 0 | 0 | 0 | 0 | 0.001171 | 0.231323 | 2,222 | 74 | 91 | 30.027027 | 0.855386 | 0.214221 | 0 | 0.133333 | 0 | 0 | 0.070498 | 0.027844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0.022222 | 0.111111 | 0.022222 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f024e6fe6314e0b844eaaba3f8a963b4fa244d82 | 4,126 | py | Python | inst/tools/mxAlgebraParser.py | JuKa87/OpenMx | f055df183ca433abd194e494a433142825666128 | [
"Apache-2.0"
] | null | null | null | inst/tools/mxAlgebraParser.py | JuKa87/OpenMx | f055df183ca433abd194e494a433142825666128 | [
"Apache-2.0"
] | null | null | null | inst/tools/mxAlgebraParser.py | JuKa87/OpenMx | f055df183ca433abd194e494a433142825666128 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Mx 1.0 algebras are parsed using the
# PLY implementation of lex and yacc parsing tools for Python.
cognates = ['det', 'tr', 'sum', 'prod', 'max', \
'min', 'abs', 'cos', 'cosh', 'sin', \
'sinh', 'tan', 'tanh', 'exp', 'sqrt']
tokens = (
'NAME', 'FNAME',
'AMPERSAND', 'ASTERISK', 'AT', 'CARET',
'DOT', 'MINUS', 'PERCENT', 'PIPE',
'PLUS', 'SQUOTE', 'TILDE', 'UNDERSCORE',
'LPAREN','RPAREN', 'COMMA'
)
# Tokens
t_AMPERSAND = r'&'
t_ASTERISK = r'\*'
t_AT = r'@'
t_CARET = r'\^'
t_DOT = r'\.'
t_MINUS = r'-'
t_PERCENT = r'%'
t_PIPE = r'\|'
t_PLUS = r'\+'
t_SQUOTE = r'\''
t_TILDE = r'~'
t_UNDERSCORE = r'_'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = r','
t_FNAME = r'\\[a-zA-Z0-9]+'
t_NAME = r'[a-zA-Z0-9]+'
# Ignored characters
t_ignore = " \t\n"
def t_error(t):
raise Exception("Illegal character " + str(t.value[0]))
# Build the lexer
import ply.lex as lex
lex.lex()
# Parsing rules
precedence = (
('left','UNDERSCORE'),
('left','PIPE'),
('left','PLUS','MINUS'),
('left','ASTERISK','DOT','AT','AMPERSAND','PERCENT'),
('right','CARET'),
('right','UMINUS'),
('left','TILDE','SQUOTE')
)
def p_statement_expr(t):
'statement : expression'
t[0] = t[1]
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression PIPE expression
| expression UNDERSCORE expression
| expression ASTERISK expression
| expression DOT expression
| expression AT expression
| expression AMPERSAND expression
| expression PERCENT expression
| expression CARET expression'''
if t[2] == '+' : t[0] = t[1] + ' + ' + t[3]
elif t[2] == '-': t[0] = t[1] + ' - ' + t[3]
elif t[2] == '|': t[0] = 'cbind(' + t[1] + ', ' + t[3] + ')'
elif t[2] == '_': t[0] = 'rbind(' + t[1] + ', ' + t[3] + ')'
elif t[2] == '*': t[0] = t[1] + ' %*% ' + t[3]
elif t[2] == '.': t[0] = '(' + t[1] + ' * ' + t[3] + ')'
elif t[2] == '@': t[0] = t[1] + ' %x% ' + t[3]
elif t[2] == '&': t[0] = t[1] + ' %&% ' + t[3]
elif t[2] == '%': t[0] = '(' + t[1] + ' / ' + t[3] + ')'
elif t[2] == '^': t[0] = t[1] + ' ^ ' + t[3]
def p_expression_unaryop(t):
'''expression : expression TILDE
| expression SQUOTE'''
if t[2] == '~' : t[0] = 'solve(' + t[1] + ')'
elif t[2] == '\'' : t[0] = 't(' + t[1] + ')'
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = "(-" + t[2] + ")"
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = "(" + t[2] + ")"
def p_expression_function(t):
'expression : FNAME LPAREN arglist RPAREN'
funcname = convertFunctionName(t[1][1:])
t[0] = funcname + '('
for i in range(len(t[3])):
t[0] += t[3][i]
if i < len(t[3]) - 1: t[0] += ', '
t[0] += ')'
def convertFunctionName(name):
if name in cognates:
return(name)
    elif name == 'ln':
        return('log')
    else:
        raise Exception("Function has not been implemented: " + name)
def p_arglist(t):
'''arglist : expression args
| empty'''
if len(t) == 3: t[0] = [t[1]] + t[2]
else: t[0] = []
def p_args(t):
'''args : COMMA expression args
| empty'''
if len(t) == 4: t[0] = [t[2]] + t[3]
else: t[0] = []
def p_empty(t):
'empty :'
t[0] = []
def p_expression_name(t):
'expression : NAME'
t[0] = t[1]
def p_error(t):
raise Exception("Syntax error on token " + str(t.value) +\
" at line " + str(t.lineno) + " at position " +\
str(t.lexpos))
import ply.yacc as parser
parser.yacc(write_tables=0,debug=0)
if __name__ == "__main__":
import sys
lines = sys.stdin.readlines()
input = "".join(lines)
print(parser.parse(input))
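    # Illustrative translation (not part of the original script): feeding the
    # Mx 1.0 algebra expression  A*B' + C~  on stdin prints the R code
    #   A %*% t(B) + solve(C)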
| 27.144737 | 74 | 0.479157 | 535 | 4,126 | 3.605607 | 0.256075 | 0.02592 | 0.026439 | 0.024883 | 0.160187 | 0.123898 | 0.078797 | 0.060135 | 0.060135 | 0.053914 | 0 | 0.029401 | 0.307562 | 4,126 | 151 | 75 | 27.324503 | 0.645782 | 0.194377 | 0 | 0.039216 | 0 | 0 | 0.206992 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127451 | false | 0 | 0.029412 | 0 | 0.156863 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f025382cac3f7e113ffe03367cdf7a501861b523 | 1,139 | py | Python | CSC315_DistributedSystems/Rumor_mongering/main.py | kimanihuon/academics | 83fff621fc0fbae076089c6e4c7528ef375a94d9 | [
"MIT"
] | null | null | null | CSC315_DistributedSystems/Rumor_mongering/main.py | kimanihuon/academics | 83fff621fc0fbae076089c6e4c7528ef375a94d9 | [
"MIT"
] | null | null | null | CSC315_DistributedSystems/Rumor_mongering/main.py | kimanihuon/academics | 83fff621fc0fbae076089c6e4c7528ef375a94d9 | [
"MIT"
] | null | null | null | from node import Node
import random
# Initialize the other nodes
P = Q = R = S = T = None
def resetNodes():
global P, Q, R, S, T
# Initialize the other nodes
P = Node("P")
Q = Node("Q")
R = Node("R")
S = Node("S")
T = Node("T")
# Create dictionary of nodes
newNodes = dict(P=P, Q=Q, R=R, S=S, T=T)
# Fully connected graph
P.create(newNodes)
Q.create(newNodes)
R.create(newNodes)
S.create(newNodes)
T.create(newNodes)
return
# Blind probability
resetNodes()
print("Blind probability start")
P.infect("d1", "newValue") # Infect a new node 'P'
P.blindProbabilityUpdate("d1", "newValue")
# Feedback based probability start
print("\n")
resetNodes() # Re-initialize nodes
print("Feedback-based probability start")
Q.infect("d1", "newValue") # Infect a new node 'Q'
Q.feedbackBasedUpdate("d1", "newValue")
# Fixed probability start
print("\n")
resetNodes() # Re-initialize nodes
print("Fixed probability start")
S.infect("d1", "newValue") # Infect a new node 'S'
S.fixedProbabilityUpdate("d1", "newValue")
# q = random.choice(list(newNodes.items()))
# print(q[1])
| 21.092593 | 51 | 0.650571 | 161 | 1,139 | 4.602484 | 0.291925 | 0.080972 | 0.064777 | 0.089069 | 0.34413 | 0.267206 | 0.267206 | 0.145749 | 0.145749 | 0 | 0 | 0.007634 | 0.194908 | 1,139 | 53 | 52 | 21.490566 | 0.800436 | 0.295874 | 0 | 0.16129 | 0 | 0 | 0.186785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.064516 | 0 | 0.129032 | 0.16129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f027dda0c79da96ebfd5ed3066b4d4f648b19b47 | 4,451 | py | Python | Train/store_score.py | vivek2188/SiamFC-PyTorch | 2bebd2e82c545a5cfd3bee2aa3db45eec5b6e00a | [
"Apache-2.0"
] | null | null | null | Train/store_score.py | vivek2188/SiamFC-PyTorch | 2bebd2e82c545a5cfd3bee2aa3db45eec5b6e00a | [
"Apache-2.0"
] | null | null | null | Train/store_score.py | vivek2188/SiamFC-PyTorch | 2bebd2e82c545a5cfd3bee2aa3db45eec5b6e00a | [
"Apache-2.0"
] | null | null | null | import os
import pickle
from tqdm import tqdm
import torchvision.transforms as transforms
from SiamNet import *
from VIDDataset import *
from torch.autograd import Variable
from Config import *
from DataAugmentation import *
from torch.utils.data import DataLoader
from CurriculumLearning.scoring_functions import scoring_function
# fix random seed
np.random.seed(1357)
torch.manual_seed(1234)
def generate_score(data_dir, train_json, store_folder, pretrained_model_path, use_gpu=True):
# initialize training configuration
config = Config()
# Some parameters
pos_pair_range = float(config.pos_pair_range)
norm_factor = 2466
alpha = list(np.arange(0.0, 1.1, 0.1))
# Data Augmentation
center_crop_size = config.instance_size - config.stride
random_crop_size = config.instance_size - 2 * config.stride
z_transforms = transforms.Compose([
RandomStretch(),
CenterCrop((config.examplar_size, config.examplar_size)),
ToTensor()
])
x_transforms = transforms.Compose([
RandomStretch(),
CenterCrop((center_crop_size, center_crop_size)),
RandomCrop((random_crop_size, random_crop_size)),
ToTensor()
])
# Dataset
train_dataset = VIDDataset(train_json, data_dir, config, z_transforms, x_transforms)
# Dataloader
data_loader = DataLoader(train_dataset, batch_size=config.batch_size,
shuffle=True, num_workers=config.train_num_workers)
img_data = [set() for i in range(len(alpha))]
label_mask = None
    # Load the pretrained SiamFC network
net = torch.load(pretrained_model_path)
if use_gpu:
net.cuda()
net.eval()
for epoch in range(config.num_epoch):
for j, data in enumerate(tqdm(data_loader)):
zs, xs, z_paths, x_paths, fr_dist = data
fr_dist = fr_dist.cpu().numpy()
fr_dist = 1 - fr_dist / pos_pair_range
if use_gpu:
zs = zs.cuda()
xs = xs.cuda()
# Forward pass
correlation = net.forward(Variable(zs), Variable(xs)) / norm_factor
normalised_correlation = torch.sigmoid(correlation).detach().cpu().numpy()
# Creating the mask for the score map
if label_mask is None:
response_size = normalised_correlation.shape[2: 4] # Score Map
half = response_size[0] // 2 + 1
label_mask, label_weight = create_label(response_size, config, use_gpu)
label_mask = label_mask.cpu().numpy()[0].reshape(response_size)
for row in range(label_mask.shape[0]):
for col in range(label_mask.shape[1]):
if label_mask[row, col] == 0.:
continue
counter = abs(half-row-1) + abs(half-col-1)
if counter == 2.:
label_mask[row, col] = 1 / 8.
elif counter == 1.:
label_mask[row, col] = 1 / 4.
label_mask = np.reshape(label_mask, normalised_correlation.shape[1:])
# Get Correlation Score
score = normalised_correlation * label_mask
score = score.sum((1, 2, 3)) / 3.
# Scoring function
for ai in alpha:
cumulative_score = [scoring_function(round(ai, 1), scr, fr) for scr, fr in zip(score, fr_dist)]
idx = int(ai*10)
for z, x, cuscr in zip(z_paths, x_paths, cumulative_score):
img_data[idx].add((z, x, cuscr))
    # Sort the scores in descending order; a higher score corresponds to an easier sample
    img_data = [sorted(list(imgDatum), key=lambda f: f[2], reverse=True) for imgDatum in img_data]
    # Store the rankings to pickle files
for idx, imgDatum in enumerate(img_data):
filename = str(idx) + '.pickle'
filepath = store_folder + filename
with open(filepath, 'wb') as pickle_file:
pickle.dump(imgDatum, pickle_file)
if __name__ == "__main__":
data_dir = "PATH/TO/THE/DATA_DIRECTORY"
train_json = "PATH/TO/THE/TRAIN_JSON_FILE"
store_folder = 'PATH/TO/STORE/THE/FINAL_RESULTS'
pretrained_model = "PATH/TO/THE/PRETRAINED_MODEL"
# Get score for each image
scores = generate_score(data_dir, train_json, store_folder, pretrained_model)
| 36.785124 | 111 | 0.621209 | 559 | 4,451 | 4.729875 | 0.320215 | 0.044251 | 0.021558 | 0.01702 | 0.12708 | 0.041604 | 0.041604 | 0.041604 | 0.041604 | 0.041604 | 0 | 0.014475 | 0.286003 | 4,451 | 120 | 112 | 37.091667 | 0.817495 | 0.085823 | 0 | 0.095238 | 0 | 0 | 0.031828 | 0.027634 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011905 | false | 0 | 0.130952 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f029de750385d48bb450ba129cb7909e4ab8ad96 | 456 | py | Python | tests/test_db.py | MrKiven/ECache | 44b97f425585972bc8b6b2a399186e88bb843dcd | [
"MIT"
] | 32 | 2016-11-10T13:06:06.000Z | 2021-06-25T02:59:51.000Z | tests/test_db.py | MrKiven/ECache | 44b97f425585972bc8b6b2a399186e88bb843dcd | [
"MIT"
] | 3 | 2017-11-03T17:00:57.000Z | 2019-06-12T09:04:07.000Z | tests/test_db.py | MrKiven/ECache | 44b97f425585972bc8b6b2a399186e88bb843dcd | [
"MIT"
] | 8 | 2016-11-12T09:41:55.000Z | 2022-01-18T03:45:41.000Z | # -*- coding: utf-8 -*-
import ecache.db as db
from tests.conftest import engines
def test_session_stack():
DBSession = db.make_session(engines, force_scope=True)
session1 = DBSession()
with db.session_stack():
session2 = DBSession()
session2.close()
with db.session_stack():
session3 = DBSession()
session3.close()
session1.close()
assert not (session1 is session2 is session3)
| 22.8 | 58 | 0.633772 | 52 | 456 | 5.442308 | 0.538462 | 0.127208 | 0.091873 | 0.127208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029674 | 0.260965 | 456 | 19 | 59 | 24 | 0.810089 | 0.046053 | 0 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f02ade42e7763605560fe2b2745ff8709cda0b06 | 504 | py | Python | autopipe/utils.py | AnonymusRaccoon/AutoPipe | e75a5891828c0d937f0e2b0ea38ae33f7758512a | [
"MIT"
] | null | null | null | autopipe/utils.py | AnonymusRaccoon/AutoPipe | e75a5891828c0d937f0e2b0ea38ae33f7758512a | [
"MIT"
] | null | null | null | autopipe/utils.py | AnonymusRaccoon/AutoPipe | e75a5891828c0d937f0e2b0ea38ae33f7758512a | [
"MIT"
] | null | null | null | from enum import Enum
def to_dict(obj):
if isinstance(obj, Enum):
return obj
if isinstance(obj, dict):
return {i: to_dict(j) for (i, j) in obj.items()}
if hasattr(obj, "__dict__") and hasattr(obj, "__slots__"):
return to_dict(dict([(i, getattr(obj, i)) for i in (list(obj.__slots__) + list(obj.__dict__)) if hasattr(obj, i)]))
if hasattr(obj, "__dict__"):
return to_dict(obj.__dict__)
if hasattr(obj, "__slots__"):
return to_dict({i: getattr(obj, i) for i in obj.__slots__})
return obj
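# A small usage sketch (illustrative; the Point class below is hypothetical and
# not part of this package):
if __name__ == "__main__":
    class Point:
        def __init__(self):
            self.x, self.y = 1, 2
    print(to_dict(Point()))  # -> {'x': 1, 'y': 2}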
| 31.5 | 117 | 0.686508 | 85 | 504 | 3.635294 | 0.235294 | 0.097087 | 0.15534 | 0.116505 | 0.394822 | 0.304207 | 0.142395 | 0.142395 | 0 | 0 | 0 | 0 | 0.152778 | 504 | 15 | 118 | 33.6 | 0.723653 | 0 | 0 | 0.153846 | 0 | 0 | 0.06746 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.615385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f02da3232dd9d630a46aba69ed9af02cd1419ace | 504 | py | Python | 04.神经网络(进阶)/12.PyTorch-predict-house-prices-P1-master/utils.py | chengleniubi/L_tensor | 4dc74f027d7d38bcdb9adcccb20983f20ba8d16f | [
"Apache-2.0"
] | null | null | null | 04.神经网络(进阶)/12.PyTorch-predict-house-prices-P1-master/utils.py | chengleniubi/L_tensor | 4dc74f027d7d38bcdb9adcccb20983f20ba8d16f | [
"Apache-2.0"
] | null | null | null | 04.神经网络(进阶)/12.PyTorch-predict-house-prices-P1-master/utils.py | chengleniubi/L_tensor | 4dc74f027d7d38bcdb9adcccb20983f20ba8d16f | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
from torch.autograd import Variable
def get_rmse_log(model, feature, label, use_gpu):
model.eval()
mse_loss = nn.MSELoss()
if use_gpu:
feature = feature.cuda()
label = label.cuda()
feature = Variable(feature, volatile=True)
label = Variable(label, volatile=True)
pred = model(feature)
clipped_pred = torch.clamp(pred, 1, float('inf'))
rmse = torch.sqrt(mse_loss(clipped_pred.log(), label.log()))
return rmse.data[0]
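# Usage sketch (illustrative; `net` and the tensors are assumptions, not part of
# this module):
#   rmse = get_rmse_log(net, train_features, train_labels, use_gpu=False)
# Predictions are clipped to at least 1 before taking logs, so the returned
# value is the RMSE in log-price space (written for the pre-0.4 Variable API).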
| 28 | 64 | 0.672619 | 70 | 504 | 4.728571 | 0.471429 | 0.054381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004988 | 0.204365 | 504 | 17 | 65 | 29.647059 | 0.820449 | 0 | 0 | 0 | 0 | 0 | 0.005952 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f02fd84a342e9e440b52807dbbf0c6a76064582b | 2,489 | py | Python | app/db_con.py | Paulvitalis200/Store-Manager-API-V2 | f454b0a80cdd17a34ea31de4efa8778539414d3e | [
"MIT"
] | 1 | 2018-12-14T10:39:00.000Z | 2018-12-14T10:39:00.000Z | app/db_con.py | Paulvitalis200/Store-Manager-API-V2 | f454b0a80cdd17a34ea31de4efa8778539414d3e | [
"MIT"
] | 9 | 2018-11-05T16:58:02.000Z | 2019-10-21T17:33:10.000Z | app/db_con.py | Paulvitalis200/Store-Manager-API-V2 | f454b0a80cdd17a34ea31de4efa8778539414d3e | [
"MIT"
] | 2 | 2018-11-05T15:47:23.000Z | 2018-11-06T09:30:04.000Z | import os
import psycopg2
from psycopg2 import Error
from instance.config import Config, TestConfig
db_url = os.getenv('DATABASE_URL')
print(db_url)
def db_connection():
variable = os.getenv("APP_SETTINGS")
try:
conn = psycopg2.connect(db_url)
return conn
except (Exception, psycopg2.DatabaseError) as error:
return ('Failed to connect', error)
conn = db_connection()
print(conn)
def create_tables():
conn = db_connection()
curr = conn.cursor()
queries = tables()
for query in queries:
curr.execute(query)
curr.close()
conn.commit()
def tables():
products_table = """
CREATE TABLE IF NOT EXISTS products (
id serial PRIMARY KEY NOT NULL,
name text NOT NULL UNIQUE,
price integer NOT NULL,
inventory integer NOT NULL,
minimum_stock integer NOT NULL,
category text
)
"""
sales_table = """
CREATE TABLE IF NOT EXISTS sales (
id serial PRIMARY KEY,
sold_by varchar NOT NULL,
product_name varchar NOT NULL,
quantity integer NOT NULL,
price integer NOT NULL,
total_price integer
)
"""
users_table = """
CREATE TABLE IF NOT EXISTS users (
id serial PRIMARY KEY NOT NULL,
username text UNIQUE NOT NULL,
email text NOT NULL,
password text NOT NULL,
role text NOT NULL
)
"""
tokens_table = """
CREATE TABLE IF NOT EXISTS tokens (
id serial PRIMARY KEY NOT NULL,
tokens varchar
)
"""
fix = """CREATE EXTENSION IF NOT EXISTS citext;"""
alteration = """ALTER TABLE products ALTER COLUMN name TYPE citext;"""
queries = [products_table, sales_table,
users_table, tokens_table, fix, alteration]
return queries
def destroy_tables():
""" Delete tables"""
conn = db_connection()
curr = conn.cursor()
users = """DROP TABLE IF EXISTS users CASCADE"""
sales = """DROP TABLE IF EXISTS sales CASCADE"""
product = """DROP TABLE IF EXISTS products CASCADE"""
    # actually execute the DROP statements, then commit
    for query in (users, sales, product):
        curr.execute(query)
    conn.commit()
    curr.close()
| 27.351648 | 74 | 0.528325 | 256 | 2,489 | 5.046875 | 0.320313 | 0.081269 | 0.04257 | 0.055728 | 0.197368 | 0.197368 | 0.055728 | 0 | 0 | 0 | 0 | 0.002676 | 0.399357 | 2,489 | 90 | 75 | 27.655556 | 0.861538 | 0.005223 | 0 | 0.225352 | 0 | 0 | 0.588902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0.014085 | 0.056338 | 0 | 0.15493 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0316441c067d916124e19581e68add6296834e3 | 1,858 | py | Python | tests/cases/test_dm_export.py | clayne/blender-xray | 84d5d52049ec9e22c85ba8544995bd39c3a83e55 | [
"BSD-2-Clause"
] | 93 | 2016-12-02T14:42:18.000Z | 2022-03-23T08:15:41.000Z | tests/cases/test_dm_export.py | clayne/blender-xray | 84d5d52049ec9e22c85ba8544995bd39c3a83e55 | [
"BSD-2-Clause"
] | 276 | 2018-07-04T20:13:22.000Z | 2022-03-31T09:13:37.000Z | tests/cases/test_dm_export.py | clayne/blender-xray | 84d5d52049ec9e22c85ba8544995bd39c3a83e55 | [
"BSD-2-Clause"
] | 31 | 2018-07-04T20:03:17.000Z | 2022-01-27T18:37:36.000Z | from tests import utils
import bpy
class TestDmExport(utils.XRayTestCase):
def test_export_single(self):
# Arrange
self._create_dm_objects()
# Act
bpy.ops.xray_export.dm_file(
detail_model='tdm1', filepath=self.outpath('test.dm'),
texture_name_from_image_path=False
)
# Assert
self.assertOutputFiles({
'test.dm'
})
def _create_dm_objects(self, create_uv=True, create_material=True):
bmesh = utils.create_bmesh((
(0, 0, 0),
(-1, -1, 0), (+1, -1, 0), (+1, +1, 0), (-1, +1, 0),
), ((0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 1)), create_uv)
objs = []
for i in range(3):
obj = utils.create_object(bmesh, create_material)
obj.name = 'tdm%d' % (i + 1)
objs.append(obj)
bpy_texture = bpy.data.textures.new('test_texture', 'IMAGE')
bpy_image = bpy.data.images.new('test_image.dds', 0, 0)
bpy_image.source = 'FILE'
bpy_image.filepath = 'test_image.dds'
if bpy.app.version >= (2, 80, 0):
obj.data.materials[0].use_nodes = True
node_tree = obj.data.materials[0].node_tree
texture_node = node_tree.nodes.new('ShaderNodeTexImage')
texture_node.image = bpy_image
texture_node.location.x -= 500
princ_shader = node_tree.nodes['Principled BSDF']
node_tree.links.new(
texture_node.outputs['Color'],
princ_shader.inputs['Base Color']
)
else:
bpy_texture.image = bpy_image
texture_slot = obj.data.materials[0].texture_slots.add()
texture_slot.texture = bpy_texture
return objs
| 35.056604 | 72 | 0.531216 | 223 | 1,858 | 4.215247 | 0.381166 | 0.010638 | 0.012766 | 0.017021 | 0.01383 | 0.01383 | 0.01383 | 0.01383 | 0.01383 | 0.01383 | 0 | 0.034483 | 0.344456 | 1,858 | 52 | 73 | 35.730769 | 0.737274 | 0.009688 | 0 | 0 | 0 | 0 | 0.065359 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f033dd54f00a0548ab25768ec8dc4a70eaa09de6 | 1,508 | py | Python | s3_bucket_stack/s3_bucket_stack.py | richardfan1126/ssm-patch-portal | a511297a4d5ec701568b16b2bda253107f7b9c64 | [
"Apache-2.0"
] | 4 | 2022-03-21T09:31:19.000Z | 2022-03-30T19:50:20.000Z | s3_bucket_stack/s3_bucket_stack.py | richardfan1126/ssm-patch-portal | a511297a4d5ec701568b16b2bda253107f7b9c64 | [
"Apache-2.0"
] | null | null | null | s3_bucket_stack/s3_bucket_stack.py | richardfan1126/ssm-patch-portal | a511297a4d5ec701568b16b2bda253107f7b9c64 | [
"Apache-2.0"
] | null | null | null | from aws_cdk import (
NestedStack,
aws_s3 as s3,
aws_iam as iam
)
import aws_cdk
from constructs import Construct
class S3BucketStack(NestedStack):
main_bucket = None
ec2_iam_role_arns = None
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
self.ec2_iam_role_arns = kwargs.pop('ec2_iam_role_arns')
super().__init__(scope, construct_id, **kwargs)
self.main_bucket = s3.Bucket(
self, "SsmPatchPortal",
block_public_access = s3.BlockPublicAccess.BLOCK_ALL,
encryption = s3.BucketEncryption.S3_MANAGED,
)
# Allow EC2 instance read/write access on the bucket
override_list_read_policy = iam.PolicyStatement(
actions = ["s3:GetObject"],
resources = [self.main_bucket.arn_for_objects("InstallOverrideLists/*")],
)
command_output_policy = iam.PolicyStatement(
actions = [
"s3:PutObject",
"s3:PutObjectAcl"
],
resources = [self.main_bucket.arn_for_objects("CommandOutputs/*")],
)
for principal in aws_cdk.Fn.split(",", self.ec2_iam_role_arns.value_as_string):
override_list_read_policy.add_arn_principal(principal)
command_output_policy.add_arn_principal(principal)
self.main_bucket.add_to_resource_policy(override_list_read_policy)
self.main_bucket.add_to_resource_policy(command_output_policy) | 35.069767 | 87 | 0.655172 | 172 | 1,508 | 5.348837 | 0.389535 | 0.065217 | 0.076087 | 0.06087 | 0.326087 | 0.15 | 0.15 | 0 | 0 | 0 | 0 | 0.013405 | 0.257958 | 1,508 | 43 | 88 | 35.069767 | 0.808758 | 0.033157 | 0 | 0 | 0 | 0 | 0.074811 | 0.0151 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.088235 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
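# Illustrative wiring from a parent stack (names below are assumptions, not taken
# from this project; the parameter is expected to resolve to a comma-separated
# list of EC2 role ARNs, since `value_as_string` is split on "," above):
#
#     role_arns = aws_cdk.CfnParameter(self, "Ec2IamRoleArns", type="String")
#     S3BucketStack(self, "S3Buckets", ec2_iam_role_arns=role_arns)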
f035681928063b832214beea7922e32127d09b22 | 1,318 | py | Python | 01-mathematics/04-numerical-calculation/appendix-coding-practice/15_sqrt_derive.py | jameszhan/notes-ml | c633d04e5443eab71bc3b27fff89d57b89d1786c | [
"Apache-2.0"
] | null | null | null | 01-mathematics/04-numerical-calculation/appendix-coding-practice/15_sqrt_derive.py | jameszhan/notes-ml | c633d04e5443eab71bc3b27fff89d57b89d1786c | [
"Apache-2.0"
] | null | null | null | 01-mathematics/04-numerical-calculation/appendix-coding-practice/15_sqrt_derive.py | jameszhan/notes-ml | c633d04e5443eab71bc3b27fff89d57b89d1786c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def target(x, v):
return x ** 2 - v
n = 5
X = np.linspace(-5, 5, 100)
plt.plot(X, target(X, n), 'r:')
plt.grid(True)
plt.show()
def binary_method(n, epsilon):
l, r, i = 0, n, 0
while True:
m = (l + r) / 2.0
print('[ Epoch {0} ] l = {1}, r = {2}, m = {3}'.format(i, l, r, m))
guess = m ** 2
if abs(n - guess) <= epsilon:
break
elif guess > n:
r = m
else:
l = m
i += 1
return m
# Newton's method; n is a positive integer
#
# Theory:
#
# slope of the tangent line is f'(Xn)
# [Xn, f(Xn)] is one point of the tangent line
# f(Xn) - 0 = f'(Xn) * (Xn - Xn+1)
# Xn+1 = Xn - f(Xn) / f'(Xn)
# f(x) = x ** 2 - n
# f'(x) = 2 * x
# Xn+1 = Xn - (Xn - n / Xn) / 2
# Xn+1 = (Xn + n / Xn) / 2
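# e.g. for n = 5 with starting guess 5 the iterates are
#   5 -> 3 -> 2.3333... -> 2.2381 -> 2.23607 -> ...
# converging to sqrt(5) ~= 2.2360679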
def newton_method(n, epsilon):
guess, i = n, 0
while abs(guess ** 2 - n) > epsilon:
print('[ Epoch {0} ] guess = {1}, n = {2}'.format(i, guess, n))
# f(x) = x ** 2 - n => x0 - f(x0) / f'(x0) = (x0 + n / x0) / 2
guess = (guess + n / guess) / 2.0
i += 1
return guess
print('[Binary] sqrt({0}) = {1}.\n'.format(n, binary_method(n, 1e-6)))
print('[Newton] sqrt({0}) = {1}.\n'.format(n, newton_method(n, 1e-6)))
| 22.724138 | 75 | 0.468892 | 231 | 1,318 | 2.658009 | 0.268398 | 0.029316 | 0.032573 | 0.052117 | 0.061889 | 0.045603 | 0 | 0 | 0 | 0 | 0 | 0.055741 | 0.319423 | 1,318 | 57 | 76 | 23.122807 | 0.628763 | 0.269348 | 0 | 0.0625 | 0 | 0.03125 | 0.13622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.0625 | 0.03125 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f036aa3c9809e29df72d968727ffd3fd8d61e184 | 1,511 | py | Python | examples/example_prob_map.py | Menelau/synthetic_datasets | 86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5 | [
"MIT"
] | 6 | 2018-02-07T02:02:00.000Z | 2020-01-22T10:33:01.000Z | examples/example_prob_map.py | Menelau/synthetic_datasets | 86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5 | [
"MIT"
] | null | null | null | examples/example_prob_map.py | Menelau/synthetic_datasets | 86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5 | [
"MIT"
] | null | null | null | # coding=utf-8
# Author: Rafael Menelau Oliveira e Cruz <rafaelmenelau@gmail.com>
#
# License: MIT
if __name__ == "__main__":
    from syndata.plot_tools import plot_classifier_probability_map
    from syndata.synthetic_datasets import generate_p2, generate_circle_square, generate_banana, generate_banana2
    import matplotlib.pyplot as plt
    import numpy as np
    from sklearn.svm import SVC
    from sklearn.model_selection import train_test_split

    # Set-up 2x2 grid for plotting.
    fig, sub = plt.subplots(2, 2, figsize=(15, 15))
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    cmap = plt.cm.RdBu

    X_P2, y_P2 = generate_p2([1000, 1000])
    X_cs, y_cs = generate_circle_square([1000, 1000])
    X_banana, y_banana = generate_banana([1000, 1000])
    X_banana2, y_banana2 = generate_banana2([1000, 1000])

    X_list = list([X_P2, X_cs, X_banana, X_banana2])
    y_list = list([y_P2, y_cs, y_banana, y_banana2])

    # title for the plots
    titles = ('P2 Dataset',
              'Circle Square Dataset',
              'Banana Dataset',
              'Banana 2 Dataset')

    for X, y, title, ax in zip(X_list, y_list, titles, sub.flatten()):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
        svm = SVC(probability=True)
        svm.fit(X_train, y_train)
        plot_classifier_probability_map(ax, svm, X_train, cmap=cmap)
        ax.set_xlim(np.min(X[:, 0]), np.max(X[:, 0]))
        ax.set_ylim(np.min(X[:, 1]), np.max(X[:, 1]))

    plt.show()
| 35.139535 | 113 | 0.662475 | 235 | 1,511 | 3.995745 | 0.387234 | 0.034079 | 0.038339 | 0.059638 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054852 | 0.215751 | 1,511 | 42 | 114 | 35.97619 | 0.737553 | 0.092654 | 0 | 0 | 0 | 0 | 0.050587 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f038745f7bb7b21df59b63f98ffafc114106017c | 686 | py | Python | solution/prime_factors.py | vinimmelo/prime-factors-challenge | 0f8bcd21ccd7382f7125fd2629a262fa193c172e | [
"MIT"
] | null | null | null | solution/prime_factors.py | vinimmelo/prime-factors-challenge | 0f8bcd21ccd7382f7125fd2629a262fa193c172e | [
"MIT"
] | null | null | null | solution/prime_factors.py | vinimmelo/prime-factors-challenge | 0f8bcd21ccd7382f7125fd2629a262fa193c172e | [
"MIT"
] | null | null | null | """ Simple script to return Prime Factors of a given number """
def is_prime(number: int) -> bool:
    for n in range(2, number):
        if number % n == 0:
            return False
    return True


def is_prime_factor(factor, number) -> bool:
    return is_prime(factor) and (number % factor == 0 or factor == number)


def prime_factors(number: int) -> list:
    remainder = int(number)
    if remainder <= 3:
        return [remainder]
    prime_factors = []
    while remainder > 1:
        for i in range(2, remainder + 1):
            if is_prime_factor(i, remainder):
                prime_factors.append(i)
                remainder //= i
    return sorted(prime_factors)
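# --- Added usage example (hedged; not part of the original script). A quick
# sanity check of the functions above with a composite and a prime input:
if __name__ == "__main__":
    print(prime_factors(12))   # expected: [2, 2, 3]
    print(prime_factors(97))   # 97 is itself prime, expected: [97]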
| 24.5 | 74 | 0.59621 | 90 | 686 | 4.433333 | 0.377778 | 0.150376 | 0.097744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014583 | 0.300292 | 686 | 27 | 75 | 25.407407 | 0.816667 | 0.080175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0.055556 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f03c241b0ece50386bf3b2cda85d7356585fb9b5 | 13,856 | py | Python | examples/reinforcement_learning/tutorial_DPPO.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | 1 | 2019-12-30T03:16:26.000Z | 2019-12-30T03:16:26.000Z | examples/reinforcement_learning/tutorial_DPPO.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | null | null | null | examples/reinforcement_learning/tutorial_DPPO.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | null | null | null | """
Distributed Proximal Policy Optimization (DPPO)
----------------------------
A distributed version of OpenAI's Proximal Policy Optimization (PPO).
Workers in parallel to collect data, then stop worker's roll-out and train PPO on collected data.
Restart workers once PPO is updated.
Reference
---------
Emergence of Locomotion Behaviours in Rich Environments, Heess et al. 2017
Proximal Policy Optimization Algorithms, Schulman et al. 2017
High Dimensional Continuous Control Using Generalized Advantage Estimation, Schulman et al. 2016
MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials
Environment
-----------
OpenAI Gym Pendulum-v0, continuous action space
Prerequisites
--------------
tensorflow >=2.0.0a0
tensorflow-probability 0.6.0
tensorlayer >=2.0.0
To run
------
python tutorial_DPPO.py --train/test
"""
import argparse
import os
import queue
import threading
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import tensorlayer as tl
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=True)
parser.add_argument('--test', dest='train', action='store_false')
args = parser.parse_args()
##################### hyper parameters ####################
GAME = 'Pendulum-v0' # environment name
RANDOMSEED = 1 # random seed
EP_MAX = 1000 # total number of episodes for training
EP_LEN = 200 # total number of steps for each episode
GAMMA = 0.9 # reward discount
A_LR = 0.0001 # learning rate for actor
C_LR = 0.0002 # learning rate for critic
BATCH = 32 # update batchsize
A_UPDATE_STEPS = 10 # actor update steps
C_UPDATE_STEPS = 10 # critic update steps
S_DIM, A_DIM = 3, 1 # state dimension, action dimension
EPS = 1e-8 # epsilon
METHOD = [
dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty
dict(name='clip', epsilon=0.2), # Clipped surrogate objective, find this is better
][1] # choose the method for optimization
N_WORKER = 4 # parallel workers
MIN_BATCH_SIZE = 64 # minimum batch size for updating PPO
UPDATE_STEP = 10 # loop update operation n-steps
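# --- Added illustrative aside (not part of the tutorial): the 'clip' entry in
# METHOD above is PPO's clipped surrogate objective. With made-up numbers it
# boils down to (NumPy-style pseudocode):
#
#     ratio = pi_new / pi_old                        # probability ratio of an action
#     clipped = np.clip(ratio, 1 - 0.2, 1 + 0.2)     # epsilon = 0.2, as in METHOD
#     objective = np.minimum(ratio * adv, clipped * adv).mean()
#
# Taking the element-wise minimum keeps a single update from moving the policy
# too far from the old one; the same computation appears in PPO.a_train below.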
############################### DPPO ####################################
class PPO(object):
'''
PPO class
'''
def __init__(self):
# critic
tfs = tl.layers.Input([None, S_DIM], tf.float32, 'state')
l1 = tl.layers.Dense(100, tf.nn.relu)(tfs)
v = tl.layers.Dense(1)(l1)
self.critic = tl.models.Model(tfs, v)
self.critic.train()
# actor
self.actor = self._build_anet('pi', trainable=True)
self.actor_old = self._build_anet('oldpi', trainable=False)
self.actor_opt = tf.optimizers.Adam(A_LR)
self.critic_opt = tf.optimizers.Adam(C_LR)
def a_train(self, tfs, tfa, tfadv):
'''
Update policy network
:param tfs: state
:param tfa: act
:param tfadv: advantage
:return:
'''
tfs = np.array(tfs, np.float32)
tfa = np.array(tfa, np.float32)
tfadv = np.array(tfadv, np.float32)
with tf.GradientTape() as tape:
mu, sigma = self.actor(tfs)
pi = tfp.distributions.Normal(mu, sigma)
mu_old, sigma_old = self.actor_old(tfs)
oldpi = tfp.distributions.Normal(mu_old, sigma_old)
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(tfa) / (oldpi.prob(tfa) + EPS)
surr = ratio * tfadv
if METHOD['name'] == 'kl_pen':
tflam = METHOD['lam']
kl = tfp.distributions.kl_divergence(oldpi, pi)
kl_mean = tf.reduce_mean(kl)
aloss = -(tf.reduce_mean(surr - tflam * kl))
else: # clipping method, find this is better
aloss = -tf.reduce_mean(
tf.minimum(surr,
tf.clip_by_value(ratio, 1. - METHOD['epsilon'], 1. + METHOD['epsilon']) * tfadv)
)
a_gard = tape.gradient(aloss, self.actor.trainable_weights)
self.actor_opt.apply_gradients(zip(a_gard, self.actor.trainable_weights))
if METHOD['name'] == 'kl_pen':
return kl_mean
def update_old_pi(self):
'''
Update old policy parameter
:return: None
'''
for p, oldp in zip(self.actor.trainable_weights, self.actor_old.trainable_weights):
oldp.assign(p)
def c_train(self, tfdc_r, s):
'''
Update actor network
:param tfdc_r: cumulative reward
:param s: state
:return: None
'''
tfdc_r = np.array(tfdc_r, dtype=np.float32)
with tf.GradientTape() as tape:
advantage = tfdc_r - self.critic(s)
closs = tf.reduce_mean(tf.square(advantage))
grad = tape.gradient(closs, self.critic.trainable_weights)
self.critic_opt.apply_gradients(zip(grad, self.critic.trainable_weights))
def cal_adv(self, tfs, tfdc_r):
'''
Calculate advantage
:param tfs: state
:param tfdc_r: cumulative reward
:return: advantage
'''
tfdc_r = np.array(tfdc_r, dtype=np.float32)
advantage = tfdc_r - self.critic(tfs)
return advantage.numpy()
def update(self):
'''
Update parameter with the constraint of KL divergent
:return: None
'''
global GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
if GLOBAL_EP < EP_MAX:
UPDATE_EVENT.wait() # wait until get batch of data
self.update_old_pi() # copy pi to old pi
data = [QUEUE.get() for _ in range(QUEUE.qsize())] # collect data from all workers
data = np.vstack(data)
s, a, r = data[:, :S_DIM].astype(np.float32), \
data[:, S_DIM: S_DIM + A_DIM].astype(np.float32), \
data[:, -1:].astype(np.float32)
adv = self.cal_adv(s, r)
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
# update actor
if METHOD['name'] == 'kl_pen':
for _ in range(A_UPDATE_STEPS):
kl = self.a_train(s, a, adv)
if kl > 4 * METHOD['kl_target']: # this is in Google's paper
break
if kl < METHOD['kl_target'] / 1.5: # adaptive lambda, this is in OpenAI's paper
METHOD['lam'] /= 2
elif kl > METHOD['kl_target'] * 1.5:
METHOD['lam'] *= 2
# sometimes explode, this clipping is MorvanZhou's solution
METHOD['lam'] = np.clip(METHOD['lam'], 1e-4, 10)
else: # clipping method, find this is better (OpenAI's paper)
for _ in range(A_UPDATE_STEPS):
self.a_train(s, a, adv)
# update critic
for _ in range(C_UPDATE_STEPS):
self.c_train(r, s)
UPDATE_EVENT.clear() # updating finished
GLOBAL_UPDATE_COUNTER = 0 # reset counter
ROLLING_EVENT.set() # set roll-out available
def _build_anet(self, name, trainable):
'''
Build policy network
:param name: name
:param trainable: trainable flag
:return: policy network
'''
tfs = tl.layers.Input([None, S_DIM], tf.float32, name + '_state')
l1 = tl.layers.Dense(100, tf.nn.relu, name=name + '_l1')(tfs)
a = tl.layers.Dense(A_DIM, tf.nn.tanh, name=name + '_a')(l1)
mu = tl.layers.Lambda(lambda x: x * 2, name=name + '_lambda')(a)
sigma = tl.layers.Dense(A_DIM, tf.nn.softplus, name=name + '_sigma')(l1)
model = tl.models.Model(tfs, [mu, sigma], name)
if trainable:
model.train()
else:
model.eval()
return model
def choose_action(self, s):
'''
Choose action
:param s: state
:return: clipped act
'''
s = s[np.newaxis, :].astype(np.float32)
mu, sigma = self.actor(s)
pi = tfp.distributions.Normal(mu, sigma)
a = tf.squeeze(pi.sample(1), axis=0)[0] # choosing action
return np.clip(a, -2, 2)
def get_v(self, s):
'''
Compute value
:param s: state
:return: value
'''
s = s.astype(np.float32)
if s.ndim < 2: s = s[np.newaxis, :]
return self.critic(s)[0, 0]
def save_ckpt(self):
"""
save trained weights
:return: None
"""
if not os.path.exists('model'):
os.makedirs('model')
tl.files.save_weights_to_hdf5('model/dppo_actor.hdf5', self.actor)
tl.files.save_weights_to_hdf5('model/dppo_actor_old.hdf5', self.actor_old)
tl.files.save_weights_to_hdf5('model/dppo_critic.hdf5', self.critic)
def load_ckpt(self):
"""
load trained weights
:return: None
"""
tl.files.load_hdf5_to_weights_in_order('model/dppo_actor.hdf5', self.actor)
tl.files.load_hdf5_to_weights_in_order('model/dppo_actor_old.hdf5', self.actor_old)
tl.files.load_hdf5_to_weights_in_order('model/dppo_critic.hdf5', self.critic)
'''--------------------------------------------------------------'''
class Worker(object):
'''
Worker class for distributional running
'''
def __init__(self, wid):
self.wid = wid
self.env = gym.make(GAME).unwrapped
self.env.seed(wid * 100 + RANDOMSEED)
self.ppo = GLOBAL_PPO
def work(self):
'''
Define a worker
:return: None
'''
global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
s = self.env.reset()
ep_r = 0
buffer_s, buffer_a, buffer_r = [], [], []
t0 = time.time()
for t in range(EP_LEN):
if not ROLLING_EVENT.is_set(): # while global PPO is updating
ROLLING_EVENT.wait() # wait until PPO is updated
buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer, use new policy to collect data
a = self.ppo.choose_action(s)
s_, r, done, _ = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r + 8) / 8) # normalize reward, find to be useful
s = s_
ep_r += r
GLOBAL_UPDATE_COUNTER += 1 # count to minimum batch size, no need to wait other workers
if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
v_s_ = self.ppo.get_v(s_)
discounted_r = [] # compute discounted reward
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
QUEUE.put(np.hstack((bs, ba, br))) # put data in the queue
if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
ROLLING_EVENT.clear() # stop collecting data
UPDATE_EVENT.set() # globalPPO update
if GLOBAL_EP >= EP_MAX: # stop training
COORD.request_stop()
break
# record reward changes, plot later
if len(GLOBAL_RUNNING_R) == 0:
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1] * 0.9 + ep_r * 0.1)
GLOBAL_EP += 1
print(
'Episode: {}/{} | Worker: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
GLOBAL_EP, EP_MAX, self.wid, ep_r,
time.time() - t0
)
)
if __name__ == '__main__':
# reproducible
np.random.seed(RANDOMSEED)
tf.random.set_seed(RANDOMSEED)
GLOBAL_PPO = PPO()
if args.train: # train
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # not update now
ROLLING_EVENT.set() # start to roll out
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue() # workers putting data in this queue
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start() # training
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update, ))
threads[-1].start()
COORD.join(threads)
GLOBAL_PPO.save_ckpt()
# plot reward change and test
plt.title('DPPO')
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('Episode')
plt.ylabel('Moving reward')
plt.ylim(-2000, 0)
plt.show()
# test
GLOBAL_PPO.load_ckpt()
env = gym.make(GAME)
while True:
s = env.reset()
for t in range(EP_LEN):
env.render()
s, r, done, info = env.step(GLOBAL_PPO.choose_action(s))
if done:
break
| 34.901763 | 117 | 0.557015 | 1,734 | 13,856 | 4.291234 | 0.217993 | 0.018143 | 0.015052 | 0.006451 | 0.188953 | 0.158715 | 0.099987 | 0.077543 | 0.059266 | 0.0254 | 0 | 0.017999 | 0.318346 | 13,856 | 396 | 118 | 34.989899 | 0.769825 | 0.220843 | 0 | 0.123404 | 0 | 0.004255 | 0.047949 | 0.013473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055319 | false | 0 | 0.046809 | 0 | 0.131915 | 0.004255 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f03dc7ec0f51ea54c86dbaf33d260fb7f5e6aaa2 | 1,741 | py | Python | alg/dgp_survival/utils/metrics.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 171 | 2021-02-12T10:23:19.000Z | 2022-03-29T01:58:52.000Z | alg/dgp_survival/utils/metrics.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 4 | 2021-06-01T08:18:33.000Z | 2022-02-20T13:37:30.000Z | alg/dgp_survival/utils/metrics.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 93 | 2021-02-10T03:21:59.000Z | 2022-03-30T19:10:37.000Z | # Copyright (c) 2019, Ahmed M. Alaa
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from sksurv.metrics import cumulative_dynamic_auc, concordance_index_ipcw
from lifelines.utils import concordance_index
def evaluate_performance(T_train, c_train, T_test, c_test, prediction, time_horizon,
                         num_causes=2, cause_names=["Cause 1", "Cause 2"]):
    Harell_c_index = []
    UNO_c_index = []
    dynamic_auc = []

    for _ in range(num_causes):
        y_train = np.array([((c_train.loc[c_train.index[k]] == _ + 1), T_train.loc[T_train.index[k]]) for k in range(len(T_train))], dtype=[('Status', '?'), ('Survival_in_days', '<f8')])
        y_test = np.array([((c_test.loc[c_test.index[k]] == _ + 1), T_test.loc[T_test.index[k]]) for k in range(len(T_test))], dtype=[('Status', '?'), ('Survival_in_days', '<f8')])

        Harell_c_index.append(concordance_index(T_test, prediction[_ + 1], event_observed=(c_test == (_ + 1)) * 1))

        tau = max(y_train['Survival_in_days'])
        ci_tau = concordance_index_ipcw(y_train, y_test, 1 - prediction[_ + 1], tau=tau)[0]
        UNO_c_index.append(ci_tau)

        try:
            dynamic_auc_val = cumulative_dynamic_auc(y_train, y_test, 1 - prediction[_ + 1], times=[time_horizon])[0][0]
        except ValueError:
            print('*warning: exception while calculating dynamic_auc, dynamic_auc is not calculated*')
            dynamic_auc_val = "-"
        dynamic_auc.append(dynamic_auc_val)

        print("--- Cause: {} -> [C-index: {:0.4f} ] [Dynamic AUC-ROC: {} ]".format(
            cause_names[_],
            UNO_c_index[-1],
            '{:0.4f}'.format(dynamic_auc[-1]) if dynamic_auc[-1] != "-" else "-"))
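# --- Added usage sketch (hedged; not part of the original module). It assumes
# survival times and cause labels are pandas Series (the .loc/.index usage
# above requires that) and that `prediction` maps each cause id (1, 2, ...) to
# an array of predicted risks for the test set; the exact containers used by
# the surrounding project may differ.
#
#     import pandas as pd
#     T_train, c_train = pd.Series(train_times), pd.Series(train_causes)   # 0 = censored
#     T_test, c_test = pd.Series(test_times), pd.Series(test_causes)
#     prediction = {1: risks_cause_1, 2: risks_cause_2}
#     evaluate_performance(T_train, c_train, T_test, c_test, prediction,
#                          time_horizon=365, num_causes=2)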
| 45.815789 | 185 | 0.624354 | 249 | 1,741 | 4.048193 | 0.341365 | 0.119048 | 0.026786 | 0.015873 | 0.140873 | 0.140873 | 0.087302 | 0.041667 | 0 | 0 | 0 | 0.021199 | 0.214245 | 1,741 | 37 | 186 | 47.054054 | 0.715643 | 0.052269 | 0 | 0 | 0 | 0.04 | 0.140862 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.12 | 0 | 0.16 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f03e47c7384f0616cbd49115a8a9c1e7c43d68dc | 2,291 | py | Python | sahara/plugins/mapr/util/maprfs_helper.py | esikachev/sahara-backup | a470fa6aec5f1009d41d82fabc1e5d64874aa213 | [
"Apache-2.0"
] | null | null | null | sahara/plugins/mapr/util/maprfs_helper.py | esikachev/sahara-backup | a470fa6aec5f1009d41d82fabc1e5d64874aa213 | [
"Apache-2.0"
] | null | null | null | sahara/plugins/mapr/util/maprfs_helper.py | esikachev/sahara-backup | a470fa6aec5f1009d41d82fabc1e5d64874aa213 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
import six
import sahara.plugins.mapr.util.general as g
MV_TO_MAPRFS_CMD = ('sudo -u %(user)s'
                    ' hadoop fs -copyFromLocal %(source)s %(target)s'
                    ' && sudo rm -f %(source)s')
MKDIR_CMD_MAPR4 = 'sudo -u %(user)s hadoop fs -mkdir -p %(path)s'
MKDIR_CMD_MAPR3 = 'sudo -u %(user)s hadoop fs -mkdir %(path)s'


def put_file_to_maprfs(r, content, file_name, path, hdfs_user):
    tmp_file_name = '/tmp/%s.%s' % (file_name, six.text_type(uuid.uuid4()))
    r.write_file_to(tmp_file_name, content)
    target = os.path.join(path, file_name)
    move_from_local(r, tmp_file_name, target, hdfs_user)


def move_from_local(r, source, target, hdfs_user):
    args = {'user': hdfs_user, 'source': source, 'target': target}
    r.execute_command(MV_TO_MAPRFS_CMD % args)


def create_maprfs4_dir(remote, dir_name, hdfs_user):
    remote.execute_command(MKDIR_CMD_MAPR4 % {'user': hdfs_user,
                                              'path': dir_name})


def create_maprfs3_dir(remote, dir_name, hdfs_user):
    remote.execute_command(MKDIR_CMD_MAPR3 % {'user': hdfs_user,
                                              'path': dir_name})


def mkdir(remote, path, recursive=True, run_as=None):
    command = 'hadoop fs -mkdir %(recursive)s %(path)s'
    args = {'recursive': '-p' if recursive else '', 'path': path}
    remote.execute_command(g._run_as(run_as, command % args))


def chmod(remote, path, mode, recursive=True, run_as=None):
    command = 'hadoop fs -chmod %(recursive)s %(mode)s %(path)s'
    args = {'recursive': '-R' if recursive else '', 'path': path, 'mode': mode}
    remote.execute_command(g._run_as(run_as, command % args))
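# --- Added illustrative note (hedged; not part of the original module). With
# recursive=True and run_as='mapr', mkdir() above would end up running roughly
#     sudo -u mapr hadoop fs -mkdir -p /some/dir
# on the remote node, assuming g._run_as simply prefixes the command with
# 'sudo -u <user>' when a user is given.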
| 36.365079 | 79 | 0.672196 | 342 | 2,291 | 4.321637 | 0.359649 | 0.043302 | 0.054127 | 0.020298 | 0.312585 | 0.255751 | 0.243572 | 0.177267 | 0.127199 | 0.127199 | 0 | 0.008228 | 0.204278 | 2,291 | 62 | 80 | 36.951613 | 0.802523 | 0.244871 | 0 | 0.129032 | 0 | 0 | 0.19697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.193548 | false | 0 | 0.129032 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f03ebc54126f864f341ca74e6bf48ceec13120e0 | 881 | py | Python | openpmd_viewer/openpmd_timeseries/numba_wrapper.py | pordyna/openPMD-viewer | f7b792be58d5dca1af5b36d9875b3d7768a3617d | [
"BSD-3-Clause-LBNL"
] | 51 | 2015-10-08T21:07:28.000Z | 2022-01-31T06:16:32.000Z | openpmd_viewer/openpmd_timeseries/numba_wrapper.py | pordyna/openPMD-viewer | f7b792be58d5dca1af5b36d9875b3d7768a3617d | [
"BSD-3-Clause-LBNL"
] | 239 | 2015-10-09T18:11:00.000Z | 2022-03-31T22:45:14.000Z | openpmd_viewer/openpmd_timeseries/numba_wrapper.py | pordyna/openPMD-viewer | f7b792be58d5dca1af5b36d9875b3d7768a3617d | [
"BSD-3-Clause-LBNL"
] | 40 | 2015-10-08T17:11:36.000Z | 2022-03-30T21:21:09.000Z | """
This file is part of the openPMD-viewer.
It defines a wrapper around numba.
Copyright 2019, openPMD-viewer contributors
Author: Remi Lehe
License: 3-Clause-BSD-LBNL
"""
import warnings
try:
    # Import jit decorator from numba
    import numba
    numba_installed = True
    jit = numba.njit(cache=True)

except ImportError:
    numba_installed = False

    # Create dummy decorator: warns about installing numba when calling
    # the decorated function.
    def jit(f):
        def decorated_f(*args, **kwargs):
            warnings.warn(
                '\nOne of the functions called by openPMD-viewer ' +
                '(%s)\n' % f.__name__ +
                'could have been faster if `numba` had been installed.\n' +
                'Please consider installing `numba` (e.g. `pip install numba`)')
            return f(*args, **kwargs)
        return decorated_f
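# Added usage example (not part of openPMD-viewer): any function decorated with
# `jit` is compiled by numba when it is available and falls back to plain
# Python (plus a warning) otherwise. The function below is a made-up example.
@jit
def _example_sum_of_squares(values):
    """Return the sum of squares of a 1D sequence (illustrative only)."""
    total = 0.
    for v in values:
        total += v * v
    return total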
| 28.419355 | 80 | 0.632236 | 110 | 881 | 4.990909 | 0.654545 | 0.071038 | 0.040073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007813 | 0.273553 | 881 | 30 | 81 | 29.366667 | 0.85 | 0.328036 | 0 | 0 | 0 | 0 | 0.292096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f03f5fb734ba2df7756a1058e51dd379f93133d5 | 1,676 | py | Python | tests/test_config.py | up2cat/flask_extras | 7888da0ca2793e49a803a256b405fa43e6e64ae2 | [
"MIT"
] | null | null | null | tests/test_config.py | up2cat/flask_extras | 7888da0ca2793e49a803a256b405fa43e6e64ae2 | [
"MIT"
] | null | null | null | tests/test_config.py | up2cat/flask_extras | 7888da0ca2793e49a803a256b405fa43e6e64ae2 | [
"MIT"
] | null | null | null | """Test configuration utilities."""
from flask import Flask
from flask_extras.filters import config
class TestGetFuncs:
    """All tests for get funcs function."""

    def test_get_module_funcs(self, client):
        """Test the return value."""
        assert isinstance(config._get_funcs(config), dict)

    def test_get_module_funcs_notempty(self, client):
        """Test the return value functions length."""
        assert len(list(config._get_funcs(config).items())) > 0


class TestInjectFilters:
    """All tests for inject filters function."""

    def test_inject_filters_inst(self, client):
        """Test the return value."""
        app, test = client
        assert isinstance(config._inject_filters(app, {}), Flask)

    def test_inject_filters_count(self, client):
        """Test the return value."""
        app, test = client
        old = len(app.jinja_env.filters)
        config._inject_filters(app, {'foo': lambda x: x})
        new = len(app.jinja_env.filters)
        assert new > old
        assert 'foo' in app.jinja_env.filters


class TestConfigFlaskFilters:
    """All tests for config flask filters function."""

    def test_config_filters_inst(self, client):
        """Test the return value."""
        app, test = client
        assert isinstance(config.config_flask_filters(app), Flask)

    def test_config_filters_count(self, client):
        """Test the return value."""
        app, test = client
        del app.jinja_env.filters
        setattr(app.jinja_env, 'filters', dict())
        old = len(app.jinja_env.filters)
        config.config_flask_filters(app)
        new = len(app.jinja_env.filters)
        assert new > old
| 30.472727 | 66 | 0.652148 | 211 | 1,676 | 4.995261 | 0.236967 | 0.053131 | 0.073055 | 0.119545 | 0.531309 | 0.419355 | 0.366224 | 0.309298 | 0.309298 | 0.240987 | 0 | 0.000781 | 0.23568 | 1,676 | 54 | 67 | 31.037037 | 0.822014 | 0.180191 | 0 | 0.333333 | 0 | 0 | 0.009826 | 0 | 0 | 0 | 0 | 0 | 0.233333 | 1 | 0.2 | false | 0 | 0.066667 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f03f67838f2ecf88a5b04b3edcab80607c1d0fb2 | 6,601 | py | Python | analyze_gwas_results.py | bvilhjal/mixmogam | 766b889d4f5e97f4c9a960e3a007b125137ba796 | [
"MIT"
] | 15 | 2015-08-02T05:39:06.000Z | 2021-12-22T12:13:21.000Z | analyze_gwas_results.py | bvilhjal/mixmogam | 766b889d4f5e97f4c9a960e3a007b125137ba796 | [
"MIT"
] | null | null | null | analyze_gwas_results.py | bvilhjal/mixmogam | 766b889d4f5e97f4c9a960e3a007b125137ba796 | [
"MIT"
] | 8 | 2017-02-16T07:35:59.000Z | 2022-02-11T19:56:19.000Z | """
A container for functions which aim to analyze or process gwas results, for some aim.
Author: Bjarni J. Vilhjalmsson
Email: bjarni.vilhjalmsson@gmail.com
"""
import scipy as sp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import warnings
import itertools as it
import random
import phenotypeData as pd
import math
def calc_median(scores, exp_median=0.5):
s = sp.copy(scores)
s.sort()
median = s[len(s) / 2]
del s
return (exp_median - median)
def _estAreaBetweenCurves_(quantiles, expQuantiles):
area = 0
for i in range(0, len(quantiles) - 1):
area += (expQuantiles[i + 1] - expQuantiles[i]) * (abs(quantiles[i + 1] - expQuantiles[i + 1] + quantiles[i] - expQuantiles[i])) / 2.0
return area
def calc_ks_stats(scores, exp_scores=None):
from scipy import stats
if exp_scores:
(D, p_val) = stats.ks_2samp(scores, exp_scores)
else:
(D, p_val) = stats.kstest(scores, stats.uniform.cdf)
return {'D':D, 'p_val':p_val}
def _getExpectedPvalueQuantiles_(numQuantiles):
quantiles = []
for i in range(numQuantiles):
quantiles.append(float(i) + 0.5 / (numQuantiles + 1))
return quantiles
def get_log_quantiles(scores, num_dots=1000, max_val=5):
"""
Uses scipy
"""
scores = sp.copy(sp.array(scores))
scores.sort()
indices = sp.array(10 ** ((-sp.arange(1, num_dots + 1, dtype='single') / (num_dots + 1)) * max_val) \
* len(scores), dtype='int')
return -sp.log10(scores[indices])
def simple_log_qqplot(quantiles_list, png_file=None, pdf_file=None, quantile_labels=None, line_colors=None,
max_val=5, title=None, text=None, plot_label=None, ax=None, **kwargs):
storeFig = False
if ax is None:
f = plt.figure(figsize=(5.4, 5))
ax = f.add_axes([0.1, 0.09, 0.88, 0.86])
storeFig = True
ax.plot([0, max_val], [0, max_val], 'k--', alpha=0.5, linewidth=2.0)
num_dots = len(quantiles_list[0])
exp_quantiles = sp.arange(1, num_dots + 1, dtype='single') / (num_dots + 1) * max_val
for i, quantiles in enumerate(quantiles_list):
if line_colors:
c = line_colors[i]
else:
c = 'b'
if quantile_labels:
ax.plot(exp_quantiles, quantiles, label=quantile_labels[i], c=c, alpha=0.5, linewidth=2.2)
else:
ax.plot(exp_quantiles, quantiles, c=c, alpha=0.5, linewidth=2.2)
ax.set_ylabel("Observed $-log_{10}(p$-value$)$")
ax.set_xlabel("Expected $-log_{10}(p$-value$)$")
if title:
ax.title(title)
max_x = max_val
max_y = max(map(max, quantiles_list))
ax.axis([-0.025 * max_x, 1.025 * max_x, -0.025 * max_y, 1.025 * max_y])
if quantile_labels:
fontProp = matplotlib.font_manager.FontProperties(size=10)
ax.legend(loc=2, numpoints=2, markerscale=1, prop=fontProp)#, handlelen=0.05, pad=0.018)
y_min, y_max = plt.ylim()
if text:
f.text(0.05 * max_val, y_max * 0.9, text)
if plot_label:
f.text(-0.138 * max_val, y_max * 1.01, plot_label, fontsize=14)
if storeFig == False:
return
if png_file != None:
f.savefig(png_file)
if pdf_file != None:
f.savefig(pdf_file, format='pdf')
def get_quantiles(scores, num_dots=1000):
"""
Uses scipy
"""
scores = sp.copy(sp.array(scores))
scores.sort()
indices = [int(len(scores) * i / (num_dots + 2)) for i in range(1, num_dots + 1)]
return scores[indices]
def simple_qqplot(quantiles_list, png_file=None, pdf_file=None, quantile_labels=None, line_colors=None,
title=None, text=None, ax=None, plot_label=None, **kwargs):
storeFig = False
if ax is None:
f = plt.figure(figsize=(5.4, 5))
ax = f.add_axes([0.11, 0.09, 0.87, 0.86])
storeFig = True
ax.plot([0, 1], [0, 1], 'k--', alpha=0.5, linewidth=2.0)
num_dots = len(quantiles_list[0])
exp_quantiles = sp.arange(1, num_dots + 1, dtype='single') / (num_dots + 1)
for i, quantiles in enumerate(quantiles_list):
if line_colors:
c = line_colors[i]
else:
c = 'b'
if quantile_labels:
ax.plot(exp_quantiles, quantiles, label=quantile_labels[i], c=c, alpha=0.5, linewidth=2.2)
else:
ax.plot(exp_quantiles, quantiles, c=c, alpha=0.5, linewidth=2.2)
ax.set_ylabel("Observed $p$-value")
ax.set_xlabel("Expected $p$-value")
if title:
ax.title(title)
ax.axis([-0.025, 1.025, -0.025, 1.025])
if quantile_labels:
fontProp = matplotlib.font_manager.FontProperties(size=10)
ax.legend(loc=2, numpoints=2, markerscale=1, prop=fontProp)#, handlelen=0.05, pad=0.018)
if text:
f.text(0.05, 0.9, text)
if plot_label:
f.text(-0.151, 1.04, plot_label, fontsize=14)
if storeFig == False:
return
if png_file != None:
f.savefig(png_file)
if pdf_file != None:
f.savefig(pdf_file, format='pdf')
def plot_simple_qqplots(png_file_prefix, results, result_labels=None, line_colors=None,
num_dots=1000, title=None, max_neg_log_val=5):
"""
Plots both log QQ-plots and normal QQ plots.
"""
qs = []
log_qs = []
for res in results:
pvals = res.snp_results['scores'][:]
qs.append(get_quantiles(pvals, num_dots))
log_qs.append(get_log_quantiles(pvals, num_dots, max_neg_log_val))
simple_qqplot(qs, png_file_prefix + '_qq.png', quantile_labels=result_labels,
line_colors=line_colors, num_dots=num_dots, title=title)
simple_log_qqplot(log_qs, png_file_prefix + '_log_qq.png', quantile_labels=result_labels,
line_colors=line_colors, num_dots=num_dots, title=title, max_val=max_neg_log_val)
def plot_simple_qqplots_pvals(png_file_prefix, pvals_list, result_labels=None, line_colors=None,
num_dots=1000, title=None, max_neg_log_val=5):
"""
Plots both log QQ-plots and normal QQ plots.
"""
qs = []
log_qs = []
for pvals in pvals_list:
qs.append(get_quantiles(pvals, num_dots))
log_qs.append(get_log_quantiles(pvals, num_dots, max_neg_log_val))
simple_qqplot(qs, png_file_prefix + '_qq.png', quantile_labels=result_labels,
line_colors=line_colors, num_dots=num_dots, title=title)
simple_log_qqplot(log_qs, png_file_prefix + '_log_qq.png', quantile_labels=result_labels,
line_colors=line_colors, num_dots=num_dots, title=title, max_val=max_neg_log_val)
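# Added usage example (hedged; the file prefix and p-values below are made up).
# Under the null hypothesis p-values are uniform, so the resulting QQ curves
# should follow the diagonal:
def _example_null_qqplot(png_file_prefix='null_example', n_tests=10000):
    pvals = [random.random() for _ in range(n_tests)]
    plot_simple_qqplots_pvals(png_file_prefix, [pvals], result_labels=['null'], num_dots=1000)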
if __name__ == '__main__':
pass
| 33.678571 | 142 | 0.638237 | 1,006 | 6,601 | 3.974155 | 0.178926 | 0.045523 | 0.014007 | 0.024012 | 0.658829 | 0.645823 | 0.626313 | 0.602801 | 0.602801 | 0.591296 | 0 | 0.039193 | 0.226935 | 6,601 | 195 | 143 | 33.851282 | 0.744268 | 0.049083 | 0 | 0.531034 | 0 | 0 | 0.030928 | 0.007088 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0.006897 | 0.062069 | 0 | 0.186207 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0428ade1a41678c6ecfd0538b650891b1662a36 | 1,073 | py | Python | Step1_IrisScatterPlot.py | IrvanDimetrio/Iris-Classification-NearestNeighboard | cfb20e4a550bb0045a465c6d7444e436ea08475b | [
"MIT"
] | null | null | null | Step1_IrisScatterPlot.py | IrvanDimetrio/Iris-Classification-NearestNeighboard | cfb20e4a550bb0045a465c6d7444e436ea08475b | [
"MIT"
] | null | null | null | Step1_IrisScatterPlot.py | IrvanDimetrio/Iris-Classification-NearestNeighboard | cfb20e4a550bb0045a465c6d7444e436ea08475b | [
"MIT"
] | null | null | null | """
@author : Muhamad Irvan Dimetrio
NIM : 18360018
Teknik Informatika
Institut Sains dan Teknologi Nasional
"""
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
# Show a scatter plot comparing the sepal measurements
iris = pd.read_csv('iris.csv', names=['Sepal length (cm)', 'Sepal width (cm)', 'Petal length (cm)', 'Petal width (cm)',
                                      'label'], header=0, sep=",")
sns.scatterplot(x='Sepal length (cm)', y='Sepal width (cm)', hue='label', data=iris).set_title('Perbandingan Sepal')
plt.figure(1)  # n is a different number for each figure window
plt.show()

# Show a scatter plot comparing the petal measurements
iris2 = pd.read_csv('iris.csv', names=['Sepal length (cm)', 'Sepal width (cm)', 'Petal length (cm)', 'Petal width (cm)',
                                       'label'], header=0, sep=",")
sns.scatterplot(x='Petal length (cm)', y='Petal width (cm)', hue='label', data=iris2).set_title('Perbandingan Petal')
plt.figure(1)  # n is a different number for each figure window
plt.show()
| 41.269231 | 121 | 0.645853 | 144 | 1,073 | 4.784722 | 0.402778 | 0.069666 | 0.056604 | 0.095791 | 0.519594 | 0.464441 | 0.464441 | 0.464441 | 0.464441 | 0.464441 | 0 | 0.01649 | 0.20876 | 1,073 | 25 | 122 | 42.92 | 0.795053 | 0.273998 | 0 | 0.461538 | 0 | 0 | 0.366083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0433a338c90b00da6ae91703f1fc434d0e21add | 7,183 | py | Python | doc/script_for_figures/generate_peeler_sequence_example.py | rdarie/tridesclous | 178c0a67d7b3ac88be8e4383001396c1e0f976c2 | [
"MIT"
] | null | null | null | doc/script_for_figures/generate_peeler_sequence_example.py | rdarie/tridesclous | 178c0a67d7b3ac88be8e4383001396c1e0f976c2 | [
"MIT"
] | null | null | null | doc/script_for_figures/generate_peeler_sequence_example.py | rdarie/tridesclous | 178c0a67d7b3ac88be8e4383001396c1e0f976c2 | [
"MIT"
] | null | null | null | """
Find a good example of collision in striatum rat dataset.
"""
import os,shutil
from tridesclous import DataIO, CatalogueConstructor, Peeler
from tridesclous import download_dataset
from tridesclous.cataloguetools import apply_all_catalogue_steps
from tridesclous.peeler import make_prediction_signals
from tridesclous.tools import int32_to_rgba
from tridesclous.matplotlibplot import plot_waveforms_with_geometry, plot_features_scatter_2d
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
dirname = 'tdc_olfactory_bulb'
channels = [5,6,7,8]
def make_catalogue():
if os.path.exists(dirname):
shutil.rmtree(dirname)
dataio = DataIO(dirname=dirname)
localdir, filenames, params = download_dataset(name='olfactory_bulb')
dataio.set_data_source(type='RawData', filenames=filenames, **params)
dataio.add_one_channel_group(channels = channels)
cc = CatalogueConstructor(dataio=dataio)
fullchain_kargs = {
'duration' : 300.,
'preprocessor' : {
'highpass_freq' : 300.,
'chunksize' : 1024,
'lostfront_chunksize' : 100,
},
'peak_detector' : {
'peak_sign' : '-',
'relative_threshold' : 7.,
'peak_span' : 0.0005,
#~ 'peak_span' : 0.000,
},
'extract_waveforms' : {
'n_left' : -25,
'n_right' : 40,
'nb_max' : 10000,
},
'clean_waveforms' : {
'alien_value_threshold' : 60.,
},
'noise_snippet' : {
'nb_snippet' : 300,
},
}
apply_all_catalogue_steps(cc,
fullchain_kargs,
'global_pca', {'n_components': 20},
'kmeans', {'n_clusters': 5},
verbose=True)
cc.order_clusters(by='waveforms_rms')
cc.move_cluster_to_trash(4)
cc.make_catalogue_for_peeler()
def apply_peeler():
dataio = DataIO(dirname=dirname)
catalogue = dataio.load_catalogue(chan_grp=0)
peeler = Peeler(dataio)
peeler.change_params(catalogue=catalogue,chunksize=1024)
peeler.run(progressbar=True)
def make_animation():
"""
Good example between 1.272 1.302
because collision
"""
dataio = DataIO(dirname=dirname)
catalogue = dataio.load_catalogue(chan_grp=0)
clusters = catalogue['clusters']
sr = dataio.sample_rate
# also a good one a 11.356 - 11.366
t1, t2 = 1.272, 1.295
i1, i2 = int(t1*sr), int(t2*sr)
spikes = dataio.get_spikes()
spike_times = spikes['index'] / sr
keep = (spike_times>=t1) & (spike_times<=t2)
spikes = spikes[keep]
print(spikes)
sigs = dataio.get_signals_chunk(i_start=i1, i_stop=i2,
signal_type='processed')
sigs = sigs.copy()
times = np.arange(sigs.shape[0])/dataio.sample_rate
def plot_spread_sigs(sigs, ax, ratioY = 0.02, **kargs):
#spread signals
sigs2 = sigs * ratioY
sigs2 += np.arange(0, len(channels))[np.newaxis, :]
ax.plot(times, sigs2, **kargs)
ax.set_ylim(-0.5, len(channels)-.5)
ax.set_xticks([])
ax.set_yticks([])
residuals = sigs.copy()
local_spikes = spikes.copy()
local_spikes['index'] -= i1
#~ fig, ax = plt.subplots()
#~ plot_spread_sigs(sigs, ax, color='k')
num_fig = 0
fig_pred, ax_predictions = plt.subplots()
ax_predictions.set_title('All detected templates from catalogue')
fig, ax = plt.subplots()
plot_spread_sigs(residuals, ax, color='k', lw=2)
ax.set_title('Initial filtered signals with spikes')
fig.savefig('../img/peeler_animation_sigs.png')
fig.savefig('png/fig{}.png'.format(num_fig))
num_fig += 1
for i in range(local_spikes.size):
label = local_spikes['cluster_label'][i]
color = clusters[clusters['cluster_label']==label]['color'][0]
color = int32_to_rgba(color, mode='float')
pred = make_prediction_signals(local_spikes[i:i+1], 'float32', (i2-i1, len(channels)), catalogue)
fig, ax = plt.subplots()
plot_spread_sigs(residuals, ax, color='k', lw=2)
plot_spread_sigs(pred, ax, color=color, lw=1.5)
ax.set_title('Dected spike label {}'.format(label))
fig.savefig('png/fig{}.png'.format(num_fig))
num_fig += 1
residuals -= pred
plot_spread_sigs(pred, ax_predictions, color=color, lw=1.5)
fig, ax = plt.subplots()
plot_spread_sigs(residuals, ax, color='k', lw=2)
plot_spread_sigs(pred, ax, color=color, lw=1, ls='--')
ax.set_title('New residual after substraction')
fig.savefig('png/fig{}.png'.format(num_fig))
num_fig += 1
fig_pred.savefig('png/fig{}.png'.format(num_fig))
num_fig += 1
#~ plt.show()
def make_catalogue_figure():
dataio = DataIO(dirname=dirname)
catalogue = dataio.load_catalogue(chan_grp=0)
clusters = catalogue['clusters']
geometry = dataio.get_geometry(chan_grp=0)
fig, ax = plt.subplots()
ax.set_title('Catalogue have 4 templates')
for i in range(clusters.size):
color = clusters[i]['color']
color = int32_to_rgba(color, mode='float')
waveforms = catalogue['centers0' ][i:i+1]
plot_waveforms_with_geometry(waveforms, channels, geometry,
ax=ax, ratioY=3, deltaX= 50, margin=50, color=color,
linewidth=3, alpha=1, show_amplitude=True, ratio_mad=8)
fig.savefig('../img/peeler_templates_for_animation.png')
#~ plt.show()
def make_pca_collision_figure():
dataio = DataIO(dirname=dirname)
cc = CatalogueConstructor(dataio=dataio)
clusters = cc.clusters
#~ plot_features_scatter_2d(cc, labels=None, nb_max=500)
#~ plot_features_scatter_2d
fig, ax = plt.subplots()
ax.set_title('Collision problem')
ax.set_aspect('equal')
features = cc.some_features
labels = cc.all_peaks[cc.some_peaks_index]['cluster_label']
for k in [0,1,2,3]:
color = clusters[clusters['cluster_label']==k]['color'][0]
color = int32_to_rgba(color, mode='float')
keep = labels==k
feat = features[keep]
print(np.unique(labels))
ax.plot(feat[:,0], feat[:,1], ls='None', marker='o', color=color, markersize=3, alpha=.5)
ax.set_xlim(-40, 40)
ax.set_ylim(-40, 40)
ax.set_xlabel('pca0')
ax.set_ylabel('pca1')
ax.annotate('Collision', xy=(17.6, -16.4), xytext=(30, -30),
arrowprops=dict(facecolor='black', shrink=0.05))
#~
fig.savefig('../img/collision_proble_pca.png')
#~ plt.show()
if __name__ == '__main__':
#~ make_catalogue()
#~ apply_peeler()
make_animation()
make_catalogue_figure()
make_pca_collision_figure()
#convert -delay 250 -loop 0 png/*.png ../img/peeler_animation.gif
| 26.025362 | 105 | 0.602951 | 884 | 7,183 | 4.691176 | 0.291855 | 0.015674 | 0.027007 | 0.023149 | 0.233422 | 0.193634 | 0.193634 | 0.166626 | 0.166626 | 0.149265 | 0 | 0.034371 | 0.26688 | 7,183 | 275 | 106 | 26.12 | 0.753133 | 0.064458 | 0 | 0.192308 | 0 | 0 | 0.119437 | 0.018732 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.00641 | 0.064103 | 0 | 0.102564 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0442399ac20264cca7a4d50840c58d195062c81 | 3,742 | py | Python | lib/EGG_research/egg/zoo/language_bottleneck/guess_number/features.py | Slowika/GameBias-EmeCom2020 | 5b94c47559f8202bca99c26fc1bcb078dd0509a6 | [
"MIT"
] | 15 | 2020-09-23T08:24:33.000Z | 2022-02-09T14:32:49.000Z | lib/EGG_research/egg/zoo/language_bottleneck/guess_number/features.py | Slowika/GameBias-EmeCom2020 | 5b94c47559f8202bca99c26fc1bcb078dd0509a6 | [
"MIT"
] | null | null | null | lib/EGG_research/egg/zoo/language_bottleneck/guess_number/features.py | Slowika/GameBias-EmeCom2020 | 5b94c47559f8202bca99c26fc1bcb078dd0509a6 | [
"MIT"
] | 5 | 2021-03-05T16:54:45.000Z | 2022-03-31T13:33:58.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.utils.data as data
import torch.nn.parallel
import torch
import numpy as np
def sender_receiver_examples(examples, n_bits, bits_s, bits_r):
    sender_examples = np.copy(examples)
    sender_examples[:, bits_s:] = 0
    sender_examples = torch.from_numpy(sender_examples)

    receiver_examples = np.copy(examples)
    receiver_examples[:, :n_bits - bits_r] = 0
    receiver_examples = torch.from_numpy(receiver_examples)

    examples = torch.from_numpy(examples)

    return sender_examples, examples, receiver_examples


class _OneHotIterator:
    """
    >>> it = _OneHotIterator(n_bits=8, bits_s=4, bits_r=4, n_batches_per_epoch=1, batch_size=128)
    >>> batch = list(it)[0]
    >>> s, l, r = batch
    >>> ((s + r) == l).all().item()
    1
    >>> it = _OneHotIterator(n_bits=8, bits_s=5, bits_r=5, n_batches_per_epoch=1, batch_size=128)
    >>> batch = list(it)[0]
    >>> ((s + r).clamp(0, 1) == l).all().item()
    1
    >>> it = _OneHotIterator(n_bits=8, bits_s=8, bits_r=8, n_batches_per_epoch=1, batch_size=128)
    >>> batch = list(it)[0]
    >>> s, l, r = batch
    >>> (s == r).all().item()
    1
    >>> it = _OneHotIterator(n_bits=8, bits_s=8, bits_r=1, n_batches_per_epoch=1, batch_size=128)
    >>> batch = list(it)[0]
    >>> s, l, r = batch
    >>> (r[:, -1] > 0).any().item()
    1
    >>> (r[:, :-1] == 0).all().item()
    1
    """
    def __init__(self, n_bits, bits_s, bits_r, n_batches_per_epoch, batch_size, seed=None):
        self.n_batches_per_epoch = n_batches_per_epoch
        self.n_bits = n_bits
        self.bits_s = bits_s
        self.bits_r = bits_r
        self.batch_size = batch_size
        self.batches_generated = 0
        self.random_state = np.random.RandomState(seed)

    def __iter__(self):
        return self

    def __next__(self):
        if self.batches_generated >= self.n_batches_per_epoch:
            raise StopIteration()

        examples = self.random_state.randint(low=0, high=2, size=(self.batch_size, self.n_bits))
        sender_examples, examples, receiver_examples = \
            sender_receiver_examples(examples, self.n_bits, self.bits_s, self.bits_r)

        self.batches_generated += 1
        return sender_examples, examples, receiver_examples


class OneHotLoader(torch.utils.data.DataLoader):
    def __init__(self, n_bits, bits_s, bits_r, batches_per_epoch, batch_size, seed=None):
        self.seed = seed
        self.batches_per_epoch = batches_per_epoch
        self.n_bits = n_bits
        self.bits_r = bits_r
        self.bits_s = bits_s
        self.batch_size = batch_size

    def __iter__(self):
        if self.seed is None:
            seed = np.random.randint(0, 2 ** 32)
        else:
            seed = self.seed
        return _OneHotIterator(n_bits=self.n_bits, bits_s=self.bits_s, bits_r=self.bits_r,
                               n_batches_per_epoch=self.batches_per_epoch,
                               batch_size=self.batch_size, seed=seed)


class UniformLoader(torch.utils.data.DataLoader):
    def __init__(self, n_bits, bits_s, bits_r):
        batch_size = 2**(n_bits)
        numbers = np.array(range(batch_size))
        examples = np.zeros((batch_size, n_bits), dtype=np.int)
        for i in range(n_bits):
            examples[:, i] = np.bitwise_and(numbers, 2 ** i) > 0

        sender_examples, examples, receiver_examples = \
            sender_receiver_examples(examples, n_bits, bits_s, bits_r)
        self.batch = sender_examples, examples, receiver_examples

    def __iter__(self):
        return iter([self.batch])
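# Added usage example (not part of the original module): each batch is a
# (sender_input, labels, receiver_input) triple, where the sender sees the
# first `bits_s` bits and the receiver the last `bits_r` bits of every vector.
if __name__ == '__main__':
    loader = OneHotLoader(n_bits=8, bits_s=4, bits_r=4, batches_per_epoch=2, batch_size=16, seed=7)
    for sender_input, labels, receiver_input in loader:
        # All three tensors have shape (batch_size, n_bits).
        print(sender_input.shape, labels.shape, receiver_input.shape)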
| 32.53913 | 97 | 0.639498 | 544 | 3,742 | 4.080882 | 0.172794 | 0.047297 | 0.087838 | 0.064865 | 0.526577 | 0.439189 | 0.407658 | 0.351351 | 0.318919 | 0.259459 | 0 | 0.019628 | 0.237573 | 3,742 | 114 | 98 | 32.824561 | 0.7585 | 0.234901 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0.033333 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0449b619544ee3003c1458e45ec2eb5e78f6285 | 5,259 | py | Python | app.py | steven-mi/semantic-segmentation-dataset-viewer | 8ca778eb7b92c5285cf5d22ebae7e32a13bc7ade | [
"Apache-2.0"
] | 1 | 2022-03-27T12:40:57.000Z | 2022-03-27T12:40:57.000Z | app.py | steven-mi/semantic-segmentation-dataset-viewer | 8ca778eb7b92c5285cf5d22ebae7e32a13bc7ade | [
"Apache-2.0"
] | null | null | null | app.py | steven-mi/semantic-segmentation-dataset-viewer | 8ca778eb7b92c5285cf5d22ebae7e32a13bc7ade | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image
BASE_PATH = os.getcwd()
DEFAULT_CLASS_COLOR_PATH = os.path.join(BASE_PATH, "data", "class_specification.csv")
DEFAULT_IMAGE_PATH = os.path.join(BASE_PATH, "data", "images")
DEFAULT_LABEL_PATH = os.path.join(BASE_PATH, "data", "labels")
def rgbstr_to_rgb(rgbstr):
rgb = [int(color) for color in rgbstr.split(",")]
return tuple(rgb)
def rgb_to_rgbstr(r, g, b):
return "{},{},{}".format(r, g, b)
def rgb2hex(rgbstr):
rgb = [int(color) for color in rgbstr.split(",")]
return "#{:02x}{:02x}{:02x}".format(*rgb)
def get_all_images(path):
paths = []
for (dirpath, _, filenames) in os.walk(path, topdown=False):
for filename in filenames:
if filename.endswith(".png") or filename.endswith(".jpg") or filename.endswith(".jpeg"):
temp = os.path.join(dirpath, filename)
paths.append(temp)
paths.sort()
return paths
def create_data_sidebar():
# Reading class color CSV files
st.sidebar.header("Data")
class_color_path = st.sidebar.text_input('Enter path to class color specification:', value=DEFAULT_CLASS_COLOR_PATH)
try:
class_color_df = pd.read_csv(class_color_path)
except FileNotFoundError as error_msg:
st.sidebar.error(error_msg)
try:
# Check if needed columns are available
class_colors = class_color_df["ClassColor"]
class_names = class_color_df["ClassName"]
class_color_dict = {}
for class_color, class_name in zip(class_colors, class_names):
class_color_dict[class_color] = class_name
except KeyError:
st.sidebar.error("ClassColor or ClassName is not available in CSV File")
# Reading SemSeg images and labels
image_path = st.sidebar.text_input('Enter path to images:', value=DEFAULT_IMAGE_PATH)
label_path = st.sidebar.text_input('Enter path to labels:', value=DEFAULT_LABEL_PATH)
# Get all paths
image_paths = get_all_images(image_path)
label_paths = get_all_images(label_path)
st.header("There are {} images and {} labels".format(
len(image_paths), len(label_paths)))
if len(image_paths) != len(label_paths):
st.sidebar.error("There should be the same amount of images as labels".format(
len(image_paths), len(label_paths)))
return class_color_dict, (image_paths, label_paths)
def create_class_color_checkboxes(class_color_dict):
# Read CSV file
st.sidebar.header("Classes")
slider = st.sidebar.slider(label="Transparency", min_value=0.0, value=0.4, max_value=1.0, step=0.1)
# put all needed information in a list
class_checkboxes = []
st.sidebar.text('\nWhich classes do you want to see?')
for class_color in class_color_dict.keys():
# create checkbox with color
color = st.sidebar.markdown("""
<svg width="80" height="20">
<rect width="80" height="20" style="fill:{};stroke-width:3;stroke:rgb(0,0,0)" />
</svg>""".format(rgb2hex(class_color)), unsafe_allow_html=True)
box = st.sidebar.checkbox("{}".format(class_color_dict[class_color]))
# append everything to list
class_checkboxes.append((box, class_color, class_color_dict[class_color]))
return class_checkboxes, slider
@st.cache(suppress_st_warning=True)
def create_images_per_class(paths):
classes_per_image = []
for image_path, label_path in zip(*paths):
label = Image.open(label_path)
colors = label.convert('RGB').getcolors()
class_per_image = {}
for _, color in colors:
key = rgb_to_rgbstr(*color)
class_per_image[key] = 1
class_per_image["ImagePath"] = image_path
class_per_image["LabelPath"] = label_path
classes_per_image.append(class_per_image)
classes_per_image_df = pd.DataFrame(classes_per_image)
return classes_per_image_df
@st.cache(suppress_st_warning=True)
def create_overlay_image(image_path, label_path, alpha):
image = Image.open(image_path)
label = Image.open(label_path)
image = image.convert("RGBA")
label = label.convert("RGBA")
overlay = Image.blend(image, label, alpha)
st.image(overlay, use_column_width=True)
return overlay
def main():
class_color_dict, paths = create_data_sidebar()
class_checkboxes, slider = create_class_color_checkboxes(class_color_dict)
classes_per_image_df = create_images_per_class(paths)
# get all checked boxes
marked_boxes = []
for box, class_color, class_name in class_checkboxes:
if box:
marked_boxes.append(class_color)
marked = classes_per_image_df[marked_boxes + ["ImagePath", "LabelPath"]].dropna()
image_list = marked["ImagePath"].tolist()
label_list = marked["LabelPath"].tolist()
selected_image = st.selectbox(
"Which image do you want to see?",
image_list
)
create_overlay_image(selected_image, label_list[image_list.index(selected_image)], slider)
if __name__ == '__main__':
# Set title of app
st.title('Semantic Segmentation Dataset Viewer')
# Set title for sidebar
st.sidebar.title("Settings")
# run main
main()
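# Added usage note: as a Streamlit app, this file is meant to be launched with
# `streamlit run app.py` (adjust the path as needed) rather than executed
# directly with the Python interpreter.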
| 34.372549 | 120 | 0.68378 | 727 | 5,259 | 4.687758 | 0.242091 | 0.085094 | 0.036972 | 0.019953 | 0.220951 | 0.152876 | 0.145246 | 0.098885 | 0.025822 | 0.025822 | 0 | 0.006913 | 0.20232 | 5,259 | 152 | 121 | 34.598684 | 0.805483 | 0.054763 | 0 | 0.092593 | 0 | 0.009259 | 0.138564 | 0.01432 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.046296 | 0.009259 | 0.203704 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0464819ba197e3831895ed74d9c28ac4d43831c | 5,040 | py | Python | Test/Copy InDesign Test Text.py | justanotherfoundry/Glyphs-Scripts | f28aeab0224ae19ace4a86cf363e7990985199b7 | [
"Apache-2.0"
] | 283 | 2015-01-07T12:35:35.000Z | 2022-03-29T06:10:44.000Z | Test/Copy InDesign Test Text.py | justanotherfoundry/Glyphs-Scripts | f28aeab0224ae19ace4a86cf363e7990985199b7 | [
"Apache-2.0"
] | 203 | 2015-01-26T18:43:08.000Z | 2022-03-04T01:47:58.000Z | Test/Copy InDesign Test Text.py | justanotherfoundry/Glyphs-Scripts | f28aeab0224ae19ace4a86cf363e7990985199b7 | [
"Apache-2.0"
] | 96 | 2015-01-19T20:58:03.000Z | 2022-03-29T06:10:56.000Z | #MenuTitle: Copy InDesign Test Text
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Copies a test text for InDesign into the clipboard.
"""
from AppKit import NSStringPboardType, NSPasteboard
hangingindent = chr(7)
linelength = 45
thisFont = Glyphs.font # frontmost font
glyphs = [g for g in thisFont.glyphs if g.unicode and g.export and g.subCategory != "Nonspacing"]
glyphnames = [g.name for g in glyphs]
copyString = u""
lastCategory = None
j=0
for i in range(len(glyphs)):
j+=1
currGlyph = glyphs[i]
currCategory = currGlyph.category
if (j%linelength==0 or (lastCategory and lastCategory != currCategory) or currGlyph.name == "a") and not currCategory in ("Separator","Mark"):
copyString += "\n"
j=0
unicharString = currGlyph.glyphInfo.unicharString()
if unicharString:
copyString += unicharString.replace(u"⁄",u" ⁄ ")
if currGlyph.name == "ldot":
copyString += "l"
if currGlyph.name == "Ldot":
copyString += "L"
lastCategory = currCategory
languages = {
"NLD": u"ÍJ́íj́=ÍJíj",
"PLK": u"ĆŃÓŚŹćńóśź",
"ROM": u"ŞŢşţ",
"CAT": u"L·Ll·l",
"TRK": u"Iıİi"
}
# figures:
allFeatures = [f.name for f in thisFont.features]
figurecount = 4
scFigures = [f for f in thisFont.glyphs if f.category == "Number" and (".c2sc" in f.name or ".smcp" in f.name or ".sc" in f.name)]
figurecount += len(scFigures)//10
figString = u" %s0123456789" % ("0" if "zero" in allFeatures else "")
copyString += ( u"\nfigures: %s%s\n" % (hangingindent, figurecount*figString) )
# small figures:
smallFiguresLine = u""
for smallFigFeature in ("sinf", "subs", "sups", "numr", "dnom"):
if smallFigFeature in allFeatures and not smallFigFeature in smallFiguresLine:
smallFiguresLine += u" %s: 0123456789" % smallFigFeature.replace(u"sinf",u"sinf/subs")
copyString += smallFiguresLine[1:] + "\n"
#copyString += "\n"
for feature in thisFont.features:
if not feature.name in ("aalt", "ccmp", "salt", "cpsp", "numr", "dnom", "subs", "sups", "sinf", "lnum", "onum", "pnum", "tnum"):
testtext = u""
# hardcoded features:
if feature.name == "locl":
listOfFeatures = [f.name for f in Font.features]
for lang in languages.keys():
if " %s;"%lang in feature.code:
langLetters = languages[lang]
if "smcp" in listOfFeatures or "c2sc" in listOfFeatures:
langLetters = u"%s %s" % (langLetters,langLetters)
testtext += u" %s: %s" % (lang, langLetters)
elif feature.name == "ordn":
ordnDict = {
"numero": u"No.8 No8",
"ordfeminine": u"1a2a3a4a5a6a7a8a9a0a",
"ordmasculine": u"1o2o3o4o5o6o7o8o9o0o"
}
for gName in ordnDict:
if gName in glyphnames:
testtext += u"%s " % ordnDict[gName]
elif feature.name == "frac":
testtext += u"12/34 56/78 90/12 34/56 78/90 23/41 67/85 01/29 45/63 89/07 34/12 78/56 12/90 56/34 90/78 41/23 85/67 29/01 63/45 07/89"
# scan feature code for substitutions:
elif "sub " in feature.code:
lines = feature.code.splitlines()
for l in lines:
if l and l.startswith("sub "): # find a sub line
l = l[4:l.find("by")] # get the glyphnames between sub and by
featureglyphnames = l.replace("'","").split(" ") # remove contextual tick, split them at the spaces
for glyphname in featureglyphnames:
if glyphname: # exclude a potential empty string
# suffixed glyphs:
if "." in glyphname:
glyphname = glyphname[:glyphname.find(".")]
# ligatures:
if "_" in glyphname:
# add spaces between ligatures
glyphname += "_space"
if testtext and testtext[-1] != " ":
testtext += " "
# split ligature name, if any:
subglyphnames = glyphname.split("_")
for subglyphname in subglyphnames:
gInfo = Glyphs.glyphInfoForName(subglyphname)
if gInfo and gInfo.subCategory != "Nonspacing":
try:
testtext += gInfo.unicharString()
except:
pass
# pad ligature letters in spaces:
if "lig" in feature.name:
testtext += " "
if feature.name == "case":
testtext = u"HO".join(testtext) + "HO"
# hardcoded contextual kerning:
elif feature.name == "kern":
testtext = u"L’Aquila d’Annunzio l’Oréal"
testtext = testtext.replace("0123456789", " 0123456789 ").replace(" "," ")
if "zero" in allFeatures and "0123456789" in testtext and not "00" in testtext:
testtext = testtext.replace("0","00")
copyString += u"%s: %s%s\n" % (feature.name, hangingindent, testtext)
def setClipboard( myText ):
"""
Sets the contents of the clipboard to myText.
Returns True if successful, False if unsuccessful.
"""
try:
myClipboard = NSPasteboard.generalPasteboard()
myClipboard.declareTypes_owner_( [NSStringPboardType], None )
myClipboard.setString_forType_( myText, NSStringPboardType )
return True
except Exception as e:
return False
if not setClipboard(copyString):
print("Warning: could not set clipboard to %s..." % ( copyString[:12] ))
| 33.377483 | 143 | 0.652579 | 647 | 5,040 | 5.068006 | 0.352396 | 0.026837 | 0.005489 | 0.010979 | 0.031107 | 0.018298 | 0 | 0 | 0 | 0 | 0 | 0.044121 | 0.208532 | 5,040 | 150 | 144 | 33.6 | 0.776134 | 0.109325 | 0 | 0.054545 | 0 | 0.009091 | 0.158709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009091 | false | 0.009091 | 0.018182 | 0 | 0.045455 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0476dc7d331de0a3e778d773138063fef6d16c6 | 2,916 | py | Python | homeassistant/components/rest/entity.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/rest/entity.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/rest/entity.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """The base entity for the rest component."""
from abc import abstractmethod
from typing import Any
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .data import RestData
class BaseRestEntity(Entity):
"""A class for entities using DataUpdateCoordinator or rest data directly."""
def __init__(
self,
coordinator: DataUpdateCoordinator[Any],
rest: RestData,
resource_template,
force_update,
) -> None:
"""Create the entity that may have a coordinator."""
self.coordinator = coordinator
self.rest = rest
self._resource_template = resource_template
self._force_update = force_update
@property
def force_update(self):
"""Force update."""
return self._force_update
@property
def should_poll(self) -> bool:
"""Poll only if we do not have a coordinator."""
return not self.coordinator
@property
def available(self):
"""Return the availability of this sensor."""
if self.coordinator and not self.coordinator.last_update_success:
return False
return self.rest.data is not None
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._update_from_rest_data()
if self.coordinator:
self.async_on_remove(
self.coordinator.async_add_listener(self._handle_coordinator_update)
)
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._update_from_rest_data()
self.async_write_ha_state()
async def async_update(self):
"""Get the latest data from REST API and update the state."""
if self.coordinator:
await self.coordinator.async_request_refresh()
return
if self._resource_template is not None:
self.rest.set_url(self._resource_template.async_render(parse_result=False))
await self.rest.async_update()
self._update_from_rest_data()
@abstractmethod
def _update_from_rest_data(self):
"""Update state from the rest data."""
class RestEntity(BaseRestEntity):
"""A class for entities using DataUpdateCoordinator or rest data directly."""
def __init__(
self,
coordinator: DataUpdateCoordinator[Any],
rest: RestData,
name,
resource_template,
force_update,
) -> None:
"""Create the entity that may have a coordinator."""
self._name = name
super().__init__(coordinator, rest, resource_template, force_update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
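

# A hedged sketch, not part of Home Assistant itself: a concrete entity built on
# RestEntity only needs to implement _update_from_rest_data and read the latest
# payload from self.rest.data. The class and attribute names below are
# illustrative assumptions, not the real rest sensor platform.
class ExampleRestSensor(RestEntity):
    """Illustrative sensor that exposes the raw REST payload as its state."""

    def _update_from_rest_data(self):
        """Copy the most recent REST response into the entity state."""
        self._payload = self.rest.data

    @property
    def state(self):
        """Return the last payload received from the REST endpoint."""
        return getattr(self, "_payload", None)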
| 30.375 | 87 | 0.658093 | 337 | 2,916 | 5.459941 | 0.25816 | 0.081522 | 0.030435 | 0.03913 | 0.278261 | 0.208696 | 0.208696 | 0.208696 | 0.208696 | 0.208696 | 0 | 0 | 0.258573 | 2,916 | 95 | 88 | 30.694737 | 0.851064 | 0.164609 | 0 | 0.359375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.09375 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f049b11cba821012829b24bf89c712ed2e556c55 | 1,999 | py | Python | scripts/excel_writer.py | knguye/barcode_function | e9a1a64d1a26d17ef906e2e1778c8035328dcf40 | [
"MIT"
] | 1 | 2019-04-04T17:43:43.000Z | 2019-04-04T17:43:43.000Z | scripts/excel_writer.py | knguye/barcode_functions | e9a1a64d1a26d17ef906e2e1778c8035328dcf40 | [
"MIT"
] | null | null | null | scripts/excel_writer.py | knguye/barcode_functions | e9a1a64d1a26d17ef906e2e1778c8035328dcf40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
import rospy
import re
import openpyxl
from std_msgs.msg import String
from custom_msgs.msg import IntList
# Function: To take data from the barcode parser and write the data to the xlsx log file
wb = openpyxl.Workbook()
log_sheet = wb.active # Set log_sheet to active sheet for writing
filename = "log" # MODIFY this to change the name of the log file.
username = "knguye" # MODIFY this to the current username to enable operation
filepath = "/home/{}".format(username) + "/catkin_ws/src/barcode_functions/logs/{}.xlsx".format(filename) #replace knguye with user
# openpyxl
b = IntList()
row = 2
# Create headers for rows
log_sheet['A1'] = 'Row'
log_sheet['B1'] = 'Location'
log_sheet['C1'] = 'Timestamp'
def getData(data):
global row
global b
# try: # if QR is valid format for this usage
    if (data.data != b): # if the last barcode isn't a duplicate
b = data.data
# Copying the values onto the excel cells
log_sheet['A{}'.format(str(row))] = b[0]
log_sheet['B{}'.format(str(row))]= b[1]
log_sheet['C{}'.format(str(row))] = rospy.get_time()
row = row + 1
rospy.loginfo("Logging data on Excel..")
wb.save(filepath) # overwrite spreadsheet in real time
# except:
# rospy.loginfo("Invalid QR format")
#c = 0 # first val at 0, second at 1.. resets for each new entry
#for values in data:
#log_sheet.cell(row, col, values)
#col = col + 1
#rospy.loginfo(values)
#row = row + 1
# Parse the data to get numbers
# b = data
#rospy.loginfo(data) # debug
# For some reason, the openpyxl functions don't work here
def main():
rospy.init_node('excel_writer', anonymous=False) # Anon = false to allow multiple excel writers
#while not rospy.is_shutdown():
rospy.Subscriber("/qr_data", IntList, getData) # Take data from topic and send to writing function
rospy.spin()
if __name__ == "__main__":
main()
| 32.770492 | 131 | 0.655328 | 297 | 1,999 | 4.323232 | 0.47138 | 0.056075 | 0.028037 | 0.020249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009103 | 0.230615 | 1,999 | 60 | 132 | 33.316667 | 0.825748 | 0.458729 | 0 | 0 | 0 | 0 | 0.140684 | 0.042776 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.15625 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f04c6cdd761805c6b471cee250846006aaa86647 | 16,760 | py | Python | src/python/grpcio/grpc/experimental/aio/_call.py | muxi/grpc | b07d7f14857d09f05b58ccd0e5f15a6992618f3a | [
"Apache-2.0"
] | null | null | null | src/python/grpcio/grpc/experimental/aio/_call.py | muxi/grpc | b07d7f14857d09f05b58ccd0e5f15a6992618f3a | [
"Apache-2.0"
] | null | null | null | src/python/grpcio/grpc/experimental/aio/_call.py | muxi/grpc | b07d7f14857d09f05b58ccd0e5f15a6992618f3a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Asyncio Python."""
import asyncio
from typing import AsyncIterable, Awaitable, Dict, Optional
import grpc
from grpc import _common
from grpc._cython import cygrpc
from . import _base_call
from ._typing import (DeserializingFunction, MetadataType, RequestType,
ResponseType, SerializingFunction)
__all__ = 'AioRpcError', 'Call', 'UnaryUnaryCall', 'UnaryStreamCall'
_LOCAL_CANCELLATION_DETAILS = 'Locally cancelled by application!'
_GC_CANCELLATION_DETAILS = 'Cancelled upon garbage collection!'
_RPC_ALREADY_FINISHED_DETAILS = 'RPC already finished.'
_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'>')
_NON_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'\tdebug_error_string = "{}"\n'
'>')
_EMPTY_METADATA = tuple()
class AioRpcError(grpc.RpcError):
"""An implementation of RpcError to be used by the asynchronous API.
    The raised RpcError is a snapshot of the final status of the RPC: its values
    are already determined, so its methods no longer need to be coroutines.
"""
# TODO(https://github.com/grpc/grpc/issues/20144) Metadata
# type returned by `initial_metadata` and `trailing_metadata`
# and also taken in the constructor needs to be revisit and make
# it more specific.
_code: grpc.StatusCode
_details: Optional[str]
_initial_metadata: Optional[MetadataType]
_trailing_metadata: Optional[MetadataType]
_debug_error_string: Optional[str]
def __init__(self,
code: grpc.StatusCode,
details: Optional[str] = None,
initial_metadata: Optional[MetadataType] = None,
trailing_metadata: Optional[MetadataType] = None,
debug_error_string: Optional[str] = None) -> None:
"""Constructor.
Args:
code: The status code with which the RPC has been finalized.
details: Optional details explaining the reason of the error.
initial_metadata: Optional initial metadata that could be sent by the
Server.
trailing_metadata: Optional metadata that could be sent by the Server.
"""
super().__init__(self)
self._code = code
self._details = details
self._initial_metadata = initial_metadata
self._trailing_metadata = trailing_metadata
self._debug_error_string = debug_error_string
def code(self) -> grpc.StatusCode:
"""Accesses the status code sent by the server.
Returns:
The `grpc.StatusCode` status code.
"""
return self._code
def details(self) -> Optional[str]:
"""Accesses the details sent by the server.
Returns:
The description of the error.
"""
return self._details
def initial_metadata(self) -> Optional[Dict]:
"""Accesses the initial metadata sent by the server.
Returns:
The initial metadata received.
"""
return self._initial_metadata
def trailing_metadata(self) -> Optional[Dict]:
"""Accesses the trailing metadata sent by the server.
Returns:
The trailing metadata received.
"""
return self._trailing_metadata
def debug_error_string(self) -> str:
"""Accesses the debug error string sent by the server.
Returns:
The debug error string received.
"""
return self._debug_error_string
def _repr(self) -> str:
"""Assembles the error string for the RPC error."""
return _NON_OK_CALL_REPRESENTATION.format(self.__class__.__name__,
self._code, self._details,
self._debug_error_string)
def __repr__(self) -> str:
return self._repr()
def __str__(self) -> str:
return self._repr()
def _create_rpc_error(initial_metadata: Optional[MetadataType],
status: cygrpc.AioRpcStatus) -> AioRpcError:
return AioRpcError(_common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[status.code()],
status.details(), initial_metadata,
status.trailing_metadata())
class Call(_base_call.Call):
_loop: asyncio.AbstractEventLoop
_code: grpc.StatusCode
_status: Awaitable[cygrpc.AioRpcStatus]
_initial_metadata: Awaitable[MetadataType]
_locally_cancelled: bool
def __init__(self) -> None:
self._loop = asyncio.get_event_loop()
self._code = None
self._status = self._loop.create_future()
self._initial_metadata = self._loop.create_future()
self._locally_cancelled = False
def cancel(self) -> bool:
"""Placeholder cancellation method.
The implementation of this method needs to pass the cancellation reason
into self._cancellation, using `set_result` instead of
`set_exception`.
"""
raise NotImplementedError()
def cancelled(self) -> bool:
return self._code == grpc.StatusCode.CANCELLED
def done(self) -> bool:
return self._status.done()
def add_done_callback(self, unused_callback) -> None:
raise NotImplementedError()
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
async def initial_metadata(self) -> MetadataType:
return await self._initial_metadata
async def trailing_metadata(self) -> MetadataType:
return (await self._status).trailing_metadata()
async def code(self) -> grpc.StatusCode:
await self._status
return self._code
async def details(self) -> str:
return (await self._status).details()
async def debug_error_string(self) -> str:
return (await self._status).debug_error_string()
def _set_initial_metadata(self, metadata: MetadataType) -> None:
self._initial_metadata.set_result(metadata)
def _set_status(self, status: cygrpc.AioRpcStatus) -> None:
"""Private method to set final status of the RPC.
This method may be called multiple time due to data race between local
cancellation (by application) and Core receiving status from peer. We
make no promise here which one will win.
"""
# In case of local cancellation, flip the flag.
if status.details() is _LOCAL_CANCELLATION_DETAILS:
self._locally_cancelled = True
# In case of the RPC finished without receiving metadata.
if not self._initial_metadata.done():
self._initial_metadata.set_result(_EMPTY_METADATA)
# Sets final status
self._status.set_result(status)
self._code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[status.code()]
async def _raise_for_status(self) -> None:
if self._locally_cancelled:
raise asyncio.CancelledError()
await self._status
if self._code != grpc.StatusCode.OK:
raise _create_rpc_error(await self.initial_metadata(),
self._status.result())
def _repr(self) -> str:
"""Assembles the RPC representation string."""
if not self._status.done():
return '<{} object>'.format(self.__class__.__name__)
if self._code is grpc.StatusCode.OK:
return _OK_CALL_REPRESENTATION.format(
self.__class__.__name__, self._code,
                self._status.result().details())
else:
return _NON_OK_CALL_REPRESENTATION.format(
self.__class__.__name__, self._code,
self._status.result().details(),
self._status.result().debug_error_string())
def __repr__(self) -> str:
return self._repr()
def __str__(self) -> str:
return self._repr()
# pylint: disable=abstract-method
class UnaryUnaryCall(Call, _base_call.UnaryUnaryCall):
"""Object for managing unary-unary RPC calls.
Returned when an instance of `UnaryUnaryMultiCallable` object is called.
"""
_request: RequestType
_channel: cygrpc.AioChannel
_request_serializer: SerializingFunction
_response_deserializer: DeserializingFunction
_call: asyncio.Task
_cython_call: cygrpc._AioCall
def __init__(self, request: RequestType, deadline: Optional[float],
channel: cygrpc.AioChannel, method: bytes,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> None:
super().__init__()
self._request = request
self._channel = channel
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._cython_call = self._channel.call(method, deadline)
self._call = self._loop.create_task(self._invoke())
def __del__(self) -> None:
if not self._call.done():
self._cancel(
cygrpc.AioRpcStatus(cygrpc.StatusCode.cancelled,
_GC_CANCELLATION_DETAILS, None, None))
async def _invoke(self) -> ResponseType:
serialized_request = _common.serialize(self._request,
self._request_serializer)
# NOTE(lidiz) asyncio.CancelledError is not a good transport for status,
# because the asyncio.Task class do not cache the exception object.
# https://github.com/python/cpython/blob/edad4d89e357c92f70c0324b937845d652b20afd/Lib/asyncio/tasks.py#L785
try:
serialized_response = await self._cython_call.unary_unary(
serialized_request,
self._set_initial_metadata,
self._set_status,
)
except asyncio.CancelledError:
if self._code != grpc.StatusCode.CANCELLED:
self.cancel()
# Raises here if RPC failed or cancelled
await self._raise_for_status()
return _common.deserialize(serialized_response,
self._response_deserializer)
def _cancel(self, status: cygrpc.AioRpcStatus) -> bool:
"""Forwards the application cancellation reasoning."""
if not self._status.done():
self._set_status(status)
self._cython_call.cancel(status)
self._call.cancel()
return True
else:
return False
def cancel(self) -> bool:
return self._cancel(
cygrpc.AioRpcStatus(cygrpc.StatusCode.cancelled,
_LOCAL_CANCELLATION_DETAILS, None, None))
def __await__(self) -> ResponseType:
"""Wait till the ongoing RPC request finishes."""
try:
response = yield from self._call
except asyncio.CancelledError:
# Even if we caught all other CancelledError, there is still
# this corner case. If the application cancels immediately after
# the Call object is created, we will observe this
# `CancelledError`.
if not self.cancelled():
self.cancel()
raise
return response
# pylint: disable=abstract-method
class UnaryStreamCall(Call, _base_call.UnaryStreamCall):
"""Object for managing unary-stream RPC calls.
Returned when an instance of `UnaryStreamMultiCallable` object is called.
"""
_request: RequestType
_channel: cygrpc.AioChannel
_request_serializer: SerializingFunction
_response_deserializer: DeserializingFunction
_cython_call: cygrpc._AioCall
_send_unary_request_task: asyncio.Task
_message_aiter: AsyncIterable[ResponseType]
def __init__(self, request: RequestType, deadline: Optional[float],
channel: cygrpc.AioChannel, method: bytes,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> None:
super().__init__()
self._request = request
self._channel = channel
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._send_unary_request_task = self._loop.create_task(
self._send_unary_request())
self._message_aiter = self._fetch_stream_responses()
self._cython_call = self._channel.call(method, deadline)
def __del__(self) -> None:
if not self._status.done():
self._cancel(
cygrpc.AioRpcStatus(cygrpc.StatusCode.cancelled,
_GC_CANCELLATION_DETAILS, None, None))
async def _send_unary_request(self) -> ResponseType:
serialized_request = _common.serialize(self._request,
self._request_serializer)
try:
await self._cython_call.unary_stream(serialized_request,
self._set_initial_metadata,
self._set_status)
except asyncio.CancelledError:
if self._code != grpc.StatusCode.CANCELLED:
self.cancel()
raise
async def _fetch_stream_responses(self) -> ResponseType:
await self._send_unary_request_task
message = await self._read()
while message:
yield message
message = await self._read()
def _cancel(self, status: cygrpc.AioRpcStatus) -> bool:
"""Forwards the application cancellation reasoning.
Async generator will receive an exception. The cancellation will go
deep down into Core, and then propagates backup as the
`cygrpc.AioRpcStatus` exception.
So, under race condition, e.g. the server sent out final state headers
and the client calling "cancel" at the same time, this method respects
the winner in Core.
"""
if not self._status.done():
self._set_status(status)
self._cython_call.cancel(status)
if not self._send_unary_request_task.done():
# Injects CancelledError to the Task. The exception will
# propagate to _fetch_stream_responses as well, if the sending
# is not done.
self._send_unary_request_task.cancel()
return True
else:
return False
def cancel(self) -> bool:
return self._cancel(
cygrpc.AioRpcStatus(cygrpc.StatusCode.cancelled,
_LOCAL_CANCELLATION_DETAILS, None, None))
def __aiter__(self) -> AsyncIterable[ResponseType]:
return self._message_aiter
async def _read(self) -> ResponseType:
# Wait for the request being sent
await self._send_unary_request_task
# Reads response message from Core
try:
raw_response = await self._cython_call.receive_serialized_message()
except asyncio.CancelledError:
if self._code != grpc.StatusCode.CANCELLED:
self.cancel()
raise
if raw_response is None:
return None
else:
return _common.deserialize(raw_response,
self._response_deserializer)
async def read(self) -> ResponseType:
if self._status.done():
await self._raise_for_status()
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
response_message = await self._read()
if response_message is None:
# If the read operation failed, Core should explain why.
await self._raise_for_status()
# If no exception raised, there is something wrong internally.
assert False, 'Read operation failed with StatusCode.OK'
else:
return response_message
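

# A hedged usage sketch (assumptions, not part of this module): application code
# normally receives these Call objects from an aio multicallable on a channel
# rather than constructing them directly. `stub_method` below stands in for such
# a multicallable and is named here purely for illustration.
async def _example_unary_unary_usage(stub_method, request):
    """Await a unary-unary call and inspect its final status."""
    call = stub_method(request)  # expected to return a UnaryUnaryCall
    response = await call  # __await__ drives _invoke() to completion
    status_code = await call.code()  # grpc.StatusCode once the RPC finished
    return response, status_code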
| 37.161863 | 115 | 0.633353 | 1,780 | 16,760 | 5.685393 | 0.191011 | 0.035573 | 0.018972 | 0.010375 | 0.419565 | 0.347826 | 0.29832 | 0.280929 | 0.262352 | 0.253656 | 0 | 0.003445 | 0.289857 | 16,760 | 450 | 116 | 37.244444 | 0.846832 | 0.234487 | 0 | 0.47191 | 0 | 0 | 0.02813 | 0 | 0 | 0 | 0 | 0.002222 | 0.003745 | 1 | 0.116105 | false | 0 | 0.026217 | 0.037453 | 0.367041 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f04d7a4b4d9f50b4c8d623ad5cbca8e00259dc3d | 6,607 | py | Python | paghe/cedolino.py | chirale/mrpaghe | 304440bcf9f68296e3086e3bd449da26a31275f4 | [
"CC0-1.0"
] | null | null | null | paghe/cedolino.py | chirale/mrpaghe | 304440bcf9f68296e3086e3bd449da26a31275f4 | [
"CC0-1.0"
] | null | null | null | paghe/cedolino.py | chirale/mrpaghe | 304440bcf9f68296e3086e3bd449da26a31275f4 | [
"CC0-1.0"
] | null | null | null | # from paghe import ccnl
class Cedolino:
detrazioni = {}
ore_straord = {'15%': 0, '35%': 0}
totale_detrazioni = 0
def __init__(self, **kwargs):
self.ccnl = kwargs.get('ccnl') # istanza di Ccnl
        # IRPEF tax brackets
self.irpef_scaglione = (0, 0.23, 0.27, 0.38, 0.41, 0.43)
self.livello = kwargs.get('livello')
self.giorni_mese = kwargs.get('giorni_mese') # Giorni totali del mese del Cedolino
self.ore_straord['15%'] = kwargs.get('straord_15', 0)
self.ore_straord['35%'] = kwargs.get('straord_35', 0)
self.festivita_settimanale = kwargs.get('festivita_settimanale', 0)
self.festivita_domenicale = kwargs.get('festivita_domenicale', 0)
        # Calculations
self.importo_ordinario = self.ccnl.importo_ordinario(livello=self.livello)
        # Overtime pay calculation
self.importo_straordinario = 0
for tipo in ['15%', '35%']:
self.importo_straordinario += round(self.ccnl.importo_straordinario_orario(
tipo=tipo,
livello=self.livello
) * self.ore_straord[tipo], 2)
self.importo_straordinario = round(self.importo_straordinario, 2)
        # Holiday pay calculation
self.importo_festivita_settimanale = round(self.festivita_settimanale * self.ccnl.base_giornaliera(livello=self.livello), 2)
self.importo_festivita_domenicale = round(self.festivita_domenicale * self.ccnl.base_giornaliera(livello=self.livello), 2)
self.totale_competenze = round(self.importo_ordinario + self.importo_straordinario +
self.importo_festivita_domenicale + self.importo_festivita_settimanale, 2)
self.imponibile_previdenziale = round(self.totale_competenze, 0)
self.ctr_previdenziale = round(self.imponibile_previdenziale * 0.0919, 2)
self.tot_ctr_previd = self.ctr_previdenziale
self.imponibile_fiscale = round(self.totale_competenze - self.tot_ctr_previd, 2)
        # Taxes
self.irpef_lorda = self.calcola_irpef_lorda()
self.reddito_complessivo_annuo = round(self.imponibile_fiscale * self.ccnl.mensilita, 2)
        # Personal deductions
self.detrazioni['lav'] = self.calcola_detraz_lav()
"""
# Detrazioni famigliari a carico
# 1) Detrazioni coniuge
self.detrazioni['coniuge'] = self.calcola_detraz_coniuge()
# 2) Detrazioni figli
self.detrazioni['figli'] = self.calcola_detraz_figli()
# 3) Detrazioni altri famigliari
self.detrazioni['altri fam'] = self.calcola_detraz_altri_fam()
# self.irpef_netta = self.irpef_lorda - self.detraz_lav - - self.detraz_coniuge self.detraz_figli - self.detraz_altri_fam
self.totale_detrazioni = 0
"""
for k, v in self.detrazioni.items():
self.totale_detrazioni += v
self.irpef_netta = self.irpef_lorda - self.totale_detrazioni
self.totale_trattenute = self.tot_ctr_previd + self.irpef_netta
self.netto_a_pagare = round(self.totale_competenze - self.totale_trattenute, 0)
self.arrotondamento = round(self.netto_a_pagare - (self.totale_competenze - self.totale_trattenute), 2)
def calcola_irpef_lorda(self):
""" Calcola l'IRPEF in base all'imponibile fiscale """
        if self.imponibile_fiscale <= 1250:
            # 1st bracket
importo = self.imponibile_fiscale * self.irpef_scaglione[1]
elif 1250 < self.imponibile_fiscale <= 2333.33:
            # 2nd bracket
importo = round(1250 * self.irpef_scaglione[1], 2) + ((self.imponibile_fiscale - 1250) * self.irpef_scaglione[2])
elif 2333.33 < self.imponibile_fiscale <= 4583.33:
            # 3rd bracket
importo = round(1250 * self.irpef_scaglione[1], 2) + round((2333.33 - 1250) * self.irpef_scaglione[2], 2) + \
((self.imponibile_fiscale - 2333.33) * self.irpef_scaglione[3])
elif 4583.33 < self.imponibile_fiscale <= 6250:
            # 4th bracket
importo = round(1250 * self.irpef_scaglione[1], 2) + round((2333.33 - 1250) * self.irpef_scaglione[2], 2) + \
round((4583.33 - 2333.33) * self.irpef_scaglione[3], 2) + \
((self.imponibile_fiscale - 4583.33) * self.irpef_scaglione[4])
        elif self.imponibile_fiscale > 6250:
            # 5th bracket
importo = round(1250 * self.irpef_scaglione[1], 2) + round((2333.33 - 1250) * self.irpef_scaglione[2], 2) + \
round((4583.33 - 2333.33) * self.irpef_scaglione[3], 2) + \
((6250 - 4583.33) * self.irpef_scaglione[4]) + (self.imponibile_fiscale - 6250) * self.irpef_scaglione[5]
return round(importo, 2)
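
    # Illustrative check of the bracket logic above (the input value is an
    # assumption, not data from a real payslip): with imponibile_fiscale = 2000.00
    # the 2nd bracket applies, so
    #   importo = round(1250 * 0.23, 2) + (2000 - 1250) * 0.27
    #           = 287.50 + 202.50 = 490.00
    # and calcola_irpef_lorda() returns 490.0.
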
def tronca_coeff_detraz(self, value):
        # truncate to 4 decimal places
return int((value * 10000)) / 10000
def calcola_detraz_lav(self):
if self.reddito_complessivo_annuo <= 8000:
            annuale = 1880  # actually no less than 1380 for permanent contracts and 690 for fixed-term ones
elif 8000 < int(self.reddito_complessivo_annuo) <= 28000:
coefficiente = (28000 - self.reddito_complessivo_annuo) / 20000
annuale = 978 + (902 * self.tronca_coeff_detraz(coefficiente))
elif 28000 < int(self.reddito_complessivo_annuo) <= 55000:
coefficiente = (55000 - self.reddito_complessivo_annuo) / 27000
annuale = 978 + (self.tronca_coeff_detraz(coefficiente))
        elif int(self.reddito_complessivo_annuo) > 55000:
annuale = 0
return round((annuale / 365) * self.giorni_mese, 2)
def calcola_detraz_coniuge(self):
if self.reddito_complessivo_annuo <= 15000:
importo = 800 - (110 * (self.reddito_complessivo_annuo / 15000))
elif 15000 < self.reddito_complessivo_annuo <= 40000:
importo = 690
elif 40000 < self.reddito_complessivo_annuo <= 80000:
            importo = 690 * ((80000 - self.reddito_complessivo_annuo) / 40000)
else:
importo = 0
        # Additional deductions
if 29000 < self.reddito_complessivo_annuo <= 29200:
importo += 10
elif 29200 < self.reddito_complessivo_annuo < 34700:
importo += 20
elif 34700 < self.reddito_complessivo_annuo < 35000:
importo += 30
elif 35000 < self.reddito_complessivo_annuo < 35100:
importo += 20
elif 35100 < self.reddito_complessivo_annuo < 35200:
importo += 10
return importo
def calcola_detraz_figli(self):
pass | 52.436508 | 132 | 0.638868 | 772 | 6,607 | 5.256477 | 0.19171 | 0.048793 | 0.092164 | 0.11311 | 0.312962 | 0.216856 | 0.12691 | 0.111138 | 0.111138 | 0.077378 | 0 | 0.085982 | 0.257152 | 6,607 | 126 | 133 | 52.436508 | 0.740831 | 0.05812 | 0 | 0.098901 | 0 | 0 | 0.018323 | 0.0037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065934 | false | 0.010989 | 0.263736 | 0.010989 | 0.417582 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f055790e93699386ee0ef9c2392d58f9b73372e9 | 2,936 | py | Python | day_12.py | balcortex/advent_of_code_2020 | e56a54c7578d3d70b0dcc451100b0bb65624a954 | [
"MIT"
] | null | null | null | day_12.py | balcortex/advent_of_code_2020 | e56a54c7578d3d70b0dcc451100b0bb65624a954 | [
"MIT"
] | null | null | null | day_12.py | balcortex/advent_of_code_2020 | e56a54c7578d3d70b0dcc451100b0bb65624a954 | [
"MIT"
] | null | null | null | from typing import NamedTuple, Sequence
COORDS = {"E": (1, 0), "N": (0, 1), "W": (-1, 0), "S": (0, -1)}
RTURNS = {"E": "S", "S": "W", "W": "N", "N": "E"}
LTURNS = {"S": "E", "W": "S", "N": "W", "E": "N"}
class Ship:
def __init__(self):
self.posx = 0
self.posy = 0
self.facing = "E"
def move(self, s: str) -> None:
instructions = self.parse(s)
for dir_, value in instructions:
if dir_ == "F":
dirx, diry = COORDS[self.facing]
self.posx += dirx * value
self.posy += diry * value
elif dir_ == "R":
turns = value // 90
for t in range(turns):
self.facing = RTURNS[self.facing]
elif dir_ == "L":
turns = value // 90
for t in range(turns):
self.facing = LTURNS[self.facing]
else:
dirx, diry = COORDS[dir_]
self.posx += dirx * value
self.posy += diry * value
@property
def manhattan(self) -> int:
return abs(self.posx) + abs(self.posy)
@staticmethod
def parse(s: str) -> Sequence:
return [(s[0], int(s[1:])) for s in s.split("\n")]
assert Ship.parse("F10") == [("F", 10)]
INSTRS = """F10
N3
F7
R90
F11"""
ship = Ship()
ship.move(INSTRS)
assert ship.manhattan == 25
with open("day_12_input.txt") as f:
ship = Ship()
ship.move(f.read())
print(ship.manhattan)
class Ship2:
def __init__(self):
self.posx = 0
self.posy = 0
self.wp_posx = 10
self.wp_posy = 1
self.wp_angle = (1, 1)
def move(self, s: str) -> None:
instructions = self.parse(s)
for dir_, value in instructions:
if dir_ == "F":
self.posx += self.wp_posx * value
self.posy += self.wp_posy * value
elif dir_ == "R":
turns = value // 90
for t in range(turns):
self.wp_posx, self.wp_posy = self.wp_posy, -self.wp_posx
elif dir_ == "L":
turns = value // 90
for t in range(turns):
self.wp_posx, self.wp_posy = -self.wp_posy, self.wp_posx
else:
dirx, diry = COORDS[dir_]
self.wp_posx += dirx * value
self.wp_posy += diry * value
@property
def manhattan(self) -> int:
return abs(self.posx) + abs(self.posy)
@staticmethod
def parse(s: str) -> Sequence:
return [(s[0], int(s[1:])) for s in s.split("\n")]
assert Ship2.parse("F10") == [("F", 10)]
INSTRS = """F10
N3
F7
R90
F11"""
ship2 = Ship2()
ship2.move(INSTRS)
assert ship2.manhattan == 286
assert ship2.posy == -72
assert ship2.posx == 214
with open("day_12_input.txt") as f:
ship2 = Ship2()
ship2.move(f.read())
print(ship2.manhattan)
| 24.466667 | 76 | 0.491144 | 385 | 2,936 | 3.649351 | 0.192208 | 0.064057 | 0.049822 | 0.042705 | 0.663345 | 0.663345 | 0.630605 | 0.630605 | 0.560854 | 0.560854 | 0 | 0.041799 | 0.356267 | 2,936 | 119 | 77 | 24.672269 | 0.701587 | 0 | 0 | 0.666667 | 0 | 0 | 0.035763 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 1 | 0.086022 | false | 0 | 0.010753 | 0.043011 | 0.16129 | 0.021505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0562e3a686c1348ece23c76a5cf0f2f223adc4f | 3,530 | py | Python | cyto/app/_inject.py | sbtinstruments/cyto | f452562e5e9ae9d2516cd92958af6e6a2c985dcc | [
"MIT"
] | 5 | 2021-04-03T04:09:38.000Z | 2021-12-17T15:05:18.000Z | cyto/app/_inject.py | sbtinstruments/cyto | f452562e5e9ae9d2516cd92958af6e6a2c985dcc | [
"MIT"
] | 1 | 2021-04-21T17:00:29.000Z | 2021-04-21T19:12:30.000Z | cyto/app/_inject.py | sbtinstruments/cyto | f452562e5e9ae9d2516cd92958af6e6a2c985dcc | [
"MIT"
] | null | null | null | import inspect
from contextlib import AsyncExitStack, suppress
from functools import wraps
from typing import (
Any,
AsyncContextManager,
Callable,
ContextManager,
Coroutine,
Optional,
Protocol,
Type,
TypeVar,
)
from anyio import create_task_group
from anyio.abc import TaskGroup
ReturnT = TypeVar("ReturnT", covariant=True)
Func = Callable[..., Coroutine[Any, Any, ReturnT]]
# Note that we disable D102 for `Protocol`s since it's redundant documentation.
# Similarly, we disable too-few-public-methods since it doesn't make sense for
# `Protocol`s. Hopefully, both pydocstyle and pylint will special-case `Protocol`s
# soon enough.
class InjectedFunc(Protocol[ReturnT]): # pylint: disable=too-few-public-methods
"""`Func` after we apply `inject` to it."""
async def __call__(self) -> ReturnT: # noqa: D102
...
class Factory(Protocol): # pylint: disable=too-few-public-methods
"""Given a type, return an instance of said type."""
async def __call__(self, __annotation: Type[Any]) -> Any: # noqa: D102
...
async def _basic_factory(annotation: Type[Any]) -> Any:
if issubclass(annotation, TaskGroup):
return create_task_group()
raise ValueError
def inject(
*,
extra_factory: Optional[Factory] = None,
) -> Callable[[Func[ReturnT]], InjectedFunc[ReturnT]]:
"""Inject instances of the given function's argument types."""
def _inject(coro: Func[ReturnT]) -> InjectedFunc[ReturnT]:
@wraps(coro)
async def _wrapper() -> ReturnT: # type: ignore[return]
spec = inspect.getfullargspec(coro)
args: Any = []
async with AsyncExitStack() as stack:
for arg_name in spec.args:
try:
annotation = spec.annotations[arg_name]
except KeyError as exc:
raise ValueError(
f'Argument "{arg_name} must have a type annotation"'
) from exc
arg = await _get_arg(annotation, stack, extra_factory)
if arg is None:
raise ValueError(
f'Argument "{arg_name}" has unknown type '
f'annotation "{annotation}"'
)
# There is a bug in pylint with
# isinstance-second-argument-not-valid-type
# See: https://github.com/PyCQA/pylint/issues/3507
if isinstance( # pylint: disable=isinstance-second-argument-not-valid-type
arg, AsyncContextManager
):
arg = await stack.enter_async_context(arg)
elif isinstance( # pylint: disable=isinstance-second-argument-not-valid-type
arg, ContextManager
):
arg = stack.enter_context(arg)
args.append(arg)
return await coro(*args)
return _wrapper
return _inject
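

# A minimal usage sketch under stated assumptions: the decorated coroutine simply
# declares its dependencies as annotated arguments and `inject` constructs them.
# `example_main` is an illustrative name, not part of this module; it could be
# driven with e.g. `anyio.run(example_main)`.
@inject()
async def example_main(task_group: TaskGroup) -> None:
    """Entry point whose `task_group` argument is created by `inject`."""
    # The wrapper builds the task group via `_basic_factory` and enters it as an
    # async context manager before this body runs, so tasks could be spawned here.
    assert task_group is not None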
async def _get_arg(
annotation: Type[Any],
stack: AsyncExitStack,
extra_factory: Optional[Factory] = None,
) -> Any:
if issubclass(annotation, AsyncExitStack):
return stack
with suppress(ValueError):
return await _basic_factory(annotation)
if extra_factory is not None:
with suppress(ValueError):
return await extra_factory(annotation)
return None
| 32.090909 | 97 | 0.588952 | 370 | 3,530 | 5.516216 | 0.356757 | 0.019598 | 0.019108 | 0.027927 | 0.215581 | 0.140127 | 0.060755 | 0.060755 | 0.060755 | 0.060755 | 0 | 0.005435 | 0.32238 | 3,530 | 109 | 98 | 32.385321 | 0.847826 | 0.212181 | 0 | 0.131579 | 0 | 0 | 0.043605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.078947 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f05786e06d940f0c85db751f6689f2578463207f | 6,466 | py | Python | swendsen_wang.py | VHeusinkveld/CP2-Ising_model | 76fa9e3d2c2bf79d8fdf52709bf7b122910d3600 | [
"MIT"
] | 1 | 2022-03-31T03:57:34.000Z | 2022-03-31T03:57:34.000Z | swendsen_wang.py | VHeusinkveld/CP2-Ising_model | 76fa9e3d2c2bf79d8fdf52709bf7b122910d3600 | [
"MIT"
] | null | null | null | swendsen_wang.py | VHeusinkveld/CP2-Ising_model | 76fa9e3d2c2bf79d8fdf52709bf7b122910d3600 | [
"MIT"
] | 2 | 2021-01-19T21:19:18.000Z | 2021-02-01T21:22:25.000Z | import numpy as np
import numpy.random as rnd
# -----------------------------------------------------------------------------------------------------------------------
# Swendsen-Wang algorithm functions
# -----------------------------------------------------------------------------------------------------------------------
def SW_algorithm(self, grid_coordinates, spin_site_numbers, grid_spins):
    '''Function that identifies all the clusters, "islands",
    using the back_track function. At every iteration it is determined
    with a 50/50 chance whether the cluster is flipped. The function
    runs over all spins. Every time a new cluster is identified it
    gets added to the total cluster list.
Parameters
----------
self : NameSpace
contains all the simulation parameters
    grid_coordinates : 2D array (dim, L*L)
        containing the x and y coordinates of the spins
    spin_site_numbers : sequence
        spin site indices that are looped over
    grid_spins : 2D array (L, L)
        containing all the spin values within the grid
Returns
-------
islands : list
contains list of the clusters which form islands of the same spin
grid_spins : 2D array (L, L)
containing al the spin values within the grid
cluster_flips: list of length np.size(cluster)
list which states if the cluster is flipped
'''
islands = []
cluster_flips = []
not_visited = np.ones((self.L, self.L), dtype= bool)
bonds = bond_eval(self, grid_spins)
for i in spin_site_numbers:
cluster = []
flip_cluster = 2*rnd.randint(2) - 1
spin_site_x = grid_coordinates[0][i]
spin_site_y = grid_coordinates[1][i]
cluster, grid_spins = back_track(self, spin_site_x, spin_site_y, bonds, not_visited, cluster, grid_spins, flip_cluster)
if cluster != []:
islands.append(cluster)
cluster_flips.append(flip_cluster)
return islands, grid_spins, cluster_flips
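
# A hedged driver sketch: the helper below is an illustrative assumption about
# how SW_algorithm is meant to be wired up (lattice size, couplings and the
# SimpleNamespace standing in for the simulation parameters are not part of the
# original setup).
def _example_sw_sweep(L=16, T=2.27):
    """Build a small random lattice and run a single Swendsen-Wang sweep."""
    import types
    sim = types.SimpleNamespace(L=L, J=1.0, kb=1.0, T=T)
    grid_spins = 2.0 * rnd.randint(2, size=(L, L)) - 1.0  # random +/-1 spins
    xs, ys = np.meshgrid(range(L), range(L), indexing='ij')
    grid_coordinates = [xs.ravel(), ys.ravel()]  # indexed as [0][i] and [1][i]
    spin_site_numbers = range(L * L)
    return SW_algorithm(sim, grid_coordinates, spin_site_numbers, grid_spins)
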
def bond_eval(self, grid_spins):
'''Goes over all the spins in the system and checks the bonds,
    if they are opposite the bond is set to 0; if they are equal, the
    bond is set to infinity with probability (1 - e^(-2J/(k_b*T))).
Parameters
----------
self : NameSpace
contains all the simulation parameters
grid_spins : 2D array (L, L)
containing al the spin values within the grid
Returns
-------
bonds : 3D array (2, L, L)
contains al the bonds present in the system. The first 2D array
gives the horizontal bonds (Element (i,j) gives the relation
between spin_site (i,j) and (i,j+1). When j+1 does not exist it
refers to (i,0) which illustrates the periodic BCs.) and the
second 2D array gives the vertical bonds (Element (i,j) gives
the relation between spin_site (i,j) and (i+1,j). When i+1 does
not exist it refers to (0,j) which illustrates the periodic BCs.).
'''
bonds = np.zeros((2, self.L, self.L,),dtype=float)
chance_value = np.minimum(1, np.exp(-2*self.J/(self.kb*self.T)))
delta_spin_hor = np.abs(grid_spins+np.roll(grid_spins,-1,axis=1))/2 # Divided by 2 to normalise
nz_delta_spin_hor = np.asarray(np.nonzero(delta_spin_hor)) # Gives array with indices for non-zero elements
delta_spin_ver = np.abs(grid_spins+np.roll(grid_spins,-1,axis=0))/2 # Divided by 2 to normalise
nz_delta_spin_ver = np.asarray(np.nonzero(delta_spin_ver)) # Gives array with indices for non-zero elements
for i in range(np.shape(nz_delta_spin_hor)[1]):
if rnd.binomial(1, chance_value) == 1:
bonds[0, nz_delta_spin_hor[0,i], nz_delta_spin_hor[1,i]] = 0
else:
bonds[0, nz_delta_spin_hor[0,i], nz_delta_spin_hor[1,i]] = np.inf
for j in range(np.shape(nz_delta_spin_ver)[1]):
if rnd.binomial(1, chance_value) == 1:
bonds[1, nz_delta_spin_ver[0,j], nz_delta_spin_ver[1,j]] = 0
else:
bonds[1, nz_delta_spin_ver[0,j], nz_delta_spin_ver[1,j]] = np.inf
return bonds
def back_track(self, x, y, bonds, not_visited, cluster, grid_spins, flip_cluster):
'''Checks the neighbours of the spin, if they are
    equal this function jumps over to that spin and
    repeats itself. The spins that are already visited
    are skipped. Every time an equal bond is found, this
    spin is added to the cluster.
Parameters
----------
self : NameSpace
contains all the simulation parameters
x : float
x coordinate of spin site
y : float
y coordinate of spin site
bonds : 3D array (2, L, L)
contains al the bonds present in the system.
not_visited : 2D array (L, L)
contains boolean for every spin site in system. True is not visited, false is visited
cluster : list
contains list of the coordinates belonging to one cluster
grid_spins : 2D array (L, L)
containing al the spin values within the grid
flip_cluster: int
Value is 1 or -1, where -1 means a spinflip.
Returns
-------
cluster : list
contains list of the coordinates belonging to one cluster
grid_spins : 2D array (L, L)
containing al the spin values within the grid
'''
if not_visited[x, y]:
not_visited[x, y] = False
cluster.append([x, y])
grid_spins[x, y] = grid_spins[x, y] * flip_cluster
if bonds[0][x][y] == np.inf:
n_x = x
n_y = (y + 1)%self.L
cluster, grid_spins = back_track(self, n_x, n_y, bonds, not_visited, cluster, grid_spins, flip_cluster)
if bonds[0][x][(y - 1)%self.L] == np.inf:
n_x = x
n_y = (y - 1)%self.L
cluster, grid_spins = back_track(self, n_x, n_y, bonds, not_visited, cluster, grid_spins, flip_cluster)
if bonds[1][x][y] == np.inf:
n_x = (x + 1)%self.L
n_y = y
cluster, grid_spins = back_track(self, n_x, n_y, bonds, not_visited, cluster, grid_spins, flip_cluster)
if bonds[1][(x - 1)%self.L][y] == np.inf:
n_x = (x - 1)%self.L
n_y = y
cluster, grid_spins = back_track(self, n_x, n_y, bonds, not_visited, cluster, grid_spins, flip_cluster)
return cluster, grid_spins | 39.426829 | 127 | 0.601608 | 961 | 6,466 | 3.899063 | 0.183143 | 0.064852 | 0.059781 | 0.014412 | 0.587403 | 0.526288 | 0.497732 | 0.465706 | 0.429677 | 0.354684 | 0 | 0.016338 | 0.27111 | 6,466 | 164 | 128 | 39.426829 | 0.778697 | 0.48871 | 0 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.035088 | 0 | 0.140351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f05b0ec6926c615d553108aa20f2f1f35ff0f92a | 1,546 | py | Python | spider_crawlcrawl.py | NiroDu/python-tricks | 27d504655b1fd7417bd0e6058293209814efcc21 | [
"MIT"
] | null | null | null | spider_crawlcrawl.py | NiroDu/python-tricks | 27d504655b1fd7417bd0e6058293209814efcc21 | [
"MIT"
] | null | null | null | spider_crawlcrawl.py | NiroDu/python-tricks | 27d504655b1fd7417bd0e6058293209814efcc21 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from lxml import html
import requests
import schedule
import smtplib
from email.mime.text import MIMEText
import time
mailcount = 0
def gethtml(url):
return requests.get(url).content.decode('utf-8')
def getcontext(utlhtml, xpaths):
selector = html.fromstring(utlhtml)
return selector.xpath(xpaths)
def runjob():
content = gethtml('http://www.kwh.org.mo')
context = getcontext(content, '//div[@id="content"]//table//font//br')[0].tail
print(context)
if context != '(暫未有貨提供首次注射人士)':
global mailcount
if mailcount < 6:
mailcount += 1
sendmail('预约状态已经被改变成:'+context)
def sendmail(context):
msg_from = 'niro-du@outlook.com'
password = 'Asdfghjkl123'
msg_to = 'charlene0607@163.com'
subject = "晓慧同学请注意:可以速去预约啦"
msg = MIMEText(context)
msg['Subject'] = subject
msg['From'] = msg_from
msg['To'] = msg_to
try:
s = smtplib.SMTP('smtp.live.com')
s.starttls()
s.login(user=msg_from, password=password)
s.sendmail(from_addr=msg_from, to_addrs=msg_to,msg=msg.as_string())
print('发送成功')
s.quit()
except Exception as e:
print('发送失败_'+str(e))
if __name__ == "__main__":
count = 0
schedule.every().day.at("8:05").do(runjob)
schedule.every(3).minutes.do(runjob)
while True:
if mailcount < 6:
schedule.run_pending()
time.sleep(1)
else:
schedule.cancel_job(runjob)
break | 24.935484 | 82 | 0.614489 | 195 | 1,546 | 4.764103 | 0.533333 | 0.037675 | 0.025834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019675 | 0.243855 | 1,546 | 62 | 83 | 24.935484 | 0.775021 | 0.02458 | 0 | 0.04 | 0 | 0 | 0.133378 | 0.024552 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0.04 | 0.12 | 0.02 | 0.24 | 0.06 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f05baf92d95ef84cbe408b6dbe9752da786615e0 | 8,499 | py | Python | src/api/datamanage/pro/datamodel/mixins/model_instance_mixins.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/api/datamanage/pro/datamodel/mixins/model_instance_mixins.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/api/datamanage/pro/datamodel/mixins/model_instance_mixins.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.base_utils import model_to_dict
from datamanage.pro import exceptions as dm_pro_errors
from datamanage.pro.datamodel.models.indicator import DmmModelCalculationAtom
class ModelInstanceMixin(object):
def prepare_fact_sql_generate_params(self, model_instance, model_info, instance_sources, instance_table_fields):
"""准备生成模型应用SQL的参数
:param model_instance: 模型应用实例实体
:param model_info: 模型版本发布信息
:param instance_sources: 记录了模型应用实例各类型输入表的ID
{
'main_table': '1_main_table',
'dimension_tables': ['1_dim_table1', '1_dim_table2']
}
        :param instance_table_fields: list of fields of the instance's main table
        :return: the parameters used to generate the model-application SQL, of the form:
{
"node_type": "fact_table",
"node_conf": {
"main_table": "1_main_table",
"fields": [
{
"field_name": "field1",
"field_index": 1,
"field_type": "int",
"field_clean_content": {
"clean_option": "SQL",
"clean_content": ""
},
...
"dmm_relation": {
"related_method": "left-join",
...
},
"ins_field": {
"input_result_table_id": "1_source_table",
"input_field_name": "source_field1",
"application_clean_content": {
"clean_option": "SQL",
"clean_content": ""
},
...
},
"ins_relation": {
"input_result_table_id": "",
"input_field_name": "",
...
}
}
]
}
}
        :raises DataModelNotExistError: the data model does not exist
"""
        # Convert the field and relation info from the released model version into dicts for lookup
model_fields = {}
for field_info in model_info.get('model_detail', {}).get('fields', []):
model_fields[field_info.get('field_name')] = field_info
model_relations = {}
for relation_info in model_info.get('model_detail', {}).get('model_relation', []):
relation_key = (
relation_info.get('model_id'),
relation_info.get('field_name'),
relation_info.get('related_model_id'),
)
model_relations[relation_key] = relation_info
        # Build the field information that the SQL generation depends on
fields = []
for instance_table_field in instance_table_fields:
field_name = instance_table_field.field_name
            # The mapped field must exist in the released model version info
if field_name not in model_fields:
raise dm_pro_errors.FieldNotExistError(
'无法在模型({model_id})的发布版本({version_id})中找到字段映射配置中的字段({field_name})'.format(
model_id=model_instance.model_id,
version_id=model_instance.version_id,
field_name=field_name,
)
)
            # Normalize fields whose application_clean_content has an empty clean_content to an empty dict
if not instance_table_field.application_clean_content.get('clean_content'):
instance_table_field.application_clean_content = {}
            # Use the model field details from the released model detail as the basis for the assembled field params
field_params = model_fields[field_name]
field_params['ins_field'] = model_to_dict(instance_table_field)
            # If the instance field has a dimension relation, look up the relation defined on the model to get its related method
if instance_table_field.relation:
relation_key = (model_instance.model_id, field_name, instance_table_field.relation.related_model_id)
if relation_key not in model_relations:
raise dm_pro_errors.ModelRelationNotExistError(
message_kv={
'field_name': field_name,
'model_id': model_instance.model_id,
'related_model_id': instance_table_field.relation.related_model_id,
}
)
field_params['dmm_relation'] = model_relations[relation_key]
field_params['ins_relation'] = model_to_dict(instance_table_field.relation)
else:
field_params['dmm_relation'] = {}
field_params['ins_relation'] = {}
fields.append(field_params)
return {
'node_type': model_instance.model.model_type,
'node_conf': {
'main_table': instance_sources['main_table'],
'fields': fields,
},
}
def prepare_ind_sql_generate_params(self, model_instance, instance_indicator):
"""准备生成模型应用SQL的参数
:param model_instance: 模型应用实例实体
:param instance_indicator: 模型应用指标实体
:return: 用于生成模型应用SQL参数,形如:
{
"node_type": "indicator",
"node_conf": {
"main_table": "1_main_table",
"calculation_atom": {
"calculation_atom_name": "test_atom",
"field_type": "int",
"calculation_formula": "count(field1)"
},
"ins_indicator": {
"aggregation_fields": "dim1",
"filter_fomula": "field1 is not null"
}
}
}
        :raises CalculationAtomNotExistError: the calculation atom does not exist
"""
try:
calculation_atom = DmmModelCalculationAtom.objects.get(
calculation_atom_name=instance_indicator.calculation_atom_name,
)
except DmmModelCalculationAtom.DoesNotExist:
raise dm_pro_errors.CalculationAtomNotExistError()
return {
'node_type': 'indicator',
'node_conf': {
'main_table': instance_indicator.parent_result_table_id,
'calculation_atom': {
'calculation_atom_name': calculation_atom.calculation_atom_name,
'field_type': calculation_atom.field_type,
'calculation_formula': calculation_atom.calculation_formula,
},
'ins_indicator': {
'aggregation_fields': instance_indicator.aggregation_fields,
'filter_formula': instance_indicator.filter_formula,
},
},
}
def get_related_model_about_model_inst(self, model_instance):
"""获取模型实例主表相关的所有模型ID
:param model_instance: 模型应用实例
:return: 模型ID列表
"""
pass
| 42.074257 | 116 | 0.544888 | 769 | 8,499 | 5.708713 | 0.310793 | 0.030752 | 0.036902 | 0.01549 | 0.208884 | 0.154214 | 0.074032 | 0.014579 | 0 | 0 | 0 | 0.003932 | 0.371573 | 8,499 | 201 | 117 | 42.283582 | 0.818012 | 0.450524 | 0 | 0.051282 | 0 | 0 | 0.109613 | 0.020598 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.012821 | 0.038462 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f05f8ea67af34bbee2b38d865fbf6f45122c866f | 8,989 | py | Python | python/paddle_fl/mobile/model/language_model.py | barrierye/PaddleFL | eff6ef28491fa2011686ca3daa4f680e5ef83deb | [
"Apache-2.0"
] | 379 | 2019-09-27T14:26:42.000Z | 2022-03-29T14:28:12.000Z | python/paddle_fl/mobile/model/language_model.py | Sprate/PaddleFL | 583691acd5db0a7ca331cc9a72415017b18669b8 | [
"Apache-2.0"
] | 132 | 2019-10-16T03:22:03.000Z | 2022-03-23T08:54:29.000Z | python/paddle_fl/mobile/model/language_model.py | Sprate/PaddleFL | 583691acd5db0a7ca331cc9a72415017b18669b8 | [
"Apache-2.0"
] | 106 | 2019-09-27T12:47:18.000Z | 2022-03-29T09:07:25.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model import *
from layer import *
from .model_base import ModelBase
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid as fluid
from paddle.fluid import ParamAttr
from paddle.fluid.contrib.layers import basic_lstm
class LanguageModel(ModelBase):
def __init__(self):
# model args
self.seq_len_ = 10 # fixed
self.n_hidden_ = 256
self.num_layers_ = 2
self.pad_symbol_ = 0
self.unk_symbol_ = 1
self.vocab_size_ = 10000
self.init_scale_ = 0.1
self.max_grad_norm_ = 5
self.dropout_prob_ = 0.0
# results
self.correct_ = None
self.pred_ = None
self.loss_ = None
# private vars
self.user_params_ = []
self.program_ = None
self.startup_program_ = None
self.input_name_list_ = None
self.target_var_names_ = []
def update_params(self, config):
self.n_hidden_ = config.get("n_hidden", 256)
self.num_layers_ = config.get("num_layers", 2)
self.init_scale_ = config.get("init_scale", 0.1)
self.max_grad_norm_ = config.get("max_grad_norm", 5)
self.dropout_prob_ = config.get("dropout_prob", 0.0)
def get_model_input_names(self):
return self.input_name_list_
def get_model_loss(self):
return self.loss_
def get_model_loss_name(self):
return self.loss_.name
def get_model_metrics(self):
metrics = {
"init_hidden": self.last_hidden_.name,
"init_cell": self.last_cell_.name,
"correct": self.correct_.name
}
return metrics
def get_target_names(self):
return self.target_var_names_
def build_model(self, model_configs):
self.update_params(model_configs)
features = fluid.layers.data(name="features",
shape=[None, self.seq_len_],
dtype='int64')
labels = fluid.layers.data(name="labels",
shape=[None, self.seq_len_],
dtype='int64')
sequence_length_ph = fluid.layers.data(name="seq_len_ph",
shape=[None],
dtype='int64')
sequence_mask_ph = fluid.layers.data(name="seq_mask_ph",
shape=[None],
dtype='float32')
init_hidden = fluid.layers.data(
name="init_hidden",
shape=[None, self.num_layers_, self.n_hidden_],
dtype='float32')
init_cell = fluid.layers.data(
name="init_cell",
shape=[None, self.num_layers_, self.n_hidden_],
dtype='float32')
init_hidden = layers.transpose(init_hidden, perm=[1, 0, 2])
init_cell = layers.transpose(init_cell, perm=[1, 0, 2])
init_hidden_reshape = layers.reshape(
init_hidden, shape=[self.num_layers_, -1, self.n_hidden_])
init_cell_reshape = layers.reshape(
init_cell, shape=[self.num_layers_, -1, self.n_hidden_])
features = layers.reshape(features, shape=[-1, self.seq_len_, 1])
# word embedding
inputs = layers.embedding(
input=features,
size=[self.vocab_size_, self.n_hidden_],
dtype='float32',
is_sparse=False,
param_attr=fluid.ParamAttr(
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale_, high=self.init_scale_)))
# LSTM
output, last_hidden, last_cell = self._build_rnn_graph(
inputs, init_hidden, init_cell, sequence_length_ph)
output = layers.reshape(output,
shape=[-1, self.seq_len_, self.n_hidden_],
inplace=True)
self.last_hidden_ = layers.reshape(
last_hidden, [-1, self.num_layers_, self.n_hidden_])
self.last_cell_ = layers.reshape(
last_cell, [-1, self.num_layers_, self.n_hidden_])
# softmax
softmax_w = layers.create_parameter(
[self.n_hidden_, self.vocab_size_],
dtype="float32",
name="softmax_w",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale_, high=self.init_scale_))
softmax_b = layers.create_parameter(
[self.vocab_size_],
dtype="float32",
name='softmax_b',
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale_, high=self.init_scale_))
logits = layers.matmul(output, softmax_w)
logits = layers.elementwise_add(logits, softmax_b)
logits = layers.reshape(logits,
shape=[-1, self.vocab_size_],
inplace=True)
# correct predictions
labels_reshaped = layers.reshape(labels, [-1])
pred = layers.cast(layers.argmax(logits, 1), dtype="int64")
correct_pred = layers.cast(layers.equal(pred, labels_reshaped),
dtype="int64")
self.pred_ = pred
# predicting unknown is always considered wrong
# only in paddle 1.8
unk_tensor = layers.fill_constant(layers.shape(labels_reshaped),
value=self.unk_symbol_,
dtype='int64')
pred_unk = layers.cast(layers.equal(pred, unk_tensor), dtype="int64")
correct_unk = layers.elementwise_mul(pred_unk, correct_pred)
# predicting padding is always considered wrong
pad_tensor = layers.fill_constant(layers.shape(labels_reshaped),
value=self.pad_symbol_,
dtype='int64')
pred_pad = layers.cast(layers.equal(pred, pad_tensor), dtype="int64")
correct_pad = layers.elementwise_mul(pred_pad, correct_pred)
# Reshape logits to be a 3-D tensor for sequence loss
logits = layers.reshape(logits, [-1, self.seq_len_, self.vocab_size_])
labels = layers.reshape(labels, [-1, self.seq_len_, 1])
loss = layers.softmax_with_cross_entropy(logits=logits,
label=labels,
soft_label=False,
return_softmax=False)
sequence_mask = layers.reshape(sequence_mask_ph,
[-1, self.seq_len_, 1])
loss = layers.reduce_mean(layers.elementwise_mul(loss, sequence_mask))
eval_metric_ops = fluid.layers.reduce_sum(correct_pred) \
- fluid.layers.reduce_sum(correct_unk) \
- fluid.layers.reduce_sum(correct_pad)
self.loss_ = loss
self.correct_ = eval_metric_ops
self.input_name_list_ = [
'features', 'labels', 'seq_len_ph', 'seq_mask_ph', 'init_hidden',
'init_cell'
]
self.target_var_names_ = [
self.loss_, self.last_hidden_, self.last_cell_, self.correct_
]
self.program_ = fluid.default_main_program()
self.startup_program_ = fluid.default_startup_program()
def _build_rnn_graph(self, inputs, init_hidden, init_cell,
sequence_length_ph):
rnn_out, last_hidden, last_cell = basic_lstm(
input=inputs,
init_hidden=init_hidden,
init_cell=init_cell,
hidden_size=self.n_hidden_,
num_layers=self.num_layers_,
batch_first=True,
dropout_prob=self.dropout_prob_,
sequence_length=sequence_length_ph,
param_attr=ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale_, high=self.init_scale_)),
bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0)),
forget_bias=0.0)
return rnn_out, last_hidden, last_cell
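

# A hedged construction sketch (the config values, place choice and executor
# wiring are assumptions for illustration): the model builds its fluid program
# lazily, so a caller typically instantiates it, calls build_model with a config
# dict, and then runs the startup program once before training.
def _example_build(model_configs=None):
    """Illustrative helper showing how LanguageModel is assembled."""
    model = LanguageModel()
    model.build_model(model_configs or {"n_hidden": 256, "num_layers": 2})
    executor = fluid.Executor(fluid.CPUPlace())
    executor.run(model.startup_program_)
    return model, executor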
| 39.774336 | 78 | 0.589276 | 1,026 | 8,989 | 4.840156 | 0.211501 | 0.018325 | 0.026581 | 0.022956 | 0.318768 | 0.229762 | 0.201168 | 0.147402 | 0.108739 | 0.108739 | 0 | 0.015228 | 0.320614 | 8,989 | 225 | 79 | 39.951111 | 0.797937 | 0.092558 | 0 | 0.152941 | 0 | 0 | 0.038003 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052941 | false | 0 | 0.064706 | 0.023529 | 0.158824 | 0.005882 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f05fd861ae06dfc722e2350b4af98a9d48fbfb33 | 2,590 | py | Python | Predict-image.py | nikraftarf/Smile-Detection-DeepLearning | 450137923277a2a176652160749b5eaac92a74ee | [
"MIT"
] | 1 | 2019-10-02T18:52:30.000Z | 2019-10-02T18:52:30.000Z | Predict-image.py | nikraftarf/Smile-Detection-DeepLearning | 450137923277a2a176652160749b5eaac92a74ee | [
"MIT"
] | null | null | null | Predict-image.py | nikraftarf/Smile-Detection-DeepLearning | 450137923277a2a176652160749b5eaac92a74ee | [
"MIT"
] | null | null | null | from torch.autograd import Variable
from torchvision import transforms
import cv2
import torch
import dlib
import numpy as np
import face_recognition
from PIL import Image
label_map={0:'laugh',1:'poker',2:'smile'}
detector = dlib.get_frontal_face_detector()
color_green = (0,255,0)
line_width = 3
cuda=False
device = torch.device('cpu')
model=torch.load('D:/internship/mainproject/my_resnet101_lr2_SGD_model.pth',map_location=device)
model.eval()
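# inference mode: disables dropout and uses the stored batch-norm statistics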
test_transforms = transforms.Compose([transforms.Resize(224),
transforms.ToTensor(),
])
def predict_image(image):
image_tensor = test_transforms(image).float()
image_tensor = image_tensor.unsqueeze_(0)
input = Variable(image_tensor)
input = input.to(device)
output = model(input)
# print(output)
index = output.data.cpu().numpy().argmax()
return index
def argmax(prediction):
prediction = prediction.cpu()
print('1',prediction)
prediction = prediction.detach().numpy()
print('p',prediction)
top_1 = np.argmax(np.abs(prediction),axis=1)
print(top_1)
score = np.amax(prediction)
score = '{:6f}'.format(score)
prediction = top_1[0]
result = label_map[prediction]
return result,score
fps = 0
show_score = 0
show_res = 'Nothing'
sequence = 0
frame = cv2.imread('your image path',cv2.IMREAD_UNCHANGED)
to_pil = transforms.ToPILImage()
# image = to_pil(frame)
image = face_recognition.load_image_file("your image path")
imResize = cv2.resize(frame, (224, 224))
face_locations = face_recognition.face_locations(imResize,number_of_times_to_upsample=1,model="hog")
for face_location in face_locations:
# Print the location of each face in this image
top, right, bottom, left = face_location
face_image = imResize[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
frame1=np.array(pil_image)
index = predict_image(pil_image)
res = label_map[index]
print(res)
winname = 'smile-detection'
cv2.namedWindow(winname) # Create a named window
cv2.moveWindow(winname,500,250)
scale_percent = 300 # percent of original size
width = int(frame1.shape[1] * scale_percent / 100)
height = int(frame1.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
resized = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
cv2.putText(resized,'%s' %res,(10,10),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0, 255, 0),2)
cv2.imshow(winname,resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 30.116279 | 101 | 0.691892 | 349 | 2,590 | 4.977077 | 0.406877 | 0.025331 | 0.005757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038168 | 0.190734 | 2,590 | 85 | 102 | 30.470588 | 0.790553 | 0.05444 | 0 | 0 | 0 | 0 | 0.058649 | 0.023799 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.117647 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f060230961fdd328010c9170b1aa838bab3938d9 | 4,532 | py | Python | cup_project/data_etl/etl.py | MthBr/kg_embedding_for_medical_booking_data | 1a44ad1f51e9d39766ed93fcdf26ce4c757cc130 | [
"MIT"
] | null | null | null | cup_project/data_etl/etl.py | MthBr/kg_embedding_for_medical_booking_data | 1a44ad1f51e9d39766ed93fcdf26ce4c757cc130 | [
"MIT"
] | null | null | null | cup_project/data_etl/etl.py | MthBr/kg_embedding_for_medical_booking_data | 1a44ad1f51e9d39766ed93fcdf26ce4c757cc130 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Version 3, wip, of CUP_etl_2v1
ETL module, Jupyter-like, that transforms data from csv/database to pickle
This could be improved by accessing the Postgres database directly
@author:
"""
#%% Importing
from cup_project.config import data_dir, reportings_dir
import etl_utils as cup_load
import numpy as np
# Dataset paths
raw_data_dir = data_dir / 'raw'
interm_data_dir = raw_data = data_dir / 'intermediate'
describe_data_dir = reportings_dir / 'description'
#%% Pickle of CUP
types_cup = {
'sa_data_ins': 'str',
'sa_deleted' : 'str',
'sa_data_del': 'str',
'sa_ass_cf': 'str',
'sa_data_pren': 'str',
'sa_utente_id': 'str',
'sa_contratto_id': 'str',
'sa_data_app': 'str',
'sa_mese_app_id': 'str',
'sa_uop_codice_id': 'str',
'sa_comune_id': 'str',
'sa_branca_id': 'str',
'sa_pre_id': 'str',
'sa_med_id': 'str',
'sa_ese_id_lk': 'str',
'sa_sesso_id': 'str',
'sa_is_ad': 'str',
'sa_spr_id': 'str',
'sa_ut_id': 'str',
'sa_operazione': 'str',
'sa_stato_pren': 'str',
'sa_eta_id': 'int64',
'sa_impegnativa_id': 'str',
'sa_dti_id': 'str',
'sa_contatto_id': 'str',
'sa_gg_attesa': 'int64',
'sa_gg_attesa_pdisp': 'int64',
'sa_num_prestazioni': 'int64',
'sa_classe_priorita': 'str',
'sa_is_pre_eseguita': 'str',
'sa_data_prescr': 'str',
'sa_primo_accesso': 'str',
'sa_asl': 'str'
}
types_annul = {
'sa_data_ins': 'str',
'sa_deleted' : 'str',
'sa_data_del': 'str',
'sa_ass_cf': 'str',
'sa_data_pren': 'str',
'sa_utente_id': 'str',
'sa_utente_del': 'str',
'sa_contratto_id': 'str',
'sa_data_app': 'str',
'sa_mese_app_id': 'str',
'sa_uop_codice_id': 'str',
'sa_comune_id': 'str',
'sa_branca_id': 'str',
'sa_pre_id': 'str',
'sa_med_id': 'str',
'sa_ese_id_lk': 'str',
'sa_sesso_id': 'str',
'sa_is_ad': 'str',
'sa_spr_id': 'str',
'sa_ut_id': 'str',
'sa_operazione': 'str',
'sa_stato_pren': 'str',
'sa_eta_id': 'int64',
'sa_impegnativa_id': 'str',
'sa_dti_id': 'str',
'sa_contatto_id': 'str',
'sa_gg_attesa': 'int64',
'sa_gg_attesa_pdisp': 'int64',
'sa_num_prestazioni': 'int64',
'sa_classe_priorita': 'str',
'sa_data_prescr': 'str',
'sa_primo_accesso': 'str',
'sa_asl': 'str'
}
types_cassa = {
'sa_data_ins': 'str',
'sa_deleted' : 'str',
'sa_data_del': 'str',
'sa_utente_id': 'str',
'sa_cassa_id': 'str',
'sa_ass_cf': 'str',
'sa_data_prest': 'str',
'sa_mese_id': 'str',
'sa_data_mov': 'str',
'sa_mese_mov_id': 'str',
'sa_uop_codice_id': 'str',
'sa_comune_id': 'str',
'sa_branca_id': 'str',
'sa_pre_id': 'str',
'sa_med_id': 'str',
'sa_ese_id': 'str',
'sa_cntr_id': 'str',
'sa_sesso_id': 'str',
'sa_eta_id': 'int64',
'sa_is_ad': 'str',
'sa_impegnativa_id': 'str',
'sa_mov_id': 'str',
'sa_dti_pk': 'str',
'sa_dti_id': 'str',
'sa_dti_prg': 'str',
'sa_dti_is_pren': 'str',
'lordo': 'float64',
'ticket': 'float64',
'quota': 'float64',
'prestazioni': 'int64',
'prestazioni_ad': 'int64',
'importo_movimento': 'float64',
'importo_impegnativa': 'float64',
'importo_prest_impegnativa': 'float64',
'importo_quota_impegnativa': 'float64',
'sa_codice_causale': 'str',
'sa_asl': 'str',
}
#%% Pickle of CUP
# file_name = 'dwh_mis_cup'
# sep=','
# dates_cup = ['sa_data_ins','sa_data_pren','sa_data_app','sa_data_prescr']
# cup_load.load_describe_save(file_name, sep, raw_data_dir, describe_data_dir, interm_data_dir, dates_cup, types_cup)
#%% Pickle of CASSA
file_name = 'dwh_mis_cassa'
sep=';'
dates_cassa = ['sa_data_ins','sa_data_prest','sa_data_mov']
cup_load.load_describe_save(file_name, sep, raw_data_dir, describe_data_dir, interm_data_dir, dates_cassa, types_cassa)
#%% Pickle of BRANCHE
file_name = 'branche'
sep=';'
cup_load.load_describe_save(file_name, sep, raw_data_dir, describe_data_dir, interm_data_dir)
#%% Pickle of PRESTAZIONI
#TODO | 28.149068 | 121 | 0.563769 | 606 | 4,532 | 3.760726 | 0.20297 | 0.17771 | 0.132075 | 0.017552 | 0.610355 | 0.585344 | 0.560333 | 0.551996 | 0.551996 | 0.551996 | 0 | 0.012361 | 0.268094 | 4,532 | 161 | 122 | 28.149068 | 0.674706 | 0.127317 | 0 | 0.663934 | 0 | 0 | 0.427954 | 0.012706 | 0 | 0 | 0 | 0.006211 | 0 | 1 | 0 | false | 0 | 0.057377 | 0 | 0.057377 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0604684f1c950c26a8a754ea057145a8debf449 | 3,673 | py | Python | src/sofvsr/apply.py | SeleSchaefer/super_resolution | bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 | [
"MIT"
] | 5 | 2019-11-11T10:01:52.000Z | 2020-12-08T11:56:33.000Z | src/sofvsr/apply.py | SeleSchaefer/super_resolution | bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 | [
"MIT"
] | 1 | 2020-06-13T06:39:44.000Z | 2020-06-13T06:39:44.000Z | src/sofvsr/apply.py | SeleSchaefer/super_resolution | bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 | [
"MIT"
] | 1 | 2020-07-16T23:07:28.000Z | 2020-07-16T23:07:28.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Simon Schaefer
# Description : Apply SOFVSR model to given batch of images.
# SOFVSR converts the current as well as the prev and next image
# to the YCbCr space and concatenates their Y axes as a network
# input. Afterwards it scales up Cb and Cr using bilinear
# interpolation and concatenates the predicted HR Y axis with the
# interpolated Cb and Cr values. Finally, the image is converted
# to RGB space.
# =============================================================================
import os
import numpy as np
from PIL import Image
import torch
from torch.autograd import Variable
from sofvsr.modules import SOFVSR as SOFVSR_NET
from sofvsr.data_utils import ycbcr2rgb, rgb2ycbcr
class SOFVSR(object):
def __init__(self, model, scale, use_gpu=True):
# Load SOFVSR model from model path.
self._net = SOFVSR_NET(upscale_factor=scale)
model_path = os.path.join(os.environ["SR_PROJECT_MODELS_PATH"], "sofvsr")
model_path = os.path.join(model_path, model + '.pth')
ckpt = torch.load(model_path)
self._net.load_state_dict(ckpt)
if use_gpu: self._net.cuda()
# Set auxiliary variables.
self._use_gpu = use_gpu
self._scale = scale
def apply(self, LR0, LR1, LR2):
# Input checks.
assert LR0.size() == LR1.size() == LR2.size()
# Input preprocessing - Create Y cube image.
LR0_y, LR1_y, LR2_y = self._to_y_image(LR0, LR1, LR2)
LR_y_cube = torch.cat((LR0_y, LR1_y, LR2_y), 1)
# Input preprocessing - Create Cb & Cr interpolation images.
LR1_bicubic = torch.nn.functional.interpolate(LR1,
scale_factor=self._scale, mode='bilinear')
_, SR_cb, SR_cr = self._rgb2ycbcr(LR1_bicubic)
SR_cb, SR_cr = self._expand_dim(SR_cb), self._expand_dim(SR_cr)
# Apply model to input and return outputs.
LR_y_cube = Variable(LR_y_cube)
if self._use_gpu: LR_y_cube = LR_y_cube.cuda()
SR_y = self._net(LR_y_cube)
SR_y = self._expand_dim(SR_y)
# Image postprocessing.
SR_ycbcr = torch.cat((SR_y, SR_cb, SR_cr), 1)
SR_rgb = self._ycbcr2rgb(SR_ycbcr)
return SR_rgb
def _to_y_image(self, *tensors):
def rgb_to_y(x):
x, sz = Variable(x.data.new(*x.size())), x.size()
x = x[:, 0, :, :]*65.481+x[:, 1, :, :]*128.553+x[:, 2, :, :]*24.966+16
return self._expand_dim(x/255.0)
return [rgb_to_y(x) for x in tensors]
def _rgb2ycbcr(self, x):
y = 0.257*x[:, 0, :, :]+0.504*x[:, 1, :, :]+0.098*x[:, 2, :, :]+16/255.0
cb = -0.148*x[:, 0, :, :]-0.291*x[:, 1, :, :]+0.439*x[:, 2, :, :]+128/255.0
cr = 0.439*x[:, 0, :, :]-0.368*x[:, 1, :, :]-0.071*x[:, 2, :, :]+128/255.0
return y, cb, cr
def _ycbcr2rgb(self, x):
img_r = 1.164*(x[:, 0, :, :]-16/255.0)+1.596*(x[:, 2, :, :]-128/255.0)
img_r = self._expand_dim(img_r)
img_g = 1.164*(x[:, 0, :, :]-16/255.0)-0.392*(x[:, 1, :, :]-128/255.0)
img_g = img_g-0.813*(x[:, 2, :, :]-128/255.0)
img_g = self._expand_dim(img_g)
img_b = 1.164*(x[:, 0, :, :]-16/255.0)+2.017*(x[:, 1, :, :]-128/255.0)
img_b = self._expand_dim(img_b)
return torch.cat((img_r,img_g,img_b), 1)
def _expand_dim(self, x):
if len(x.size()) == 2: x = x.unsqueeze_(0)
return x.unsqueeze_(0).view(x.size()[1],1,x.size()[2],x.size()[3])
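# Usage sketch (illustrative; the model filename, frame size and scale below are
# assumed, not taken from this project). The three inputs are consecutive
# low-resolution RGB frames and the output is the super-resolved middle frame.
#
#   sr = SOFVSR(model='sofvsr_x4', scale=4, use_gpu=False)
#   lr0 = lr1 = lr2 = torch.rand(1, 3, 32, 32)   # three consecutive LR RGB frames
#   sr_rgb = sr.apply(lr0, lr1, lr2)             # here a (1, 3, 128, 128) RGB tensor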
| 44.253012 | 83 | 0.555677 | 562 | 3,673 | 3.428826 | 0.281139 | 0.022833 | 0.047224 | 0.016606 | 0.107421 | 0.056046 | 0.018682 | 0 | 0 | 0 | 0 | 0.075136 | 0.249932 | 3,673 | 82 | 84 | 44.792683 | 0.624319 | 0.25456 | 0 | 0 | 0 | 0 | 0.014711 | 0.008091 | 0 | 0 | 0 | 0 | 0.018182 | 1 | 0.127273 | false | 0 | 0.127273 | 0 | 0.381818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0626cbd6d41f5d088aafbecea438cc513ed9b96 | 7,617 | py | Python | tests/experiment/test_experiment_config.py | AirbusAerial/raster-vision | cfa7826169392e497fb57a540eb952fc6cee3a98 | [
"Apache-2.0"
] | 2 | 2019-04-17T13:04:23.000Z | 2020-10-04T10:28:27.000Z | tests/experiment/test_experiment_config.py | Yochengliu/raster-vision | f5badc387df86ce02d84e0e274a08026dbf65bd6 | [
"Apache-2.0"
] | null | null | null | tests/experiment/test_experiment_config.py | Yochengliu/raster-vision | f5badc387df86ce02d84e0e274a08026dbf65bd6 | [
"Apache-2.0"
] | null | null | null | import unittest
import rastervision as rv
from tests import data_file_path
class TestExperimentConfig(unittest.TestCase):
@staticmethod
def get_test_task():
task = rv.TaskConfig.builder(rv.OBJECT_DETECTION) \
.with_chip_size(300) \
.with_classes({
'car': (1, 'blue'),
'building': (2, 'red')}) \
.with_chip_options(neg_ratio=0.0,
ioa_thresh=1.0,
window_method='sliding') \
.with_predict_options(merge_thresh=0.1,
score_thresh=0.5) \
.build()
return task
def test_object_detection_exp(self):
root_uri = '/some/dummy/root'
img_path = '/dummy.tif'
label_path = '/dummy.json'
backend_conf_path = data_file_path(
'tf_object_detection/'
'embedded_ssd_mobilenet_v1_coco.config')
pretrained_model = ('https://dummy.com/model.gz')
task = self.get_test_task()
backend = rv.BackendConfig.builder(rv.TF_OBJECT_DETECTION) \
.with_task(task) \
.with_template(backend_conf_path) \
.with_pretrained_model(pretrained_model) \
.with_train_options(sync_interval=None,
do_monitoring=False) \
.build()
raster_source = rv.RasterSourceConfig.builder(rv.GEOTIFF_SOURCE) \
.with_uri(img_path) \
.with_channel_order([0, 1, 2]) \
.with_stats_transformer() \
.build()
scene = rv.SceneConfig.builder() \
.with_task(task) \
.with_id('od_test') \
.with_raster_source(raster_source) \
.with_label_source(label_path) \
.build()
dataset = rv.DatasetConfig.builder() \
.with_train_scene(scene) \
.with_validation_scene(scene) \
.build()
analyzer = rv.analyzer.StatsAnalyzerConfig()
e = rv.ExperimentConfig.builder() \
.with_id('object-detection-test') \
.with_root_uri(root_uri) \
.with_task(task) \
.with_backend(backend) \
.with_dataset(dataset) \
.with_analyzer(analyzer) \
.with_train_key('model_name') \
.build()
msg = e.to_proto()
e2 = rv.ExperimentConfig.from_proto(msg)
self.assertEqual(e.train_uri, '/some/dummy/root/train/model_name')
self.assertEqual(e.analyze_uri,
'/some/dummy/root/analyze/object-detection-test')
self.assertEqual(e.analyze_uri, e2.analyze_uri)
self.assertEqual(e.chip_uri, e2.chip_uri)
self.assertEqual(e.train_uri, e2.train_uri)
self.assertEqual(e.predict_uri, e2.predict_uri)
self.assertEqual(e.eval_uri, e2.eval_uri)
self.assertEqual(e2.dataset.train_scenes[0].label_source.uri,
'/dummy.json')
self.assertEqual(
e2.dataset.train_scenes[0].raster_source.channel_order, [0, 1, 2])
def test_experiment_missing_configs_id(self):
task = self.get_test_task()
# missing ID
with self.assertRaises(rv.ConfigError):
rv.ExperimentConfig.builder() \
.with_root_uri('') \
.with_task(task) \
.with_backend('') \
.with_dataset('') \
.with_analyzer('') \
.with_train_uri('') \
.build()
def test_experiment_missing_configs_backend(self):
task = self.get_test_task()
# missing backend
with self.assertRaises(rv.ConfigError):
rv.ExperimentConfig.builder() \
.with_id('') \
.with_root_uri('') \
.with_task(task) \
.with_dataset('') \
.with_analyzer('') \
.with_train_uri('') \
.build()
def test_experiment_missing_train_key(self):
task = self.get_test_task()
# missing root_uri and other uris
with self.assertRaises(rv.ConfigError):
rv.ExperimentConfig.builder() \
.with_id('') \
.with_task(task) \
.with_backend('') \
.with_dataset('') \
.build()
def test_experiment_missing_multiple_configs(self):
task = self.get_test_task()
# missing root_uri and dataset and analyzer
with self.assertRaises(rv.ConfigError):
rv.ExperimentConfig.builder() \
.with_id('') \
.with_task(task) \
.with_backend('') \
.with_train_uri('') \
.with_evaluators(['']) \
.build()
def test_no_missing_config_max_with_root(self):
task = self.get_test_task()
# maximum args with root_uri
try:
rv.ExperimentConfig.builder() \
.with_id('') \
.with_root_uri('/dummy/root/uri') \
.with_task(task) \
.with_backend('') \
.with_dataset('') \
.with_evaluators(['']) \
.with_analyze_uri('') \
.with_chip_uri('') \
.with_predict_uri('') \
.with_eval_uri('') \
.with_bundle_uri('') \
.build()
except rv.ConfigError:
self.fail('ConfigError raised unexpectedly')
def test_no_missing_config_min_with_root(self):
task = self.get_test_task()
# minimum args with root_uri
try:
rv.ExperimentConfig.builder() \
.with_id('') \
.with_evaluators(['']) \
.with_root_uri('/dummy/root/uri') \
.with_task(task) \
.with_backend('') \
.with_dataset('') \
.build()
except rv.ConfigError:
self.fail('ConfigError raised unexpectedly')
if __name__ == '__main__':
unittest.main()
| 41.851648 | 78 | 0.425627 | 607 | 7,617 | 5.004942 | 0.220758 | 0.029954 | 0.03555 | 0.0474 | 0.471692 | 0.377551 | 0.377551 | 0.320277 | 0.293614 | 0.233048 | 0 | 0.007576 | 0.48011 | 7,617 | 181 | 79 | 42.082873 | 0.759596 | 0.020218 | 0 | 0.482993 | 0 | 0 | 0.050027 | 0.018374 | 0 | 0 | 0 | 0 | 0.088435 | 1 | 0.054422 | false | 0 | 0.020408 | 0 | 0.088435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f063b48274cd1b438f4dc5f97aa0155bc8dce5d2 | 11,915 | py | Python | libraries/botbuilder-dialogs/botbuilder/dialogs/component_dialog.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 388 | 2019-05-07T15:53:21.000Z | 2022-03-28T20:29:46.000Z | libraries/botbuilder-dialogs/botbuilder/dialogs/component_dialog.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 1,286 | 2019-05-07T23:38:19.000Z | 2022-03-31T10:44:16.000Z | libraries/botbuilder-dialogs/botbuilder/dialogs/component_dialog.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 168 | 2019-05-14T20:23:25.000Z | 2022-03-16T06:49:14.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.core import TurnContext
from .dialog import Dialog
from .dialog_context import DialogContext
from .dialog_turn_result import DialogTurnResult
from .dialog_state import DialogState
from .dialog_turn_status import DialogTurnStatus
from .dialog_reason import DialogReason
from .dialog_set import DialogSet
from .dialog_instance import DialogInstance
class ComponentDialog(Dialog):
"""
A :class:`botbuilder.dialogs.Dialog` that is composed of other dialogs
:var persisted_dialog_state:
:vartype persisted_dialog_state: str
"""
persisted_dialog_state = "dialogs"
def __init__(self, dialog_id: str):
"""
Initializes a new instance of the :class:`ComponentDialog`
:param dialog_id: The ID to assign to the new dialog within the parent dialog set.
:type dialog_id: str
"""
super(ComponentDialog, self).__init__(dialog_id)
if dialog_id is None:
raise TypeError("ComponentDialog(): dialog_id cannot be None.")
self._dialogs = DialogSet()
self.initial_dialog_id = None
# TODO: Add TelemetryClient
async def begin_dialog(
self, dialog_context: DialogContext, options: object = None
) -> DialogTurnResult:
"""
Called when the dialog is started and pushed onto the parent's dialog stack.
If the task is successful, the result indicates whether the dialog is still
active after the turn has been processed by the dialog.
:param dialog_context: The :class:`botbuilder.dialogs.DialogContext` for the current turn of the conversation.
:type dialog_context: :class:`botbuilder.dialogs.DialogContext`
:param options: Optional, initial information to pass to the dialog.
:type options: object
:return: Signals the end of the turn
:rtype: :class:`botbuilder.dialogs.Dialog.end_of_turn`
"""
if dialog_context is None:
raise TypeError("ComponentDialog.begin_dialog(): outer_dc cannot be None.")
# Start the inner dialog.
dialog_state = DialogState()
dialog_context.active_dialog.state[self.persisted_dialog_state] = dialog_state
inner_dc = DialogContext(self._dialogs, dialog_context.context, dialog_state)
inner_dc.parent = dialog_context
turn_result = await self.on_begin_dialog(inner_dc, options)
# Check for end of inner dialog
if turn_result.status != DialogTurnStatus.Waiting:
# Return result to calling dialog
return await self.end_component(dialog_context, turn_result.result)
# Just signal waiting
return Dialog.end_of_turn
async def continue_dialog(self, dialog_context: DialogContext) -> DialogTurnResult:
"""
Called when the dialog is continued, where it is the active dialog and the
user replies with a new activity.
.. remarks::
If the task is successful, the result indicates whether the dialog is still
active after the turn has been processed by the dialog. The result may also
contain a return value.
If this method is *not* overriden the component dialog calls the
:meth:`botbuilder.dialogs.DialogContext.continue_dialog` method on it's inner dialog
context. If the inner dialog stack is empty, the component dialog ends,
and if a :class:`botbuilder.dialogs.DialogTurnResult.result` is available, the component dialog
uses that as it's return value.
:param dialog_context: The parent dialog context for the current turn of the conversation.
:type dialog_context: :class:`botbuilder.dialogs.DialogContext`
:return: Signals the end of the turn
:rtype: :class:`botbuilder.dialogs.Dialog.end_of_turn`
"""
if dialog_context is None:
raise TypeError("ComponentDialog.begin_dialog(): outer_dc cannot be None.")
# Continue execution of inner dialog.
dialog_state = dialog_context.active_dialog.state[self.persisted_dialog_state]
inner_dc = DialogContext(self._dialogs, dialog_context.context, dialog_state)
inner_dc.parent = dialog_context
turn_result = await self.on_continue_dialog(inner_dc)
if turn_result.status != DialogTurnStatus.Waiting:
return await self.end_component(dialog_context, turn_result.result)
return Dialog.end_of_turn
async def resume_dialog(
self, dialog_context: DialogContext, reason: DialogReason, result: object = None
) -> DialogTurnResult:
"""
Called when a child dialog on the parent's dialog stack completed this turn, returning
control to this dialog component.
.. remarks::
Containers are typically leaf nodes on the stack but the dev is free to push other dialogs
on top of the stack which will result in the container receiving an unexpected call to
:meth:`ComponentDialog.resume_dialog()` when the pushed on dialog ends.
To avoid the container prematurely ending we need to implement this method and simply
ask our inner dialog stack to re-prompt.
:param dialog_context: The dialog context for the current turn of the conversation.
:type dialog_context: :class:`botbuilder.dialogs.DialogContext`
:param reason: Reason why the dialog resumed.
:type reason: :class:`botbuilder.dialogs.DialogReason`
:param result: Optional, value returned from the dialog that was called.
:type result: object
:return: Signals the end of the turn
:rtype: :class:`botbuilder.dialogs.Dialog.end_of_turn`
"""
await self.reprompt_dialog(dialog_context.context, dialog_context.active_dialog)
return Dialog.end_of_turn
async def reprompt_dialog(
self, context: TurnContext, instance: DialogInstance
) -> None:
"""
Called when the dialog should re-prompt the user for input.
:param context: The context object for this turn.
:type context: :class:`botbuilder.core.TurnContext`
:param instance: State information for this dialog.
:type instance: :class:`botbuilder.dialogs.DialogInstance`
"""
# Delegate to inner dialog.
dialog_state = instance.state[self.persisted_dialog_state]
inner_dc = DialogContext(self._dialogs, context, dialog_state)
await inner_dc.reprompt_dialog()
# Notify component
await self.on_reprompt_dialog(context, instance)
async def end_dialog(
self, context: TurnContext, instance: DialogInstance, reason: DialogReason
) -> None:
"""
Called when the dialog is ending.
:param context: The context object for this turn.
:type context: :class:`botbuilder.core.TurnContext`
:param instance: State information associated with the instance of this component dialog.
:type instance: :class:`botbuilder.dialogs.DialogInstance`
:param reason: Reason why the dialog ended.
:type reason: :class:`botbuilder.dialogs.DialogReason`
"""
# Forward cancel to inner dialog
if reason == DialogReason.CancelCalled:
dialog_state = instance.state[self.persisted_dialog_state]
inner_dc = DialogContext(self._dialogs, context, dialog_state)
await inner_dc.cancel_all_dialogs()
await self.on_end_dialog(context, instance, reason)
def add_dialog(self, dialog: Dialog) -> object:
"""
Adds a :class:`Dialog` to the component dialog and returns the updated component.
:param dialog: The dialog to add.
:return: The updated :class:`ComponentDialog`.
:rtype: :class:`ComponentDialog`
"""
self._dialogs.add(dialog)
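# the first dialog added becomes the initial dialog unless one was set explicitly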
if not self.initial_dialog_id:
self.initial_dialog_id = dialog.id
return self
async def find_dialog(self, dialog_id: str) -> Dialog:
"""
Finds a dialog by ID.
:param dialog_id: The dialog to add.
:return: The dialog; or None if there is not a match for the ID.
:rtype: :class:`botbuilder.dialogs.Dialog`
"""
return await self._dialogs.find(dialog_id)
async def on_begin_dialog(
self, inner_dc: DialogContext, options: object
) -> DialogTurnResult:
"""
Called when the dialog is started and pushed onto the parent's dialog stack.
.. remarks::
If the task is successful, the result indicates whether the dialog is still
active after the turn has been processed by the dialog.
By default, this calls the :meth:`botbuilder.dialogs.Dialog.begin_dialog()`
method of the component dialog's initial dialog.
Override this method in a derived class to implement interrupt logic.
:param inner_dc: The inner dialog context for the current turn of conversation.
:type inner_dc: :class:`botbuilder.dialogs.DialogContext`
:param options: Optional, initial information to pass to the dialog.
:type options: object
"""
return await inner_dc.begin_dialog(self.initial_dialog_id, options)
async def on_continue_dialog(self, inner_dc: DialogContext) -> DialogTurnResult:
"""
Called when the dialog is continued, where it is the active dialog and the user replies with a new activity.
:param inner_dc: The inner dialog context for the current turn of conversation.
:type inner_dc: :class:`botbuilder.dialogs.DialogContext`
"""
return await inner_dc.continue_dialog()
async def on_end_dialog( # pylint: disable=unused-argument
self, context: TurnContext, instance: DialogInstance, reason: DialogReason
) -> None:
"""
Ends the component dialog in its parent's context.
:param turn_context: The :class:`botbuilder.core.TurnContext` for the current turn of the conversation.
:type turn_context: :class:`botbuilder.core.TurnContext`
:param instance: State information associated with the inner dialog stack of this component dialog.
:type instance: :class:`botbuilder.dialogs.DialogInstance`
:param reason: Reason why the dialog ended.
:type reason: :class:`botbuilder.dialogs.DialogReason`
"""
return
async def on_reprompt_dialog( # pylint: disable=unused-argument
self, turn_context: TurnContext, instance: DialogInstance
) -> None:
"""
:param turn_context: The :class:`botbuilder.core.TurnContext` for the current turn of the conversation.
:type turn_context: :class:`botbuilder.dialogs.DialogInstance`
:param instance: State information associated with the inner dialog stack of this component dialog.
:type instance: :class:`botbuilder.dialogs.DialogInstance`
"""
return
async def end_component(
self, outer_dc: DialogContext, result: object # pylint: disable=unused-argument
) -> DialogTurnResult:
"""
Ends the component dialog in its parent's context.
.. remarks::
If the task is successful, the result indicates that the dialog ended after the
turn was processed by the dialog.
:param outer_dc: The parent dialog context for the current turn of conversation.
:type outer_dc: class:`botbuilder.dialogs.DialogContext`
:param result: Optional, value to return from the dialog component to the parent context.
:type result: object
:return: Value to return.
:rtype: :class:`botbuilder.dialogs.DialogTurnResult.result`
"""
return await outer_dc.end_dialog(result)
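# Usage sketch (illustrative only; TextPrompt, WaterfallDialog and the step methods
# are assumed to come from botbuilder.dialogs and are not defined in this file):
#
#   class RootDialog(ComponentDialog):
#       def __init__(self):
#           super().__init__(RootDialog.__name__)
#           self.add_dialog(TextPrompt('name_prompt'))
#           self.add_dialog(WaterfallDialog('main', [self.ask_name_step, self.final_step]))
#           self.initial_dialog_id = 'main'   # 'main' is started by begin_dialog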
| 43.17029 | 118 | 0.682501 | 1,450 | 11,915 | 5.489655 | 0.141379 | 0.048995 | 0.060804 | 0.017085 | 0.602136 | 0.530779 | 0.495352 | 0.466583 | 0.45 | 0.415704 | 0 | 0 | 0.247084 | 11,915 | 275 | 119 | 43.327273 | 0.887304 | 0.077801 | 0 | 0.364706 | 0 | 0 | 0.03672 | 0.013967 | 0 | 0 | 0 | 0.003636 | 0 | 1 | 0.023529 | false | 0 | 0.105882 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0644ab6366a33d81d90eef2ede4bb678506cdf2 | 2,476 | py | Python | example/SimpleMOF.py | EmilioSchi/Niched-Pareto-Genetic-Algorithm-NPGA | 835aeca88f1273589deec02034cb4a0d92d108b3 | [
"Apache-2.0"
] | 10 | 2020-07-06T01:28:51.000Z | 2022-03-26T05:02:49.000Z | example/SimpleMOF.py | EmilioSchi/Niched-Pareto-Genetic-Algorithm-NPGA | 835aeca88f1273589deec02034cb4a0d92d108b3 | [
"Apache-2.0"
] | null | null | null | example/SimpleMOF.py | EmilioSchi/Niched-Pareto-Genetic-Algorithm-NPGA | 835aeca88f1273589deec02034cb4a0d92d108b3 | [
"Apache-2.0"
] | 3 | 2020-07-06T01:28:54.000Z | 2021-09-17T01:25:52.000Z | import math
import NPGA
import matplotlib.pyplot as plt
import numpy as np
def scaleMinMax(x, xmin, xmax, mindesired, maxdesired):
return ((x - xmin) / (xmax - xmin) * (maxdesired - mindesired) + mindesired)
def graytodec(bin_list):
"""
Convert from Gray coding to binary coding.
We assume big endian encoding.
"""
b = bin_list[0]
d = int(b) * (2**(len(bin_list)-1))
for i, e in enumerate(range(len(bin_list) - 2, -1, -1)):
b = str(int(b != bin_list[i + 1]))
d += int(b) * (2**e)
return d
def decodechromosome(bits):
dec = graytodec(bits)
max_current = math.pow(2, len(bits)) - 1
value = scaleMinMax(dec, 0, max_current, -10, 10)
return value
class StaticGen:
Generation = 1
def display(statistics):
xpop = []
ypop = []
for candidate in statistics.population:
xpop.append(decodechromosome(candidate.Genes))
ypop.append(candidate.Fitness)
xbest = []
ybest = []
for specie in statistics.Species:
xbest.append(decodechromosome(specie.Genes))
ybest.append(specie.Fitness)
xEUbest = [decodechromosome(statistics.EuclideanBetter.Genes)]
yEUbest = [statistics.EuclideanBetter.Fitness]
x = np.linspace(-10,10,100)
y21 = [F1(i) for i in x if True]
y22 = [F2(i) for i in x if True]
plt.figure(1)
plt.clf()
plt.axis([-10, 10, -20, 180])
#plt.legend(['Generation'], loc=1)
plt.plot(x, y21, 'k')
plt.plot(x, y22, 'k')
plt.plot(xpop, ypop, 'ko')
plt.plot(xbest, ybest, 'go')
plt.plot(xEUbest, yEUbest, 'ro')
plt.title('Simple MO problem, GENERATION: ' + str(StaticGen.Generation))
plt.grid()
plt.draw()
plt.pause(0.1)
plt.show(block=False)
print(statistics.EuclideanBetter.Genes, end='\t')
print(statistics.EuclideanBetter.Fitness)
StaticGen.Generation = StaticGen.Generation + 1
def F1(x):
return (x + 2) * (x + 2) - 10
def F2(x):
return (x - 2) * (x - 2) + 20
def getfitness(candidate):
x = decodechromosome(candidate)
return [[F1(x), 'minimize'], [F2(x), 'minimize']]
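# Both objectives are minimized: F1 is optimal at x = -2 and F2 at x = 2, so the
# Pareto-optimal decision values lie in the interval [-2, 2].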
def test():
geneset = '01'
genelen = [128]
def fnDisplay(statistics): display(statistics)
def fnGetFitness(genes): return getfitness(genes)
optimalFitness = [0, 0]
GA = NPGA.NichedParetoGeneticAlgorithm(
fnGetFitness, fnDisplay, optimalFitness,
geneset, genelen, population_size = 30,
max_generation = 100, crossover_rate = 0.65,
mutation_rate = 1/128, niche_radius = 0.08,
prc_tournament_size = 0.2, fastmode = True,
multithreadmode = True)
paretosolution = GA.Evolution()
test()
plt.show()
| 24.514851 | 77 | 0.67811 | 348 | 2,476 | 4.784483 | 0.37931 | 0.021021 | 0.010811 | 0.007207 | 0.03003 | 0.03003 | 0.016817 | 0 | 0 | 0 | 0 | 0.040291 | 0.168013 | 2,476 | 100 | 78 | 24.76 | 0.767961 | 0.043215 | 0 | 0 | 0 | 0 | 0.025021 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.053333 | 0.053333 | 0.293333 | 0.026667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f06566cbb4c4f52f9bfdb70fc99effa07ea0b7b1 | 8,351 | py | Python | scripts/run_batch_iterative.py | thesukantadey/OpeNPDN | 116ce3c4ba82d326f04041606008baf3049b12b0 | [
"BSD-3-Clause"
] | 13 | 2019-11-17T04:03:26.000Z | 2021-09-02T13:56:00.000Z | scripts/run_batch_iterative.py | thesukantadey/OpeNPDN | 116ce3c4ba82d326f04041606008baf3049b12b0 | [
"BSD-3-Clause"
] | 3 | 2020-07-02T14:51:08.000Z | 2022-02-04T07:46:41.000Z | scripts/run_batch_iterative.py | thesukantadey/OpeNPDN | 116ce3c4ba82d326f04041606008baf3049b12b0 | [
"BSD-3-Clause"
] | 12 | 2019-12-28T17:48:16.000Z | 2022-01-02T13:08:35.000Z | #!/usr/bin/env python3
import sys
import subprocess
import numpy as np
sys.path.append('src')
from T6_PSI_settings import T6_PSI_settings
settings_obj = T6_PSI_settings.load_obj()
if len(sys.argv)>1 and sys.argv[1] == "no_congestion":
congestion_enabled = 0
else:
congestion_enabled = 1
num_of_parallel = settings_obj.num_parallel_runs
num_of_maps_per_run = settings_obj.num_per_run
start_value = settings_obj.start_maps
num_maps = settings_obj.num_maps
if congestion_enabled ==1 :
merge_file_prefix = "CNN_"
else:
merge_file_prefix = "CNN_wo_cong_"
validation_percent = settings_obj.validation_percent
test_percent = settings_obj.test_percent
current_unit = settings_obj.current_unit
VDD = settings_obj.VDD
size_region_x = settings_obj.WIDTH_REGION*1e6
size_region_y = settings_obj.LENGTH_REGION*1e6
NUM_REGIONS_X = settings_obj.NUM_REGIONS_X
NUM_REGIONS_Y = settings_obj.NUM_REGIONS_Y
current_map_num_regions = settings_obj.current_map_num_regions
ps =[]
map_proc = 0
n = 1
while map_proc < num_maps:
if len(ps) < num_of_parallel :
if map_proc+num_of_maps_per_run < num_maps :
if congestion_enabled ==1:
p=subprocess.Popen(["python3","src/generate_training_data.py","%d"%(map_proc+start_value),"%d"%(num_of_maps_per_run)])
else:
p=subprocess.Popen(["python3","src/generate_training_data.py","%d"%(map_proc+start_value),"%d"%(num_of_maps_per_run),"%s"%sys.argv[1]])
else:
if congestion_enabled ==1:
p=subprocess.Popen(["python3","src/generate_training_data.py","%d"%(map_proc+start_value),"%d"%(num_maps-map_proc)])
else:
p=subprocess.Popen(["python3","src/generate_training_data.py","%d"%(map_proc+start_value),"%d"%(num_maps-map_proc),"%s"%sys.argv[1]])
ps.append(p)
map_proc = map_proc + num_of_maps_per_run
print("Launching job %d"%n)
n = n + 1
else:
p = ps[0]
p.wait()
del ps[0]
for p in ps:
p.wait()
print("Runs completed")
print("Reading state variables")
map_proc =0
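# merge the per-chunk CSV outputs written by the parallel runs into single arrays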
while map_proc <num_maps:
if map_proc+num_of_maps_per_run < num_maps :
state_csv_file = settings_obj.parallel_run_dir+"state_%d_to_%d.csv"%(start_value+map_proc,start_value+map_proc+num_of_maps_per_run-1)
congest_csv_file = settings_obj.parallel_run_dir+"congest_%d_to_%d.csv"%(start_value+map_proc,start_value+map_proc+num_of_maps_per_run-1)
current_csv_file = settings_obj.parallel_run_dir+"current_maps_%d_to_%d.csv"%(start_value+map_proc,start_value+map_proc+num_of_maps_per_run-1)
else:
state_csv_file = settings_obj.parallel_run_dir+"state_%d_to_%d.csv"%(start_value+map_proc,start_value+num_maps-1)
congest_csv_file = settings_obj.parallel_run_dir+"congest_%d_to_%d.csv"%(start_value+map_proc,start_value+num_maps-1)
current_csv_file = settings_obj.parallel_run_dir+"current_maps_%d_to_%d.csv"%(start_value+map_proc,start_value+num_maps-1)
state = np.genfromtxt(state_csv_file, delimiter = ',')
state= np.reshape(state,(-1,1))
if congestion_enabled ==1:
congest = np.genfromtxt(congest_csv_file, delimiter = ',')
currents = np.genfromtxt(current_csv_file, delimiter = ',')
if(map_proc == 0):
state_data = state
current_data = currents
if congestion_enabled ==1:
congest_data = congest
else:
state_data = np.vstack((state_data, state))
current_data = np.vstack((current_data,currents))
if congestion_enabled ==1:
congest_data = np.vstack((congest_data, congest))
map_proc = map_proc+num_of_maps_per_run
with open( settings_obj.work_dir + 'work/'+merge_file_prefix+'state_%d_to_%d.csv' %(start_value,
start_value + num_maps - 1), 'wb') as outfile:
np.savetxt(outfile,state_data, delimiter=',', fmt='%d')
if congestion_enabled ==1:
with open( settings_obj.work_dir + 'work/'+merge_file_prefix+'congest_%d_to_%d.csv' %(start_value,
start_value + num_maps - 1), 'wb') as outfile:
np.savetxt(outfile,congest_data, delimiter=',', fmt='%f')
print("Reading current maps")
count = 0
map_database = np.array(current_data)
template_database = np.array(state_data)
if congestion_enabled ==1:
congest_database = np.array(congest_data)
#map_database = np.zeros((current_map_num_regions*current_map_num_regions*num_maps,int(3*size_region_x)*int(3*size_region_y)))
#template_database = np.zeros((current_map_num_regions*current_map_num_regions*num_maps,1))
#congest_database = np.zeros((current_map_num_regions*current_map_num_regions*num_maps,9))
#for i in range(start_value,start_value+num_maps):
# power_map_file = settings_obj.map_dir + "current_map_%d.csv"%(i)
# currents = np.genfromtxt(power_map_file, delimiter = ',')
# currents = (currents*current_unit)/VDD
# state = state_data[i-start_value]
# congest = congest_data[i-start_value]
# for n,template in enumerate(state):
# y = int(n/ NUM_REGIONS_X)
# x = n % NUM_REGIONS_X
# xcor = int(x * size_region_x)
# ycor = int(y * size_region_y)
# end_xcor = int(xcor + size_region_x)
# end_ycor = int(ycor + size_region_y)
# current_dis = currents[xcor:end_xcor, ycor:end_ycor]
# map_database[count] = current_dis.reshape(-1)
# template_database[count] = template
# congest_database[count] = congest[n]
# count +=1
#
print("Creating training and validation datasets")
data_size = template_database.shape[0]
val_num = int(validation_percent*data_size/100)
test_num = int(test_percent *data_size/100)
train_num = int(data_size - val_num - test_num)
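# randomly carve out validation and test rows with boolean index masks; the remaining rows form the training set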
np.random.choice(data_size, size=(val_num+test_num), replace=False)
choice = np.random.choice(range(data_size), size=(val_num,), replace=False)
ind = np.zeros(data_size, dtype=bool)
ind[choice] = True
rest = ~ind
val_currents = map_database[ind,:]
val_template = template_database[ind,:]
if congestion_enabled ==1:
val_congest = congest_database[ind,:]
map_database = map_database[rest,:]
template_database = template_database[rest,:]
data_size = template_database.shape[0]
if congestion_enabled ==1:
congest_database = congest_database[rest,:]
choice = np.random.choice(range(data_size), size=(test_num,), replace=False)
ind = np.zeros(data_size, dtype=bool)
ind[choice] = True
rest = ~ind
test_currents = map_database[ind,:]
test_template = template_database[ind,:]
if congestion_enabled ==1:
test_congest = congest_database[ind,:]
train_currents = map_database[rest,:]
train_template = template_database[rest,:]
if congestion_enabled ==1:
train_congest = congest_database[rest,:]
with open(settings_obj.CNN_data_dir+merge_file_prefix+"val_currents.csv" , 'wb') as outfile:
np.savetxt(outfile,val_currents,delimiter=',',fmt='%4.3e')
with open(settings_obj.CNN_data_dir+merge_file_prefix+"val_template.csv" , 'wb') as outfile:
np.savetxt(outfile,val_template,delimiter=',',fmt='%d')
with open(settings_obj.CNN_data_dir+merge_file_prefix+"test_currents.csv" , 'wb') as outfile:
np.savetxt(outfile,test_currents,delimiter=',',fmt='%4.3e')
with open(settings_obj.CNN_data_dir+merge_file_prefix+"test_template.csv" , 'wb') as outfile:
np.savetxt(outfile,test_template,delimiter=',',fmt='%d')
with open(settings_obj.CNN_data_dir+merge_file_prefix+"train_currents.csv", 'wb') as outfile:
np.savetxt(outfile,train_currents,delimiter=',',fmt='%4.3e')
with open(settings_obj.CNN_data_dir+merge_file_prefix+"train_template.csv", 'wb') as outfile:
np.savetxt(outfile,train_template,delimiter=',',fmt='%d')
if congestion_enabled ==1:
with open(settings_obj.CNN_data_dir+merge_file_prefix+"val_congest.csv" , 'wb') as outfile:
np.savetxt(outfile,val_congest,delimiter=',',fmt='%f')
with open(settings_obj.CNN_data_dir+merge_file_prefix+"test_congest.csv" , 'wb') as outfile:
np.savetxt(outfile,test_congest,delimiter=',',fmt='%f')
with open(settings_obj.CNN_data_dir+merge_file_prefix+"train_congest.csv" , 'wb') as outfile:
np.savetxt(outfile,train_congest,delimiter=',',fmt='%f')
| 42.607143 | 151 | 0.707221 | 1,245 | 8,351 | 4.382329 | 0.106024 | 0.064516 | 0.046188 | 0.047654 | 0.594758 | 0.571664 | 0.530609 | 0.528409 | 0.428886 | 0.399927 | 0 | 0.009883 | 0.163932 | 8,351 | 195 | 152 | 42.825641 | 0.771555 | 0.137229 | 0 | 0.286713 | 0 | 0 | 0.097605 | 0.023113 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027972 | 0 | 0.027972 | 0.034965 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0662d6209dc2a38f74f67d0796199758c0c86a8 | 19,314 | py | Python | websiteV2/utils/query.py | meng950813/scholar_discovery_sys | 8f03df509e796a5c37189cd8aae0c114d0fa5e90 | [
"MIT"
] | 1 | 2019-03-26T10:19:43.000Z | 2019-03-26T10:19:43.000Z | websiteV2/utils/query.py | meng950813/scholar_discovery_sys | 8f03df509e796a5c37189cd8aae0c114d0fa5e90 | [
"MIT"
] | 3 | 2021-03-20T00:39:17.000Z | 2021-06-01T23:31:32.000Z | websiteV2/utils/query.py | meng950813/scholar_discovery_sys | 8f03df509e796a5c37189cd8aae0c114d0fa5e90 | [
"MIT"
] | 1 | 2022-02-24T01:16:09.000Z | 2022-02-24T01:16:09.000Z | # coding=utf-8
from __future__ import division
import pickle
import time, jieba, math
import jieba.posseg as pseg
import os
class Subject:
"""
Compute each teacher's language-model score, LDA score and PageRank score for the
query text, and combine them into the teacher's final score.
"""
def __init__(self, sub, id_name, path):
'''
Initialize a Subject object
:param sub: the subject code and number of topics to compute with
:param id_name: teacher information
'''
# ({"code": 学科代码, "k": 主题数}
self.sub = sub
# self.id_name 字典
self.id_name = id_name
code = self.sub['code']
k = self.sub['k']
print("load:" + code)
self.basepath = path
self.path = os.path.join(path, 'querydata', code, 'k' + str(k))
# 词的索引 wordIndex
self.lmindex = pickle.load(open(os.path.join(self.path, 'wordIndex'),'rb'))
# word和topic的关系
self.ldaword = pickle.load(open(os.path.join(self.path, 'wordToTopic'), 'rb'))
# 教师和topic的关系
self.ldaexp = pickle.load(open(os.path.join(self.path, 'teacherTopic'), 'rb'))
# 教师PageRank评分
self.pagerank = pickle.load(open(os.path.join(self.path, 'teacherRank'), 'rb'))
self.cal = 0.9
def cal_lda_one_word(self, word, teacher_id):
'''
Compute a teacher's LDA score for one word of the query text.
:param word: the query may contain several words handled one by one; word is a single query word
:param teacher_id: used to restrict/filter the teachers to be ranked; may be None
:return: res type:dict teacher ids and their LDA scores for the word above
'''
"""Compute the expert LDA score for a single word."""
ld = self.ldaword.get(word)
# sort {topic_id1:value,...} 筛选出value>1.0e-06 value降序排序
sort = {}
# res {teacher_id1:value,...}
res = {}
if ld != None:
if teacher_id is not None:
ld = {k: ld[k] for k in ld if k in teacher_id}
# 对字典中的项,进行值升序排序,然后逆序,返回一个列表 [(topic_id1:value),(topic_id2:value),(topic_id3:value),...]
sortld = sorted(ld.items(), key=lambda item: item[1], reverse=True)
a = [r for r in sortld if r[1] > 1.0e-06]
for i in a:
sort[i[0]] = i[1]
for j in sort.keys():
# j是 topic_id
# ldaexp 教师和topic的关系
for m in self.ldaexp.keys():
# m是teacher_id
if j in self.ldaexp[m]:
# id为m的老师对某主题的值乘这个主题对这个词的值
if m in res:
res[m] += self.ldaexp[m][j] * sort[j]
else:
res[m] = self.ldaexp[m][j] * sort[j]
return res
def cal_one_word(self, word, teacher_id):
'''
Compute a teacher's language-model score for one word of the query text.
:param word: the query may contain several words handled one by one; word is a single query word
:param teacher_id: used to restrict/filter the teachers to be ranked; may be None
:return: res type:dict teacher ids and their scores for the word above
'''
lm = self.lmindex.get(word) # type dict
res = {}
# apply the smoothing coefficient
# lm may be None: this is the inverted index of a single subject, which may not contain the word
if lm != None:
# lm {teacher_id1: word1出现的次数 / 总词数,teacher_id2: word1出现的次数 / 总词数, col_fre: word[w] / length}
if teacher_id is not None:
lm = {k: lm[k] for k in lm if k in teacher_id or k == "col_fre"}
for l in lm.keys():
# l teacher_id
if l != 'col_fre':
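# linear-interpolation (Jelinek-Mercer style) smoothing: 0.9 * P(word | teacher) + 0.1 * P(word | collection)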
res[l] = self.cal * lm[l] + (1 - self.cal) * lm['col_fre']
res['col'] = lm['col_fre']
return res
def cal_rank(self, res, lda, cof):
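# Combine, per teacher, the language-model and LDA scores of every query word
# (missing words fall back to the collection frequency or a 1e-6 floor), then
# scale by the teacher's PageRank and composite score.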
rank = {}
exp_list = [r for wd in res.keys() for r in res[wd]]
exp_list = list(set(exp_list))
exp_list_lda = [r for wd in lda.keys() for r in lda[wd]]
exp_list_lda = list(set(exp_list_lda))
exp_list.extend(exp_list_lda)
if 'col' in exp_list:
exp_list.remove('col')
for r in exp_list:
rank[r] = cof
# wd word
for wd in res.keys():
if len(res[wd]) != 0:
# 如果res[wd]中有r这个teacher_id,那么给这个
if res[wd].get(r):
rank[r] *= res[wd][r]
else:
rank[r] *= res[wd]['col']
if wd in lda and lda[wd].get(r):
adjust = lda[wd][r]
rank[r] *= adjust
else:
rank[r] *= 1e-6
for wd in lda:
if wd not in res and r in lda[wd]:
rank[r] *= lda[wd][r]
else:
rank[r] *= 1e-6
if self.pagerank.get(r):
rank[r] *= self.pagerank[r] * self.id_name[r]["composite_score"]
return rank
def do_query(self, words, teacher_id):
'''
Run the query and return the search result.
:param words: the query words
:param teacher_id: used to restrict/filter the teachers to be ranked
:return: result, teacher ids and their scores for the query, sorted in descending order
'''
temp_res = {}
temp_lda = {}
for word in words:
temp_res[word] = self.cal_one_word(word, teacher_id)
temp_lda[word] = self.cal_lda_one_word(word, teacher_id)
for word in words:
if word in temp_res and not temp_res[word]:
# Python 字典 pop() 方法删除字典给定键 key 及对应的值,返回值为被删除的值。key 值必须给出。 否则,返回 default 值。
temp_res.pop(word)
if word in temp_lda and not temp_lda[word]:
temp_lda.pop(word)
if not temp_res and not temp_lda:
return []
# 返回xy(x的y次方) 的值。
cof = math.pow(10e-6, len(words) - max(len(temp_res), len(temp_lda)))
level = math.pow(10e-6, len(words) + 1)
rank = self.cal_rank(temp_res, temp_lda, cof)
sortrk = sorted(rank.items(), key=lambda item: item[1], reverse=True)
result = [(r[0], r[1]) for r in sortrk ]
return result
class Query:
'''
Search the input text, then print and return the search results.
'''
def __init__(self, subs, path):
'''
Initialize a Query object
:param subs: subject codes and their topic counts
'''
# [{"code": '01', "k": 46}, {"code": '02', "k": 98}]
self.subs = subs
# {teacher_id1:{id:xx,name:xxx},...}
self.id_name = pickle.load(open(os.path.join(path,'querydata','teacherName'), 'rb'))
self.institution_info = pickle.load(open(os.path.join(path,'querydata','InstitutionName'), 'rb'))
self.school_info =pickle.load(open(os.path.join(path,'querydata','SchoolName'), 'rb'))
# self.Subject {code1:Subject(sub1),sode2:Subject2(sub2)}
self.Subject = {sub['code']: Subject(sub, self.id_name,path) for sub in self.subs}
self.stop = []
stopword = [line.strip() for line in open(os.path.join(path,'querydata','StopWords.txt'), encoding='utf-8').readlines()]
stopword1 = [line.strip() for line in open(os.path.join(path,'querydata','stop_word_4.txt'), encoding='utf-8').readlines()]
stopwords = [i.split(':')[0] for i in stopword1]
self.stop.extend(stopword)
self.stop.extend(stopwords)
self.fill = ['vn', 'n', 'nr', 'nr1', 'nr2', 'nrj', 'nrf', 'ns', 'nsf',
'nt', 'nz', 'nl', 'ng']
jieba.load_userdict(os.path.join(path,'querydata','userdict.txt'))
def prints(self, result):
'''
Print teacher information after a teacher search.
:param result: the information returned by the search, i.e. teacher ids and their query scores in descending order
:return:
'''
query_result = []
for code in result:
size = len(result[code])
if size == 0:
continue
# 教师个数
print("学科:%s,有关教师个数:%d" % (code, size))
teacher = result[code]
for t in teacher:
# 教师名字:(id:权重)
# print(self.id_name[t[0]]["NAME"]+":"+str(t))
query_result.append((self.id_name[t[0]]["NAME"], str(t[1])))
# print()
return query_result
def prints_for_institution(self, result, school=None):
'''
Convert the retrieved teachers into institution-level results and print them.
:param result: the teacher information returned by the search, i.e. teacher ids and their query scores in descending order
:return: institution search results, school name + institution name
'''
# 学院信息
institution_info = {}
for code in result:
# 学科代码code下的教师权值信息
teacher_info_s = result[code]
for teacher_info in teacher_info_s:
# 教师id
teacher_id = teacher_info[0]
# 学院id
institution_id = self.id_name[teacher_id]['INSTITUTION_ID']
# 将老师的权值归入老师对应的学院中,以计算学院信息
if institution_id in institution_info:
institution_info[institution_id] += teacher_info[1]
else:
institution_info[institution_id] = teacher_info[1]
# 学院按权值从大到小排序
institution_rank = dict(sorted(institution_info.items(), key=lambda x: x[1], reverse=True))
# 返回学院的学校名+学院名+学院权值
query_result = []
if school != None:
for institution_id in institution_rank:
# 学院所属的学校名字
schoolName = self.institution_info[institution_id]['SCHOOL_NAME']
# 打印所查学校的院系
if schoolName == school:
query_result.append((schoolName, self.institution_info[institution_id]['NAME'],institution_rank[institution_id]))
else:
for institution_id in institution_rank:
schoolName = self.institution_info[institution_id]['SCHOOL_NAME']
# print(schoolName + self.institution_info[institution_id]['NAME'] + str(institution_rank[institution_id]))
query_result.append((schoolName, self.institution_info[institution_id]['NAME'],institution_rank[institution_id]))
return query_result
def prints_for_school(self, result, city=None):
'''
Convert the retrieved teachers into school-level results, then print and return them.
:param result: the search result, teacher ids and scores under each subject code
:return: school search results, school name + province + city + score
'''
# 学校信息
school_info = {}
for code in result:
# 学科代码code下的教师权值信息
teacher_info_s = result[code]
for teacher_info in teacher_info_s:
# 教师id
teacher_id = teacher_info[0]
# 学院id
school_id = self.id_name[teacher_id]['SCHOOL_ID']
# 将老师的权值归入老师对应的学院中,以计算学院信息
if school_id in school_info:
school_info[school_id] += teacher_info[1]
else:
school_info[school_id] = teacher_info[1]
# 学院按权值从大到小排序
school_rank = dict(sorted(school_info.items(), key=lambda x: x[1], reverse=True))
# 根据是否有city限制进行操作
# 返回 学校名+省份+市+得分
query_result = []
if city == None:
for school_id in school_rank:
school_infomation = self.school_info[school_id]
query_result.append(
{'school_name': school_infomation['NAME'], 'province': school_infomation['PROVINCE'],
'city': school_infomation['CITY'], 'score': school_rank[school_id]})
else:
for school_id in school_rank:
city_name = self.school_info[school_id]['CITY']
if city_name == city:
school_infomation = self.school_info[school_id]
query_result.append(
{'school_name': school_infomation['NAME'], 'province': school_infomation['PROVINCE'],
'city': school_infomation['CITY'], 'score': school_rank[school_id]})
return query_result
def prints_for_teacher(self, result,school_name):
'''
Derive each retrieved teacher's institution and school names and print them.
:param result: the teacher information returned by the search, i.e. teacher names and institutions sorted by teacher score in descending order
:return: institution search results, teacher id + teacher name + institution id + institution name + school id + school name
'''
teacher_info = []
#获取老师所在的所有学院
institution_name = []
for code in result:
# 学科代码code下的教师权值信息
teacher_info_s = result[code]
for teacher_info1 in teacher_info_s:
teacher_id = teacher_info1[0]
institution_id = self.id_name[teacher_id]['INSTITUTION_ID']
if self.institution_info[institution_id]['NAME'] not in institution_name and self.institution_info[institution_id]['SCHOOL_NAME'] == school_name:
institution_name.append(self.institution_info[institution_id]['NAME'])
#初始化所有学院的老师总分
institution_dict = []
for i in institution_name:
institution_dict.append({
"teacher_rank":0,
"institution_name":i
})
#获取老师的分数和学院名
for code in result:
# 学科代码code下的教师权值信息
teacher_info_s = result[code]
for teacher_info1 in teacher_info_s:
# 教师id
teacher_id = teacher_info1[0]
# 学院id
institution_id = self.id_name[teacher_id]['INSTITUTION_ID']
if self.institution_info[institution_id]['SCHOOL_NAME'] == school_name:
teacher_info.append({
'teacher_rank': teacher_info1[1],
'institution_name': self.institution_info[institution_id]['NAME']
})
#计算学院中老师的总分
for i in teacher_info:
for j in institution_dict:
if i['institution_name'] == j['institution_name']:
j["teacher_rank"] = j["teacher_rank"] + i["teacher_rank"]
#按学院中老师的总分对学院排名
for i in institution_dict:
for j in institution_dict:
if i['teacher_rank'] > j['teacher_rank']:
a = i['teacher_rank']
b = i['institution_name']
i['institution_name'] = j['institution_name']
i['teacher_rank'] = j['teacher_rank']
j['institution_name'] = b
j['teacher_rank'] = a
#取排名前三的学院
three_institution = []
for i in institution_dict[0:3]:
three_institution.append(i['institution_name'])
#获取学院前三的老师信息
teacher_info_true = []
for code in result:
# 学科代码code下的教师权值信息
teacher_info_s = result[code]
for teacher_info1 in teacher_info_s:
# 教师id
teacher_id = teacher_info1[0]
# 学院id
institution_id = self.id_name[teacher_id]['INSTITUTION_ID']
if self.institution_info[institution_id]['SCHOOL_NAME'] == school_name and self.institution_info[institution_id]['NAME'] in three_institution:
teacher_info_true.append({
'teacher_id': teacher_id,
'teacher_name': self.id_name[teacher_id]["NAME"],
'institution_name': self.institution_info[institution_id]['NAME'],
'institution_id': institution_id,
'school_name':self.institution_info[institution_id]['SCHOOL_NAME'],
'school_id': self.id_name[teacher_id]['SCHOOL_ID'],
'score': teacher_info1[1]
})
return teacher_info_true
def do_query(self, text, filer):
'''
:param text: the input query text
:param filer: the filter dict
:return: teacher ids and scores
'''
# 将输入内容进行分词
# text = jieba.cut(text,cut_all=True)
# texts = ''
# for word in text:
# texts += word+' '
seg_list = pseg.cut(text)
words = []
for word, flag in seg_list:
if flag in self.fill and word not in self.stop:
# 是名词且不是停用词,将其纳入搜索列表
words.append(word)
if "school" in filer and len(filer["school"]) > 0:
teacher_id = {t for t in self.id_name if str(self.id_name[t]['SCHOOL_ID']) in filer['school']}
else:
teacher_id = None
# 筛选符合院系信息的老师
if "institution" in filer and filer['institution'] != None and len(filer['institution']) > 0:
teacher_id = {t for t in self.id_name if str(self.id_name[t]['INSTITUTION_ID']) in filer['institution']}
else:
teacher_id = None
if "name" in filer and len(filer["name"]) > 0:
if teacher_id:
teacher_id = {t for t in teacher_id if self.id_name[t]['name'].find(filer["name"]) >= 0}
else:
teacher_id = {t for t in self.id_name if self.id_name[t]['name'].find(filer["name"]) >= 0}
result = {}
# teacher_id dict None
for sub in self.Subject:
if "code" in filer and len(filer['code']) > 0 and sub not in filer['code']:
continue
else:
# self.Subject_for_teacher {code1:Subject_for_teacher(sub1),sode2:Subject_for_teacher2(sub2)}
result[sub] = self.Subject[sub].do_query(words, teacher_id)
# result {code:[teacher_id:value,].,code:[],...}
return result
def queryForTeacher(words, institution_id='1526'):
'''
Search for teachers.
:param words: the query words
:param institution_id: the institution ID the teachers are restricted to when searching
:return: the search result, institution and person
'''
filer = {}
filer['institution'] = institution_id
result = query.do_query(words, filer)
result_info = query.prints(result)
print(result_info)
return result_info
subject = [
{"code": '0801', "k": 12},
{"code": '0802', "k": 18}, {"code": '0803', "k": 10}, {"code": '0804', "k": 18},
{"code": '0805', "k": 12}, {"code": '0806', "k": 10}, {"code": '0807', "k": 22},
{"code": '0808', "k": 12}, {"code": '0809', "k": 14}, {"code": '080901', "k": 12}, {"code": '0810', "k": 12},
{"code": '0811', "k": 16}, {"code": '081101', "k": 10}, {"code": '0812', "k": 10}, {"code": '081202', "k": 10},
{"code": '0814', "k": 14}, {"code": '0815', "k": 20},
{"code": '0817', "k": 16}, {"code": '0818', "k": 12},
{"code": '0822', "k": 18}, {"code": '0823', "k": 14},
{"code": '0824', "k": 20}, {"code": '0825', "k": 10}, {"code": '0826', "k": 10},
{"code": '0827', "k": 10}, {"code": '0828', "k": 12},
{"code": '0830', "k": 20}, {"code": '0831', "k": 14}, {"code": '0832', "k": 10}]
path = os.path.join(os.getcwd(), 'static')
query = Query(subject, path=path)
def query_all(range, words, limit=None):
'''
Interface function for searching.
:param query_range: the query scope (school, institution, teacher, ...)
:param words: the query keywords
:param limit: search restriction, e.g. a city when searching schools or a school when searching institutions; may be None for a nationwide search
:return: result_info, the search results, e.g. for schools the school name, province, city and score
'''
query_range = range
if query_range == '省':
pass
if query_range == '市':
pass
if query_range == '学校':
filer = {}
result = query.do_query(words, filer)
result_info = query.prints_for_school(result, limit)
return result_info
if query_range == '学院':
filer = {}
result = query.do_query(words, filer)
result_info = query.prints_for_institution(result, limit)
return result_info
if query_range == '老师':
filer = {}
school_name = limit
result = query.do_query(words, filer)
result_info = query.prints_for_teacher(result,school_name)
return result_info
if query_range == '教师':
filer = {}
filer['institution'] = limit
result = query.do_query(words, filer)
result_info = query.prints(result)
return result_info
if __name__ == '__main__':
results = query_all('老师', '计算机',"南京大学")
print(results)
# print(query_all("老师","计算机"))
| 38.245545 | 161 | 0.538573 | 2,274 | 19,314 | 4.396658 | 0.165347 | 0.034207 | 0.021004 | 0.044809 | 0.431786 | 0.381576 | 0.334667 | 0.30016 | 0.236047 | 0.209242 | 0 | 0.022055 | 0.330952 | 19,314 | 504 | 162 | 38.321429 | 0.751664 | 0.147561 | 0 | 0.316294 | 0 | 0 | 0.087548 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041534 | false | 0.00639 | 0.015974 | 0 | 0.111821 | 0.041534 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f06718b0677a884368580faadb14cd29509f1508 | 16,633 | py | Python | experiments/simple-cold-function-threaded-twelve/simple-cold-function-threaded-twelve.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | null | null | null | experiments/simple-cold-function-threaded-twelve/simple-cold-function-threaded-twelve.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | 2 | 2020-04-28T07:59:30.000Z | 2020-05-17T15:36:04.000Z | experiments/simple-cold-function-threaded-twelve/simple-cold-function-threaded-twelve.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | null | null | null | import sys
import json
import time
from pprint import pprint
from benchmarker import Benchmarker
from datetime import datetime
import traceback
from mysql_interface import SQL_Interface
import function_lib as lib
# =====================================================================================
# Read cli arguments from calling script
# name of the terraform experiment
experiment_name = sys.argv[1]
# unique identifier string tying this experiment together with the
# experiments conducted for the other cloud providers in this round
experiment_meta_identifier = sys.argv[2]
# name of cloud function provider for this experiment
provider = sys.argv[3]
# name of the client provider
client_provider = sys.argv[4]
# relative path to experiment.env file
env_file_path = sys.argv[5]
# dev_mode
dev_mode = eval(sys.argv[6]) if len(sys.argv) > 6 else False
# verbose mode
verbose = eval(sys.argv[7]) if len(sys.argv) > 7 else False
# =====================================================================================
# describe experiment, should be verbose enough to figure
# out what the experiment does and what it attempts to test
description = f"""
{experiment_name}: This experiment tests the time it takes for
function instances to no longer be available due to inactivity in a multithreaded
environment.
    The experiment is conducted by first creating a cold time baseline. This is done by
    invoking a single function with 12 concurrent requests and averaging over the results.
    Then the same function is invoked 10 times, again with 12 concurrent requests, and
    averaged as a warm-time baseline.
Then the function is invoked with 12 multithreaded requests continually with increasing
delay between invocations, until the avg of the requests falls within the cold time
baseline.
"""
# =====================================================================================
# create the benchmarker
benchmarker = Benchmarker(experiment_name=experiment_name,
experiment_meta_identifier=experiment_meta_identifier,
provider=provider,
client_provider=client_provider,
experiment_description=description,
env_file_path=env_file_path,
dev_mode=dev_mode,
verbose=verbose)
# =====================================================================================
# create database interface for logging results
db = SQL_Interface(dev_mode)
# name of table to insert data into
table = 'Coldstart'
# =====================================================================================
# set meta data for experiment
# UUID from experiment
experiment_uuid = benchmarker.experiment.uuid
# what function to test on (1-3)
fx = 'function2'
# sleep for 45 minutes to ensure a cold start
if not dev_mode:
time.sleep(45*60)
# number of threads to be used
threads = 12
# values used for aborting experiment if it runs more than 24 hours
_timeout = 24 * 60 * 60
start_time = time.time()
# cold benchmark
benchmark = None
# time to sleep in between invocations, start at 5 minutes
sleep_time = 300
# increment for each iteration
increment = sleep_time
# granularity of result
granularity = 20
# value for last response latency
latest_latency_time = 0
# flags for controlling granularity of sleep value
large_increment = True
minute_increment = True
# results specific gathered and logged from logic of this experiment
results = []
# sift away errors at runtime and report them later
errors = []
# ======================================================================================
# Convenience methods needed for this experiment
# invoke function and return the result dict
def invoke():
global threads
    # sift away potential error responses and transform the response format from a list of dicts of dicts to a list of dicts
invocations = list(filter(None, [x if 'error' not in x else errors.append(x) for x in map(lambda x: lib.get_dict(x),
benchmarker.invoke_function_conccurrently(function_name=fx,
numb_threads=threads,
function_args= {'throughput_time':0.2}))]))
print('Trace for invocation with thread_numb:', threads)
if len(errors) != 0:
print('ERRORS',len(errors))
pprint(errors)
print()
if len(invocations) != 0:
print(f'values from invoke with {len(invocations)} invocations')
latency_with_identifier = latency = list(map(lambda x: (x['instance_identifier'],x['execution_start']-x['invocation_start']),invocations))
print('identifier, latency')
pprint(latency_with_identifier)
print()
invo_start = list(map(lambda x: x['invocation_start'],invocations))
invo_start.sort()
first_invocation = invo_start[0]
invo_start_dist = list(map(lambda x: x-first_invocation,invo_start))
print('invo_start_distribution')
pprint(invo_start_dist)
print()
execution_start = list(map(lambda x: x['execution_start'],invocations))
execution_start.sort()
first_execution = execution_start[0]
execution_start_dist = list(map(lambda x: x-first_execution,execution_start))
print('execution_start_dist')
pprint(execution_start_dist)
print()
print('=====================================')
    # return the result as an accumulated dict, or None on failure
    return None if len(invocations) == 0 else lib.accumulate_dicts(invocations)
# the wrapper ends the experiment if it cannot return a valid value
def err_func(): return benchmarker.end_experiment()
# convenience wrapper for not having to repeat values
def validate(x, y, z=None):
global threads
return lib.iterator_wrapper(x, y, experiment_name, None, err_func)
# creates list of invocation dicts.
# args: tuple(x,y) -> x=length_of_list, y=error_point_string
create_invocation_list = lambda x=(5, 'create invocation_list'): [
validate(invoke, x[1]) for i in range(x[0]) ]
# parse data that needs to be logged to database.
def append_result(
invo_id,
minutes,
seconds,
granularity,
cold,
final,
) -> None:
global threads, benchmark
results.append({
'exp_id': experiment_uuid,
'invo_id': invo_id,
'minutes': minutes,
'seconds': seconds,
'granularity': granularity,
'threads': threads,
'benchmark': benchmark,
'cold': cold,
'final': final
})
# =====================================================================================
# The actual logic of the experiment
def find_benchmark():
from functools import reduce
iterations = 50
response_times = []
# should be a cold invocation
first_res = validate(invoke,'first_res in find_benchmark')
cold_latency = first_res['execution_start'] - first_res['invocation_start']
if verbose:
print('first cold invocation:')
pprint(first_res)
print()
if verbose:
print(f'invoking function {iterations} times to find an average latency')
for i in range(iterations):
t1 = time.time()
res = validate(invoke,f'number {i} in warm_time baseline')
t2 = time.time()
time.sleep(1)
response_times.append(
(i, res['execution_start']-res['invocation_start'], t2-t1, res['instance_identifier'])
)
response_times.sort(key=lambda x: x[1])
sliced = response_times[10:40]
sliced_avg = reduce(lambda x,y: x+y[1],[0.0] + sliced)/len(sliced)
benchmark = sliced_avg * 2 if sliced_avg > 0.22 else 1.0
if verbose:
print(f'coldtime_latency: {cold_latency}')
print(f'found average {sliced_avg}, benchmark: {benchmark}')
return (cold_latency, sliced_avg, benchmark)
def check_coldtime(sleep: int, coldtime: float):
global benchmark
if verbose:
print('check_coldtime', sleep, coldtime)
if(coldtime > benchmark):
print(f'benchmark found: {benchmark}, with {coldtime} as coldtime')
return
elif(sleep > 7200):
raise Exception(
'Benchmark could not be established after 2 hours sleep_time')
else:
time.sleep(sleep)
local_coldtime, avg_warmtime, benchmark = find_benchmark()
if(coldtime < benchmark):
check_coldtime(sleep+1200, coldtime)
# Find the values for when cold times occur
def set_cold_values():
global sleep_time, increment, granularity, latest_latency_time, large_increment, minute_increment
while(True):
if time.time() - start_time > _timeout:
print('ABORTING due to 24 hour time constraint from set_cold_values function\n')
benchmarker.end_experiment()
            # log experiment-specific results, i.e. results not obtainable from the generic Invocation object
lib.log_experiment_specifics(experiment_name,
experiment_uuid,
len(errors),
db.log_exp_result([lib.dict_to_query(x, table) for x in results]))
sys.exit()
time.sleep(sleep_time)
result_dict = validate(invoke,f'invoking function: {fx} from cold start experiment')
latest_latency_time = result_dict['execution_start'] - result_dict['invocation_start']
if(verbose):
lib.dev_mode_print('logging time from set_cold_values', [
('experiment_uuid,result_dict[\'instance_identifier\']',experiment_uuid, result_dict['instance_identifier']),
('sleep_time / 60', int(sleep_time / 60)),
('sleep_time % 60', int( sleep_time % 60)),
('increment', increment),
('coldtime', latest_latency_time > benchmark),
('Final result', False),
('latest_latency_time',latest_latency_time),
])
if(latest_latency_time > benchmark):
if large_increment:
sleep_time -= increment
large_increment = False
increment = 60
sleep_time += increment
elif minute_increment:
sleep_time -= 60
minute_increment = False
increment = granularity
sleep_time += increment
else:
append_result(
result_dict['identifier'],
int(sleep_time / 60),
int(sleep_time % 60),
increment,
latest_latency_time > benchmark,
False)
return
else:
sleep_time += increment
def verify_result():
global sleep_time, granularity
    # verify that the result is valid by using the same sleep time between invocations 5 times
iter_count = 5 if not dev_mode else 2
while(iter_count > 0):
if time.time() - start_time > _timeout:
            print('ABORTING due to 24 hour time constraint from verification loop\n')
benchmarker.end_experiment()
            # log experiment-specific results, i.e. results not obtainable from the generic Invocation object
lib.log_experiment_specifics(experiment_name,
experiment_uuid,
len(errors),
db.log_exp_result([lib.dict_to_query(x, table) for x in results]))
sys.exit()
time.sleep(sleep_time)
result_dict = validate(invoke, f'invoking function: {fx} from validation of cold start experiment')
latency_time = result_dict['execution_start'] - result_dict['invocation_start']
if(verbose):
lib.dev_mode_print(f'logging cold time: {latency_time > benchmark} -> coldtime exp', [
('experiment_uuid,result_dict[instance_identifier]',experiment_uuid, result_dict['instance_identifier']),
('sleep_time / 60', int(sleep_time / 60)),
('sleep_time % 60', int(sleep_time % 60)),
('increment', increment),
('coldtime', latency_time > benchmark),
('Final result', False),
('latency', latency_time)
])
append_result(
result_dict['identifier'],
int(sleep_time / 60),
int(sleep_time % 60),
increment,
latency_time > benchmark,
False)
if(latency_time < benchmark):
sleep_time += granularity
iter_count = 5 if not dev_mode else 2
else:
iter_count -= 1
    # run one last time and log the result as final, or increase the sleep time and verify again
time.sleep(sleep_time)
result_dict = validate(invoke, f'invoking function: {fx} from final invocation of cold start experiment')
latency_time = result_dict['execution_start'] - result_dict['invocation_start']
if latency_time > benchmark:
# log final result
append_result(
result_dict['identifier'],
int(sleep_time / 60),
int(sleep_time % 60),
granularity,
True,
True)
else:
sleep_time += granularity
verify_result()
try:
# initial_cold_start_response = validate(invoke, 'initial coldtime')
# coldtime = initial_cold_start_response['execution_start'] - initial_cold_start_response['invocation_start']
# if verbose:
# print('init coldtime', coldtime)
# calculates avg. time for warm function, default is 5 invocations as input and keys execution_start - invocation_start
# invo_list = create_invocation_list()
# avg_warmtime = validate(lib.reduce_dict_by_keys,
# 'avg_warmtime',
# (invo_list, ('execution_start', 'invocation_start')) )
# coldtime is adjusted by 10% to avoid coldtime being an outlier
# openfaas sometimes has large variation in cold time
# if coldtime > (10 * avg_warmtime):
# benchmark = avg_warmtime * 10
# else:
# benchmark = coldtime * 0.8
# benchmark = avg_warmtime * 1.75
coldtime, avg_warmtime, benchmark = find_benchmark()
if verbose:
print('init benchmark', benchmark)
# sleep for 40 minutes if coldtime is not cold
check_coldtime(40*60, coldtime)
if(verbose):
lib.dev_mode_print('Initial Coldtime ', [
('coldtime', coldtime),
('benchmark', benchmark),
('avg_warmtime', avg_warmtime)
])
if(verbose):
lib.dev_mode_print('pre set_cold_values() coldtime exp', [
('sleep_time', sleep_time),
('increment', increment),
('granularity', granularity),
('latest_latency_time',latest_latency_time)
])
set_cold_values()
if(verbose):
lib.dev_mode_print('post set_cold_values() coldtime exp', [
('sleep_time', sleep_time),
('increment', increment),
('granularity', granularity),
('latest_latency_time',latest_latency_time)])
verify_result()
# =====================================================================================
# end of the experiment
benchmarker.end_experiment()
# =====================================================================================
    # log experiment-specific results, i.e. results not obtainable from the generic Invocation object
lib.log_experiment_specifics(experiment_name,
experiment_uuid,
len(errors),
db.log_exp_result([lib.dict_to_query(x, table) for x in results]))
except Exception as e:
# this will print to logfile
print(f'Ending experiment {experiment_name} due to fatal runtime error')
print(str(datetime.now()))
print('Error message: ', str(e))
print(f'Trace: {traceback.format_exc()}')
print('-----------------------------------------')
benchmarker.end_experiment()
| 37.802273 | 146 | 0.58889 | 1,832 | 16,633 | 5.167576 | 0.185044 | 0.031372 | 0.017429 | 0.014788 | 0.27073 | 0.257315 | 0.234288 | 0.225203 | 0.219077 | 0.213373 | 0 | 0.011973 | 0.281909 | 16,633 | 439 | 147 | 37.888383 | 0.780643 | 0.227199 | 0 | 0.323024 | 0 | 0 | 0.206996 | 0.015808 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027491 | false | 0 | 0.034364 | 0.003436 | 0.079038 | 0.127148 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f06734cf6b9fb091bc2ec4b1c804761b68587e7d | 827 | py | Python | weather/core/views.py | josevictorp81/Weather | 5e421b152456218d44344f71b47c44addaf1c28d | [
"MIT"
] | null | null | null | weather/core/views.py | josevictorp81/Weather | 5e421b152456218d44344f71b47c44addaf1c28d | [
"MIT"
] | null | null | null | weather/core/views.py | josevictorp81/Weather | 5e421b152456218d44344f71b47c44addaf1c28d | [
"MIT"
] | null | null | null | from django.shortcuts import render
import requests
from django.conf import settings
def index(request):
if request.method == 'POST':
city = request.POST['city']
url = f'http://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&lang=pt_br&appid={settings.OPEN_WEATHER_MAP_API_KEY}'
result = requests.get(url=url).json()
#print(result)
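        # For reference, the lookups below assume a response shaped roughly like
        # (values are illustrative only):
        # {"main": {"temp": 21.3},
        #  "weather": [{"description": "clear sky", "icon": "01d"}],
        #  "sys": {"country": "BR"}}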
weather = {
'city': city,
'temperature': result['main']['temp'],
'description': result['weather'][0]['description'],
'icon': result['weather'][0]['icon'],
'place': result['sys']['country'],
}
data = {
'weather': weather,
}
return render(request, 'index.html', data)
else:
return render(request, 'index.html')
| 30.62963 | 138 | 0.565901 | 91 | 827 | 5.087912 | 0.56044 | 0.084233 | 0.060475 | 0.103672 | 0.12095 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006656 | 0.273277 | 827 | 26 | 139 | 31.807692 | 0.763727 | 0.015719 | 0 | 0 | 0 | 0.047619 | 0.292743 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f067f79e68fce36b708aa737c039c4f49e0fa75c | 2,352 | py | Python | bin/90_convert_movie_to_gif.py | scpepper69/rp_eevee_clock | 5b07552edb639ea3bfc539a0da2316eeb379c2c4 | [
"MIT"
] | null | null | null | bin/90_convert_movie_to_gif.py | scpepper69/rp_eevee_clock | 5b07552edb639ea3bfc539a0da2316eeb379c2c4 | [
"MIT"
] | null | null | null | bin/90_convert_movie_to_gif.py | scpepper69/rp_eevee_clock | 5b07552edb639ea3bfc539a0da2316eeb379c2c4 | [
"MIT"
] | null | null | null | import math
import sys
import cv2
from PIL import Image
def get_fps_n_count(video_path):
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
return (None, None)
count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = round(cap.get(cv2.CAP_PROP_FPS))
cap.release()
cv2.destroyAllWindows()
return (fps, count)
def aspect_ratio(width, height):
gcd = math.gcd(width, height)
ratio_w = width // gcd
ratio_h = height // gcd
return (ratio_w, ratio_h)
def resize_based_on_aspect_ratio(aspect_ratio, base_width, max_width=400):
if base_width < max_width:
return None
base = max_width / aspect_ratio[0]
new_w = int(base * aspect_ratio[0])
new_h = int(base * aspect_ratio[1])
return (new_w, new_h)
def get_frame_range(video_path, start_frame, stop_frame, step_frame):
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
return None
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
asp = aspect_ratio(width, height)
width_height = resize_based_on_aspect_ratio(asp, width, max_width=400)
im_list = []
for n in range(start_frame, stop_frame, step_frame):
cap.set(cv2.CAP_PROP_POS_FRAMES, n)
ret, frame = cap.read()
if ret:
if width_height is not None:
frame = cv2.resize(frame, dsize=width_height)
img_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
im = Image.fromarray(img_array)
im_list.append(im)
cap.release()
cv2.destroyAllWindows()
return im_list
def make_gif(filename, im_list):
im_list[0].save(filename, save_all=True, append_images=im_list[1:], loop=0)
def main(target_file):
video_file = target_file
fps, count = get_fps_n_count(video_file)
if fps is None:
print("Cannot open the video file.")
return
start_sec = 0
stop_sec = 8
start_frame = int(start_sec * fps)
stop_frame = int(stop_sec * fps)
step_frame = 3
print("Convert Start")
im_list = get_frame_range(video_file, start_frame, stop_frame, step_frame)
if im_list is None:
print("Cannot open the video file.")
return
make_gif('test.gif', im_list)
print("end")
if __name__ == "__main__":
main(sys.argv[1])
| 24.5 | 79 | 0.659864 | 354 | 2,352 | 4.090395 | 0.251412 | 0.037293 | 0.03453 | 0.033149 | 0.354282 | 0.236878 | 0.217541 | 0.127072 | 0.127072 | 0.073204 | 0 | 0.016704 | 0.236395 | 2,352 | 95 | 80 | 24.757895 | 0.789532 | 0 | 0 | 0.205882 | 0 | 0 | 0.036596 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.058824 | 0 | 0.279412 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f06fef855a45b62ac2ba773a3c113357aea669d9 | 6,884 | py | Python | tests/tests.py | mhSangar/python-resize-image | a4e645792ef30c5fcc558df6da6de18b1ecb95ea | [
"MIT"
] | 95 | 2015-04-27T14:48:41.000Z | 2019-02-12T08:35:11.000Z | tests/tests.py | mhSangar/python-resize-image | a4e645792ef30c5fcc558df6da6de18b1ecb95ea | [
"MIT"
] | 19 | 2015-09-27T09:57:10.000Z | 2019-01-26T14:38:31.000Z | tests/tests.py | mhSangar/python-resize-image | a4e645792ef30c5fcc558df6da6de18b1ecb95ea | [
"MIT"
] | 35 | 2015-05-07T15:14:10.000Z | 2019-01-28T16:09:45.000Z | import os
import shutil
import unittest
from contextlib import contextmanager
from PIL import Image
from resizeimage import resizeimage
from resizeimage.imageexceptions import ImageSizeError
class TestValidateDecorator(unittest.TestCase):
def validation(x, y):
if x < y:
raise Exception()
else:
return True
@staticmethod
@resizeimage.validate(validation)
def func(x, y):
return x * y
def test_no_exception(self):
"""
Test that when the validate function does not raise an
error, the correct result is returned.
"""
self.assertEqual(self.func(42, 2), 84)
def test_exception(self):
"""
Test that when the validate fails, the exception is
properly propagated.
"""
with self.assertRaises(Exception):
self.func(2, 42)
def test_no_validation(self):
"""
        Test that when validation is skipped (validate=False), the
        result is returned even though validation would fail.
"""
self.assertEqual(self.func(2, 42, validate=False), 84)
def test_validation_only_no_exception(self):
"""
        Test that a validator, when called directly, returns `True`
        for a valid value.
        """
        def validate(x):
            if x < 0:
                raise Exception()
            else:
                return True

        # calling the validator directly should simply return True
        self.assertTrue(validate(1))
class TestResizeimage(unittest.TestCase):
"""
Run tests for all functions
the given image for testing is 800x533
"""
@classmethod
def setUpClass(self):
"""
Setup a temporary directory to store image
"""
path = os.path.dirname(__file__)
self.test_image_filepath = os.path.join(path, "test-image.jpeg")
tmpname = 'tmp-images'
self._tmp_dir = os.path.join(path, tmpname)
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
os.makedirs(self._tmp_dir)
def _tmp_filename(self, filename):
"""
Get relative path for the given filename
"""
return os.path.join(self._tmp_dir, filename)
@contextmanager
def _open_test_image(self):
with open(self.test_image_filepath, 'r+b') as f:
image = Image.open(f)
yield image
def test_resize_crop(self):
"""
Test that the image resized with resize_crop
has the expected size
"""
with self._open_test_image() as img:
img = resizeimage.resize_crop(img, [200, 200])
filename = self._tmp_filename('crop.jpeg')
img.save(filename, img.format)
with Image.open(filename) as image:
self.assertEqual(image.size, (200, 200))
def test_can_not_resize_crop_larger_size(self):
"""
Test that resizing an image with resize_crop
to a size larger than the original raises an error
"""
with self._open_test_image() as img:
with self.assertRaises(ImageSizeError):
resizeimage.resize_crop(img, (801, 534))
def test_resize_cover(self):
"""
Test that the image resized with resize_cover
has the expected size
"""
with self._open_test_image() as img:
img = resizeimage.resize_cover(img, [200, 100])
filename = self._tmp_filename('resize-cover.jpeg')
img.save(filename, img.format)
with Image.open(filename) as image:
self.assertEqual(image.size, (200, 100))
def test_can_not_resize_cover_larger_size(self):
"""
Test that resizing an image with resize_cover
to a size larger than the original raises an error
"""
with self._open_test_image() as img:
with self.assertRaises(ImageSizeError):
resizeimage.resize_cover(img, (801, 534))
def test_resize_contain(self):
"""
Test that the image resized with resize_contain
has the expected size
"""
with self._open_test_image() as img:
img = resizeimage.resize_contain(img, [200, 100])
filename = self._tmp_filename('resize-contain.jpeg')
img.save(filename, img.format)
with Image.open(filename) as image:
self.assertEqual(image.size, (200, 100))
def test_resize_contain_larger_size(self):
"""
Test that the image resized with resize_contain
has the expected size
"""
with self._open_test_image() as img:
img = resizeimage.resize_contain(img, [801, 534])
filename = self._tmp_filename('resize-contain-larger.jpeg')
img.save(filename, img.format)
with Image.open(filename) as image:
self.assertEqual(image.size, (801, 534))
def test_resize_width(self):
"""
Test that the image resized with resize_width
has the expected size
"""
with self._open_test_image() as img:
img = resizeimage.resize_width(img, 200)
filename = self._tmp_filename('resize-width.jpeg')
img.save(filename, img.format)
with Image.open(filename) as image:
self.assertEqual(image.size[0], 200)
def test_can_not_resize_larger_width(self):
"""
Test that resizing an image with resize_width
to a size larger than the original raises an error
"""
with self._open_test_image() as img:
with self.assertRaises(ImageSizeError):
resizeimage.resize_width(img, 801)
def test_resize_height(self):
"""
Test that the image resized with resize_height
has the expected size
"""
with self._open_test_image() as img:
img = resizeimage.resize_height(img, 200)
filename = self._tmp_filename('resize-height.jpeg')
img.save(filename, img.format)
with Image.open(filename) as image:
self.assertEqual(image.size[1], 200)
def test_can_not_resize_larger_height(self):
with self._open_test_image() as img:
with self.assertRaises(ImageSizeError):
resizeimage.resize_height(img, 534)
def test_resize_thumbnail(self):
"""
Test that the image resized with resize_thumbnail
has the expected size
"""
with self._open_test_image() as img:
img = resizeimage.resize_thumbnail(img, [200, 200])
filename = self._tmp_filename('resize-thumbnail.jpeg')
img.save(filename, img.format)
with Image.open(filename) as image:
self.assertEqual(image.size, (200, 133))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestResizeimage)
unittest.TextTestRunner(verbosity=2).run(suite)
| 32.780952 | 72 | 0.605898 | 821 | 6,884 | 4.902558 | 0.159562 | 0.031801 | 0.041739 | 0.043727 | 0.641739 | 0.61764 | 0.589068 | 0.539627 | 0.447453 | 0.447453 | 0 | 0.023625 | 0.3052 | 6,884 | 209 | 73 | 32.937799 | 0.817897 | 0.17853 | 0 | 0.321739 | 0 | 0 | 0.03162 | 0.009117 | 0 | 0 | 0 | 0 | 0.121739 | 1 | 0.182609 | false | 0 | 0.06087 | 0.008696 | 0.295652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f07072e0cdecbf2111247aa4e190203f130eb490 | 2,881 | py | Python | hipsternet/im2col.py | poppoo/hipsternet | 64ec355a62d758c298a46845a7797a7b2265c88b | [
"Unlicense"
] | null | null | null | hipsternet/im2col.py | poppoo/hipsternet | 64ec355a62d758c298a46845a7797a7b2265c88b | [
"Unlicense"
] | null | null | null | hipsternet/im2col.py | poppoo/hipsternet | 64ec355a62d758c298a46845a7797a7b2265c88b | [
"Unlicense"
] | null | null | null | import numpy as np
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
out_height = int((H + 2 * padding - field_height) / stride + 1)
out_width = int((W + 2 * padding - field_width) / stride + 1)
    #i0 gives the k*k*C base row coordinates: for a 3*3 kernel with C channels, i0 is [0,0,0,1,1,1,2,2,2] tiled over the channels
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
    #There are out_height*out_width output locations in total, each one a column of the im2col matrix;
    #each output pixel is the sum over all channels of the convolution with the corresponding kernel.
    #i1 is the vertical sliding offset of the kernel window: each output row holds out_width pixels,
    #so after every out_width locations the window moves down one row of the image; i1 is therefore
    #[0,...,0,1,...,1,...,out_height-1], with every value repeated out_width times.
i1 = stride * np.repeat(np.arange(out_height), out_width)
    #j0 is the base column coordinate of the kernel window; for a 3*3 kernel and C channels it is [0,1,2,0,1,2,0,1,2] tiled over the channels
j0 = np.tile(np.arange(field_width), field_height * C)
    #j1 is the horizontal sliding offset of the window, the counterpart of i1: it runs 0 -> out_width-1 along a row and resets to zero on the next row
j1 = stride * np.tile(np.arange(out_width), out_height)
#reshape(-1,1) -> shape变为n*1的vector,i,j的一列的n个数就是kernel的基础横纵坐标,n=k*k*c
#reshape(1,-1) -> 变为1*m,依顺序偏移左边, m=out_h*out_w
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    #k is the channel index: every k*k entries move on to the next channel
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k.astype(int), i.astype(int), j.astype(int))
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride)
    #For each of the C channels take k*k points per output location, with row/column coordinates given by i, j; (k*k*C) * (out_height*out_width) points in total
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
stride=1):
""" An implementation of col2im based on fancy indexing and np.add.at """
N, C, H, W = x_shape
H_padded, W_padded = H + 2 * padding, W + 2 * padding
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding, stride)
cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
cols_reshaped = cols_reshaped.transpose(2, 0, 1)
np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
if padding == 0:
return x_padded
return x_padded[:, :, padding:-padding, padding:-padding]
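# Illustrative usage (a minimal sketch with assumed toy shapes): for a batch of
# two 3-channel 4x4 images and a 3x3 field with padding=1, stride=1, im2col
# yields a (3*3*3, 4*4*2) = (27, 32) column matrix, and col2im maps it back to
# the original input shape.
if __name__ == '__main__':
    x = np.random.randn(2, 3, 4, 4)
    cols = im2col_indices(x, 3, 3, padding=1, stride=1)
    print(cols.shape)    # (27, 32)
    x_back = col2im_indices(cols, x.shape, 3, 3, padding=1, stride=1)
    print(x_back.shape)  # (2, 3, 4, 4)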
| 47.229508 | 85 | 0.666782 | 464 | 2,881 | 3.984914 | 0.260776 | 0.077339 | 0.069227 | 0.09086 | 0.224446 | 0.200649 | 0.142239 | 0.114657 | 0.09086 | 0.09086 | 0 | 0.04482 | 0.179104 | 2,881 | 60 | 86 | 48.016667 | 0.736998 | 0.304061 | 0 | 0.055556 | 0 | 0 | 0.00404 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.083333 | false | 0 | 0.027778 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f072223163473b20d3e47ea18d307c2bfa428bda | 965 | py | Python | reconbot/notificationprinters/formatter.py | acv/reconbot | 0ecce7081871b83d85180e71015a6d3fb38c1bac | [
"MIT"
] | 1 | 2021-10-19T02:46:28.000Z | 2021-10-19T02:46:28.000Z | reconbot/notificationprinters/formatter.py | acv/reconbot | 0ecce7081871b83d85180e71015a6d3fb38c1bac | [
"MIT"
] | 1 | 2021-05-06T19:50:02.000Z | 2021-05-06T19:50:02.000Z | reconbot/notificationprinters/formatter.py | acv/reconbot | 0ecce7081871b83d85180e71015a6d3fb38c1bac | [
"MIT"
] | null | null | null | import re
class Formatter(object):
def __init__(self, printer, notification):
self.printer = printer
self.notification = notification
def __format__(self, format_string):
pattern = r'([a-zA-Z_]+)\(([a-zA-Z_]+)(?:\s*,\s*([a-zA-Z_]+))?\)'
matches = re.match(pattern, format_string)
if not matches:
return format_string
groups = matches.groups()
if not hasattr(self.printer, groups[0]):
raise Exception('Unknown method "%s" in format "%s"' % (matches.group(1), format_string))
method = getattr(self.printer, groups[0])
keys = list(filter(lambda k: k is not None, groups[1:]))
for key in keys:
if key not in self.notification:
raise Exception('Unknown attribute "%s" in notification "%s"' % (key, repr(self.notification)))
args = list(map(lambda key: self.notification[key], keys))
return method(*args)
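# Illustrative usage with a hypothetical printer and notification (stand-ins
# for the real reconbot objects): a format spec of the form "method(key)"
# resolves to printer.method(notification[key]).
if __name__ == '__main__':
    class DemoPrinter(object):
        def moon_name(self, moon_id):
            return 'Moon %s' % moon_id

    demo = Formatter(DemoPrinter(), {'moonID': 40161465})
    print('{0:moon_name(moonID)}'.format(demo))  # -> Moon 40161465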
| 31.129032 | 111 | 0.595855 | 120 | 965 | 4.666667 | 0.391667 | 0.078571 | 0.021429 | 0.064286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00561 | 0.26114 | 965 | 30 | 112 | 32.166667 | 0.779804 | 0 | 0 | 0 | 0 | 0.05 | 0.133679 | 0.053886 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.3 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f072a68a54b9847d46b27353b675a6987c8fdc57 | 1,297 | py | Python | jpc.py | l-mda/JsonRpc | 34f830d3a34089e1364de9c6e14c5f263cd1c7ff | [
"MIT"
] | 2 | 2019-04-23T15:54:49.000Z | 2019-07-26T09:48:59.000Z | jpc.py | dyseo/JsonDb | 34f830d3a34089e1364de9c6e14c5f263cd1c7ff | [
"MIT"
] | null | null | null | jpc.py | dyseo/JsonDb | 34f830d3a34089e1364de9c6e14c5f263cd1c7ff | [
"MIT"
] | null | null | null | import json
from body import Response
from typing import Any
class Jpc:
def __init__(self, files):
self.files = files
op = open(self.files, "r").read()
if "{" not in op or "}" not in op and len(op) < 3:
json.dump({}, open(self.files, "w"))
self.reads = Response(open(self.files, "r").read())
def is_exist(self, key: str) -> bool:
r = self.reads
if key in r.keys():
return True
return False
    def add(self, key: str, data: Any) -> bool:
        r = self.reads
        if self.is_exist(key):
            raise Exception("Duplicate key, {} key already exists!".format(key))
        # merge the new key into the existing data instead of overwriting the whole file
        if r == {}:
            r.update({key: data})
        else:
            r[key] = data
        json.dump(r, open(self.files, "w"), indent=4)
        return True
def purge(self) -> bool:
r = self.reads
if r == {}:
raise Exception("File already empty!")
r = {}
json.dump(r, open(self.files, "w"))
return True
def delete(self, key: str) -> bool:
r = self.reads
if not self.is_exist(key):
raise AttributeError("key {} not found in data".format(key))
del r[key]
json.dump(r, open(self.files, "w"), indent=4)
def update(self, key:str, data: Any) -> bool:
r = self.reads
if not self.is_exist(key) and r != {}:
raise AttributeError("key {} not found in data".format(key))
if r == {}:
r.update({key:data})
else:
r[key] = data
json.dump(r, open(self.files, "w"), indent=4)
return True
| 25.94 | 70 | 0.625289 | 214 | 1,297 | 3.752336 | 0.252336 | 0.100872 | 0.113325 | 0.087173 | 0.526775 | 0.438356 | 0.438356 | 0.37609 | 0.331258 | 0.144458 | 0 | 0.003854 | 0.199692 | 1,297 | 49 | 71 | 26.469388 | 0.76975 | 0 | 0 | 0.325581 | 0 | 0 | 0.086353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.069767 | 0 | 0.325581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0733e60b1efecd14fb0685d07fe397334cf8142 | 3,019 | py | Python | backend/tests/functional/api/order/test_select_by_slug.py | willrp/willbuyer | 069836a91c777ede6f62a16daa9f26e555d66bcb | [
"MIT"
] | 4 | 2020-02-19T09:27:23.000Z | 2021-11-26T00:42:06.000Z | backend/tests/functional/api/order/test_select_by_slug.py | willrp/willbuyer | 069836a91c777ede6f62a16daa9f26e555d66bcb | [
"MIT"
] | null | null | null | backend/tests/functional/api/order/test_select_by_slug.py | willrp/willbuyer | 069836a91c777ede6f62a16daa9f26e555d66bcb | [
"MIT"
] | null | null | null | import pytest
import requests
from uuid import uuid4
from webservices.willorders.backend.tests.factories import OrderFactory, ProductFactory, OrderProductFactory
from backend.util.response.order.order import OrderSchema
from backend.util.response.error import ErrorSchema
from backend.util.slug import uuid_to_slug
@pytest.fixture(scope="function", autouse=True)
def factory_session(willorders_ws_db_session):
OrderFactory._meta.sqlalchemy_session = willorders_ws_db_session
ProductFactory._meta.sqlalchemy_session = willorders_ws_db_session
OrderProductFactory._meta.sqlalchemy_session = willorders_ws_db_session
def test_select_by_slug(domain_url, auth_user, auth_session, es_create, willorders_ws_db_session):
prod_list = es_create("products", 5)
user_slug = auth_user.uuid_slug
obj = OrderFactory.create(user_slug=user_slug)
willorders_ws_db_session.commit()
slug = obj.uuid_slug
prod_id_list = [p.meta["id"] for p in prod_list]
amount = 1
for es_id in prod_id_list:
product = ProductFactory.create(es_id=es_id)
OrderProductFactory.create(order=obj, product=product, amount=amount)
amount += 1
willorders_ws_db_session.commit()
response = auth_session.get(
domain_url + "/api/order/%s" % slug
)
data = response.json()
OrderSchema().load(data)
assert response.status_code == 200
assert data["slug"] == slug
assert data["product_types"] == len(prod_list)
assert data["items_amount"] == ((1 + len(prod_list)) * len(prod_list)) / 2
assert len(data["products"]) == len(prod_list)
for item in [item.to_dict() for item in obj.items]:
product = next(p for p in data["products"] if p["id"] == item["item_id"])
assert product["amount"] == item["amount"]
response = auth_session.get(
domain_url + "/api/order/WILLrogerPEREIRAslugBR"
)
data = response.json()
assert data["error"] == {}
assert response.status_code == 404
def test_select_by_slug_wrong_user(domain_url, auth_session, es_create, willorders_ws_db_session):
prod_list = es_create("products", 3)
user_slug = uuid_to_slug(uuid4())
obj = OrderFactory.create(user_slug=user_slug)
willorders_ws_db_session.commit()
slug = obj.uuid_slug
prod_id_list = [p.meta["id"] for p in prod_list]
amount = 1
for es_id in prod_id_list:
product = ProductFactory.create(es_id=es_id)
OrderProductFactory.create(order=obj, product=product, amount=amount)
amount += 1
willorders_ws_db_session.commit()
response = auth_session.get(
domain_url + "/api/order/%s" % slug
)
data = response.json()
assert data["error"] == {}
assert response.status_code == 404
def test_select_by_slug_unauthorized(domain_url):
response = requests.get(
domain_url + "/api/order/WILLrogerPEREIRAslugBR",
verify=False
)
data = response.json()
ErrorSchema().load(data)
assert response.status_code == 401
| 31.778947 | 108 | 0.709506 | 403 | 3,019 | 5.042184 | 0.203474 | 0.059055 | 0.068898 | 0.103346 | 0.617126 | 0.593996 | 0.531004 | 0.468996 | 0.449803 | 0.449803 | 0 | 0.008929 | 0.183836 | 3,019 | 94 | 109 | 32.117021 | 0.815747 | 0 | 0 | 0.464789 | 0 | 0 | 0.064922 | 0.021862 | 0 | 0 | 0 | 0 | 0.15493 | 1 | 0.056338 | false | 0 | 0.098592 | 0 | 0.15493 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0739714f1a007689f54ff2de4083c8be4beb7bf | 1,492 | py | Python | codes/day11_task2.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | 1 | 2021-12-07T10:54:48.000Z | 2021-12-07T10:54:48.000Z | codes/day11_task2.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | null | null | null | codes/day11_task2.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | null | null | null | """
Advent Of Code 2021
Day 11
Date: 11-12-2021
Site: https://adventofcode.com/2021/day/11
Author: Tayyrov
"""
def isValid(r, c):
return 0 <= c < cols and 0 <= r < rows
def allFlashed(matrix):
return max(max(row) for row in matrix) == 0
input_file = open('../input_files/day11_input', 'r')
matrix = [list(map(int, list(line.strip()))) for line in input_file.readlines()]
rows = len(matrix)
cols = len(matrix[0])
directions = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, -1), (-1, 1), (1, 1), (-1, -1)]
steps = 0
total_flashes = 0
while True:
stack = []
flashed = set()
for r in range(rows):
for c in range(cols):
if matrix[r][c] == 9:
stack.append((r, c))
total_flashes += 1
flashed.add((r, c))
matrix[r][c] = 0
else:
matrix[r][c] += 1
while stack:
r, c = stack.pop()
for dx, dy in directions:
new_r, new_c = r + dx, c + dy
if isValid(new_r, new_c) and (new_r, new_c) not in flashed:
if matrix[new_r][new_c] == 9:
total_flashes += 1
flashed.add((new_r, new_c))
matrix[new_r][new_c] = 0
stack.append((new_r, new_c))
else:
matrix[new_r][new_c] += 1
steps += 1
if allFlashed(matrix):
print(steps)
break | 26.175439 | 84 | 0.47185 | 209 | 1,492 | 3.258373 | 0.30622 | 0.023495 | 0.082232 | 0.093979 | 0.142438 | 0.013216 | 0.013216 | 0.013216 | 0 | 0 | 0 | 0.057297 | 0.380027 | 1,492 | 57 | 85 | 26.175439 | 0.678919 | 0.068365 | 0 | 0.102564 | 0 | 0 | 0.020347 | 0.019593 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0 | 0.051282 | 0.102564 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0741078b2536eb3888dbfacc4793cfdcfba13cb | 4,394 | py | Python | ModelInputsAndRunScripts/WeatheringParamStudy/grain_hill_dakota_friendly_driver.py | gregtucker/tucker_mccoy_hobley_grain_hill_manuscript | 179c73dc4e2f8b971028f23619e12acaf6a0ea7f | [
"MIT"
] | null | null | null | ModelInputsAndRunScripts/WeatheringParamStudy/grain_hill_dakota_friendly_driver.py | gregtucker/tucker_mccoy_hobley_grain_hill_manuscript | 179c73dc4e2f8b971028f23619e12acaf6a0ea7f | [
"MIT"
] | null | null | null | ModelInputsAndRunScripts/WeatheringParamStudy/grain_hill_dakota_friendly_driver.py | gregtucker/tucker_mccoy_hobley_grain_hill_manuscript | 179c73dc4e2f8b971028f23619e12acaf6a0ea7f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 30th 22:21:07 2016
Simple driver for GrainHill model, based on example by Charlie Shobe for
his Brake model.
"""
import os
print('grain_hill_dakota_friendly_driver here. cwd = ' + os.getcwd())
import grain_hill_as_class
from landlab import load_params
import numpy as np
import sys
grain_hill_as_class = reload(grain_hill_as_class)
def two_node_diff(a):
"""Calculate and return diffs over two nodes instead of one."""
N = len(a)
return a[2:] - a[:(N-2)]
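# Illustration (assumed toy values): two_node_diff(np.array([0., 1., 4., 9.])) returns array([4., 8.])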
def calc_fractional_soil_cover(grain_hill):
"""Calculate and return fractional soil versus rock cover."""
num_soil_air_faces = 0.0
num_rock_air_faces = 0.0
grid = grain_hill.grid
node_state = grain_hill.ca.node_state
for link in range(grid.number_of_links):
tail = grid.node_at_link_tail[link]
head = grid.node_at_link_head[link]
if node_state[tail] == 0: # if tail is air, see if head is rock/sed
if node_state[head] == 7:
num_soil_air_faces += 1
elif node_state[head] == 8:
num_rock_air_faces += 1
elif node_state[head] == 0: # if head is air, see if tail is rock/sed
if node_state[tail] == 7:
num_soil_air_faces += 1
elif node_state[tail] == 8:
num_rock_air_faces += 1
total_surf_faces = num_soil_air_faces + num_rock_air_faces
frac_rock = num_rock_air_faces / total_surf_faces
frac_soil = num_soil_air_faces / total_surf_faces
print('Total number of surface faces: ' + str(total_surf_faces))
print('Number of soil-air faces: ' + str(num_soil_air_faces))
print('Number of rock-air faces: ' + str(num_rock_air_faces))
print('Percent rock-air faces: ' + str(100.0 * frac_rock))
print('Percent soil-air faces: ' + str(100.0 * frac_soil))
return frac_soil
dx = 0.1 # assumed node spacing, m
#DAKOTA stuff: setting input files
input_file = 'inputs.txt' #DAKOTA creates this
#INPUT VARIABLES
# read parameter values from file
params = load_params(input_file)
num_cols = params['number_of_node_columns']
num_rows = int(np.round(0.866 * 1.0 * (num_cols - 1)))
print('Launching run with ' + str(num_rows) + ' rows and ' + str(num_cols) + ' cols')
params['number_of_node_columns'] = num_cols
params['number_of_node_rows'] = num_rows
params['disturbance_rate'] = 10.0 ** params['disturbance_rate']
params['uplift_interval'] = 10.0 ** params['uplift_interval']
wprime = 0.4 * (10.0 ** params['weathering_rate'])
params['weathering_rate'] = wprime / params['uplift_interval']
# Calculate run duration
#
# Time for the domain to rise by L, where L is # of node cols
t1 = params['uplift_interval'] * num_cols
print('Time for domain rise:')
print(t1)
# Time to generate, on average, 10 * L disturbance events per column
t2 = 10 * num_cols / params['disturbance_rate']
print('Time for 0.1 (10) L disturbances per column:')
print(t2)
# Take the minimum
tt = min(t1, t2)
# Time to have at least ten uplift events
t3 = 10 * params['uplift_interval']
# Take the max
params['run_duration'] = max(tt, t3)
if params['run_duration'] > 580000.0:
print('WARNING: something is wrong')
params['run_duration'] = 1.0
print('Run duration used:')
print(params['run_duration'])
params['plot_interval'] = 1.1 * params['run_duration']
params['output_interval'] = params['run_duration']
print('Running grainhill, params:')
print(params)
sys.stdout.flush()
# instantiate a GrainHill model
grain_hill = grain_hill_as_class.GrainHill((num_rows, num_cols), **params)
#run the model
grain_hill.run()
# compute and write the results
(elev_profile, soil) = grain_hill.get_profile_and_soil_thickness(grain_hill.grid,
grain_hill.ca.node_state)
max_elev = np.amax(elev_profile)
N = len(elev_profile)
# floor division keeps the slice indices integer (needed under Python 3)
mean_grad_left = np.mean(two_node_diff(elev_profile[:((N+1)//2)])/1.73205)
mean_grad_right = np.mean(-two_node_diff(elev_profile[((N+1)//2):])/1.73205)
mean_grad = (mean_grad_left + mean_grad_right) / 2
frac_soil = calc_fractional_soil_cover(grain_hill)
myfile = open('results.out', 'w')
myfile.write(str(max_elev) + ' ' + str(mean_grad) + ' ' + str(frac_soil)
+ '\n')
myfile.close()
# Make a plot to file
import matplotlib.pyplot as plt
grain_hill.grid.hexplot('node_state')
plt.savefig('final_hill.png')
| 33.037594 | 85 | 0.689577 | 688 | 4,394 | 4.155523 | 0.27907 | 0.044771 | 0.033578 | 0.03148 | 0.181882 | 0.152501 | 0.085345 | 0.052466 | 0.052466 | 0.03148 | 0 | 0.028892 | 0.188666 | 4,394 | 132 | 86 | 33.287879 | 0.773072 | 0.17797 | 0 | 0.047059 | 0 | 0 | 0.199552 | 0.021551 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023529 | false | 0 | 0.070588 | 0 | 0.117647 | 0.188235 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f076bec9cb8c527dacb8169facec297c589e3258 | 4,227 | py | Python | face_rec/api/views.py | dilawarm/Facial-Recognition | 454b4fedfacac7abc59c20520858be69445b07b4 | [
"MIT"
] | 3 | 2020-03-17T18:08:12.000Z | 2021-03-24T09:55:49.000Z | face_rec/api/views.py | dilawarm/Facial-Recognition | 454b4fedfacac7abc59c20520858be69445b07b4 | [
"MIT"
] | 12 | 2020-03-31T11:48:12.000Z | 2022-02-27T01:23:37.000Z | face_rec/api/views.py | dilawarm/Facial-Recognition | 454b4fedfacac7abc59c20520858be69445b07b4 | [
"MIT"
] | 1 | 2020-04-02T10:54:30.000Z | 2020-04-02T10:54:30.000Z | from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from .serializers import IdentitySerializer, UploadSerializer
from .models import Identity, Upload
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
import face_recognition
import os
import cv2
import numpy as np
import pickle
import time
class IdentityView(APIView):
parser_classes = (MultiPartParser, FormParser)
def get(self, request, *args, **kwargs):
posts = Identity.objects.all()
serializer = IdentitySerializer(posts, many=True)
return Response(serializer.data)
def post(self, request, *args, **kwargs):
posts_serializer = IdentitySerializer(data=request.data)
if posts_serializer.is_valid():
name = request.data["name"]
filename = request.data["image"]
posts_serializer.save()
feed_ai(name, filename)
return Response(posts_serializer.data, status=status.HTTP_201_CREATED)
else:
print('error', posts_serializer.errors)
return Response(posts_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UploadView(APIView):
parser_classes = (MultiPartParser, FormParser)
def get(self, request, *args, **kwargs):
posts = Upload.objects.all()
serializer = UploadSerializer(posts, many=True)
return Response(serializer.data)
def post(self, request, *args, **kwargs):
posts_serializer = UploadSerializer(data=request.data)
if posts_serializer.is_valid():
filename = request.data["image"]
posts_serializer.save()
ai_find(filename)
return Response(posts_serializer.data, status=status.HTTP_201_CREATED)
else:
print('error', posts_serializer.errors)
return Response(posts_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET"])
def homepage(request):
options = ["Create identity", "Find identity"]
return Response(status=status.HTTP_200_OK, data={"data": options})
def feed_ai(name, filename):
try:
known_faces = pickle.load(open("api/data/known_faces.pkl","rb"))
known_identities = pickle.load(open("api/data/known_identities.pkl","rb"))
except EOFError:
known_faces = []
known_identities = []
print(known_faces)
print(known_identities)
img = face_recognition.load_image_file(f"media/post_images/{filename}")
img_encoded = face_recognition.face_encodings(img)[0] # We assume that there is only one face of identity per image.
known_faces.append(img_encoded)
known_identities.append(name)
pickle.dump(known_faces, open("api/data/known_faces.pkl", "wb"))
pickle.dump(known_identities, open("api/data/known_identities.pkl", "wb"))
def ai_find(filename):
try:
known_faces = pickle.load(open("api/data/known_faces.pkl", "rb"))
known_identities = pickle.load(open("api/data/known_identities.pkl", "rb"))
except EOFError:
known_faces = []
known_identities = []
img = face_recognition.load_image_file(f"media/upload_images/{filename}")
height, width, channels = img.shape
face_locs = face_recognition.face_locations(img, model="cnn")
img_encodings = face_recognition.face_encodings(img, face_locs)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
for encoding, loc in zip(img_encodings, face_locs):
res = face_recognition.compare_faces(known_faces, encoding, 0.6)
match = None
if True in res:
match = known_identities[res.index(True)]
print(f"Match = {match}")
t_l = (loc[3], loc[0])
b_r = (loc[1], loc[2])
cv2.rectangle(img, t_l, b_r, [0, 255, 0], 3)
t_l = (loc[3], loc[2])
b_r = (loc[1], loc[2]+22)
cv2.rectangle(img, t_l, b_r, [0, 255, 0], cv2.FILLED)
cv2.putText(img, match, (loc[3]+10, loc[2]+15),
cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 0, 0), 2)
cv2.imwrite(f"face_rec_frontend/public/ai_output/{filename}", img) | 39.138889 | 120 | 0.66572 | 532 | 4,227 | 5.103383 | 0.276316 | 0.066298 | 0.024309 | 0.035359 | 0.520074 | 0.490608 | 0.46372 | 0.432044 | 0.403315 | 0.403315 | 0 | 0.018794 | 0.219541 | 4,227 | 108 | 121 | 39.138889 | 0.804183 | 0.014194 | 0 | 0.365591 | 0 | 0 | 0.084253 | 0.06289 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075269 | false | 0 | 0.139785 | 0 | 0.333333 | 0.053763 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f07b5b437d112d27b6d66fac5d7016340f8ac5b9 | 704 | py | Python | RecoParticleFlow/PFProducer/python/pfBasedElectronIso_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoParticleFlow/PFProducer/python/pfBasedElectronIso_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoParticleFlow/PFProducer/python/pfBasedElectronIso_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from CommonTools.ParticleFlow.pfParticleSelection_cff import *
from RecoParticleFlow.PFProducer.electronPFIsolationDeposits_cff import *
from RecoParticleFlow.PFProducer.electronPFIsolationValues_cff import *
pfSelectedElectrons = cms.EDFilter(
"GenericPFCandidateSelector",
src = cms.InputTag("particleFlow"),
cut = cms.string("abs(pdgId())==11")
)
pfBasedElectronIsoTask = cms.Task(
pfParticleSelectionTask,
pfSelectedElectrons,
electronPFIsolationDepositsTask,
electronPFIsolationValuesTask
)
pfBasedElectronIsoSequence = cms.Sequence(pfBasedElectronIsoTask)
#COLIN: is this file used in RECO? in PF2PAT? same for photons.
| 32 | 73 | 0.801136 | 61 | 704 | 9.196721 | 0.704918 | 0.048128 | 0.046346 | 0.103387 | 0.139037 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004862 | 0.12358 | 704 | 21 | 74 | 33.52381 | 0.904376 | 0.088068 | 0 | 0 | 0 | 0 | 0.084375 | 0.040625 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f07b64e6369dba3c4b9b32ff2f8d7592a8b93c09 | 11,343 | py | Python | build/lib.win-amd64-3.7/ifis_tools/from_taudem.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | 3 | 2019-09-05T14:47:02.000Z | 2021-11-12T15:31:56.000Z | build/lib.win-amd64-3.7/ifis_tools/from_taudem.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | 2 | 2019-11-13T21:36:22.000Z | 2019-12-16T21:16:43.000Z | build/lib.win-amd64-3.7/ifis_tools/from_taudem.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | null | null | null | import pandas as pd
import geopandas as gp
import numpy as np
import pylab as pl
from struct import pack, unpack
import io
import gdal
from osgeo import ogr
import osgeo
#from wmf import wmf
pd.options.mode.chained_assignment = None
def read_raster(path_map,isDEMorDIR=False,dxp=None, noDataP = None,isDIR = False,DIRformat = 'r.watershed'):
    'Function: read_map\n'\
    'Description: reads a raster map supported by GDAL.\n'\
    'Required parameters:\n'\
    '   -path_map: path where the map is located.\n'\
    'Optional parameters:\n'\
    '   -isDEMorDIR: passes the map properties to the watershed (cuencas) module\n'\
    '       written in Fortran.\n'\
    '   -dxp: planar cell size of the map.\n'\
    '   -noDataP: value used for null data in the map (-9999).\n'\
    '   -DIRformat: source of the flow-direction map (r.watershed).\n'\
    '       - r.watershed: direction map obtained with the GRASS function.\n'\
    '       - opentopo: direction map from http://www.opentopography.org/\n'\
    '   -isDIR: (FALSE) whether this is a flow-direction map.\n'\
    'Returns:\n'\
    '   If the map is not a DEM or DIR, returns all of its properties in a vector,\n'\
    '   in the following order: ncols,nrows,xll,yll,dx,nodata.\n'\
    '   If it is a DEM or DIR, passes the properties to the watershed module for the\n'\
    '   later delineation of basins and link_ids.\n'
    #Open the map
direction=gdal.Open(path_map)
#Projection
proj = osgeo.osr.SpatialReference(wkt=direction.GetProjection())
EPSG_code = proj.GetAttrValue('AUTHORITY',1)
    #read the map information
ncols=direction.RasterXSize
nrows=direction.RasterYSize
banda=direction.GetRasterBand(1)
noData=banda.GetNoDataValue()
geoT=direction.GetGeoTransform()
dx=geoT[1]
dy = np.abs(geoT[-1])
xll=geoT[0]; yll=geoT[3]-nrows*dy
    #read the map
Mapa=direction.ReadAsArray()
direction.FlushCache()
del direction
return Mapa.T.astype(float),[ncols,nrows,xll,yll,dx,dy,noData],EPSG_code
def save_array2raster(Array, ArrayProp, path, EPSG = 4326, Format = 'GTiff'):
dst_filename = path
    #Raster layout taken from the properties vector
x_pixels = Array.shape[0] # number of pixels in x
y_pixels = Array.shape[1] # number of pixels in y
PIXEL_SIZE_x = ArrayProp[4] # size of the pixel...
PIXEL_SIZE_y = ArrayProp[5] # size of the pixel...
x_min = ArrayProp[2]
y_max = ArrayProp[3] + ArrayProp[5] * ArrayProp[1] # x_min & y_max are like the "top left" corner.
driver = gdal.GetDriverByName(Format)
    #Lookup table to find the matching GDAL data type
NP2GDAL_CONVERSION = {
"uint8": 1,
"int8": 1,
"uint16": 2,
"int16": 3,
"uint32": 4,
"int32": 5,
"float32": 6,
"float64": 7,
"complex64": 10,
"complex128": 11,
}
gdaltype = NP2GDAL_CONVERSION[Array.dtype.name]
    # Create the output dataset with the driver
dataset = driver.Create(
dst_filename,
x_pixels,
y_pixels,
1,
gdaltype,)
    #set the geotransform (spatial reference)
dataset.SetGeoTransform((
x_min, # 0
PIXEL_SIZE_x, # 1
0, # 2
y_max, # 3
0, # 4
-PIXEL_SIZE_y))
    #set the projection from an EPSG code
proj = osgeo.osr.SpatialReference()
texto = 'EPSG:' + str(EPSG)
proj.SetWellKnownGeogCS( texto )
dataset.SetProjection(proj.ExportToWkt())
    #Set the nodata value
    #NOTE: when ArrayProp[-1] is None the branch below relies on the wmf package, whose import is commented out at the top of this module
band = dataset.GetRasterBand(1)
if ArrayProp[-1] is None:
band.SetNoDataValue(wmf.cu.nodata.astype(int).max())
else:
band.SetNoDataValue(int(ArrayProp[-1]))
    #Write the array and flush to disk
dataset.GetRasterBand(1).WriteArray(Array.T)
dataset.FlushCache()
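# Minimal usage sketch (file names are placeholders): round-trip a raster while
# keeping its georeference and projection.
#   arr, props, epsg = read_raster('dem.tif')
#   save_array2raster(arr.astype('float32'), props, 'dem_copy.tif', EPSG=int(epsg))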
def rainfall_raster_ranks(path_rain_frame, path_ranks):
# Reads a raster of the rainfall fields and creates a raster with the ranks
m, p, epsg = read_raster(path_rain_frame)
rank = np.arange(1,m.size+1)
rank = rank.reshape(m.shape)
save_array2raster(rank , p, path_ranks+'.tif', EPSG=int(epsg))
# Creates a ranks polygon based on the raster ranks.
src_ds = gdal.Open(path_ranks+'.tif')
srcband = src_ds.GetRasterBand(1)
#Create output datasource
spatialReference = osgeo.osr.SpatialReference()
spatialReference.ImportFromEPSG(int(epsg))
dst_layername = path_ranks
drv = ogr.GetDriverByName("ESRI Shapefile")
dst_ds = drv.CreateDataSource( dst_layername + ".shp" )
dst_layer = dst_ds.CreateLayer(dst_layername, spatialReference )
gdal.Polygonize( srcband, None, dst_layer, -1, [], callback=None )
dst_ds.Destroy()
def saveBin(lid, lid_vals, count, fn):
io_buffer_size = 4+4*100000
if count > 0:
lid = (lid[lid_vals > 1])
lid_vals = (lid_vals[lid_vals > 1])
fh = io.open(fn, 'wb', io_buffer_size)
fh.write(pack('<I', count))
for vals in zip(lid, lid_vals):
fh.write(pack('<If', *vals))
fh.close()
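# A minimal companion sketch, assuming the layout written by saveBin above:
# a little-endian uint32 count followed by (uint32 link id, float32 value)
# pairs. Note that saveBin writes `count` as passed in, which can differ from
# the number of pairs actually kept after filtering values <= 1.
def readBin(fn):
    with io.open(fn, 'rb') as fh:
        count = unpack('<I', fh.read(4))[0]
        pairs = []
        chunk = fh.read(8)
        while len(chunk) == 8:
            pairs.append(unpack('<If', chunk))
            chunk = fh.read(8)
    return count, pairs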
class network:
def __init__(self, net_path, hills_path = None, hills_epsg = 2163):
        '''Defines the network class that contains all the requirements to set up a project for
        HLM'''
        #Defines the initial parameters for the network
        #net_path may be a shapefile path or an already prepared GeoDataFrame (see get_subnet)
        if isinstance(net_path, gp.GeoDataFrame):
            self.network = net_path.copy()
        else:
            self.network = gp.read_file(net_path)
        if 'LINKNO' in self.network.columns:
            self.network['link'] = self.network['LINKNO']
            self.network.set_index('LINKNO', inplace=True)
        self.network_centroids = None
        self.network_ranks = None
#computes the area for each hillslope
if hills_path is not None:
self.hills = gp.read_file(hills_path)
self.hills.rename(columns={'DN':'link'}, inplace = True)
self.hills.set_index('link', inplace = True)
self.hills.to_crs(epsg = hills_epsg, inplace = True)
idx = self.hills.index.intersection(self.network.index)
self.network['area'] = self.hills.loc[idx].geometry.area
print('Area of each hillslope computed from the hills shapefile')
def network2points(self):
'''Converts the network elements to centroids, ideal to get the
rainfall ranks references'''
x =[]
y = []
for link in self.network.index:
geo = self.network.loc[link, 'geometry']
x.append(geo.centroid.x)
y.append(geo.centroid.y)
net_centroids = gp.GeoDataFrame(self.network[['link','strmOrder']], geometry = gp.points_from_xy(x, y),
crs = self.network.crs)
self.network_centroids = net_centroids
        print('Centroids have been saved under self.network_centroids')
#return net_centroids
def get_rainfall_lookup(self, path_rain_ranks):
        '''Generates the lookup table between the links and the rainfall grid that is going to be used.
        The rain ranks must be the ones obtained with *rainfall_raster_ranks*. For now this operation
        is done one to one.'''
# Reads the rainfall ranks and project it
rain_ranks = gp.read_file(path_rain_ranks)
rain_ranks = rain_ranks.to_crs(self.network.crs)
print('1. rain ranks read and projected to the current crs')
# Checks if centroids are already defined
if self.network_centroids is None:
print('2. Network points not defined, defining them...')
self.network2points()
print('3. Network points defined')
# Performs the spatial join
points_ranked = gp.sjoin(self.network_centroids, rain_ranks, how = 'left', op = 'within')
self.rain_ranks = points_ranked
print('4. ranks obtained results stored in self.rain_ranks')
def rain2links(self, rain, path_rain = None):
'''Converts a grid (tif) file of rainfall to the shape of the network
using the lookup table obtained by *get_rainfall_lookup*'''
if rain is None:
if path_rain is not None:
#Read and transform rainfall to its ranks
rain, p, ep = read_raster(path_rain)
rain = rain.T
rain = rain.reshape(rain.size)
else:
print('Error: No rain variable, no path to rain variable')
else:
rain = rain.reshape(rain.size)
#Put the rainfall in links
self.rain_ranks['rain'] = 0
self.rain_ranks['rain'] = rain[self.rain_ranks['FID']]
# Return the links and the rainfall
return self.rain_ranks['rain']
def write_rvr(self, path, sub_net = None):
'''Writes an rvr file based on a network extracted from the base network'''
#Selects the subnet if it is available
if sub_net is not None:
net_elem = sub_net
else:
net_elem = self.network
#Writes the rvr file for HLM
with open(path,'w',newline='\n') as f:
f.write('%d\n' % net_elem.shape[0])
f.write('\n')
for link in net_elem.index:
f.write('%d\n' % link)
if net_elem.loc[link,'USLINKNO1'] == -1:
f.write('0\n')
else:
f.write('2 %d %d\n' % (net_elem.loc[link,'USLINKNO1'], net_elem.loc[link,'USLINKNO2']))
f.write('\n')
f.close()
def get_subnet(self, link):
'''Allows defining a new network inside of the base network'''
lista = [link]
count = 0
while count < len(lista) or count > self.network.shape[0]:
link = lista[count]
if self.network.loc[link, 'USLINKNO1'] != -1:
lista.append(self.network.loc[link, 'USLINKNO1'])
lista.append(self.network.loc[link, 'USLINKNO2'])
count += 1
return network(self.network.loc[lista])
def get_prm(self):
for_prm = self.network[['DSContArea','Length','AREA']]
for_prm['DSContArea'] = for_prm['DSContArea'] / 1e6
for_prm.shape[0] == self.network.shape[0]
for_prm.loc[for_prm['Length'] == 0, 'Length'] = 1
for_prm.loc[for_prm['AREA'] == 0, 'AREA'] = 1/1e4
for_prm['Length'] = for_prm['Length'] / 1000
self.prm = for_prm
def set_prm_for_model(self, model = 608):
if model == 608:
attr = {'vh':0.02,'a_r':1.67,'a':3.2e-6,'b':17,'c':5.4e-7,'d':32,
'k3':2.045e-6,'ki_fac':0.07,'TopDepth':0.1,'NoFlow':1.48,'Td':999,
'Beta':1.67,'lambda1':0.4,'lambda2':-0.1,'vo':0.435}
self.prm_format = {'DSContArea':'%.3f','Length':'%.3f','AREA':'%.5f',
'vh':'%.4f','a_r':'%.4f','a':'%.2e','b':'%.1f','c':'%.2e','d':'%.1f',
'k3':'%.2e','ki_fac':'%.3f','TopDepth':'%.3f','NoFlow':'%.3f','Td':'%.2f',
'Beta':'%.3f','lambda1':'%.3f','lambda2':'%.2f','vo':'%.3f'}
self.prm = self.prm.assign(**attr)
def write_prm(self, path):
with open(path,'w',newline='\n') as f:
f.write('%d\n\n' % self.prm.shape[0])
for link in self.prm.index:
f.write('%d\n' % link)
for c,k in zip(self.prm.loc[link],self.prm_format.keys()):
fm = self.prm_format[k]+' '
f.write(fm % c)
f.write('\n\n')
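if __name__ == '__main__':
    # Illustrative end-to-end sketch; every path below is hypothetical. It follows the
    # intended order of the helpers and the network class defined above.
    rainfall_raster_ranks('rain_frame.tif', 'rain_ranks')          # writes rain_ranks.tif and rain_ranks.shp
    net = network('network.shp', hills_path='hills.shp')
    net.network2points()                                           # centroids used for the spatial join
    net.get_rainfall_lookup('rain_ranks.shp')                      # link <-> rain-cell lookup table
    link_rain = net.rain2links(None, path_rain='rain_frame.tif')   # rainfall mapped onto the links
    net.get_prm()
    net.set_prm_for_model(model=608)
    net.write_rvr('network.rvr')
    net.write_prm('network.prm')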
| 41.097826 | 111 | 0.595433 | 1,533 | 11,343 | 4.305284 | 0.283757 | 0.041667 | 0.011818 | 0.010909 | 0.060303 | 0.023636 | 0.009697 | 0.009697 | 0.009697 | 0.009697 | 0 | 0.024146 | 0.277087 | 11,343 | 275 | 112 | 41.247273 | 0.780732 | 0.213788 | 0 | 0.068807 | 0 | 0 | 0.185311 | 0.005578 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059633 | false | 0 | 0.045872 | 0 | 0.123853 | 0.03211 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f07ba26722a113a5c1f5b4145b90aa2d48592f80 | 1,510 | py | Python | datascientist/model/classification/skl/linear_model/perceptron.py | kritikaparmar-programmer/DataScientist | b70f25b4afe28a2862a4ebfba163d162f645fba1 | [
"MIT"
] | 1 | 2020-12-05T11:09:13.000Z | 2020-12-05T11:09:13.000Z | datascientist/model/classification/skl/linear_model/perceptron.py | kritikaparmar-programmer/DataScientist | b70f25b4afe28a2862a4ebfba163d162f645fba1 | [
"MIT"
] | null | null | null | datascientist/model/classification/skl/linear_model/perceptron.py | kritikaparmar-programmer/DataScientist | b70f25b4afe28a2862a4ebfba163d162f645fba1 | [
"MIT"
] | null | null | null | from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import Perceptron
import numpy as np
def _perceptron(*, train, test, x_predict=None, metrics, penalty=None, alpha=0.0001, fit_intercept=True, max_iter=1000, tol=0.001,
shuffle=True, verbose=0, eta0=1.0, n_jobs=None, random_state=0, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5,
class_weight=None, warm_start=False):
"""For for info visit :
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html#sklearn.linear_model.Perceptron
"""
model = Perceptron(penalty=penalty, alpha=alpha, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle,
verbose=verbose, eta0=eta0, n_jobs=n_jobs, random_state=random_state, early_stopping=early_stopping, validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, class_weight=class_weight, warm_start=warm_start)
model.fit(train[0], train[1])
model_name = 'Perceptron'
y_hat = model.predict(test[0])
if metrics == 'f1_score':
accuracy = f1_score(test[1], y_hat)
if metrics == 'jaccard_score':
accuracy = jaccard_score(test[1], y_hat)
if metrics == 'accuracy_score':
accuracy = accuracy_score(test[1], y_hat)
if x_predict is None:
return (model_name, accuracy, None)
y_predict = model.predict(x_predict)
return (model_name, accuracy, y_predict)
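if __name__ == '__main__':
    # Illustrative usage sketch with synthetic data (not part of the original module).
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
    name, accuracy, _ = _perceptron(train=(x_train, y_train), test=(x_test, y_test),
                                    metrics='accuracy_score')
    print(name, accuracy)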
| 43.142857 | 142 | 0.75298 | 225 | 1,510 | 4.795556 | 0.306667 | 0.040779 | 0.050046 | 0.066728 | 0.111214 | 0.057461 | 0.042632 | 0 | 0 | 0 | 0 | 0.024596 | 0.138411 | 1,510 | 34 | 143 | 44.411765 | 0.804766 | 0.092053 | 0 | 0 | 0 | 0 | 0.033284 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.208333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f07dd97db7db1ed2d557cbc0875cc980028080d8 | 1,272 | py | Python | src/Engine/Display/sprites_group/debug_3D.py | MiguelReuter/Volley-ball-game | 67d830cc528f3540b236d8191f582adb1827dbde | [
"MIT"
] | 4 | 2019-04-15T20:39:29.000Z | 2022-02-04T10:51:37.000Z | src/Engine/Display/sprites_group/debug_3D.py | MiguelReuter/Volley-ball-game | 67d830cc528f3540b236d8191f582adb1827dbde | [
"MIT"
] | null | null | null | src/Engine/Display/sprites_group/debug_3D.py | MiguelReuter/Volley-ball-game | 67d830cc528f3540b236d8191f582adb1827dbde | [
"MIT"
] | 1 | 2019-11-30T01:05:29.000Z | 2019-11-30T01:05:29.000Z | # encoding : UTF-8
import pygame as pg
from Settings.general_settings import BKGND_TRANSPARENCY_COLOR
class Debug3D(pg.sprite.GroupSingle):
"""
Class for drawing 3D shapes on the game window.
"""
def __init__(self):
pg.sprite.GroupSingle.__init__(self)
self.image = None
self.rect_list = []
self.prev_rect_list = []
def create_image(self, size=(0, 0)):
"""
Create image and set colorkey.
:param tuple(int, int) size: size of image to create
:return: None
"""
self.image = pg.Surface(size)
self.image.fill(BKGND_TRANSPARENCY_COLOR)
self.image.set_colorkey(BKGND_TRANSPARENCY_COLOR)
def update(self, objects):
"""
Update image and list of rects to redraw.
This method is not really optimised. For each frame, the previous rects are cleared and the current rects are
redrawn, even if nothing has changed.
:param list() objects: list of objects with a draw_debug() method which returns None or a list of rects
:return: None
"""
self.prev_rect_list = self.rect_list
self.rect_list = []
# clear previous rects
for r in self.prev_rect_list:
self.image.fill(BKGND_TRANSPARENCY_COLOR, r)
# draw objects
for obj in objects:
rects = obj.draw_debug()
if rects is not None:
self.rect_list += rects
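if __name__ == "__main__":
    # Minimal sketch (hypothetical shape object, not part of the original module): anything
    # exposing draw_debug() that returns None or a list of pygame.Rect can be passed to update().
    class _DummyShape:
        def draw_debug(self):
            return [pg.Rect(10, 10, 32, 32)]

    debug_layer = Debug3D()
    debug_layer.create_image((640, 480))
    debug_layer.update([_DummyShape()])
    print(debug_layer.rect_list)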
| 24.461538 | 107 | 0.711478 | 189 | 1,272 | 4.624339 | 0.412698 | 0.064073 | 0.100687 | 0.05492 | 0.144165 | 0.080092 | 0 | 0 | 0 | 0 | 0 | 0.004892 | 0.196541 | 1,272 | 52 | 108 | 24.461538 | 0.850294 | 0.390723 | 0 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.095238 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f07f25d76a316a079bae3193119a5a5dfd7d834a | 1,600 | py | Python | old/experiment.py | mewmiyu/MDP_HMM_Solvers | 4a2878e663163d5b71084a482d920613bf12a405 | [
"MIT"
] | null | null | null | old/experiment.py | mewmiyu/MDP_HMM_Solvers | 4a2878e663163d5b71084a482d920613bf12a405 | [
"MIT"
] | null | null | null | old/experiment.py | mewmiyu/MDP_HMM_Solvers | 4a2878e663163d5b71084a482d920613bf12a405 | [
"MIT"
] | null | null | null | import numpy as np
from mushroom_rl.environments import GridWorld
if __name__ == '__main__':
from mushroom_rl.core import Core
from mushroom_rl.algorithms.value import QLearning
from mushroom_rl.policy import EpsGreedy
from mushroom_rl.utils.parameters import Parameter
from mushroom_rl.utils.dataset import compute_J
# Set the seed
np.random.seed(1)
# Create the grid environment
env = GridWorld(height=5, width=5, start=(0, 0), goal=(2, 2))
# Using an epsilon-greedy policy
epsilon = Parameter(value=0.1)
pi = EpsGreedy(epsilon=epsilon)
env.reset()
env.render()
learning_rate = Parameter(.1 / 10)
approximator_params = dict(input_shape=10,
output_shape=(env.info.action_space.n,),
n_actions=env.info.action_space.n)
agent = QLearning(env.info, pi, learning_rate=learning_rate)
print(env.info)
# Reinforcement learning experiment
core = Core(agent, env)
# Visualize initial policy for 3 episodes
dataset = core.evaluate(n_episodes=3, render=True)
# Print the average objective value before learning
J = np.mean(compute_J(dataset, env.info.gamma))
print(f'Objective function before learning: {J}')
# Train
core.learn(n_steps=2000, n_steps_per_fit=1, render=False)
# Visualize results for 3 episodes
dataset = core.evaluate(n_episodes=3, render=True)
# Print the average objective value after learning
J = np.mean(compute_J(dataset, env.info.gamma))
print(f'Objective function after learning: {J}')
| 32 | 71 | 0.68875 | 218 | 1,600 | 4.908257 | 0.40367 | 0.06729 | 0.078505 | 0.035514 | 0.306542 | 0.271028 | 0.271028 | 0.271028 | 0.271028 | 0.271028 | 0 | 0.018385 | 0.218125 | 1,600 | 49 | 72 | 32.653061 | 0.83693 | 0.176875 | 0 | 0.142857 | 0 | 0 | 0.065034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.107143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f08085263769cca1034fdede5cc24cb6a39f27eb | 1,520 | py | Python | svg_to_qt/core/maker.py | devpontsho/svg_to_qt | 88a764fc6860a2c92b9d7ac3c4482542c0118a3b | [
"MIT"
] | null | null | null | svg_to_qt/core/maker.py | devpontsho/svg_to_qt | 88a764fc6860a2c92b9d7ac3c4482542c0118a3b | [
"MIT"
] | null | null | null | svg_to_qt/core/maker.py | devpontsho/svg_to_qt | 88a764fc6860a2c92b9d7ac3c4482542c0118a3b | [
"MIT"
] | null | null | null | __author__ = 'Pontsho Maseko'
__version__ = 1.0
__all__ = ['create_svg', 'write_svg']
def create_svg(data: dict) -> None:
"""Create svg from data given.
:param data: Dictionary with instructions to build svg.
:return: None.
"""
# Code
svg_code = '<svg height="{Height}" width="{Width}">{Code}\n</svg>'
# Objects to draws
code = ''
for key in data['draws']:
# Dic
dic = data['draws'][key]
tags = ''
for tag in dic:
# Create style
if tag == 'style':
# Tag
style_tag = 'style="'
# For every style
for style in dic['style']:
style_tag += '{Key}:{Value};'.format(Key=style, Value=dic['style'][style])
# Add style tag to end
tags += ' {}"'.format(style_tag)
# Else other tags
else:
tags += ' {Key}="{Value}"'.format(Key=tag, Value=dic[tag])
# Append to code
code += '\n\t<{Key}{Tags} />'.format(Key = key, Tags=tags)
# Add to the svg_code
svg_code = svg_code.format(Height=data['height'], Width=data['width'], Code=code)
# Write svg
write_svg(svg_code, data['output'])
def write_svg(data: str, output: str = '') -> None:
"""Write out svg
:param data: The svg code.
:param output: Path of the output file.
:return: None.
"""
with open(output, 'w') as f:
f.write(data)
print('SVG : {}'.format(output)) | 24.126984 | 94 | 0.498026 | 182 | 1,520 | 4.021978 | 0.296703 | 0.057377 | 0.045082 | 0.038251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002016 | 0.347368 | 1,520 | 63 | 95 | 24.126984 | 0.735887 | 0.194737 | 0 | 0 | 0 | 0 | 0.167233 | 0.025467 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.083333 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
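if __name__ == '__main__':
    # Illustrative input (hypothetical values, not part of the original module): 'draws' maps an
    # SVG tag name to its attributes, with an optional nested 'style' dictionary.
    sample = {
        'height': 120,
        'width': 240,
        'output': 'sample.svg',
        'draws': {
            'circle': {'cx': 60, 'cy': 60, 'r': 40,
                       'style': {'fill': 'red', 'stroke': 'black'}},
        },
    }
    create_svg(sample)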
f080bcf50e11e07a2a9d6261e3c986af6566e7e5 | 10,747 | py | Python | Analytics/resources/request_for_attribute.py | thanosbnt/SharingCitiesDashboard | 5d123691d1f25d0b85e20e4e8293266bf23c9f8a | [
"Apache-2.0"
] | 4 | 2018-11-21T14:42:18.000Z | 2020-05-11T10:52:59.000Z | Analytics/resources/request_for_attribute.py | thanosbnt/SharingCitiesDashboard | 5d123691d1f25d0b85e20e4e8293266bf23c9f8a | [
"Apache-2.0"
] | 60 | 2018-11-21T15:11:59.000Z | 2019-12-02T10:46:44.000Z | Analytics/resources/request_for_attribute.py | thanosbnt/SharingCitiesDashboard | 5d123691d1f25d0b85e20e4e8293266bf23c9f8a | [
"Apache-2.0"
] | 7 | 2018-11-21T14:42:44.000Z | 2019-11-28T16:24:14.000Z | """
API resource class, retrieve attribute data from the database
Parameters can be passed with url using GET requests
required
:param attribute: attribute name(s)
:type attribute: string
:return The requested attribute details from the database
:rtype: JSON
OR
:param attributedata: attribute name(s)
:type attributedata: string
:return The requested attribute data from the database
:rtype: JSON
optional
:param limit: number of records to be returned (default 30)
:param offset: number of records to skip (default 30)
:param fromdate: start date of the records returned
:param todate: end date of the records returned
Note: fromdate and todate both need to be present in order for date filtering to work
:param operation: mathematical operation to be performed
:param grouped: whether sensor records are to be grouped at hourly intervals
:param per_sensor: whether the sensor records are to be grouped at hourly intervals and per individual sensor. (Defaults to False)
:type limit: integer
:type offset: integer
:type fromdate: date string (YYYY-MM-DD)
:type todate: date string (YYYY-MM-DD)
:type operation: string
:type grouped: boolean
:type per_sensor: boolean
Few example queries:
{URL}?attributedata='<name-of-attribute>&limit=60&offset=60' // Retrieve records but increase limit and skip 60
{URL}?attributedata='<name1><name2>&limit=60&offset=60&fromdate=2018-11-22&todate=2018-11-24'
{URL}?attributedata='<name1><name2>&limit=1000&grouped=True&per_sensor=True&freq='1H' // Retrieves records and groups the data at hourly intervals
{URL}?attributedata='<name1><name2>&limit=1000&grouped=True&per_sensor=False&freq='1H' // Retrieves records and groups the data from all sensors of same attribute at hourly intervals
{URL}?attributedata='<name1><name2>&limit=1000&grouped=True&harmonising_method=long // Harmonises all attributes in the query to match the attribute with the most records. It also reformats the data to be structured as long (row stacked) or wide (column stacked)
"""
from datetime import datetime
import statistics
from flask_restful import Resource, reqparse, inputs
from sqlalchemy import desc
from db import db
from models.theme import Theme
from models.attributes import Attributes
from models.theme import SubTheme
from models.attribute_data import ModelClass
from models.sensor_attribute import SensorAttribute
from models.sensor import Sensor
from resources.predict import predict
from resources.helper_functions import is_number
from resources.request_grouped import request_grouped_data, request_harmonised_data
LIMIT = 30
OFFSET = 30
class RequestForAttribute(Resource):
parser = reqparse.RequestParser()
parser.add_argument('attribute', type=str, store_missing=False)
parser.add_argument('attributedata', type=str, store_missing=False)
parser.add_argument('limit', type=int, store_missing=False)
parser.add_argument('offset', type=int, store_missing=False)
parser.add_argument('fromdate', type=str, store_missing=False)
parser.add_argument('todate', type=str, store_missing=False)
parser.add_argument('operation',
type=str,
choices=('mean', 'median', 'sum'),
store_missing=False)
parser.add_argument('grouped', type=inputs.boolean, store_missing=False)
parser.add_argument('freq', type=str,
choices=('W', '1D', '1H', '1Min'),
store_missing=False)
parser.add_argument('harmonising_method',
type=str,
choices=('long', 'wide', 'geo'),
store_missing=False)
parser.add_argument('per_sensor', type=inputs.boolean, store_missing=False)
parser.add_argument('sensorid', type=str)
parser.add_argument('n_predictions', type=int, store_missing = False)
parser.add_argument('predictions', type=inputs.boolean, store_missing = False)
def get(self):
args = self.parser.parse_args()
attribute_data, attributes, sensorid, n_predictions, predictions, grouped, harmonising_method, per_sensor, freq = None, [], None, 100, None, None, None, None, '1H'
if 'attributedata' in args:
attribute_data = args['attributedata']
if 'attribute' in args and args['attribute'] is not None:
_attributes = args['attribute']
if _attributes != '':
attributes = _attributes.split(',')
if 'grouped' in args:
grouped = args['grouped']
if 'harmonising_method' in args:
harmonising_method = args['harmonising_method']
if 'per_sensor' in args:
per_sensor = args['per_sensor']
if 'freq' in args:
freq = args['freq']
if 'predictions' in args:
predictions = args['predictions']
if 'n_predictions' in args:
n_predictions = args['n_predictions']
if n_predictions >= 100:
n_predictions = 100
if 'sensorid' in args:
sensorid = args['sensorid']
if attribute_data is not None:
global LIMIT, OFFSET
data = None
operation = None
if 'limit' in args and args['limit'] is not None:
LIMIT = args['limit']
if 'offset' in args and args['offset'] is not None:
OFFSET = args['offset']
if 'operation' in args and args['operation'] is not None:
operation = args['operation']
if ('fromdate' in args and args['fromdate'] is not None
and 'todate' in args and args['todate'] is not None):
data = self.get_attribute_data(attribute_data, LIMIT, OFFSET,
args['fromdate'], args['todate'], operation)
if predictions:
data.append(self.get_predictions(attribute_table = data[0]["Attribute_Table"],
sensor_id = sensorid,
n_pred = n_predictions))
else:
if grouped:
if harmonising_method:
data = self.get_attribute_data(attribute_data, LIMIT, OFFSET, operation=operation)
data = request_harmonised_data(data, harmonising_method=harmonising_method)
else:
data = self.get_attribute_data(attribute_data, LIMIT, OFFSET, operation=operation)
data = request_grouped_data(data, per_sensor=per_sensor, freq=freq)
else:
data = self.get_attribute_data(attribute_data, LIMIT, OFFSET, operation=operation)
if predictions:
#### Check for data
if data[0]["Total_Records"] != 0:
#### Check for non numeric data
if is_number(data[0]["Attribute_Values"][0]["Value"]):
data.append(self.get_predictions(attribute_table = data[0]["Attribute_Table"],
sensor_id = sensorid,
n_pred = n_predictions))
else:
print("Cannot predict non-numeric data")
pass
else:
pass
return data, 200
if attributes:
_attrs = []
attr = Attributes.get_by_name_in(attributes)
for a in attr:
_attrs.append(a.json())
return _attrs, 200
return {
"error": "error occured while processing request"
}, 400
'''
@Params
attribute_name: a string passed as a parameter with the URL
limit: Default is 30, number of records to be returned
offset: From where the records need to start
Filters:
fromdate: Format for passing the date is YYYY-MM-DD
todate: Format for passing the date is YYYY-MM-DD
operation: Mathematical operations that can be performed on data
accepted values are: 'mean', 'median', 'sum'
(More to be added)
'''
def get_attribute_data(self, attribute_name, limit, offset,
fromdate=None, todate=None, operation=None):
# clearing previous metadata
db.metadata.clear()
attrs = attribute_name.split(',')
attributes = Attributes.get_by_name_in(attrs)
data = []
for attribute in attributes:
model = ModelClass(attribute.table_name.lower())
count = db.session.query(model).count()
values = []
if fromdate is not None and todate is not None:
if operation is None:
values = db.session.query(model) \
.filter(model.api_timestamp >= fromdate) \
.filter(model.api_timestamp <= todate) \
.limit(limit).offset(abs(count - offset)) \
.all()
else:
values = db.session.query(model) \
.filter(model.api_timestamp >= fromdate) \
.filter(model.api_timestamp <= todate) \
.all()
else:
if operation is None:
### refactored the query to fetch the latest values by default
values = db.session.query(model).order_by(desc(model.api_timestamp)).limit(limit).all()
# values = db.session.query(model).limit(limit) \
# .offset(abs(count - offset)).all()
else:
values = db.session.query(model).all()
_common = {
'Attribute_Table': attribute.table_name,
'Attribute_Name': attribute.name,
'Attribute_Description': attribute.description,
'Attribute_Unit_Value': attribute.unit_value,
'Total_Records': count
}
temp = []
if operation is None:
for i in range(len(values)-1, -1, -1):
temp.append({
'Sensor_id': values[i].s_id,
'Value': values[i].value,
'Timestamp': str(values[i].api_timestamp)
})
_common['Attribute_Values'] = temp
else:
_values = [v.value for v in values]
_int_values = list(map(float, _values))
_operation_result = 0
if operation == 'sum':
_operation_result = sum(_int_values)
elif operation == 'mean':
_operation_result = sum(_int_values) / len(_int_values)
elif operation == 'median':
_operation_result = statistics.median(_int_values)
_common['Result_' + operation] = _operation_result
data.append(_common)
return data
'''
@Params
attribute_table: a string passed as a parameter with the URL
sensor_id: a string passed as a parameter with the URL
'''
def get_predictions(self, attribute_table, sensor_id, n_pred):
db.metadata.clear()
_data = []
_timestamps = []
_limit = 10000
model = ModelClass(attribute_table.lower())
if db.session.query(model).count() < 100:
pred_data = {
"Predictions": "not enough data to make reliable predictions"
}
return pred_data
else:
# check for sensor_id
if sensor_id:
values = db.session.query(model) \
.filter(model.s_id == sensor_id) \
.limit(_limit) \
.all()
if len(values) < 100:
pred_data = {
"Predictions": "not enough data to make reliable predictions"
}
return pred_data
else:
values = db.session.query(model) \
.limit(_limit) \
.all()
if len(values) < 100:
pred_data = {
"Predictions": "not enough data to make reliable predictions"
}
return pred_data
for val in values:
_data.append(float(val.value))
_timestamps.append(val.api_timestamp)
_pred, _mape, _method = predict(_data, _timestamps, n_pred)
if sensor_id:
_sensorid = sensor_id
else:
_sensorid = "All sensors"
pred_data = {
"Sensor_id": _sensorid,
"Forcasting_engine": _method,
"Mean_Absolute_Percentage_Error": _mape,
"Predictions": _pred
}
return pred_data
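if __name__ == '__main__':
    # Illustrative client-side sketch: the host, port, route and attribute name below are
    # hypothetical, but the query parameters are the ones parsed by RequestForAttribute.get().
    import requests
    response = requests.get(
        'http://localhost:5000/attributedata',
        params={'attributedata': 'air_quality', 'limit': 100,
                'grouped': True, 'per_sensor': False, 'freq': '1H'})
    print(response.status_code, response.json())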
| 32.966258 | 265 | 0.69638 | 1,408 | 10,747 | 5.169034 | 0.173295 | 0.025007 | 0.032701 | 0.037923 | 0.352982 | 0.303243 | 0.256252 | 0.244435 | 0.180132 | 0.159522 | 0 | 0.012034 | 0.195869 | 10,747 | 325 | 266 | 33.067692 | 0.830132 | 0.25421 | 0 | 0.297674 | 0 | 0 | 0.125207 | 0.006489 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013953 | false | 0.009302 | 0.060465 | 0 | 0.12093 | 0.004651 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f081117d2b8fd37bf09bbfbdf3068616be9ae9ee | 938 | py | Python | setup.py | dev-hato/sudden-death | 89fb9827bc67f03822e279704ad6af24f47e8fb0 | [
"MIT"
] | null | null | null | setup.py | dev-hato/sudden-death | 89fb9827bc67f03822e279704ad6af24f47e8fb0 | [
"MIT"
] | 127 | 2020-07-11T02:25:31.000Z | 2022-03-30T03:04:22.000Z | setup.py | dev-hato/sudden-death | 89fb9827bc67f03822e279704ad6af24f47e8fb0 | [
"MIT"
] | null | null | null | """
Script for installing the package
"""
from setuptools import setup
def _requires_from_file(filename):
is_in_packages = False
requires = []
with open(filename, encoding='UTF-8') as _f:
for _r in _f:
_r = _r.strip()
if _r == '[packages]':
is_in_packages = True
elif _r.startswith('['):
is_in_packages = False
elif _r and is_in_packages:
requires.append(_r.replace('"', '')
.replace(' ', '')
.replace('=', '', 1))
return requires
setup(
name="sudden_death",
version="0.0.1",
license="MIT",
description="突然の死(ハリフキダシ)を生成するツール",
author="koluku",
url="https://github.com/koluku/sudden-death",
install_requires=_requires_from_file('Pipfile'),
packages=['sudden_death'],
package_data={
"sudden_death": ["py.typed"],
}
)
| 23.45 | 53 | 0.530917 | 97 | 938 | 4.845361 | 0.56701 | 0.034043 | 0.102128 | 0.07234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007911 | 0.326226 | 938 | 39 | 54 | 24.051282 | 0.735759 | 0.018124 | 0 | 0.068966 | 0 | 0 | 0.155531 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.034483 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f081b8754101cb1711ff8ca056bbec341178ea28 | 5,224 | py | Python | behind/chats/consumers.py | teamsalad/behind-api | b0bb92226e76defde4d4a2c48379153e6793a4e9 | [
"MIT"
] | null | null | null | behind/chats/consumers.py | teamsalad/behind-api | b0bb92226e76defde4d4a2c48379153e6793a4e9 | [
"MIT"
] | 6 | 2021-03-19T01:22:45.000Z | 2021-06-10T18:59:30.000Z | behind/chats/consumers.py | teamsalad/behind-api | b0bb92226e76defde4d4a2c48379153e6793a4e9 | [
"MIT"
] | null | null | null | import datetime
import logging
from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from chats.models import ChatMessage, ChatParticipant, ChatRoom
from users.models import User
logger = logging.getLogger('django.request')
class ChatConsumer(AsyncJsonWebsocketConsumer):
"""
ws/ --> Just for websocket stuff.
api/ --> HTTP stuff.
"""
async def connect(self):
"""
Accept connection if user is authorized and
is a participant of the chat room.
:return: None
"""
self.user = self.scope['user']
self.chat_room_id = self.scope['url_route']['kwargs']['id']
self.chat_room_group_name = f'chat_room_{self.chat_room_id}'
if not self.user.is_authenticated:
logger.warning('Anonymous connection occurred')
await self.disconnect(1000)
elif not ChatParticipant.objects.filter(
chat_room_id=self.chat_room_id,
user_id=self.user.id
).exists():
logger.warning(f'User {self.user.username} not participant of chat room')
await self.disconnect(1001)
else:
await self.channel_layer.group_add(
self.chat_room_group_name,
self.channel_name
)
await self.accept()
await self.channel_layer.group_send(
self.chat_room_group_name,
{
'type': 'chat_state',
'user_id': self.user.id,
'state': 'CONNECTED'
}
)
async def disconnect(self, code):
# TODO: use code to make error messages for disconnection reasons
await self.channel_layer.group_send(
self.chat_room_group_name,
{
'type': 'chat_state',
'user_id': self.user.id,
'state': 'DISCONNECTED'
}
)
await self.channel_layer.group_discard(
self.chat_room_group_name,
self.channel_name
)
async def receive_json(self, content, **kwargs):
# Send message to room group
if content['type'] == 'chat_message':
await self.channel_layer.group_send(
self.chat_room_group_name,
{
'type': content['type'],
'message': content['message'],
'user_id': self.user.id
}
)
if content['type'] == 'chat_timer':
await self.channel_layer.group_send(
self.chat_room_group_name,
{
'type': content['type'],
'time_left': content['time_left'],
'user_id': self.user.id
}
)
if content['type'] == 'chat_state':
await self.channel_layer.group_send(
self.chat_room_group_name,
{
'type': content['type'],
'state': content['state'],
'user_id': self.user.id
}
)
async def chat_message(self, event):
# Save chat message
if self.user.id == event['user_id']:
await database_sync_to_async(ChatMessage.objects.create)(
message=event['message'],
user_id=self.user.id,
chat_room_id=self.chat_room_id
)
await self.send_push_notification(
event['user_id'],
event['message']
)
await self.send_json({
'user_id': event['user_id'],
'message': event['message']
})
async def chat_timer(self, event):
hour, minute, second = (int(x) for x in event['time_left'].split(":"))
chat_room = await self.update_time(
self.chat_room_id,
datetime.time(0, minute, second)
)
await self.send_json({
'user_id': event['user_id'],
'time_left': chat_room.time_left.strftime("%H:%M:%S")
})
@database_sync_to_async
def update_time(self, chat_room_id, time_left):
chat_room = ChatRoom.objects.get(id=chat_room_id)
if datetime.time(0, 0, 0) <= time_left < chat_room.time_left:
chat_room.time_left = time_left
chat_room.save()
return chat_room
@database_sync_to_async
def send_push_notification(self, user_id, message):
other_participant = ChatParticipant.objects \
.filter(chat_room_id=self.chat_room_id) \
.exclude(user_id=user_id) \
.first()
user = User.objects.get(id=other_participant.user_id)
if user.can_send_push_notification('chat'):
user.active_device().send_message(
title=self.user.username,
body=message,
sound='default',
data={'chat_room_id': self.chat_room_id}
)
async def chat_state(self, event):
await self.send_json({
'user_id': event['user_id'],
'state': event['state']
})
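# Illustrative wiring and payload sketch (the route below is hypothetical; channels >= 3 is
# assumed for as_asgi()). The consumer reads the chat-room id from the "id" URL kwarg and
# dispatches on the "type" key of each JSON frame, e.g.:
#   {"type": "chat_message", "message": "hello"}
#   {"type": "chat_timer", "time_left": "00:04:30"}
#   {"type": "chat_state", "state": "CONNECTED"}
#
# from django.urls import re_path
# websocket_urlpatterns = [
#     re_path(r"^ws/chat-rooms/(?P<id>\d+)/$", ChatConsumer.as_asgi()),
# ]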
| 34.143791 | 85 | 0.542496 | 570 | 5,224 | 4.721053 | 0.221053 | 0.092159 | 0.071349 | 0.04162 | 0.404682 | 0.342995 | 0.282423 | 0.264586 | 0.23783 | 0.175399 | 0 | 0.003557 | 0.354135 | 5,224 | 152 | 86 | 34.368421 | 0.794013 | 0.031394 | 0 | 0.275591 | 0 | 0 | 0.100897 | 0.005911 | 0 | 0 | 0 | 0.006579 | 0 | 1 | 0.015748 | false | 0 | 0.047244 | 0 | 0.07874 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0841df5555e4cc44a467a6158c1647600c8bbab | 3,758 | py | Python | system/ports/config/sys_ports.py | apapillon/core | f0a1fe8dbd292ff75dcc8e60dcd59ff67b165db1 | [
"0BSD"
] | 1 | 2020-03-10T07:27:17.000Z | 2020-03-10T07:27:17.000Z | system/ports/config/sys_ports.py | apapillon/core | f0a1fe8dbd292ff75dcc8e60dcd59ff67b165db1 | [
"0BSD"
] | null | null | null | system/ports/config/sys_ports.py | apapillon/core | f0a1fe8dbd292ff75dcc8e60dcd59ff67b165db1 | [
"0BSD"
] | null | null | null | # coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
################################################################################
#### Business Logic ####
################################################################################
def genPortsHeaderFile(symbol, event):
symbol.setEnabled(event["value"])
def genPortsHeaderMappingFile(symbol, event):
symbol.setEnabled(event["value"])
def genPortsSystemDefFile(symbol, event):
symbol.setEnabled(event["value"])
############################################################################
#### Code Generation ####
############################################################################
genSysPortsCommonFiles = harmonyCoreComponent.createBooleanSymbol("ENABLE_SYS_PORTS", None)
genSysPortsCommonFiles.setLabel("Enable System Ports")
genSysPortsCommonFiles.setDefaultValue(False)
portsHeaderFile = harmonyCoreComponent.createFileSymbol("PORTS_HEADER", None)
portsHeaderFile.setSourcePath("system/ports/templates/sys_ports.h.ftl")
portsHeaderFile.setOutputName("sys_ports.h")
portsHeaderFile.setDestPath("system/ports/")
portsHeaderFile.setProjectPath("config/" + configName + "/system/ports/")
portsHeaderFile.setType("HEADER")
portsHeaderFile.setOverwrite(True)
portsHeaderFile.setEnabled(False)
portsHeaderFile.setDependencies(genPortsHeaderFile, ["ENABLE_SYS_PORTS"])
portsHeaderFile.setMarkup(True)
portsHeaderMappingFile = harmonyCoreComponent.createFileSymbol("PORTS_MAPPING", None)
portsHeaderMappingFile.setSourcePath("system/ports/templates/sys_ports_mapping.h.ftl")
portsHeaderMappingFile.setOutputName("sys_ports_mapping.h")
portsHeaderMappingFile.setDestPath("system/ports/")
portsHeaderMappingFile.setProjectPath("config/" + configName + "/system/ports/")
portsHeaderMappingFile.setType("HEADER")
portsHeaderMappingFile.setOverwrite(True)
portsHeaderMappingFile.setEnabled(False)
portsHeaderMappingFile.setDependencies(genPortsHeaderMappingFile, ["ENABLE_SYS_PORTS"])
portsHeaderMappingFile.setMarkup(True)
portsSystemDefFile = harmonyCoreComponent.createFileSymbol("PORTS_DEF", None)
portsSystemDefFile.setType("STRING")
portsSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
portsSystemDefFile.setSourcePath("system/ports/templates/system/definitions.h.ftl")
portsSystemDefFile.setMarkup(True)
portsSystemDefFile.setOverwrite(True)
portsSystemDefFile.setEnabled(False)
portsSystemDefFile.setDependencies(genPortsSystemDefFile, ["ENABLE_SYS_PORTS"])
| 48.179487 | 91 | 0.716072 | 358 | 3,758 | 7.458101 | 0.432961 | 0.02397 | 0.020974 | 0.030337 | 0.105243 | 0.074532 | 0.029963 | 0 | 0 | 0 | 0 | 0.001446 | 0.079564 | 3,758 | 77 | 92 | 48.805195 | 0.770454 | 0.363225 | 0 | 0.081081 | 0 | 0 | 0.20311 | 0.082604 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0 | 0 | 0.081081 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2b4ecce69c545c24f933e708da03bb84a669f14 | 1,152 | py | Python | feature_loader.py | StomachCold/HCTransformers | 47dbecf8689989e4d8cd4024f330931fe8615ddf | [
"Apache-2.0"
] | 3 | 2022-03-18T03:42:49.000Z | 2022-03-27T08:24:06.000Z | feature_loader.py | StomachCold/HCTransformers | 47dbecf8689989e4d8cd4024f330931fe8615ddf | [
"Apache-2.0"
] | null | null | null | feature_loader.py | StomachCold/HCTransformers | 47dbecf8689989e4d8cd4024f330931fe8615ddf | [
"Apache-2.0"
] | 1 | 2022-03-21T09:21:17.000Z | 2022-03-21T09:21:17.000Z | import torch
import numpy as np
import h5py
class SimpleHDF5Dataset:
def __init__(self, file_handle = None):
if file_handle is None:
self.f = ''
self.all_feats_dset = []
self.all_labels = []
self.total = 0
else:
self.f = file_handle
self.all_feats_dset = self.f['all_feats'][...]
self.all_labels = self.f['all_labels'][...]
self.total = self.all_labels.shape[0]
def __getitem__(self, i):
return torch.Tensor(self.all_feats_dset[i,:]), int(self.all_labels[i])
def __len__(self):
return self.total
def init_loader(filename):
with h5py.File(filename, 'r') as f:
fileset = SimpleHDF5Dataset(f)
feats = fileset.all_feats_dset
labels = fileset.all_labels
while np.sum(feats[-1]) == 0:
feats = np.delete(feats,-1,axis = 0)
labels = np.delete(labels,-1,axis = 0)
class_list = np.unique(np.array(labels)).tolist()
inds = range(len(labels))
cl_data_file = {}
for cl in class_list:
cl_data_file[cl] = []
for ind in inds:
cl_data_file[labels[ind]].append( feats[ind])
return cl_data_file
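if __name__ == '__main__':
    # Illustrative sketch (hypothetical file name): write a tiny file in the layout init_loader
    # expects ('all_feats' and 'all_labels' datasets) and group the features per class.
    feats = np.arange(24, dtype=np.float32).reshape(6, 4) + 1.0
    labels = np.array([0, 0, 1, 1, 2, 2])
    with h5py.File('toy_feats.hdf5', 'w') as f:
        f.create_dataset('all_feats', data=feats)
        f.create_dataset('all_labels', data=labels)
    cl_data_file = init_loader('toy_feats.hdf5')
    print({cl: len(v) for cl, v in cl_data_file.items()})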
| 29.538462 | 78 | 0.594618 | 158 | 1,152 | 4.082278 | 0.322785 | 0.065116 | 0.074419 | 0.074419 | 0.062016 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013205 | 0.27691 | 1,152 | 38 | 79 | 30.315789 | 0.761104 | 0 | 0 | 0 | 0 | 0 | 0.017361 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.088235 | 0.058824 | 0.323529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2b7675064b7dc3d50aefeec6e781a2c83d0d44b | 2,337 | py | Python | importers/intrinsics.py | tofis/human4d_dataset | ffa87275302c25ef16cec6ab99acdb9410b762b8 | [
"MIT"
] | 8 | 2020-11-20T15:10:10.000Z | 2022-01-17T08:21:10.000Z | importers/intrinsics.py | tofis/human4d_dataset | ffa87275302c25ef16cec6ab99acdb9410b762b8 | [
"MIT"
] | 1 | 2021-02-10T18:35:59.000Z | 2021-04-23T12:13:03.000Z | importers/intrinsics.py | tofis/human4d_dataset | ffa87275302c25ef16cec6ab99acdb9410b762b8 | [
"MIT"
] | 3 | 2020-12-10T02:48:08.000Z | 2021-07-18T12:06:20.000Z | import json
import numpy
import torch
#intrinsics_dict = None
def load_intrinsics_repository(filename, stream='Depth'):
#global intrinsics_dict
with open(filename, 'r') as json_file:
intrinsics_repository = json.load(json_file)
if (stream == 'Depth'):
intrinsics_dict = dict((intrinsics['Device'], \
intrinsics['Depth Intrinsics'][0]['1280x720'])\
for intrinsics in intrinsics_repository)
elif (stream == 'RGB'):
intrinsics_dict = dict((intrinsics['Device'], \
intrinsics['Color Intrinsics'][0]['1280x720'])\
for intrinsics in intrinsics_repository)
return intrinsics_dict
def load_rotation_translation(filename):
#global intrinsics_dict
with open(filename, 'r') as json_file:
intrinsics_repository = json.load(json_file)
intrinsics_dict = dict((intrinsics['Device'], \
{
'R' : numpy.asarray(intrinsics['Color Depth Rotation'], dtype=numpy.float32).reshape([1, 3, 3]),
't' : numpy.asarray(intrinsics['Color Depth Translation'], dtype=numpy.float32).reshape([3, 1])
})\
for intrinsics in intrinsics_repository)
return intrinsics_dict
def get_intrinsics(name, intrinsics_dict, scale=1, data_type=torch.float32):
#global intrinsics_dict
if intrinsics_dict is not None:
intrinsics_data = numpy.array(intrinsics_dict[name])
intrinsics = torch.tensor(intrinsics_data).reshape(3, 3).type(data_type)
intrinsics[0, 0] = intrinsics[0, 0] / scale
intrinsics[0, 2] = intrinsics[0, 2] / scale
intrinsics[1, 1] = intrinsics[1, 1] / scale
intrinsics[1, 2] = intrinsics[1, 2] / scale
intrinsics_inv = intrinsics.inverse()
return intrinsics, intrinsics_inv
raise ValueError("Intrinsics repository is empty")
def get_intrinsics_with_scale(intrinsics_original, scale=1, data_type=torch.float32):
intrinsics = intrinsics_original.clone().detach()
intrinsics[0, 0] = intrinsics[0, 0] / scale
intrinsics[0, 2] = intrinsics[0, 2] / scale
intrinsics[1, 1] = intrinsics[1, 1] / scale
intrinsics[1, 2] = intrinsics[1, 2] / scale
intrinsics_inv = intrinsics.inverse()
return intrinsics, intrinsics_inv | 41.732143 | 112 | 0.649551 | 267 | 2,337 | 5.535581 | 0.209738 | 0.113667 | 0.032476 | 0.056834 | 0.638024 | 0.571719 | 0.476996 | 0.476996 | 0.427605 | 0.34912 | 0 | 0.036415 | 0.2362 | 2,337 | 56 | 113 | 41.732143 | 0.791597 | 0.037655 | 0 | 0.545455 | 0 | 0 | 0.069457 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.068182 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
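if __name__ == '__main__':
    # Illustrative sketch with a synthetic entry (device name and values are made up); the real
    # dictionaries come from load_intrinsics_repository / load_rotation_translation above.
    demo_repo = {'cam0': [920.0, 0.0, 640.0,
                          0.0, 920.0, 360.0,
                          0.0, 0.0, 1.0]}
    K, K_inv = get_intrinsics('cam0', demo_repo, scale=2)
    print(K)
    print(K_inv)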
b2b99e78920f95b5e69d4f13258b5a68be2cca52 | 15,478 | py | Python | pyscisci/datasource/DBLP.py | jisungyoon/pyscisci | 0b687a4389633951c9112cf9a09c1525ed8c5e12 | [
"MIT"
] | null | null | null | pyscisci/datasource/DBLP.py | jisungyoon/pyscisci | 0b687a4389633951c9112cf9a09c1525ed8c5e12 | [
"MIT"
] | null | null | null | pyscisci/datasource/DBLP.py | jisungyoon/pyscisci | 0b687a4389633951c9112cf9a09c1525ed8c5e12 | [
"MIT"
] | null | null | null | import os
import sys
import json
import gzip
import pandas as pd
import numpy as np
from nameparser import HumanName
import requests
from lxml import etree
from io import BytesIO
# determine if we are loading from a jupyter notebook (to make pretty progress bars)
if 'ipykernel' in sys.modules:
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
from pyscisci.datasource.readwrite import load_preprocessed_data, load_int, load_float, load_html_str
from pyscisci.database import BibDataBase
class DBLP(BibDataBase):
"""
Base class for DBLP interface.
The DBLP comes as a single xml file. It can be downloaded from [DBLP](https://dblp.uni-trier.de/) via `download_from_source`
There is no citation information!
"""
def __init__(self, path2database='', keep_in_memory=False, show_progress=True):
self.path2database = path2database
self.keep_in_memory = keep_in_memory
self.show_progress = show_progress
self._affiliation_df = None
self._pub_df = None
self._journal_df = None
self._author_df = None
self._pub2year = None
self._pub2ref_df = None
self._pub2refnoself_df = None
self._author2pub_df = None
self._paa_df = None
self._pub2field_df=None
self._fieldinfo_df = None
self.PublicationIdType = int
self.AffiliationIdType = int
self.AuthorIdType = str
def _blank_dblp_publication(self, PublicationId = 0):
record = {}
record['PublicationId'] = PublicationId
record['Title'] = ''
record['Year'] = 0
record['Volume'] = 0
record['Number'] = ''
record['Pages'] = ''
record['JournalId'] = ''
record['EE'] = ''
record['TeamSize'] = 0
record['Month'] = 1
record['DocType'] = ''
return record
def _save_dataframes(self, ifile, publication_df, author_df, author_columns, author2pub_df):
publication_df = pd.DataFrame(publication_df)
publication_df['PublicationId'] = publication_df['PublicationId'].astype(int)
publication_df['Year'] = publication_df['Year'].astype(int)
publication_df['Volume'] = pd.to_numeric(publication_df['Volume'])
publication_df['TeamSize'] = publication_df['TeamSize'].astype(int)
publication_df.to_hdf( os.path.join(self.path2database,'publication', 'publication{}.hdf'.format(ifile)), key = 'pub', mode='w')
author_df = pd.DataFrame(author_df, columns = author_columns)
author_df['AuthorId'] = author_df['AuthorId'].astype(int)
author_df.to_hdf( os.path.join(self.path2database,'author', 'author{}.hdf'.format(ifile)), key = 'author', mode='w')
author2pub_df = pd.DataFrame(author2pub_df, columns = ['PublicationId', 'AuthorId', 'AuthorSequence'], dtype=int)
author2pub_df.to_hdf( os.path.join(self.path2database,'publicationauthor', 'publicationauthor{}.hdf'.format(ifile)), key = 'pa', mode='w')
def preprocess(self, xml_file_name = 'dblp.xml.gz', process_name=True, num_file_lines=10**6, show_progress=True):
"""
Bulk preprocess of the DBLP raw data.
Parameters
----------
:param process_name: bool, default True
If True, then when processing the raw file, the package `NameParser <https://nameparser.readthedocs.io/en/latest/>`_
will be used to split author FullNames.
:param xml_file_name: str, default 'dblp.xml.gz'
The xml file name.
:param num_file_lines: int, default 10**6
The processed data will be saved into smaller DataFrames, each with `num_file_lines` rows.
:param show_progress: bool, default True
Show progress with processing of the data.
"""
ACCEPT_DOCTYPES = set(['article', 'inproceedings', 'proceedings', 'book', 'incollection', 'phdthesis', 'mastersthesis'])
REJECT_DOCTYPES = set(['www'])
DATA_ITEMS = ['title', 'booktitle', 'year', 'journal', 'ee', 'url', 'month', 'mdate', 'isbn', 'publisher']
SKIP_FIELDS = ['note', 'cite', 'cdrom', 'crossref', 'editor', 'series', 'tt', 'school', 'chapter', 'address']
doctype = {'article': 'j', 'book':'b', '':'', 'phdthesis':'phd', 'proceedings':'c', 'inproceedings':'c',
'mastersthesis':'ms', 'incollection':'c'}
html_format_keys = ['<sub>', '</sub>', '<sup>', '</sup>', '<i>', '</i>']
if show_progress:
print("Starting to preprocess the DBLP database.")
if not os.path.exists(os.path.join(self.path2database, 'publication')):
os.mkdir(os.path.join(self.path2database, 'publication'))
if not os.path.exists(os.path.join(self.path2database, 'author')):
os.mkdir(os.path.join(self.path2database, 'author'))
if not os.path.exists(os.path.join(self.path2database, 'publicationauthor')):
os.mkdir(os.path.join(self.path2database, 'publicationauthor'))
publication_df = []
author_df = []
author2pub_df = []
journal_df = []
PublicationId = 1
AuthorId = 1
aname2aid = {}
author_columns = ['AuthorId', 'FullName']
if process_name:
author_columns += ['LastName', 'FirstName', 'MiddleName']
JournalId = 1
jname2jid = {}
pub_record = self._blank_dblp_publication(PublicationId)
pub_authors = []
AuthorCount = 0
ifile = 0
# read the dtd - this can take a while
path2database = self.path2database # local name so the nested resolver class can use it without self
class DTDResolver(etree.Resolver):
def resolve(self, system_url, public_id, context):
return self.resolve_filename(os.path.join(path2database, system_url), context)
if '.gz' in xml_file_name:
with gzip.open(os.path.join(self.path2database, xml_file_name), 'r') as infile:
xml_file = infile.read()
else:
with open(os.path.join(self.path2database, xml_file_name), 'r') as infile:
xml_file = infile.read().encode('latin1')
# extract the desired fields from the XML tree #
bytesxml = BytesIO(xml_file)
xmltree = etree.iterparse(bytesxml, load_dtd=True, resolve_entities=True)
xmltree.resolvers.add(DTDResolver())
if show_progress:
print("Xml tree parsed, iterating through elements.")
last_position = 0
xml_size = bytesxml.getbuffer().nbytes
with tqdm(total=xml_size, unit='iB', unit_scale=True, desc='dblp.xml', leave=True, disable=not show_progress) as pbar:
for event, elem in xmltree:
if elem.tag == 'title' or elem.tag == 'booktitle':
pub_record['Title'] = load_html_str(elem.text)
elif elem.tag == 'year':
pub_record['Year'] = load_int(elem.text)
elif elem.tag == 'month':
pub_record['Month'] = load_int(elem.text)
elif elem.tag == 'volume':
pub_record['Volume'] = load_int(elem.text)
elif elem.tag == 'number':
pub_record['Number'] = load_html_str(elem.text)
elif elem.tag == 'pages':
pub_record['Pages'] = load_html_str(elem.text)
elif elem.tag == 'journal':
pub_record['JournalId'] = load_html_str(elem.text)
elif elem.tag == 'url':
pub_record['URL'] = load_html_str(elem.text)
elif elem.tag == 'ee':
pub_record['EE'] = load_html_str(elem.text)
elif elem.tag == 'author':
AuthorCount += 1
fullname = load_html_str(elem.text)
if aname2aid.get(fullname, None) is None:
if process_name:
fullname = ''.join([i for i in fullname if not i.isdigit()]).strip()
hname = HumanName(fullname)
author_df.append([AuthorId, fullname, hname.last, hname.first, hname.middle])
else:
author_df.append([AuthorId, fullname])
aname2aid[fullname] = AuthorId
AuthorId += 1
pub_authors.append([PublicationId, aname2aid[fullname], AuthorCount])
elif elem.tag in ACCEPT_DOCTYPES:
pub_record['TeamSize'] = AuthorCount
pub_record['DocType'] = doctype[load_html_str(elem.tag)]
publication_df.append(pub_record)
author2pub_df.extend(pub_authors)
PublicationId += 1
pub_record = self._blank_dblp_publication(PublicationId)
AuthorCount = 0
pub_authors = []
# update progress bar
pbar.update(bytesxml.tell() - last_position)
last_position = bytesxml.tell()
if num_file_lines > 0 and (PublicationId % num_file_lines) == 0:
self._save_dataframes(ifile, publication_df, author_df, author_columns, author2pub_df)
ifile += 1
publication_df = []
author_df = []
author2pub_df = []
elif elem.tag in REJECT_DOCTYPES:
# the record was from a rejected category so reset record
pub_record = self._blank_dblp_publication(PublicationId)
AuthorCount = 0
pub_authors = []
elif elem.tag in SKIP_FIELDS:
pass
del xmltree
self._save_dataframes(ifile, publication_df, author_df, author_columns, author2pub_df)
def download_from_source(self, source_url='https://dblp.uni-trier.de/xml/', xml_file_name = 'dblp.xml.gz',
dtd_file_name = 'dblp.dtd', show_progress=True):
"""
Download the DBLP raw xml file and the dtd formating information from [DBLP](https://dblp.uni-trier.de/).
1. dblp.xml.gz - the compressed xml file
2. dblp.dtd - the dtd containing xml syntax
The files will be saved to the path specified by `path2database`.
Parameters
----------
:param source_url: str, default 'https://dblp.uni-trier.de/xml/'
The base url from which to download.
:param xml_file_name: str, default 'dblp.xml.gz'
The xml file name.
:param dtd_file_name: str, default 'dblp.dtd'
The dtd file name.
:param show_progress: bool, default True
Show progress with processing of the data.
"""
block_size = 1024 #1 Kibibyte
req_stream = requests.get(os.path.join(source_url, xml_file_name), stream=True)
total_size = int(req_stream.headers.get('content-length', 0))
with tqdm(total=total_size, unit='iB', unit_scale=True, desc='dblp.xml.gz', leave=True, disable=not show_progress) as pbar:
with open(os.path.join(self.path2database, xml_file_name), "wb") as outfile:
for block in req_stream.iter_content(block_size):
outfile.write(block)
# update progress bar
pbar.update(len(block))
with open(os.path.join(self.path2database, dtd_file_name), 'w') as outfile:
outfile.write(requests.get(os.path.join(source_url, dtd_file_name)).content.decode('latin1'))
def parse_affiliations(self, preprocess = False):
raise NotImplementedError("DBLP is stored as a single xml file. Run preprocess to parse the file.")
def parse_authors(self, preprocess = False, process_name = True, num_file_lines = 5*10**6):
raise NotImplementedError("DBLP is stored as a single xml file. Run preprocess to parse the file.")
def parse_publications(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError("DBLP is stored as a single xml file. Run preprocess to parse the file.")
def parse_references(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError("DBLP does not contain reference or citation information.")
def parse_publicationauthoraffiliation(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError("DBLP is stored as a single xml file. Run preprocess to parse the file.")
def parse_fields(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError("DBLP does not contain field information.")
@property
def author2pub_df(self):
"""
The DataFrame keeping all publication, author relationships. Columns may depend on the specific datasource.
Columns
-------
'PublicationId', 'AuthorId', 'AuthorOrder'
"""
if self._author2pub_df is None:
if self.keep_in_memory:
self._author2pub_df = self.load_publicationauthor(show_progress=self.show_progress)
else:
return self.load_publicationauthor(show_progress=self.show_progress)
return self._author2pub_df
def load_publicationauthor(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, show_progress=False):
"""
Load the PublicationAuthor DataFrame from a preprocessed directory. For DBLP, you must run preprocess before
the dataframe is available for use.
Parameters
----------
preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
columns : list, default None, Optional
Load only this subset of columns
isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
(see `pandas.DataFram.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
PublicationAuthor DataFrame.
"""
if show_progress:
show_progress='Loading PublicationAuthor'
if preprocess and os.path.exists(os.path.join(self.path2database, 'publicationauthor')):
return load_preprocessed_data('publicationauthor', path2database=self.path2database, columns=columns, isindict=isindict,
duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna, show_progress=show_progress)
else:
raise NotImplementedError("DBLP is stored as a single xml file. Run preprocess to parse the file.")
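if __name__ == "__main__":
    # Illustrative workflow sketch (the database path is hypothetical): download the raw dump,
    # preprocess it into HDF5 DataFrames, then load the publication-author table.
    dblp = DBLP(path2database='/data/dblp/', keep_in_memory=False)
    dblp.download_from_source()
    dblp.preprocess(xml_file_name='dblp.xml.gz', process_name=True)
    pub2author = dblp.load_publicationauthor(preprocess=True)
    print(pub2author.head())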
| 40.624672 | 151 | 0.614227 | 1,786 | 15,478 | 5.162374 | 0.212206 | 0.016703 | 0.018438 | 0.021258 | 0.337093 | 0.318547 | 0.280694 | 0.240022 | 0.181453 | 0.160629 | 0 | 0.008722 | 0.281496 | 15,478 | 380 | 152 | 40.731579 | 0.82034 | 0.194276 | 0 | 0.15311 | 0 | 0 | 0.127709 | 0.001925 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066986 | false | 0.004785 | 0.066986 | 0.004785 | 0.167464 | 0.009569 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2bbac266f03227d161bb8138ea586907d13b0f3 | 3,617 | py | Python | Python/ds/Bestfit.py | Khushboo85277/NeoAlgo | 784d7b06c385336425ed951918d1ab37b854d29f | [
"MIT"
] | 897 | 2020-06-25T00:12:52.000Z | 2022-03-24T00:49:31.000Z | Python/ds/Bestfit.py | adarshnjena/NeoAlgo | 77a92858d2bf970054ef31c2f55a6d79917a786a | [
"MIT"
] | 5,707 | 2020-06-24T17:53:28.000Z | 2022-01-22T05:03:15.000Z | Python/ds/Bestfit.py | adarshnjena/NeoAlgo | 77a92858d2bf970054ef31c2f55a6d79917a786a | [
"MIT"
] | 1,817 | 2020-06-25T03:51:05.000Z | 2022-03-29T05:14:07.000Z | """
Unlike the first-fit method, which allocates the first free block of size >= N that it finds,
the best-fit strategy keeps searching for a suitable block whose size is closest to the size of the request.
The program below is an implementation of the best-fit algorithm using an array data structure.
"""
# Block class is used as the fixed memory blocks for allocation
class Block:
def __init__(self):
self.size = 0
self.ID = 0
self.fragment = 0
# process class is used for allocating memory for the requesting processes
class process:
def __init__(self):
self.Num = 0
self.size = 0
self.block = None
# initialiseBlocks function initializes all the blocks with sizes and id
def initialiseBlocks(arr, sizes, n):
for i in range(n):
arr[i].size = sizes[i]
arr[i].fragment = sizes[i]
arr[i].ID = i + 1
# printResult function prints the result of the memory allocation strategy
def printResult(arr2, numOfProcess):
print(
"Process No Process Size Block ID Block Size Block Fragment"
)
for i in range(numOfProcess):
print(
str(arr2[i].Num)
+ " "
+ str(arr2[i].size)
+ " "
+ str(arr2[i].block.ID)
+ " "
+ str(arr2[i].block.size)
+ " "
+ str(arr2[i].block.fragment)
)
# bestfit function allocates memory to processes using bestfit allocation algorithm
def bestfit(arr, sizes, n, arr2, numOfProcess):
minBlock = Block()
for i in range(numOfProcess):
min = float('inf')
for j in range(n):
if arr2[i].size <= arr[j].fragment and arr[j].fragment < min:
min = arr[j].fragment
minBlock = arr[j]
minBlock.fragment = minBlock.fragment - arr2[i].size
arr2[i].block = Block()
arr2[i].block.size = minBlock.size
arr2[i].block.ID = minBlock.ID
arr2[i].block.fragment = minBlock.fragment
print("Best Fit Allocation")
printResult(arr2, numOfProcess)
# Driver code
if __name__ == "__main__":
sizes = [60, 20, 12, 35, 64, 42, 31, 35, 40, 50]
arr = []
for i in range(10):
arr.append(Block())
initialiseBlocks(arr, sizes, 10)
numOfProcess = int(
input("Enter the number of process for memory to be allocated : ")
)
print("Enter the sizes required by the processes in the order of requirement")
psize = list(map(int, input().split(" ")))
arr2 = []
for i in range(numOfProcess):
arr2.append(process())
arr2[i].size = psize[i]
arr2[i].Num = i + 1
bestfit(arr, sizes, 10, arr2, numOfProcess)
"""
Sample I/O:
Enter the number of process for memory to be allocated : 5
Enter the sizes required by the processes in the order of requirement
12 11 10 5 23
Best Fit Allocation
Process No Process Size Block ID Block Size Block Fragment
1 12 3 12 0
2 11 2 20 9
3 10 7 31 21
4 5 2 20 4
5 23 4 35 12
Time complexity : O(n)
space complexity : O(n)
"""
| 34.447619 | 122 | 0.525574 | 446 | 3,617 | 4.226457 | 0.282511 | 0.034483 | 0.037135 | 0.029178 | 0.215915 | 0.161273 | 0.161273 | 0.161273 | 0.161273 | 0.161273 | 0 | 0.045993 | 0.392867 | 3,617 | 104 | 123 | 34.778846 | 0.812386 | 0.189936 | 0 | 0.206349 | 0 | 0 | 0.156485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079365 | false | 0 | 0 | 0 | 0.111111 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2bd1761cbcafb02aeb590b2db3a355311fca88a | 1,675 | py | Python | install/test/metric.py | philipcwhite/collect-three | 3f700d8a2c97cd1a280bbf3df47d5a397f1424eb | [
"Apache-2.0"
] | 3 | 2021-08-24T13:21:55.000Z | 2022-02-11T19:55:45.000Z | install/test/metric.py | philipcwhite/collect-three | 3f700d8a2c97cd1a280bbf3df47d5a397f1424eb | [
"Apache-2.0"
] | null | null | null | install/test/metric.py | philipcwhite/collect-three | 3f700d8a2c97cd1a280bbf3df47d5a397f1424eb | [
"Apache-2.0"
] | 1 | 2022-02-08T12:48:56.000Z | 2022-02-08T12:48:56.000Z | from google.protobuf.json_format import MessageToJson
from opentelemetry.proto.metrics.v1.metrics_pb2 import Metric, ResourceMetrics, InstrumentationLibraryMetrics, NumberDataPoint
from opentelemetry.proto.common.v1.common_pb2 import KeyValue
from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ExportMetricsServiceRequest
import random, time
# Instantiate Classes
my_exportmetricsservicerequest = ExportMetricsServiceRequest()
my_resourcemetrics = ResourceMetrics()
my_instrumentationlibrarymetrics = InstrumentationLibraryMetrics()
my_metric = Metric()
my_number = NumberDataPoint()
my_keyvalue = KeyValue()
# Create Metric
my_metric.name = 'otel.cpu.percent'
my_metric.description = 'CPU Percent'
my_metric.unit = '%'
my_metric.sum.aggregation_temporality = 2 # Cumulative
my_number.time_unix_nano = int(time.time()*1000000000)
my_number.as_int = random.randint(0,100)
# Create Resource Tag
my_keyvalue.key = 'host.name'
my_keyvalue.value.string_value = 'white05'
# Create packet
my_metric.sum.data_points.extend([my_number])
my_instrumentationlibrarymetrics.metrics.extend([my_metric])
my_resourcemetrics.resource.attributes.extend([my_keyvalue])
my_resourcemetrics.instrumentation_library_metrics.extend([my_instrumentationlibrarymetrics])
my_exportmetricsservicerequest.resource_metrics.extend([my_resourcemetrics])
# Write packets to files
with open("metric.pb", "wb") as f:
f.write(my_exportmetricsservicerequest.SerializeToString())
with open("metric.protobuf", "w") as f:
f.write(str(my_exportmetricsservicerequest))
with open("metric.json", "w") as f:
f.write(str(MessageToJson(my_exportmetricsservicerequest)))
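# Optional round-trip check (illustrative sketch, not part of the original script):
# the binary file written above can be parsed back and re-rendered as JSON to
# confirm that serialization worked.
#
#   check = ExportMetricsServiceRequest()
#   with open("metric.pb", "rb") as f:
#       check.ParseFromString(f.read())
#   print(MessageToJson(check))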
| 37.222222 | 126 | 0.820299 | 195 | 1,675 | 6.835897 | 0.374359 | 0.042011 | 0.049512 | 0.020255 | 0.019505 | 0.019505 | 0 | 0 | 0 | 0 | 0 | 0.014974 | 0.082985 | 1,675 | 44 | 127 | 38.068182 | 0.852865 | 0.060299 | 0 | 0 | 0 | 0 | 0.052967 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2c25c8da07d6dfe08f9981c44dff2fa05b2987f | 3,988 | py | Python | qstode/cli/scuttle_importer.py | piger/qstode | f22ec113bfaa29bbf2ed9548cd4de63b18de5de3 | [
"BSD-3-Clause"
] | 7 | 2017-05-11T09:11:44.000Z | 2021-08-23T22:29:58.000Z | qstode/cli/scuttle_importer.py | piger/qstode | f22ec113bfaa29bbf2ed9548cd4de63b18de5de3 | [
"BSD-3-Clause"
] | 1 | 2016-02-08T12:10:07.000Z | 2016-02-08T22:33:54.000Z | qstode/cli/scuttle_importer.py | piger/qstode | f22ec113bfaa29bbf2ed9548cd4de63b18de5de3 | [
"BSD-3-Clause"
] | null | null | null | """
qstode.cli.scuttle_importer
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Utility function to import data from a Scuttle json export file
:copyright: (c) 2012 by Daniel Kertesz
:license: BSD, see LICENSE for more details.
"""
import json
import click
from ..model.bookmark import Bookmark, Tag, Link, TAG_MIN, TAG_MAX, tag_name_re
from ..model.user import User
from qstode.app import app, db
from qstode.cli.helpers import ObjectCache, parse_datetime, unescape
from qstode.utils import generate_password
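# For orientation, the importer below assumes a Scuttle export shaped roughly as
# follows. This sketch is inferred from the fields accessed in this module and is
# not an official schema; the field values are placeholders.
#
#   {
#     "users": [
#       {
#         "id": 1, "username": "alice", "email": "alice@example.org",
#         "name": "Alice", "password": "<hash>",
#         "created_at": "2012-01-01 10:00:00",
#         "bookmarks": [
#           {"title": "...", "url": "http://...", "description": "...",
#            "status": 2, "tags": ["python"],
#            "created_at": "...", "modified_at": "..."}
#         ]
#       }
#     ]
#   }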
# Constants from Scuttle
SCUTTLE_PRIVATE = 2
# Cache for Tags and Links
class TagCache(ObjectCache):
model_class = Tag
class LinkCache(ObjectCache):
model_class = Link
def list_duplicate_emails(data):
emails = {}
for user in data["users"]:
email = user.get("email")
if email:
emails.setdefault(email, 0)
emails[email] += 1
for email, tot in emails.items():
if tot > 1:
print(email, tot)
def cleanup_tags(tags):
"""Run some validation on a list of tag names; can return an empty list"""
rv = []
for tag in tags:
# strip commas
tag = tag.replace(",", "")
# unescape, strip
tag = unescape(tag)
tag = tag.strip()
# validate
if len(tag) < TAG_MIN or len(tag) > TAG_MAX:
continue
if not tag_name_re.match(tag):
continue
rv.append(tag)
return set([tag.lower() for tag in rv])
@app.cli.command()
@click.argument("filename")
def import_scuttle(filename):
"""Import data from a Scuttle JSON export file"""
data = None
tag_cache = TagCache()
link_cache = LinkCache()
users = {}
all_users = []
with open(filename, "r", encoding="utf-8") as fd:
        data = json.load(fd)  # json.load() no longer accepts an encoding argument; the file is already opened as UTF-8 text
tot = len(data["users"])
for i, db_user in enumerate(data["users"]):
print("Importing user %d of %d" % (i + 1, tot))
username = db_user.get("username")
email = db_user.get("email", "").lower()
name = db_user.get("name", "").strip()
if not name:
name = username
# XXX why do we need to generate/set a new password here?
password = generate_password()
if username is None:
print("Skipping user without username: id=%r" % db_user["id"])
continue
elif not email:
print("Skipping user without email address: id=%r" % db_user["id"])
continue
# We merge bookmarks for users with the same e-mail address
# XXX users in scuttle are identified by their username while their
# name is the "display_name".
if email not in users:
user = User(username, email, password, display_name=name)
user.password = db_user["password"]
user.created_at = parse_datetime(db_user["created_at"])
users[email] = user
else:
user = users[email]
for db_bookmark in db_user["bookmarks"]:
title = unescape(db_bookmark["title"])
private = db_bookmark["status"] == SCUTTLE_PRIVATE
notes = unescape(db_bookmark["description"])
created_on = parse_datetime(db_bookmark["created_at"])
modified_on = parse_datetime(db_bookmark["modified_at"])
bookmark = Bookmark(
title, private=private, created_on=created_on, modified_on=modified_on, notes=notes
)
bookmark.link = link_cache.get(db_bookmark["url"])
tags = cleanup_tags(db_bookmark["tags"])
for tag_name in tags:
tag = tag_cache.get(tag_name)
bookmark.tags.append(tag)
user.bookmarks.append(bookmark)
all_users.append(user)
db.Session.add_all(all_users)
try:
db.Session.commit()
except Exception as e:
print("Caught exception!")
print(e)
db.Session.rollback()
| 28.898551 | 99 | 0.599047 | 505 | 3,988 | 4.605941 | 0.306931 | 0.023216 | 0.011608 | 0.012898 | 0.068788 | 0.047291 | 0.030954 | 0.030954 | 0 | 0 | 0 | 0.003879 | 0.288867 | 3,988 | 137 | 100 | 29.109489 | 0.816291 | 0.153711 | 0 | 0.044944 | 0 | 0 | 0.0772 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033708 | false | 0.044944 | 0.101124 | 0 | 0.191011 | 0.067416 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2c5e34dd9db434a8993acf8a390215964088d1a | 1,316 | py | Python | index/handlers/books0p3_reports/process.py | lishnih/index | 79da0951c388557b4c0ed07a67666510847af529 | [
"MIT"
] | 1 | 2018-04-07T03:33:31.000Z | 2018-04-07T03:33:31.000Z | index/handlers/books0p3_reports/process.py | lishnih/index | 79da0951c388557b4c0ed07a67666510847af529 | [
"MIT"
] | null | null | null | index/handlers/books0p3_reports/process.py | lishnih/index | 79da0951c388557b4c0ed07a67666510847af529 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
# Stan 2012-04-08
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import os
import logging
import xlrd
from ...reg.result import *
from .stuff.data_funcs import filter_match
from .sheet import proceed_sheet
def proceed(filename, runtime, FILE):
options = runtime.get('options', {})
basename = os.path.basename(filename)
root, ext = os.path.splitext(basename)
ext = ext.lower()
if ext in ['.xls', '.xlsx', '.xlsm', '.xlsb']:
# Sheet
if ext == '.xls':
book = xlrd.open_workbook(filename, on_demand=True, formatting_info=True)
else:
reg_debug(FILE, "Option 'formatting_info=True' is not implemented yet!")
book = xlrd.open_workbook(filename, on_demand=True)
sheets = book.sheet_names()
sheets_filter = options.get('sheets_filter')
sheets_list = [i for i in sheets if filter_match(i, sheets_filter)]
brief = [sheets, '---', sheets_list]
reg_debug(FILE, brief)
FILE.nsheets = book.nsheets
for name in sheets_list:
sh = book.sheet_by_name(name)
i = sheets.index(name)
proceed_sheet(sh, runtime, i, FILE)
book.unload_sheet(name)
| 28 | 85 | 0.62614 | 168 | 1,316 | 4.72619 | 0.458333 | 0.04534 | 0.030227 | 0.050378 | 0.100756 | 0.100756 | 0.100756 | 0.100756 | 0 | 0 | 0 | 0.009231 | 0.259119 | 1,316 | 46 | 86 | 28.608696 | 0.805128 | 0.041793 | 0 | 0 | 0 | 0 | 0.078759 | 0.017502 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.233333 | 0 | 0.266667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2c81a65ef8ecfc05b84f62416811a560783fa3b | 5,059 | py | Python | freeq.py | Enaunimes/freeq | 50a708b2bdcde79d6d01ace260806576e9e78a64 | [
"BSD-2-Clause"
] | 85 | 2016-05-13T21:14:08.000Z | 2022-01-30T05:14:13.000Z | freeq.py | imherro/freeq | 50a708b2bdcde79d6d01ace260806576e9e78a64 | [
"BSD-2-Clause"
] | 3 | 2016-09-21T05:58:43.000Z | 2020-03-07T07:08:33.000Z | freeq.py | imherro/freeq | 50a708b2bdcde79d6d01ace260806576e9e78a64 | [
"BSD-2-Clause"
] | 30 | 2016-05-21T09:36:44.000Z | 2021-05-12T23:08:39.000Z | #!/usr/bin/env python3
import re
import string
import sys
import os
import argparse
from collections import Counter
__all__ = ['WordFinder', 'Book']
lemmas = {}
with open('lemmas.txt') as fin:
for line in fin:
line = line.strip()
headword = line.split('\t')[0]
try:
related = line.split('\t')[1]
except IndexError:
related = None
lemmas[headword] = related
valid_words = set()
for headword, related in lemmas.items():
valid_words.add(headword)
if related:
valid_words.update(set(related.split()))
class WordFinder(object):
'''A compound structure of dictionary and set to store word mapping'''
def __init__(self):
"""Initialize lame containers for 'quick' search
Structure of main_table
{
'a':{
# All related words and the headword start with same letter
'abandon': {'abandons', 'abandoned', 'abandoning'},
'apply': {'applies', 'applied', 'applying'},
# headword with no related word
'abeam': None,
...
},
'b': {...},
'c': {...},
...
}
Structure of special_table
{
# 1+ related words does not share the same starting letter
# with heasdword
'although': {'altho', 'tho', 'though'},
'bad': {'badder', 'baddest', 'badly', 'badness', 'worse', 'worst'},
...
}
"""
self.main_table = {}
for char in string.ascii_lowercase:
self.main_table[char] = {}
self.special_table = {}
for headword, related in lemmas.items():
# Only 3 occurrences of uppercase in lemmas.txt, which include 'I'
# Trading precision for simplicity
headword = headword.lower()
try:
related = related.lower()
except AttributeError:
related = None
if related:
for word in related.split():
if word[0] != headword[0]:
self.special_table[headword] = set(related.split())
break
else:
self.main_table[headword[0]][headword] = set(related.split())
else:
self.main_table[headword[0]][headword] = None
def find_headword(self, word):
"""Search the 'table' and return the original form of a word"""
word = word.lower()
alpha_table = self.main_table[word[0]]
if word in alpha_table:
return word
for headword, related in alpha_table.items():
if related and (word in related):
return headword
for headword, related in self.special_table.items():
if word == headword:
return word
if word in related:
return headword
# This should never happen after the removal of words not in valid_words
# in Book.__init__()
return None
# TODO
def find_related(self, headword):
pass
def is_dirt(word):
return word not in valid_words
def list_dedup(list_object):
"""Return the deduplicated copy of given list"""
temp_list = []
for item in list_object:
if item not in temp_list:
temp_list.append(item)
return temp_list
class Book(object):
def __init__(self, filepath):
with open(filepath) as bookfile:
content = bookfile.read().lower()
self.temp_list = re.split(r'\b([a-zA-Z-]+)\b', content)
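        # Because the pattern contains a capturing group, re.split() keeps the
        # matched words in the result, e.g. (illustrative):
        #   re.split(r'\b([a-zA-Z-]+)\b', 'a cat!') -> ['', 'a', ' ', 'cat', '!']
        # The non-word fragments are then discarded by the is_dirt() filter below.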
self.temp_list = [item for item in self.temp_list if not is_dirt(item)]
finder = WordFinder()
self.temp_list = [finder.find_headword(item) for item in self.temp_list]
def freq(self):
"""Count word frequencies and return a collections.Counter object"""
cnt = Counter()
for word in self.temp_list:
cnt[word] += 1
return cnt
# TODO
def stat(self):
pass
if __name__ == '__main__':
    if sys.platform.startswith('win'):  # sys.platform is 'win32' on Windows, never 'nt'
sys.stderr.write("I haven't tested the code on Windows. Feedback is welcome.\n")
LINE_SEP = os.linesep
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', dest='input_file')
parser.add_argument('-o', '--output', dest='output_file')
args = parser.parse_args()
book = Book(args.input_file)
result = book.freq()
# Maximum width of the ocurrence column
max_width = max(len(str(v)) for v in result.values())
report = []
for word in sorted(result, key=lambda x: result[x], reverse=True):
report.append('{:>{}} {}'.format(result[word], max_width, word))
if args.output_file:
with open(args.output_file, 'w') as output:
output.write(LINE_SEP.join(report))
output.write(LINE_SEP)
else:
print(LINE_SEP.join(report))
| 29.412791 | 88 | 0.554457 | 587 | 5,059 | 4.650767 | 0.335605 | 0.029304 | 0.026374 | 0.029304 | 0.085714 | 0.065934 | 0.043223 | 0 | 0 | 0 | 0 | 0.003261 | 0.333267 | 5,059 | 171 | 89 | 29.584795 | 0.806107 | 0.227515 | 0 | 0.166667 | 0 | 0 | 0.044456 | 0 | 0 | 0 | 0 | 0.005848 | 0 | 1 | 0.078431 | false | 0.019608 | 0.058824 | 0.009804 | 0.235294 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2c82c174770cb5eb0eb93e2d7bc61d8dcb81240 | 1,578 | py | Python | nmj/utils.py | ericcolleu/python-nmj | d61741efaa858a88bbd45844ec11ac193cce3cde | [
"MIT"
] | 1 | 2021-01-31T11:41:22.000Z | 2021-01-31T11:41:22.000Z | nmj/utils.py | ericcolleu/python-nmj | d61741efaa858a88bbd45844ec11ac193cce3cde | [
"MIT"
] | null | null | null | nmj/utils.py | ericcolleu/python-nmj | d61741efaa858a88bbd45844ec11ac193cce3cde | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import locale
import os
import requests
import shutil
import logging
import tempfile
import PIL
from PIL import Image
_LOGGER = logging.getLogger(__name__)
def to_unicode(text):
    if isinstance(text, str):
        return text
    if hasattr(text, '__unicode__'):
        return text.__unicode__()
    if isinstance(text, bytes):
        # Try the most likely encodings first; latin1 never fails and is the
        # final fallback.
        for encoding in ('utf-8', locale.getpreferredencoding()):
            try:
                return text.decode(encoding)
            except UnicodeError:
                pass
        return text.decode('latin1')
    return str(text)
def resize_image(orig, dest, width):
img = Image.open(orig)
wpercent = (width/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((width,hsize), PIL.Image.ANTIALIAS)
img.save(dest)
def download_image(url, filepath, width=None):
if os.path.isfile(filepath):
return
if not os.path.isdir(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
r = requests.get(url, stream=True)
if r.status_code == 200:
with tempfile.NamedTemporaryFile() as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
if width:
resize_image(f.name, filepath, width)
else:
shutil.copyfile(f.name, filepath)
def print_details(object_, prefix="", logger=None):
    logger = logger or _LOGGER
    logger.debug("%s %s details (%s):", prefix, object_, type(object_))
    logger.debug("%s %s:", prefix, dir(object_))
    for attr in dir(object_):
        if not attr.startswith("__"):
            try:
                logger.debug("%s %s = %s", prefix, attr, getattr(object_, attr))
            except Exception:
                logger.debug("%s %s = Unknown value", prefix, attr)
| 23.552239 | 68 | 0.697719 | 227 | 1,578 | 4.726872 | 0.39207 | 0.00932 | 0.044734 | 0.048462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006002 | 0.15526 | 1,578 | 66 | 69 | 23.909091 | 0.79895 | 0.013308 | 0 | 0.12963 | 0 | 0 | 0.051447 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0.037037 | 0.148148 | 0 | 0.333333 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2c8abb2f08c31eed65564c1496fbbfc26881c99 | 4,729 | py | Python | pre-processor/csv-combiner.py | malin1993ml/QueryBot5000 | 58908dcd6d542b935dd8aa0f62b2dfe78430f61e | [
"Apache-2.0"
] | 82 | 2018-04-20T19:59:42.000Z | 2022-03-29T05:13:44.000Z | pre-processor/csv-combiner.py | pentium3/QueryBot5000 | 7aace45fc9e13019931f73f837c8feb10a3cd142 | [
"Apache-2.0"
] | 4 | 2018-12-04T09:42:55.000Z | 2021-04-01T13:18:58.000Z | pre-processor/csv-combiner.py | pentium3/QueryBot5000 | 7aace45fc9e13019931f73f837c8feb10a3cd142 | [
"Apache-2.0"
] | 28 | 2018-05-03T14:13:36.000Z | 2021-12-28T01:20:40.000Z | #!/usr/bin/env python3
import sys
import glob
import collections
import time
import csv
import os
import datetime
import gzip
import re
import argparse
from multiprocessing import Process
csv.field_size_limit(sys.maxsize)
STATEMENTS = ['select', 'SELECT', 'INSERT', 'insert', 'UPDATE', 'update', 'delete', 'DELETE']
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
TIME_STAMP_STEP = datetime.timedelta(minutes=1)
def MakeCSVFiles(workload_dict, min_timestamp, max_timestamp, output_dir):
print("Generating CSV files...")
print(output_dir)
# Create the result folder if not exists
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# delete any old existing files
for old_file in os.listdir(output_dir):
os.remove(output_dir + old_file)
template_count = 0
for template in workload_dict:
template_timestamps = workload_dict[
template] # time stamps for ith cluster
num_queries_for_template = sum(template_timestamps.values())
# write to csv file
with open(output_dir + 'template' + str(template_count) +
".csv", 'w') as csvfile:
template_writer = csv.writer(csvfile, dialect='excel')
template_writer.writerow([num_queries_for_template, template])
for entry in sorted(template_timestamps):
template_writer.writerow([entry, template_timestamps[entry]])
csvfile.close()
template_count += 1
print("Template count: " + str(template_count))
def AddEntry(template, reader, min_timestamp, max_timestamp, templated_workload):
    # Post-process the template a little further to reduce the total number of distinct templates
template = re.sub(r"&&&", r"#", template)
template = re.sub(r"@@@", r"#", template)
template = re.sub(r"[nN]ull", r"#", template)
template = re.sub(r"NULL", r"#", template)
template = re.sub(r"\s+", r" ", template)
template = re.sub(r"\( ", r"(", template)
template = re.sub(r" \)", r")", template)
template = re.sub(r"([^ ])\(", r"\1 (", template)
template = re.sub(r"\)([^ ])", r") \1", template)
template = re.sub(r" IN \([^\(]*?\)", r" IN ()", template)
template = re.sub(r" in \([^\(]*?\)", r" IN ()", template)
template = re.sub(r"([=<>,!\?])([^ ])", r"\1 \2", template)
template = re.sub(r"([^ ])=", r"\1 =", template)
#if (template.find("gradAdmissions2#Test") > 0 and template.find("INSERT") >= 0 and
if (template.find("INSERT") >= 0 and
template.find("VALUES") > 0):
template = template[: template.find("VALUES") + 6]
for line in reader:
time_stamp = datetime.datetime.strptime(line[0], DATETIME_FORMAT)
count = int(line[1])
if not template in templated_workload:
# add template
templated_workload[template] = dict()
if time_stamp in templated_workload[template]:
templated_workload[template][time_stamp] += count
else:
templated_workload[template][time_stamp] = count
min_timestamp = min(min_timestamp, time_stamp)
max_timestamp = max(max_timestamp, time_stamp)
return (templated_workload, min_timestamp, max_timestamp)
def Combine(input_dir, output_dir):
templated_workload = dict()
min_timestamp = datetime.datetime.max
max_timestamp = datetime.datetime.min
target = os.path.join(input_dir, "*/*template*.csv")
print(target)
files = sorted([ x for x in glob.glob(target) ])
cnt = 0
for x in files:
print(x)
with open(x, 'r') as f:
reader = csv.reader(f)
queries, template = next(reader)
#statement = template.split(' ',1)[0]
#if not statement in STATEMENTS:
# continue
templated_workload, min_timestamp, max_timestamp = AddEntry(template, reader,
min_timestamp, max_timestamp, templated_workload)
cnt += 1
#if cnt == 1000:
# break
print(min_timestamp)
print(max_timestamp)
with open('templates.txt', 'w') as template_file:
[ template_file.write(t + "\n") for t in sorted(templated_workload.keys()) ]
MakeCSVFiles(templated_workload, min_timestamp, max_timestamp, output_dir)
# ==============================================
# main
# ==============================================
if __name__ == '__main__':
aparser = argparse.ArgumentParser(description='Templated query csv combiner')
aparser.add_argument('--input_dir', help='Input Data Directory')
aparser.add_argument('--output_dir', help='Output Data Directory')
args = vars(aparser.parse_args())
Combine(args['input_dir'], args['output_dir'] + '/')
| 34.021583 | 93 | 0.620216 | 573 | 4,729 | 4.954625 | 0.263525 | 0.084537 | 0.059528 | 0.064107 | 0.255019 | 0.239521 | 0.137725 | 0.13702 | 0.125749 | 0.081367 | 0 | 0.006814 | 0.224149 | 4,729 | 138 | 94 | 34.268116 | 0.766966 | 0.106154 | 0 | 0 | 0 | 0 | 0.10095 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0 | 0.119565 | 0 | 0.163043 | 0.076087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2cae4396e2edfab737b4968564cd729df67c734 | 2,769 | py | Python | rl_groundup/eligibility_traces/sarsa_lambda.py | TristanBester/rl_groundup | 2e981667e21330a35a6ab2a642e278aaaf4dca84 | [
"MIT"
] | 1 | 2021-04-20T00:43:43.000Z | 2021-04-20T00:43:43.000Z | rl_groundup/eligibility_traces/sarsa_lambda.py | TristanBester/rl_groundup | 2e981667e21330a35a6ab2a642e278aaaf4dca84 | [
"MIT"
] | null | null | null | rl_groundup/eligibility_traces/sarsa_lambda.py | TristanBester/rl_groundup | 2e981667e21330a35a6ab2a642e278aaaf4dca84 | [
"MIT"
] | null | null | null | # Created by Tristan Bester.
import sys
import numpy as np
sys.path.append('../')
from envs import GridWorld
from functions import LinearPolicy
from utils import print_episode, encode_sa_pair, test_linear_policy, \
eps_greedy_policy_bin_features
'''
Sarsa lambda with binary features and linear function approximation used to
estimate the optimal policy for the Gridworld environment defined on page 48
of "Reinforcement Learning: An Introduction."
The algorithm can be found on page 250 of the same text.
Book reference:
Sutton, R. and Barto, A., 2014. Reinforcement Learning:
An Introduction. 1st ed. London: The MIT Press.
'''
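# For reference, the per-step updates implemented below (binary features x(s, a)
# and accumulating traces, in the book's notation) are:
#   delta = R + gamma * w.T x(S', A') - w.T x(S, A)
#   z <- gamma * lambda * z + x(S, A)        (accumulating trace)
#   w <- w + alpha * delta * z
# With binary features, w.T x(s, a) reduces to the sum of the weights at the
# active indices, which is exactly how q.weights is used in sarsa_lambda() below.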
def sarsa_lambda(env, lamda, alpha, gamma, epsilon, n_episodes):
# Initialize state-action value function.
q = LinearPolicy(env.observation_space_size * env.action_space_size, 0, \
env.action_space_size)
for episode in range(n_episodes):
done = False
obs = env.reset()
action = eps_greedy_policy_bin_features(q, obs, epsilon, \
env.observation_space_size, env.action_space_size)
z = np.zeros(env.observation_space_size * env.action_space_size)
while not done:
obs_prime, reward, done = env.step(action)
delta = reward
sa_vec = encode_sa_pair(obs, action, env.observation_space_size, \
env.action_space_size)
idx_active = np.argwhere(sa_vec == 1)
delta -= np.sum(q.weights[idx_active])
# Accumulating traces.
z[idx_active] += 1
if done:
# Update weights.
q.weights += alpha * delta * z
else:
action_prime = eps_greedy_policy_bin_features(q, obs_prime, epsilon, \
env.observation_space_size, env.action_space_size)
sa_prime_vec = encode_sa_pair(obs_prime, action_prime, \
env.observation_space_size, env.action_space_size)
idx_active = np.argwhere(sa_prime_vec == 1)
delta += gamma * np.sum(q.weights[idx_active])
# Update weights.
q.weights += alpha * delta * z
# Update accumulating traces.
z = gamma * lamda * z
obs = obs_prime
action = action_prime
if episode % 100 == 0:
print_episode(episode, n_episodes)
print_episode(n_episodes, n_episodes)
return q
if __name__ == '__main__':
gamma = 1
lamda = 0.5
alpha = 0.001
epsilon = 0.1
n_episodes = 10000
env = GridWorld()
q = sarsa_lambda(env, lamda, alpha, gamma, epsilon, n_episodes)
test_linear_policy(env, q, 5)
| 36.92 | 86 | 0.615746 | 346 | 2,769 | 4.679191 | 0.349711 | 0.072267 | 0.060531 | 0.077826 | 0.384188 | 0.345893 | 0.318715 | 0.242125 | 0.191476 | 0.07659 | 0 | 0.017179 | 0.306248 | 2,769 | 74 | 87 | 37.418919 | 0.825612 | 0.053088 | 0 | 0.14 | 0 | 0 | 0.004931 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.1 | 0 | 0.14 | 0.06 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2cba7bc8db41efd2d32453b5e1b30e4234e2f95 | 926 | py | Python | main.py | DJWOMS/useful | 051e1c0b676e241d143d1b17ab119b916ca00384 | [
"BSD-3-Clause"
] | 44 | 2020-08-01T07:55:53.000Z | 2022-03-04T19:14:07.000Z | main.py | DJWOMS/useful | 051e1c0b676e241d143d1b17ab119b916ca00384 | [
"BSD-3-Clause"
] | null | null | null | main.py | DJWOMS/useful | 051e1c0b676e241d143d1b17ab119b916ca00384 | [
"BSD-3-Clause"
] | 18 | 2020-08-01T17:04:50.000Z | 2022-02-17T05:18:38.000Z | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.middleware.sessions import SessionMiddleware
from tortoise.contrib.fastapi import register_tortoise
from src.config import settings
from src.app import routers
app = FastAPI(
title="Useful",
description="Author - DJWOMS",
version="0.2.0",
)
app.add_middleware(
CORSMiddleware,
allow_origins=settings.BACKEND_CORS_ORIGINS,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.add_middleware(SessionMiddleware, secret_key=settings.SECRET_KEY)
app.include_router(routers.api_router, prefix=settings.API_V1_STR)
register_tortoise(
app,
db_url=settings.DATABASE_URI,
modules={"models": settings.APPS_MODELS},
generate_schemas=False,
add_exception_handlers=True,
)
#
# if __name__ == "__main__":
# uvicorn.run(app, host="127.0.0.1", port=80, debug=True)
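# Alternatively (assuming this module is importable as ``main``), the application
# can be served from the command line without the block above:
#   uvicorn main:app --host 127.0.0.1 --port 80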
| 23.15 | 69 | 0.75162 | 116 | 926 | 5.741379 | 0.551724 | 0.033033 | 0.048048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015038 | 0.138229 | 926 | 39 | 70 | 23.74359 | 0.819549 | 0.092873 | 0 | 0 | 0 | 0 | 0.04067 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2ce27e8efe0c215313e3fef8e0e035d4b70a0d1 | 1,347 | py | Python | features/steps/bgp.py | dingqi0201/News-Rec-System | c238633e4dff4f52b17239733ecedb4984696e12 | [
"BSD-3-Clause"
] | null | null | null | features/steps/bgp.py | dingqi0201/News-Rec-System | c238633e4dff4f52b17239733ecedb4984696e12 | [
"BSD-3-Clause"
] | null | null | null | features/steps/bgp.py | dingqi0201/News-Rec-System | c238633e4dff4f52b17239733ecedb4984696e12 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
"""
for bgp.feature
~~~~~~~~
2020/10/9
"""
from behave import *
from flask import Response
@given('输入BGP-IP, ASN和描述 "{bgp_ip}", "{bgp_asn}", "{bgp_desc}", "{bgp_test_float}"')  # Given BGP IP, ASN and description
@given('输入BGP-IP和描述, ASN错误 "{bgp_ip}", "{bgp_asn}", "{bgp_desc}", "{bgp_test_float}"')  # Given BGP IP and description, with an invalid ASN
@given('BGP-IP输入错误 "{bgp_ip}", "{bgp_asn}", "{bgp_desc}", "{bgp_test_float}"')  # Given an invalid BGP IP
@given('浮点数输入错误 "{bgp_ip}", "{bgp_asn}", "{bgp_desc}", "{bgp_test_float}"')  # Given an invalid floating-point value
def step_impl(ctx, bgp_ip, bgp_asn, bgp_desc, bgp_test_float):
ctx.bgp_ip = bgp_ip
ctx.bgp_asn = bgp_asn
ctx.bgp_desc = bgp_desc
ctx.bgp_test_float = bgp_test_float
@when('执行添加BGP操作')  # When the add-BGP operation is performed
def step_impl(ctx):
ctx.resp = ctx.client.post('/bgp/add', data={
'bgp_ip': ctx.bgp_ip,
'bgp_asn': ctx.bgp_asn,
'bgp_desc': ctx.bgp_desc,
'bgp_test_float': ctx.bgp_test_float,
})
@then('BGP-IP添加成功')  # Then the BGP IP is added successfully
def step_impl(ctx):
assert isinstance(ctx.resp, Response)
assert ctx.resp.status_code == 200
res = ctx.resp.json
print('bgp_add: {}'.format(res))
assert res['ok'] == 1
@then('BGP添加失败')  # Then adding the BGP entry fails
def step_impl(ctx):
assert isinstance(ctx.resp, Response)
assert ctx.resp.status_code == 200
res = ctx.resp.json
print('bgp_add: {}'.format(res))
assert res['ok'] == 0
assert res['err_code'] > 0
assert res['msg']
| 26.411765 | 86 | 0.622866 | 205 | 1,347 | 3.829268 | 0.263415 | 0.057325 | 0.13758 | 0.084076 | 0.566879 | 0.545223 | 0.545223 | 0.505732 | 0.505732 | 0.429299 | 0 | 0.015399 | 0.180401 | 1,347 | 50 | 87 | 26.94 | 0.695652 | 0.042316 | 0 | 0.314286 | 0 | 0 | 0.306299 | 0 | 0 | 0 | 0 | 0 | 0.228571 | 1 | 0.114286 | false | 0 | 0.057143 | 0 | 0.171429 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2ce964a7bf22cb59b651b2160a65178784b4776 | 5,008 | py | Python | cnn_norm_resources/cnn_norm_codes/sieves/AffixationSieve.py | meghbhalerao/cnn-norm-obo | ae45bc2d10e6996962d546cb6b8e4a297a5227ee | [
"MIT"
] | null | null | null | cnn_norm_resources/cnn_norm_codes/sieves/AffixationSieve.py | meghbhalerao/cnn-norm-obo | ae45bc2d10e6996962d546cb6b8e4a297a5227ee | [
"MIT"
] | null | null | null | cnn_norm_resources/cnn_norm_codes/sieves/AffixationSieve.py | meghbhalerao/cnn-norm-obo | ae45bc2d10e6996962d546cb6b8e4a297a5227ee | [
"MIT"
] | null | null | null |
import re

import util.Concept
import util.Ling
import util.Util
class AffixationSieve(Sieve):
def __init__(self):
super().__init__()
def apply(self, concept):
self.transformName(concept)
return normalize(concept.getNamesKnowledgeBase())
def transformName(self, concept):
namesForTransformation = concept.getNamesKnowledgeBase()
transformedNames = []
for nameForTransformation in namesForTransformation:
transformedNames = Util.addUnique(transformedNames, self.affix(nameForTransformation))
concept.setNamesKnowledgeBase(transformedNames)
    def getAllStringTokenSuffixationCombinations(self, stringTokens):
        suffixatedPhrases = []
        for stringToken in stringTokens:
            suffix = self.ling.getSuffixStr(stringToken)
            forSuffixation = None if suffix == "" else self.ling.getSuffixMap()[suffix]
            if len(suffixatedPhrases) == 0:
                if forSuffixation is None:
                    suffixatedPhrases.append(stringToken)
                elif len(forSuffixation) == 0:
                    suffixatedPhrases.append(stringToken.replace(suffix, ""))
                else:
                    for i in range(len(forSuffixation)):
                        suffixatedPhrases.append(stringToken.replace(suffix, forSuffixation[i]))
            else:
                if forSuffixation is None:
                    for i in range(len(suffixatedPhrases)):
                        suffixatedPhrases[i] = suffixatedPhrases[i] + " " + stringToken
                elif len(forSuffixation) == 0:
                    for i in range(len(suffixatedPhrases)):
                        suffixatedPhrases[i] = suffixatedPhrases[i] + " " + stringToken.replace(suffix, "")
                else:
                    tempSuffixatedPhrases = []
                    for i in range(len(suffixatedPhrases)):
                        suffixatedPhrase = suffixatedPhrases[i]
                        for j in range(len(forSuffixation)):
                            tempSuffixatedPhrases.append(suffixatedPhrase + " " + stringToken.replace(suffix, forSuffixation[j]))
                    suffixatedPhrases = list(tempSuffixatedPhrases)
                    tempSuffixatedPhrases = None
        return suffixatedPhrases
def getUniformStringTokenSuffixations(self, stringTokens, string):
suffixatedPhrases = []
for stringToken in stringTokens:
suffix = self.ling.getSuffixStr(stringToken);
forSuffixation = None if suffix=="" else self.ling.getSuffixMap()[suffix]
if forSuffixation == None:
continue
if (len(forSuffixation) == 0):
suffixatedPhrases.append(string.replace(suffix, ""))
continue
            for i in range(len(forSuffixation)):
                suffixatedPhrases.append(string.replace(suffix, forSuffixation[i]))
return suffixatedPhrases
def suffixation(self, stringTokens, string):
        suffixatedPhrases = self.getAllStringTokenSuffixationCombinations(stringTokens)
suffixatedPhrases.extend(self.getUniformStringTokenSuffixations(stringTokens, string))
return suffixatedPhrases
def prefixation(self, stringTokens, string):
prefixatedPhrase = ""
for stringToken in stringTokens:
prefix = self.ling.getPrefixStr(stringToken)
forPrefixation = "" if prefix=="" else self.ling.getPrefixMap()[prefix]
prefixatedPhrase = (stringToken if prefix=="" else stringToken.replace(prefix, forPrefixation)) if prefixatedPhrase=="" else (prefixatedPhrase + " " + stringToken if prefix=="" else prefixatedPhrase + " " + stringToken.replace(prefix, forPrefixation))
return prefixatedPhrase
def affixation(self, stringTokens, string):
affixatedPhrase = ""
for stringToken in stringTokens:
            affix = (self.ling.AFFIX.split("|")[0] if self.ling.AFFIX.split("|")[0] in stringToken else self.ling.AFFIX.split("|")[1]) if re.search(".*(" + self.ling.AFFIX + ").*", stringToken) else ""
forAffixation = "" if affix=="" else self.ling.getAffixMap()[affix]
affixatedPhrase = (stringToken if affix=="" else stringToken.replace(affix, forAffixation)) if affixatedPhrase=="" else (affixatedPhrase+" "+stringToken if affix=="" else affixatedPhrase + " " + stringToken.replace(affix, forAffixation))
return affixatedPhrase
def affix(self,string):
stringTokens = string.split(" ")
newPhrases = self.suffixation(stringTokens, string)
newPhrases = Util.setList(newPhrases, self.prefixation(stringTokens, string))
newPhrases = Util.setList(newPhrases, self.affixation(stringTokens, string))
return newPhrases | 47.245283 | 263 | 0.609026 | 389 | 5,008 | 7.81491 | 0.177378 | 0.028947 | 0.019737 | 0.018092 | 0.343092 | 0.182237 | 0.161184 | 0.126316 | 0.092105 | 0.092105 | 0 | 0.001985 | 0.295927 | 5,008 | 106 | 264 | 47.245283 | 0.860182 | 0 | 0 | 0.296296 | 0 | 0 | 0.003395 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.037037 | 0 | 0.246914 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2d3177d7dd7c5fd53719c318df3f35d8e9486fe | 628 | py | Python | examples/805_move_to_home.py | gramaziokohler/workshop_donbosco_2022 | a6310dd6f8ab59eaa0c1cd83aa5857aeb420c5c2 | [
"MIT"
] | 1 | 2022-03-17T10:56:32.000Z | 2022-03-17T10:56:32.000Z | examples/805_move_to_home.py | gramaziokohler/workshop_donbosco_2022 | a6310dd6f8ab59eaa0c1cd83aa5857aeb420c5c2 | [
"MIT"
] | null | null | null | examples/805_move_to_home.py | gramaziokohler/workshop_donbosco_2022 | a6310dd6f8ab59eaa0c1cd83aa5857aeb420c5c2 | [
"MIT"
] | null | null | null | import math
from rtde_control import RTDEControlInterface as RTDEControl
from rtde_receive import RTDEReceiveInterface as RTDEReceive
from compas.robots import Configuration
if __name__ == "__main__":
# Create UR Client
ur_c = RTDEControl("127.0.0.1")
ur_r = RTDEReceive("127.0.0.1")
print("Connected.")
    # Build the target (home) configuration: all six revolute joints at zero
config = Configuration.from_revolute_values([0] * 6)
print(config)
# Move robot the new pos
speed = 0.5 # rad/s
accel = 1.4 # rad/s^2
nowait = False
ur_c.moveJ(config.joint_values, speed, accel, nowait)
# End of Code
print("Finished")
| 23.259259 | 60 | 0.686306 | 87 | 628 | 4.770115 | 0.597701 | 0.038554 | 0.024096 | 0.028916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038618 | 0.216561 | 628 | 26 | 61 | 24.153846 | 0.804878 | 0.138535 | 0 | 0 | 0 | 0 | 0.082397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2d4e636f59525f9759ecfe0fa1118ef3e3a2695 | 2,214 | py | Python | tests/test_application.py | nuecho/rasa-integration-testing | 4e5f55634a8effb29011f32667983d96b7c69740 | [
"Apache-2.0"
] | 2 | 2020-09-29T13:53:51.000Z | 2020-10-24T19:33:27.000Z | tests/test_application.py | nuecho/rasa-integration-testing | 4e5f55634a8effb29011f32667983d96b7c69740 | [
"Apache-2.0"
] | 22 | 2020-05-27T17:15:56.000Z | 2020-11-27T19:27:59.000Z | tests/test_application.py | nuecho/rasa-integration-testing | 4e5f55634a8effb29011f32667983d96b7c69740 | [
"Apache-2.0"
] | 1 | 2021-02-06T03:58:52.000Z | 2021-02-06T03:58:52.000Z | import sys
from io import StringIO
from unittest import TestCase
from click.testing import CliRunner
from httmock import HTTMock, all_requests, response
from rasa_integration_testing.application import EXIT_FAILURE, EXIT_SUCCESS, cli
CONFIGS_PATH = "tests/main_scenarios"
SUCCESS_CONFIGURATION_PATH = f"{CONFIGS_PATH}/success"
FAILURE_CONFIGURATION_PATH = f"{CONFIGS_PATH}/fail"
MIXED_DIFF_CONFIGURATION_PATH = f"{CONFIGS_PATH}/mixed_diff"
SUBSET_DIRECTORY = "subset"
NONEXISTENT_SUBSET_DIRECTORY = "foo"
class TestRunner(TestCase):
def setUp(self):
self.runner = CliRunner()
output = StringIO()
sys.stdout = output
self.output = output
def test_successful_scenario(self):
with HTTMock(request_response):
execution = self.runner.invoke(cli, [SUCCESS_CONFIGURATION_PATH])
self.assertEqual(EXIT_SUCCESS, execution.exit_code)
def test_successful_subdirectory(self):
with HTTMock(request_response):
execution = self.runner.invoke(
cli, [SUCCESS_CONFIGURATION_PATH, SUBSET_DIRECTORY]
)
self.assertEqual(EXIT_SUCCESS, execution.exit_code)
def test_missing_subdirectory(self):
with HTTMock(request_response):
execution = self.runner.invoke(
cli, [SUCCESS_CONFIGURATION_PATH, NONEXISTENT_SUBSET_DIRECTORY]
)
self.assertEqual(EXIT_FAILURE, execution.exit_code)
def test_unsuccessful_scenario(self):
with HTTMock(request_response):
execution = self.runner.invoke(cli, [FAILURE_CONFIGURATION_PATH])
self.assertIsInstance(execution.exception, SystemExit)
self.assertEqual(EXIT_FAILURE, execution.exit_code)
def test_mixed_diff_scenario(self):
with HTTMock(request_response):
execution = self.runner.invoke(cli, [MIXED_DIFF_CONFIGURATION_PATH])
self.assertIsInstance(execution.exception, SystemExit)
self.assertEqual(EXIT_FAILURE, execution.exit_code)
@all_requests
def request_response(url, request):
headers = {"content-type": "application/json"}
return response(200, request.body, headers, None, 5, request)
| 36.295082 | 80 | 0.714544 | 243 | 2,214 | 6.251029 | 0.279835 | 0.089533 | 0.049375 | 0.072416 | 0.592495 | 0.515471 | 0.515471 | 0.515471 | 0.515471 | 0.412113 | 0 | 0.002269 | 0.203704 | 2,214 | 60 | 81 | 36.9 | 0.859331 | 0 | 0 | 0.291667 | 0 | 0 | 0.055556 | 0.021229 | 0 | 0 | 0 | 0 | 0.145833 | 1 | 0.145833 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2d5a068fe62b54488a7e60adefc9a252775c54a | 2,621 | py | Python | 18a.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | [
"BSD-3-Clause"
] | null | null | null | 18a.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | [
"BSD-3-Clause"
] | null | null | null | 18a.py | znuxor/adventofcode2017 | 79d0df07f24ea8d2793df3b1c853a85b760791c1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import defaultdict
with open('18a_data.txt', 'r') as f:
input_data = f.read().split('\n')[:-1]
pipes = [[], []]
def snd(curr_inst, prog_id, regs, played, *values):
if type(values[0]) is int:
played[0] = values[0]
else:
played[0] = regs[values[0]]
def set_reg(curr_inst, prog_id, regs, played, *values):
if type(values[1]) is int:
regs[values[0]] = values[1]
else:
regs[values[0]] = regs[values[1]]
def add_reg(curr_inst, prog_id, regs, played, *values):
if type(values[1]) is int:
regs[values[0]] += values[1]
else:
regs[values[0]] += regs[values[1]]
def mul_reg(curr_inst, prog_id, regs, played, *values):
if type(values[1]) is int:
regs[values[0]] *= values[1]
else:
regs[values[0]] *= regs[values[1]]
def mod_reg(curr_inst, prog_id, regs, played, *values):
if type(values[1]) is int:
regs[values[0]] %= values[1]
else:
regs[values[0]] %= regs[values[1]]
def rcv(curr_inst, prog_id, regs, played, *values):
if type(values[0]) is int:
if values[0]:
print(played)
print('done!')
raise Exception('done lol')
else:
if regs[values[0]]:
print(played)
print('done!')
raise Exception('done lol')
def jump(curr_inst, prog_id, regs, played, *values):
if type(values[0]) is int:
if values[0] > 0:
            if type(values[1]) is int:
                # -1 compensates for the +1 the main loop applies each iteration,
                # matching the register branch below.
                curr_inst[0] += values[1] - 1
            else:
                curr_inst[0] += regs[values[1]] - 1
else:
if regs[values[0]] > 0:
if type(values[1]) is int:
curr_inst[0] += values[1] - 1
else:
curr_inst[0] += regs[values[1]] - 1
operations = {
'snd': snd,
'set': set_reg,
'add': add_reg,
'mul': mul_reg,
'mod': mod_reg,
'rcv': rcv,
'jgz': jump
}
def program_launch(prog_id):
curr_inst = [-1]
regs = defaultdict(lambda: 0)
played = [None]
while curr_inst[0] >= -1 and curr_inst[0] < len(input_data):
curr_inst[0] += 1
# print(curr_inst, prog_id)
# print(regs)
# print(input_data[curr_inst])
# print()
#__import__('time').sleep(0.1)
opcode, *rest_of_op = input_data[curr_inst[0]].split(' ')
rest_of_op = list(int(i) if i.lstrip('-').isdigit() else i for i in rest_of_op)
operations[opcode](curr_inst, prog_id, regs, played, *rest_of_op)
if __name__ == '__main__':
program_launch(0)
| 25.446602 | 87 | 0.547882 | 378 | 2,621 | 3.62963 | 0.198413 | 0.110787 | 0.088192 | 0.091837 | 0.618805 | 0.577988 | 0.560496 | 0.560496 | 0.524781 | 0.524781 | 0 | 0.032655 | 0.287295 | 2,621 | 102 | 88 | 25.696078 | 0.70182 | 0.056085 | 0 | 0.328767 | 0 | 0 | 0.029185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0 | 0.013699 | 0 | 0.123288 | 0.054795 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2d633c66961f0c96213667eadfa32b9d22224ea | 1,708 | py | Python | api/src/opentrons/protocol_engine/resources/resource_providers.py | Corey-ONeal/opentrons-app_ws-remote | a255b76c8a07457787d575da12b2d5bdb6220a91 | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/protocol_engine/resources/resource_providers.py | Corey-ONeal/opentrons-app_ws-remote | a255b76c8a07457787d575da12b2d5bdb6220a91 | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/protocol_engine/resources/resource_providers.py | Corey-ONeal/opentrons-app_ws-remote | a255b76c8a07457787d575da12b2d5bdb6220a91 | [
"Apache-2.0"
] | null | null | null | """Resource providers."""
from __future__ import annotations
from .id_generator import IdGenerator
from .deck_data_provider import DeckDataProvider
from .labware_data_provider import LabwareDataProvider
class ResourceProviders:
"""ResourceProviders container class.
Wraps various data providers that define procedures to pull and generate
data for engine setup and command execution.
"""
_id_generator: IdGenerator
_labware_data: LabwareDataProvider
_deck_data: DeckDataProvider
@classmethod
def create(cls) -> ResourceProviders:
"""Create a ResourceProviders container and its children."""
id_generator = IdGenerator()
labware_data = LabwareDataProvider()
deck_data = DeckDataProvider(labware_data=labware_data)
return cls(
id_generator=id_generator,
labware_data=labware_data,
deck_data=deck_data,
)
def __init__(
self,
id_generator: IdGenerator,
labware_data: LabwareDataProvider,
deck_data: DeckDataProvider,
) -> None:
"""Initialize a ResourceProviders container."""
self._id_generator = id_generator
self._labware_data = labware_data
self._deck_data = deck_data
@property
def id_generator(self) -> IdGenerator:
"""Get the unique ID generator resource."""
return self._id_generator
@property
def labware_data(self) -> LabwareDataProvider:
"""Get the labware data provider resource."""
return self._labware_data
@property
def deck_data(self) -> DeckDataProvider:
"""Get the deck data provider resource."""
return self._deck_data
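# Minimal usage sketch (illustrative, based only on the class above): build the
# container once at engine start-up and hand the individual providers to the
# components that need them.
#
#   resources = ResourceProviders.create()
#   labware_data = resources.labware_data
#   deck_data = resources.deck_data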
| 29.448276 | 76 | 0.687354 | 174 | 1,708 | 6.454023 | 0.275862 | 0.127337 | 0.058771 | 0.077471 | 0.256456 | 0.203028 | 0.203028 | 0.203028 | 0.203028 | 0 | 0 | 0 | 0.242389 | 1,708 | 57 | 77 | 29.964912 | 0.867852 | 0.22541 | 0 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.111111 | 0 | 0.472222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |