hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
626ca85c68ccb838a1a2052ee3e4a957073d8320 | 3,858 | py | Python | scanpy_recipes/submit.py | TheJacksonLaboratory/scanpy_recipes | 09715f66fca0ca8101757fbcd9d3244603e4dd3a | [
"BSD-3-Clause"
] | 5 | 2018-12-18T18:46:57.000Z | 2022-03-22T09:42:25.000Z | scanpy_recipes/submit.py | TheJacksonLaboratory/scanpy_recipes | 09715f66fca0ca8101757fbcd9d3244603e4dd3a | [
"BSD-3-Clause"
] | 42 | 2018-12-19T20:38:26.000Z | 2020-02-24T14:32:34.000Z | scanpy_recipes/submit.py | TheJacksonLaboratory/scanpy_recipes | 09715f66fca0ca8101757fbcd9d3244603e4dd3a | [
"BSD-3-Clause"
] | null | null | null | import os
import subprocess
SAMPLE_SCRIPT = """#!/usr/bin/env bash
{header}
Rscript - <<eog
setwd("{wd}")
options(stringsAsFactors = FALSE, row.names = 1, as.is = T)
log2cpm <- read.csv("{sampleid}_counts.csv", row.names = 1, stringsAsFactors = FALSE)
colnames(log2cpm) <- gsub(".", "-", colnames(log2cpm), fixed = T)
tsne.data <- read.csv("{sampleid}_umap3d.csv", row.names = 1, stringsAsFactors = FALSE)
featuredata <- read.csv("{sampleid}_features.csv", row.names = 1, stringsAsFactors = FALSE)
save(log2cpm, featuredata, tsne.data, file="{rdsfile}")
print("Done")
eog
"""
class Submitter(object):
header = ""
sample_script = SAMPLE_SCRIPT
def __init__(self):
self.scheduler = None
self.submit_command = None
self.header_kwargs = {"sampleid", "wd", "walltime"}
self.script_kwargs = {"sampleid", "wd", "rdsfile"}
def format(self, **kwargs):
missing_header_kwargs = self.header_kwargs - set(kwargs.keys())
assert missing_header_kwargs == set(), \
f"Need to supply header kwargs: [{missing_header_kwargs}]"
self.header = self.header.format(**kwargs)
self.output_dir = kwargs.get("wd")
missing_script_kwargs = self.script_kwargs - set(kwargs.keys())
assert missing_script_kwargs == set(), \
f"Need to supply script kwargs: [{missing_script_kwargs}]"
self.sample_script = self.sample_script.format(header=self.header, **kwargs)
def submit(self, additional_script="", **kwargs):
save_file = os.path.join(self.output_dir, ".submit_rds_creation.sh")
with open(save_file, "w") as fout:
fout.write(self.sample_script)
cmd = f"{self.submit_command} {save_file}"
output = subprocess.check_output(cmd, shell=True).decode("ascii").strip()
print(f"Rds creation submitted as {self.scheduler} job {output}.")
print(f"Output will be located in [{self.output_dir}].")
class PBSSubmitter(Submitter):
header = (
"#PBS -N {sampleid}_rds_creation\n"
"#PBS -o {wd}/{sampleid}_rds_creation.log\n"
"#PBS -j oe\n"
"#PBS -l walltime={walltime}\n"
"#PBS -l vmem={mem}mb\n"
"#PBS -l nodes=1:ppn=1\n"
"module load R"
)
    def __init__(self):
super().__init__()
self.scheduler = "pbs"
self.submit_command = "qsub"
class SlurmSubmitter(Submitter):
header = (
"#SBATCH --job-name={sampleid}_rds_creation\n"
"#SBATCH --output={wd}/{sampleid}_rds_creation.log\n"
"#SBATCH --mail-type=FAIL\n\n"
"#SBATCH --partition=batch\n"
"#SBATCH --time={walltime}\n"
"#SBATCH --nodes=1\n"
"#SBATCH --ntasks-per-node=1\n"
"#SBATCH --mem={mem}\n"
"#SBATCH --export=NONE\n"
"module load R"
)
    def __init__(self):
super().__init__()
self.scheduler = "slurm"
self.submit_command = "sbatch"
class LocalSubmitter(Submitter):
# string formatting will only barf if keys present in string are not
# present in format call, but not the other way around. We can exploit
# that here.
header = ""
    def __init__(self):
super().__init__()
self.scheduler = "local"
self.submit_command = "$SHELL"
SUBMITTERS = {
"pbs": PBSSubmitter,
"slurm": SlurmSubmitter,
"local": LocalSubmitter,
}
def submit_rds_job(
sampleid,
output_dir,
rds_filename,
walltime="00:15:00",
mem=64000,
scheduler="pbs"
):
if scheduler not in SUBMITTERS.keys():
raise ValueError(f"Cannot handle scheduler [{scheduler}]!")
submitter = SUBMITTERS[scheduler]()
submitter.format(
sampleid=sampleid, wd=output_dir, walltime=walltime, mem=mem,
rdsfile=rds_filename
)
submitter.submit()
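# Example usage (hypothetical sample id and paths, shown only for illustration):
#   submit_rds_job("SAMPLE01", "/projects/out", "SAMPLE01.rds",
#                  walltime="00:30:00", mem=32000, scheduler="slurm")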
| 31.622951 | 91 | 0.619233 | 465 | 3,858 | 4.96129 | 0.324731 | 0.024274 | 0.036844 | 0.015605 | 0.195059 | 0.195059 | 0.053316 | 0.039012 | 0.039012 | 0.039012 | 0 | 0.008116 | 0.233541 | 3,858 | 121 | 92 | 31.884298 | 0.772066 | 0.038103 | 0 | 0.121212 | 0 | 0.030303 | 0.373348 | 0.107634 | 0 | 0 | 0 | 0 | 0.020202 | 1 | 0.070707 | false | 0 | 0.020202 | 0 | 0.181818 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
626dcd33cee04e3f68925ff2469f3522640b3c12 | 2,764 | py | Python | msa/command_line.py | Gunmer/maven-setting-manager | a2e0eb4f18cc761844f05d5c4d83890bf36981e6 | [
"MIT"
] | 3 | 2019-01-11T11:11:50.000Z | 2022-01-29T01:50:24.000Z | msa/command_line.py | Gunmer/maven-setting-manager | a2e0eb4f18cc761844f05d5c4d83890bf36981e6 | [
"MIT"
] | null | null | null | msa/command_line.py | Gunmer/maven-setting-manager | a2e0eb4f18cc761844f05d5c4d83890bf36981e6 | [
"MIT"
] | null | null | null | import argparse
from msa.actions import add_action, delete_action, use_action, list_action, doctor_action
from msa.model.setting import Setting
from msa.repositories.setting_repository import SettingRepository
from msa.services.file_service import FileService
from msa.utils.config import Config
from msa.utils.log import Log
def main():
parser = argparse.ArgumentParser(prog='msa', usage='msa [-h] actions')
subparsers = parser.add_subparsers(title='actions', metavar='')
parser.add_argument('-v', '--version', help='Show version', action='store_true')
parser.add_argument('-d', '--debug', action='store_true')
use_parser = subparsers.add_parser('use', help='Select the setting to use')
use_parser.set_defaults(func=use_action.execute)
use_parser.add_argument('setting', help='Select setting for use')
use_parser.add_argument('-d', '--debug', action='store_true')
list_parser = subparsers.add_parser('ls', help='Show a list setting')
list_parser.set_defaults(func=list_action.execute)
list_parser.add_argument('-d', '--debug', action='store_true')
add_parser = subparsers.add_parser('add', help='Add a new setting')
add_parser.set_defaults(func=add_action.execute)
add_parser.add_argument('alias', help='Alias of setting')
add_parser.add_argument('file', help='File name of setting')
add_parser.add_argument('-d', '--debug', action='store_true')
delete_parser = subparsers.add_parser('del', help='Delete a setting')
delete_parser.set_defaults(func=delete_action.execute)
delete_parser.add_argument('setting', help='Select setting for delete')
delete_parser.add_argument('-d', '--debug', action='store_true')
doctor_parser = subparsers.add_parser('doctor', help='Tool for diagnostic and fix some issues')
doctor_parser.set_defaults(func=doctor_action.execute)
doctor_parser.add_argument('-f', '--fix', help='Fix some issues', action='store_true')
args = parser.parse_args()
args.config = Config()
args.log = Log(args.debug)
__initialize(args)
if args.version:
print('msa version: {}'.format(Config.version))
elif hasattr(args, 'func'):
args.func(args)
else:
parser.print_help()
def __initialize(args):
repository = SettingRepository(logger=args.log, config=args.config)
file_manager = FileService(logger=args.log, config=args.config)
if not file_manager.directory_exist():
print('... Creating directory ...')
file_manager.create_directory()
print('... Creating database ...')
repository.create_settings_table()
if repository.find_one_by('default') is None:
print('... Adding default settings ...')
repository.create(Setting('default', ''))
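# Note: main() is presumably wired up as the `msa` console command when the package
# is installed (the parser sets prog='msa'), e.g. via a hypothetical setup.py entry:
#   entry_points={'console_scripts': ['msa=msa.command_line:main']}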
| 40.057971 | 99 | 0.713459 | 361 | 2,764 | 5.254848 | 0.249307 | 0.061676 | 0.098577 | 0.047443 | 0.198735 | 0.198735 | 0.146547 | 0.146547 | 0 | 0 | 0 | 0 | 0.147974 | 2,764 | 68 | 100 | 40.647059 | 0.80552 | 0 | 0 | 0 | 0 | 0 | 0.195369 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.134615 | 0 | 0.173077 | 0.096154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
626e53324286e43cefa651216d43ef7f1b968503 | 330 | py | Python | homework_01/non_recursive_algorithms/fibonacci_loop.py | ufpa-organization-repositories/computer-theory-2 | c61e2c887d2820aeb09f581bbfa2bb89d1f541e6 | [
"MIT"
] | null | null | null | homework_01/non_recursive_algorithms/fibonacci_loop.py | ufpa-organization-repositories/computer-theory-2 | c61e2c887d2820aeb09f581bbfa2bb89d1f541e6 | [
"MIT"
] | null | null | null | homework_01/non_recursive_algorithms/fibonacci_loop.py | ufpa-organization-repositories/computer-theory-2 | c61e2c887d2820aeb09f581bbfa2bb89d1f541e6 | [
"MIT"
] | null | null | null | # system that checks whether one function approximates the other (iterative vs. recursive timing)
import time
fibonacci = 0
aux = 1
x = int(input("Enter the index in the Fibonacci series: "))
tempo_inicial = time.time()
for i in range(x):
    # advance the pair (F(i), F(i+1)); the original `fibonacci += i + 1` summed 1..x by mistake
    fibonacci, aux = aux, fibonacci + aux
print('fibonacci: ', fibonacci)
print("--- %s seconds ---" % (time.time() - tempo_inicial))
| 18.333333 | 65 | 0.669697 | 51 | 330 | 4.294118 | 0.705882 | 0.109589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011278 | 0.193939 | 330 | 18 | 66 | 18.333333 | 0.81203 | 0.190909 | 0 | 0 | 0 | 0 | 0.274436 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
627176544ba106398d10695009b60e23c95eca06 | 9,139 | py | Python | supervisor/api/network.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
] | 1 | 2021-09-22T00:15:17.000Z | 2021-09-22T00:15:17.000Z | supervisor/api/network.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
] | 200 | 2020-10-13T06:35:51.000Z | 2022-03-31T06:03:35.000Z | supervisor/api/network.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
] | 2 | 2021-09-22T00:13:58.000Z | 2021-09-22T15:06:27.000Z | """REST API for network."""
import asyncio
from ipaddress import ip_address, ip_interface
from typing import Any, Awaitable, Dict
from aiohttp import web
import attr
import voluptuous as vol
from ..const import (
ATTR_ACCESSPOINTS,
ATTR_ADDRESS,
ATTR_AUTH,
ATTR_CONNECTED,
ATTR_DNS,
ATTR_DOCKER,
ATTR_ENABLED,
ATTR_FREQUENCY,
ATTR_GATEWAY,
ATTR_HOST_INTERNET,
ATTR_ID,
ATTR_INTERFACE,
ATTR_INTERFACES,
ATTR_IPV4,
ATTR_IPV6,
ATTR_MAC,
ATTR_METHOD,
ATTR_MODE,
ATTR_NAMESERVERS,
ATTR_PARENT,
ATTR_PRIMARY,
ATTR_PSK,
ATTR_SIGNAL,
ATTR_SSID,
ATTR_SUPERVISOR_INTERNET,
ATTR_TYPE,
ATTR_VLAN,
ATTR_WIFI,
DOCKER_NETWORK,
DOCKER_NETWORK_MASK,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, HostNetworkNotFound
from ..host.const import AuthMethod, InterfaceType, WifiMode
from ..host.network import (
AccessPoint,
Interface,
InterfaceMethod,
IpConfig,
VlanConfig,
WifiConfig,
)
from .utils import api_process, api_validate
_SCHEMA_IP_CONFIG = vol.Schema(
{
vol.Optional(ATTR_ADDRESS): [vol.Coerce(ip_interface)],
vol.Optional(ATTR_METHOD): vol.Coerce(InterfaceMethod),
vol.Optional(ATTR_GATEWAY): vol.Coerce(ip_address),
vol.Optional(ATTR_NAMESERVERS): [vol.Coerce(ip_address)],
}
)
_SCHEMA_WIFI_CONFIG = vol.Schema(
{
vol.Optional(ATTR_MODE): vol.Coerce(WifiMode),
vol.Optional(ATTR_AUTH): vol.Coerce(AuthMethod),
vol.Optional(ATTR_SSID): str,
vol.Optional(ATTR_PSK): str,
}
)
# pylint: disable=no-value-for-parameter
SCHEMA_UPDATE = vol.Schema(
{
vol.Optional(ATTR_IPV4): _SCHEMA_IP_CONFIG,
vol.Optional(ATTR_IPV6): _SCHEMA_IP_CONFIG,
vol.Optional(ATTR_WIFI): _SCHEMA_WIFI_CONFIG,
vol.Optional(ATTR_ENABLED): vol.Boolean(),
}
)
def ipconfig_struct(config: IpConfig) -> Dict[str, Any]:
"""Return a dict with information about ip configuration."""
return {
ATTR_METHOD: config.method,
ATTR_ADDRESS: [address.with_prefixlen for address in config.address],
ATTR_NAMESERVERS: [str(address) for address in config.nameservers],
ATTR_GATEWAY: str(config.gateway) if config.gateway else None,
}
def wifi_struct(config: WifiConfig) -> Dict[str, Any]:
"""Return a dict with information about wifi configuration."""
return {
ATTR_MODE: config.mode,
ATTR_AUTH: config.auth,
ATTR_SSID: config.ssid,
ATTR_SIGNAL: config.signal,
}
def vlan_struct(config: VlanConfig) -> Dict[str, Any]:
"""Return a dict with information about VLAN configuration."""
return {
ATTR_ID: config.id,
ATTR_PARENT: config.interface,
}
def interface_struct(interface: Interface) -> Dict[str, Any]:
"""Return a dict with information of a interface to be used in th API."""
return {
ATTR_INTERFACE: interface.name,
ATTR_TYPE: interface.type,
ATTR_ENABLED: interface.enabled,
ATTR_CONNECTED: interface.connected,
ATTR_PRIMARY: interface.primary,
ATTR_IPV4: ipconfig_struct(interface.ipv4) if interface.ipv4 else None,
ATTR_IPV6: ipconfig_struct(interface.ipv6) if interface.ipv6 else None,
ATTR_WIFI: wifi_struct(interface.wifi) if interface.wifi else None,
ATTR_VLAN: vlan_struct(interface.vlan) if interface.vlan else None,
}
def accesspoint_struct(accesspoint: AccessPoint) -> Dict[str, Any]:
"""Return a dict for AccessPoint."""
return {
ATTR_MODE: accesspoint.mode,
ATTR_SSID: accesspoint.ssid,
ATTR_FREQUENCY: accesspoint.frequency,
ATTR_SIGNAL: accesspoint.signal,
ATTR_MAC: accesspoint.mac,
}
class APINetwork(CoreSysAttributes):
"""Handle REST API for network."""
def _get_interface(self, name: str) -> Interface:
"""Get Interface by name or default."""
name = name.lower()
if name == "default":
for interface in self.sys_host.network.interfaces:
if not interface.primary:
continue
return interface
else:
try:
return self.sys_host.network.get(name)
except HostNetworkNotFound:
pass
raise APIError(f"Interface {name} does not exist") from None
@api_process
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return network information."""
return {
ATTR_INTERFACES: [
interface_struct(interface)
for interface in self.sys_host.network.interfaces
],
ATTR_DOCKER: {
ATTR_INTERFACE: DOCKER_NETWORK,
ATTR_ADDRESS: str(DOCKER_NETWORK_MASK),
ATTR_GATEWAY: str(self.sys_docker.network.gateway),
ATTR_DNS: str(self.sys_docker.network.dns),
},
ATTR_HOST_INTERNET: self.sys_host.network.connectivity,
ATTR_SUPERVISOR_INTERNET: self.sys_supervisor.connectivity,
}
@api_process
async def interface_info(self, request: web.Request) -> Dict[str, Any]:
"""Return network information for a interface."""
interface = self._get_interface(request.match_info.get(ATTR_INTERFACE))
return interface_struct(interface)
@api_process
async def interface_update(self, request: web.Request) -> None:
"""Update the configuration of an interface."""
interface = self._get_interface(request.match_info.get(ATTR_INTERFACE))
# Validate data
body = await api_validate(SCHEMA_UPDATE, request)
if not body:
raise APIError("You need to supply at least one option to update")
# Apply config
for key, config in body.items():
if key == ATTR_IPV4:
interface.ipv4 = attr.evolve(
interface.ipv4 or IpConfig(InterfaceMethod.STATIC, [], None, []),
**config,
)
elif key == ATTR_IPV6:
interface.ipv6 = attr.evolve(
interface.ipv6 or IpConfig(InterfaceMethod.STATIC, [], None, []),
**config,
)
elif key == ATTR_WIFI:
interface.wifi = attr.evolve(
interface.wifi
or WifiConfig(
WifiMode.INFRASTRUCTURE, "", AuthMethod.OPEN, None, None
),
**config,
)
elif key == ATTR_ENABLED:
interface.enabled = config
await asyncio.shield(self.sys_host.network.apply_changes(interface))
@api_process
def reload(self, request: web.Request) -> Awaitable[None]:
"""Reload network data."""
return asyncio.shield(self.sys_host.network.update())
@api_process
async def scan_accesspoints(self, request: web.Request) -> Dict[str, Any]:
"""Scan and return a list of available networks."""
interface = self._get_interface(request.match_info.get(ATTR_INTERFACE))
# Only wlan is supported
if interface.type != InterfaceType.WIRELESS:
raise APIError(f"Interface {interface.name} is not a valid wireless card!")
ap_list = await self.sys_host.network.scan_wifi(interface)
return {ATTR_ACCESSPOINTS: [accesspoint_struct(ap) for ap in ap_list]}
@api_process
async def create_vlan(self, request: web.Request) -> None:
"""Create a new vlan."""
interface = self._get_interface(request.match_info.get(ATTR_INTERFACE))
vlan = int(request.match_info.get(ATTR_VLAN))
# Only ethernet is supported
if interface.type != InterfaceType.ETHERNET:
raise APIError(
f"Interface {interface.name} is not a valid ethernet card for vlan!"
)
body = await api_validate(SCHEMA_UPDATE, request)
vlan_config = VlanConfig(vlan, interface.name)
ipv4_config = None
if ATTR_IPV4 in body:
ipv4_config = IpConfig(
body[ATTR_IPV4].get(ATTR_METHOD, InterfaceMethod.AUTO),
body[ATTR_IPV4].get(ATTR_ADDRESS, []),
body[ATTR_IPV4].get(ATTR_GATEWAY, None),
body[ATTR_IPV4].get(ATTR_NAMESERVERS, []),
)
ipv6_config = None
if ATTR_IPV6 in body:
ipv6_config = IpConfig(
body[ATTR_IPV6].get(ATTR_METHOD, InterfaceMethod.AUTO),
body[ATTR_IPV6].get(ATTR_ADDRESS, []),
body[ATTR_IPV6].get(ATTR_GATEWAY, None),
body[ATTR_IPV6].get(ATTR_NAMESERVERS, []),
)
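        # Positional args in the Interface(...) call below (likely mapping, based on
        # the attribute order used in interface_struct above):
        # name, enabled, connected, primary, type, ipv4, ipv6, wifi, vlan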
vlan_interface = Interface(
"",
True,
True,
False,
InterfaceType.VLAN,
ipv4_config,
ipv6_config,
None,
vlan_config,
)
await asyncio.shield(self.sys_host.network.apply_changes(vlan_interface))
| 32.293286 | 87 | 0.621622 | 1,020 | 9,139 | 5.372549 | 0.160784 | 0.016606 | 0.032847 | 0.026277 | 0.314964 | 0.255839 | 0.201095 | 0.166606 | 0.144708 | 0.103285 | 0 | 0.004892 | 0.284276 | 9,139 | 282 | 88 | 32.407801 | 0.8329 | 0.053616 | 0 | 0.103139 | 0 | 0 | 0.024658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03139 | false | 0.004484 | 0.053812 | 0 | 0.139013 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
627322f9fbcc60c090c2038b683731fc397ce989 | 4,852 | py | Python | themis/rule_analysis/review_result/rule_result.py | z790704069/archery | 3bc2d3bc5fb302d18e2ff5ee96fbe562eeceacfd | [
"Apache-2.0"
] | 3 | 2019-10-10T08:09:09.000Z | 2021-04-07T02:35:31.000Z | themis/rule_analysis/review_result/rule_result.py | jonn-yan/Archery | a8a236db12fce2784fb4aeddc7e744026af0b983 | [
"Apache-2.0"
] | 2 | 2020-06-06T00:13:26.000Z | 2021-06-10T22:10:19.000Z | themis/rule_analysis/review_result/rule_result.py | jonn-yan/Archery | a8a236db12fce2784fb4aeddc7e744026af0b983 | [
"Apache-2.0"
] | 2 | 2020-07-20T01:13:17.000Z | 2020-09-28T08:15:22.000Z | # -*- coding: utf-8 -*-
import uuid
import time
import random
class ReviewResult(object):
def __init__(self, mongo_client, db_type, rule_type, username,
rule_status):
self.mongo_client = mongo_client
self.db_type = db_type
self.rule_type = rule_type
self.rule_status = rule_status
self.task_owner = username
self.task_id = self.gen_uuid()
self.rule_info = self._get_rule_info()
self.factor = [["a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o", "p", "q"],
["r", "s", "t", "u", "v", "w", "x", "y", "z"]]
def gen_uuid(self):
return str(uuid.uuid1())
def _get_rule_info(self):
"""
根据rule_type,db_type,rule_status等获取规则
"""
sql = {
"rule_type": self.rule_type,
"db_type": self.db_type,
"rule_status": self.rule_status
}
rule_data = self.mongo_client.find("rule", sql)
temp = {}
for value in rule_data:
temp.update({
value["rule_name"]: {
"weight": value["weight"],
"max_score": value["max_score"],
"input_parms": value["input_parms"],
"rule_desc": value["rule_desc"],
"rule_cmd": value["rule_cmd"],
"rule_complexity": value["rule_complexity"],
"rule_cmd_attach": value.get("rule_cmd_attach", None),
"obj_info_type": value.get("obj_info_type", None)
}
})
return temp
def job_init(self, **kwargs):
"""
初始化job信息,包括创建时间,创建用户,状态,任务id,以及一些描述信息等,返回任务id
"""
task_start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
capture_time_s = kwargs.get("start_date", "")
capture_time_e = kwargs.get("stop_date", capture_time_s)
operator_user = kwargs.get("operator_user")
job_record = {
"name": "#".join([self.task_owner, self.rule_type.lower()]),
"id": self.task_id,
"status": "2",
"create_time": task_start_time,
"end_time": "",
"operator_user": operator_user,
"desc": {
"db_ip": kwargs.get("task_ip", "127.0.0.1"),
"port": kwargs.get("task_port", 1521),
"owner": self.task_owner,
"rule_type": self.rule_type.upper(),
"instance_name": kwargs.get("instance_name"),
"capture_time_s": capture_time_s,
"capture_time_e": capture_time_e
}
}
self.mongo_client.insert("job", job_record)
return self.task_id
def obj_result(self, results):
for rule_name in results.keys():
results[rule_name].update({
"input_parms": self.rule_info[rule_name]["input_parms"],
"rule_desc": self.rule_info[rule_name]["rule_desc"]
})
return results
def get_obj(self, key, obj):
pass
    def mysql_result(self, sqlstat, sqltext, sqlplan, weight):
        """
        Build the MySQL review results.
        """
        results = {}
        for key, value in sqlstat.items():
            if value:
                results[key] = {}
                for data in value:
                    # str() each component before joining; str(x, "1", "v") was a bug
                    sql_id = "#".join([str(data["checksum"]), "1", "v"])
                    temp_sql_plan = sqlplan[data["checksum"]]
                    temp_sql_text = sqltext[data["checksum"]]
                    if len(temp_sql_text) > 25:
                        text = temp_sql_text[:25]
                    else:
                        # short statements keep their full text (was blanked to "")
                        text = temp_sql_text
                    results[key].update({
                        sql_id: {
                            "sql_id": data["checksum"],
                            "plan_hash_value": 1,
                            "sql_text": text,
                            "sql_fulltext": temp_sql_text,
                            "plan": temp_sql_plan,
                            "stat": data,
                            "obj_info": {},
                            "obj_name": None
                        }
                    })
                scores = len(value) * float(weight)
                results[key].update({
                    "input_parms": [],
                    "rule_name": key,
                    # `desc` was an undefined name; look the description up from rule_info
                    "rule_desc": self.rule_info[key]["rule_desc"],
                    "scores": scores
                })
        return results
def gen_random_collection(self):
"""
随机生成mongo中collection的名称
"""
tmp0 = "tmp" + "".join(random.sample(self.factor[0], 3))
tmp1 = "tmp" + "".join(random.sample(self.factor[1], 3))
return tmp0, tmp1
| 35.676471 | 78 | 0.463108 | 505 | 4,852 | 4.172277 | 0.283168 | 0.034172 | 0.028477 | 0.022781 | 0.082107 | 0.027527 | 0 | 0 | 0 | 0 | 0 | 0.009278 | 0.400247 | 4,852 | 135 | 79 | 35.940741 | 0.714777 | 0.029266 | 0 | 0.072072 | 0 | 0 | 0.134936 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072072 | false | 0.009009 | 0.027027 | 0.009009 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62732ead68f40bf265f23cb83b3530c32c6e7b6e | 2,084 | py | Python | giraffez/fmt.py | istvan-fodor/giraffez | 6b4d27eb1a1eaf188c6885c7364ef27e92b1b957 | [
"Apache-2.0"
] | 122 | 2016-08-18T21:12:58.000Z | 2021-11-24T14:45:19.000Z | giraffez/fmt.py | istvan-fodor/giraffez | 6b4d27eb1a1eaf188c6885c7364ef27e92b1b957 | [
"Apache-2.0"
] | 68 | 2016-08-31T18:19:16.000Z | 2021-11-01T19:21:22.000Z | giraffez/fmt.py | istvan-fodor/giraffez | 6b4d27eb1a1eaf188c6885c7364ef27e92b1b957 | [
"Apache-2.0"
] | 44 | 2016-08-19T01:22:21.000Z | 2022-03-23T17:39:40.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
from .constants import *
from .errors import *
from .logging import log
from ._compat import *
def escape_quotes(s):
return s.replace("'", "''")
def format_indent(body, indent=" ", initial=""):
wrapper = textwrap.TextWrapper(initial_indent=initial,
subsequent_indent=indent)
return wrapper.fill(body)
def format_table(lines, header=True):
widths = [0] * len(lines[0])
for line in lines:
widths = [max(a, len(str(b))) for a, b in zip(widths, line)]
output = []
for i, line in enumerate(lines):
output.append(" | ".join([str(field).ljust(w) for w, field in zip(widths, line)]))
if i == 0 and header:
output.append("-"*len(output[0]))
return "\n".join(output)
def safe_name(s):
return s.replace(" ", "_").lower()
def quote_string(s, quote_character="'"):
return "{0}{1}{0}".format(quote_character, s)
def replace_cr(s):
if not isinstance(s, str):
return s
return s.strip().replace('\r', '\n')
def truncate(s, n=7, c=70, placeholder="..."):
if not isinstance(s, basestring):
raise GiraffeError("Cannot truncate non-string value")
lines = s.split('\n')
line_count = len(lines)
char_count = len(s)
if char_count < (n * c) and line_count < n:
return s
if char_count > (n * c):
s = s[0:n*c]
lines = s.split('\n')
lines = "\n".join(lines[0:n])
return "{} {}".format(lines, placeholder)
| 29.771429 | 90 | 0.644914 | 305 | 2,084 | 4.35082 | 0.445902 | 0.045215 | 0.018086 | 0.024115 | 0.0211 | 0.0211 | 0 | 0 | 0 | 0 | 0 | 0.012789 | 0.212092 | 2,084 | 69 | 91 | 30.202899 | 0.795372 | 0.28023 | 0 | 0.095238 | 0 | 0 | 0.048518 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.119048 | 0.071429 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6275c3dd6fda7ce4fe42952a2e0fedae8a584035 | 4,071 | py | Python | lib/runKeras.py | BenGutteridge/HumBugDB | e102eba477862e340dc1b52670ab1283278a8cf9 | [
"MIT"
] | 1 | 2021-09-06T18:50:50.000Z | 2021-09-06T18:50:50.000Z | lib/runKeras.py | BenGutteridge/HumBugDB | e102eba477862e340dc1b52670ab1283278a8cf9 | [
"MIT"
] | null | null | null | lib/runKeras.py | BenGutteridge/HumBugDB | e102eba477862e340dc1b52670ab1283278a8cf9 | [
"MIT"
] | null | null | null | import tensorflow as tf
import config_keras
import config
from keras.utils import to_categorical
# Deep learning
# Keras-related imports
from keras.models import Sequential
from keras.layers import Lambda, Dense, Dropout, Activation, Flatten, LSTM
from keras.layers import Convolution1D, MaxPooling2D, Convolution2D
from keras import backend as K
# K.set_image_dim_ordering('th')
from keras.callbacks import ModelCheckpoint, RemoteMonitor, EarlyStopping
from keras.models import load_model
from keras.layers import Conv2D, MaxPooling2D
from keras.regularizers import l2
import os
def train_model(X_train, y_train):
y_train = tf.keras.utils.to_categorical(y_train, 2)
################################ CONVOLUTIONAL NEURAL NETWORK ################################
## NN parameters
class_weight = {0: 1.,
1: 1.,
}
input_shape = (1, X_train.shape[2], X_train.shape[-1])
# BNN parameters
dropout=config_keras.dropout # change to 0.05
# Regularise
tau = config_keras.tau
lengthscale = config_keras.lengthscale
reg = lengthscale**2 * (1 - dropout) / (2. * len(X_train) * tau)
    # l2(reg) is used as the kernel regulariser on the final Dense layer below
model = Sequential()
n_dense = 128
nb_classes = 2
# number of convolutional filters
nb_conv_filters = 32
# num_hidden = 236
nb_conv_filters_2 = 64
convout1 = Activation('relu')
convout2 = Activation('relu')
model.add(Conv2D(nb_conv_filters, kernel_size = (3,3),
activation = 'relu', padding = 'valid', strides = 1,
input_shape = input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Lambda(lambda x: K.dropout(x,level=dropout)))
model.add(Conv2D(nb_conv_filters_2, kernel_size = (3,3),
activation = 'relu', padding = 'valid'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Lambda(lambda x: K.dropout(x,level=dropout)))
model.add(Conv2D(nb_conv_filters_2, kernel_size = (3,3),
activation = 'relu', padding = 'valid'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Lambda(lambda x: K.dropout(x,level=dropout)))
# # model.add(Dropout(0.2))
model.add(Conv2D(nb_conv_filters_2, kernel_size = (3,3),
activation = 'relu', padding = 'valid'))
model.add(Lambda(lambda x: K.dropout(x,level=dropout)))
# model.add(Conv2D(nb_conv_filters_2, kernel_size = (3,3),
# activation = 'relu', padding = 'valid'))
# model.add(Lambda(lambda x: K.dropout(x,level=dropout)))
model.add(Flatten())
# # Shared between MLP and CNN:
model.add(Dense(n_dense, activation='relu'))
model.add(Lambda(lambda x: K.dropout(x,level=dropout)))
    model.add(Dense(nb_classes, activation='softmax', kernel_regularizer=l2(reg)))  # Keras 2 name for the old W_regularizer argument
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# if checkpoint_name is not None:
# os.path.join(os.path.pardir, 'models', 'keras', checkpoint_name)
model_name = 'Win_' + str(config.win_size) + '_Stride_' + str(config.step_size) + '_BNN.h5'
checkpoint_filepath = os.path.join(os.path.pardir, 'models/keras', model_name) # Need to makedir there too.
model_checkpoint_callback = ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=False,
monitor='val_accuracy',
mode='max',
save_best_only=True)
model.fit(x=X_train, y=y_train, batch_size=config_keras.batch_size, epochs=config_keras.epochs, verbose=1, validation_split=config_keras.validation_split,
validation_data=None,
shuffle=True, class_weight=class_weight, sample_weight=None, initial_epoch=0,
steps_per_epoch=None, validation_steps=None, callbacks=[model_checkpoint_callback])
# print('Saving model to:', os.path.join(os.path.pardir, 'models', 'keras', checkpoint_name))
return model
def evaluate_model(model, X_test, y_test, n_samples):
all_y_pred = []
for n in range(n_samples):
all_y_pred.append(model.predict(X_test))
return all_y_pred
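# Note: dropout is applied via Lambda(K.dropout(...)) rather than Dropout layers,
# so it stays active at inference time; the repeated predict() calls above yield
# Monte Carlo samples from the approximate Bayesian posterior (MC dropout).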
def load_model(filepath):
model = tf.keras.models.load_model(filepath, custom_objects={"dropout": config_keras.dropout})
return model | 32.568 | 156 | 0.709899 | 572 | 4,071 | 4.856643 | 0.291958 | 0.051836 | 0.032757 | 0.043197 | 0.307415 | 0.295896 | 0.286177 | 0.286177 | 0.260619 | 0.260619 | 0 | 0.020214 | 0.149349 | 4,071 | 125 | 157 | 32.568 | 0.781981 | 0.172439 | 0 | 0.202703 | 0 | 0 | 0.045094 | 0.007313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.175676 | 0 | 0.256757 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6277056995c72db70c644852ed150d8b628d4456 | 5,058 | py | Python | src/tests/test_parse.py | dcmoura/ipython-spyql | c35741e2b06e305352c12e3747a70f06828b9d63 | [
"MIT"
] | 1,454 | 2015-01-02T17:28:27.000Z | 2022-03-24T14:17:06.000Z | src/tests/test_parse.py | dcmoura/ipython-spyql | c35741e2b06e305352c12e3747a70f06828b9d63 | [
"MIT"
] | 165 | 2015-01-06T15:43:04.000Z | 2022-03-31T16:06:42.000Z | src/tests/test_parse.py | dcmoura/ipython-spyql | c35741e2b06e305352c12e3747a70f06828b9d63 | [
"MIT"
] | 273 | 2015-01-09T19:20:18.000Z | 2022-03-31T16:24:36.000Z | import json
import os
from pathlib import Path
from six.moves import configparser
from sql.parse import connection_from_dsn_section, parse, without_sql_comment
try:
from traitlets.config.configurable import Configurable
except ImportError:
from IPython.config.configurable import Configurable
empty_config = Configurable()
default_connect_args = {"options": "-csearch_path=test"}
def test_parse_no_sql():
assert parse("will:longliveliz@localhost/shakes", empty_config) == {
"connection": "will:longliveliz@localhost/shakes",
"sql": "",
"result_var": None,
}
def test_parse_with_sql():
assert parse(
"postgresql://will:longliveliz@localhost/shakes SELECT * FROM work",
empty_config,
) == {
"connection": "postgresql://will:longliveliz@localhost/shakes",
"sql": "SELECT * FROM work",
"result_var": None,
}
def test_parse_sql_only():
assert parse("SELECT * FROM work", empty_config) == {
"connection": "",
"sql": "SELECT * FROM work",
"result_var": None,
}
def test_parse_postgresql_socket_connection():
assert parse("postgresql:///shakes SELECT * FROM work", empty_config) == {
"connection": "postgresql:///shakes",
"sql": "SELECT * FROM work",
"result_var": None,
}
def test_expand_environment_variables_in_connection():
os.environ["DATABASE_URL"] = "postgresql:///shakes"
assert parse("$DATABASE_URL SELECT * FROM work", empty_config) == {
"connection": "postgresql:///shakes",
"sql": "SELECT * FROM work",
"result_var": None,
}
def test_parse_shovel_operator():
assert parse("dest << SELECT * FROM work", empty_config) == {
"connection": "",
"sql": "SELECT * FROM work",
"result_var": "dest",
}
def test_parse_connect_plus_shovel():
assert parse("sqlite:// dest << SELECT * FROM work", empty_config) == {
"connection": "sqlite://",
"sql": "SELECT * FROM work",
"result_var": "dest",
}
class DummyConfig:
dsn_filename = Path("src/tests/test_dsn_config.ini")
def test_connection_from_dsn_section():
result = connection_from_dsn_section(section="DB_CONFIG_1", config=DummyConfig())
assert result == "postgres://goesto11:seentheelephant@my.remote.host:5432/pgmain"
result = connection_from_dsn_section(section="DB_CONFIG_2", config=DummyConfig())
assert result == "mysql://thefin:fishputsfishonthetable@127.0.0.1/dolfin"
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
class ParserStub:
opstrs = [
[],
["-l", "--connections"],
["-x", "--close"],
["-c", "--creator"],
["-s", "--section"],
["-p", "--persist"],
["--append"],
["-a", "--connection_arguments"],
["-f", "--file"],
]
_actions = [Bunch(option_strings=o) for o in opstrs]
parser_stub = ParserStub()
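# ParserStub mimics just enough of argparse.ArgumentParser (an `_actions` list whose
# entries expose option_strings) for without_sql_comment to separate magic options
# from the SQL text itself.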
def test_without_sql_comment_plain():
line = "SELECT * FROM author"
assert without_sql_comment(parser=parser_stub, line=line) == line
def test_without_sql_comment_with_arg():
line = "--file moo.txt --persist SELECT * FROM author"
assert without_sql_comment(parser=parser_stub, line=line) == line
def test_without_sql_comment_with_comment():
line = "SELECT * FROM author -- uff da"
expected = "SELECT * FROM author"
assert without_sql_comment(parser=parser_stub, line=line) == expected
def test_without_sql_comment_with_arg_and_comment():
line = "--file moo.txt --persist SELECT * FROM author -- uff da"
expected = "--file moo.txt --persist SELECT * FROM author"
assert without_sql_comment(parser=parser_stub, line=line) == expected
def test_without_sql_comment_unspaced_comment():
line = "SELECT * FROM author --uff da"
expected = "SELECT * FROM author"
assert without_sql_comment(parser=parser_stub, line=line) == expected
def test_without_sql_comment_dashes_in_string():
line = "SELECT '--very --confusing' FROM author -- uff da"
expected = "SELECT '--very --confusing' FROM author"
assert without_sql_comment(parser=parser_stub, line=line) == expected
def test_without_sql_comment_with_arg_and_leading_comment():
line = "--file moo.txt --persist --comment, not arg"
expected = "--file moo.txt --persist"
assert without_sql_comment(parser=parser_stub, line=line) == expected
def test_without_sql_persist():
line = "--persist my_table --uff da"
expected = "--persist my_table"
assert without_sql_comment(parser=parser_stub, line=line) == expected
| 27.944751 | 85 | 0.655002 | 595 | 5,058 | 5.30084 | 0.215126 | 0.076094 | 0.08624 | 0.048193 | 0.646164 | 0.59163 | 0.55929 | 0.55707 | 0.497464 | 0.497464 | 0 | 0.003477 | 0.203836 | 5,058 | 180 | 86 | 28.1 | 0.779737 | 0 | 0 | 0.357724 | 0 | 0 | 0.31554 | 0.064255 | 0 | 0 | 0 | 0 | 0.154472 | 1 | 0.154472 | false | 0 | 0.065041 | 0 | 0.268293 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6277f988f04a0750aeffb263c6b0de7fcd8e9831 | 807 | py | Python | check2.py | teuben/study7 | e0c4f369b4b66795e34129b63a53bb2eb45f1792 | [
"MIT"
] | null | null | null | check2.py | teuben/study7 | e0c4f369b4b66795e34129b63a53bb2eb45f1792 | [
"MIT"
] | 2 | 2020-01-18T04:40:28.000Z | 2020-03-10T21:02:47.000Z | check2.py | teuben/study7 | e0c4f369b4b66795e34129b63a53bb2eb45f1792 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
#
from astroquery.admit import ADMIT
if True:
import pickle
a = pickle.load(open('alma.pickle','rb'))
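# NOTE: the fresh ADMIT() below overwrites the unpickled instance, so the pickle
# block above is effectively a debugging leftover / no-op.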
a = ADMIT()
a.check()
print("AVAILABLE KEYS ",a.keys)
payload = {
"source_name_alma": "NGC 123",
# needs s_region to work.
# "source_name_resolver": "M16",
"vlsr": 88.0,
"formula": 'CO*|H2O',
"project_abstract": "*YSO* | *young stellar object*",
"source_snr": ">3",
# "spatial_resolution": "<10",
}
a.query(**payload)
# not allowed no keywords
try:
a.query()
except Exception:
print("correctly caught no keyword exception")
# Example of using kw that doesn't exist
try:
a.query(foobar="nope")
except Exception as e:
print(f"Correctly caught bad keyword. {e}")
| 22.416667 | 65 | 0.592317 | 104 | 807 | 4.519231 | 0.711538 | 0.038298 | 0.038298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020033 | 0.257745 | 807 | 35 | 66 | 23.057143 | 0.764608 | 0.234201 | 0 | 0.086957 | 0 | 0 | 0.328969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6278bfcd24e37ebca31355830a486f8a7777940f | 2,051 | py | Python | python/samples/expectation.py | NVIDIA/cuQuantum | 0f00494d4639d760228ac002e83e6d2d3dd97eca | [
"BSD-3-Clause"
] | 52 | 2021-12-04T20:39:12.000Z | 2022-03-29T11:52:55.000Z | python/samples/expectation.py | NVIDIA/cuQuantum | 0f00494d4639d760228ac002e83e6d2d3dd97eca | [
"BSD-3-Clause"
] | 3 | 2022-02-01T22:46:50.000Z | 2022-03-24T01:52:29.000Z | python/samples/expectation.py | NVIDIA/cuQuantum | 0f00494d4639d760228ac002e83e6d2d3dd97eca | [
"BSD-3-Clause"
] | 18 | 2021-12-20T17:52:07.000Z | 2022-03-29T02:27:58.000Z | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
nBasisBits = 1
basisBits = np.asarray([1], dtype=np.int32)
h_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
d_sv = cp.asarray(h_sv)
# the gate matrix can live on either host (np) or device (cp)
matrix = cp.asarray([1.0+0.0j, 2.0+1.0j, 2.0-1.0j, 3.0+0.0j], dtype=np.complex64)
if isinstance(matrix, cp.ndarray):
matrix_ptr = matrix.data.ptr
elif isinstance(matrix, np.ndarray):
matrix_ptr = matrix.ctypes.data
else:
raise ValueError
# expectation values must stay on host
expect = np.empty((2,), dtype=np.float64)
expect_expected = np.asarray([4.1, 0.0], dtype=np.float64)
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# check the size of external workspace
workspaceSize = cusv.compute_expectation_get_workspace_size(
handle, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, matrix_ptr, cuquantum.cudaDataType.CUDA_C_32F,
cusv.MatrixLayout.ROW, nBasisBits, cuquantum.ComputeType.COMPUTE_32F)
if workspaceSize > 0:
workspace = cp.cuda.memory.alloc(workspaceSize)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# apply gate
cusv.compute_expectation(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits,
expect.ctypes.data, cuquantum.cudaDataType.CUDA_C_64F,
matrix_ptr, cuquantum.cudaDataType.CUDA_C_32F, cusv.MatrixLayout.ROW,
basisBits.ctypes.data, nBasisBits,
cuquantum.ComputeType.COMPUTE_32F, workspace_ptr, workspaceSize)
# destroy handle
cusv.destroy(handle)
# check result
if not np.allclose(expect, expect_expected, atol=1E-6):
raise ValueError("results mismatch")
else:
print("test passed")
| 31.075758 | 105 | 0.695271 | 297 | 2,051 | 4.700337 | 0.353535 | 0.010029 | 0.089542 | 0.093123 | 0.213467 | 0.139685 | 0.081662 | 0.081662 | 0.081662 | 0.081662 | 0 | 0.053899 | 0.149683 | 2,051 | 65 | 106 | 31.553846 | 0.74656 | 0.14627 | 0 | 0.075 | 0 | 0 | 0.016304 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.025 | 0.1 | 0 | 0.1 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6279c1077cf2e0c872523c01d8a119aa05bd7780 | 4,022 | py | Python | examples/blockstats.py | abitmore/beem | 2026833a836007e45f16395a9ca3b31d02e98f87 | [
"MIT"
] | 118 | 2018-03-06T07:26:19.000Z | 2022-03-21T20:16:04.000Z | examples/blockstats.py | abitmore/beem | 2026833a836007e45f16395a9ca3b31d02e98f87 | [
"MIT"
] | 248 | 2018-03-20T18:03:39.000Z | 2022-03-28T16:38:09.000Z | examples/blockstats.py | abitmore/beem | 2026833a836007e45f16395a9ca3b31d02e98f87 | [
"MIT"
] | 81 | 2018-04-27T15:27:52.000Z | 2021-10-31T06:14:25.000Z | import sys
from datetime import datetime, timedelta
from prettytable import PrettyTable
import argparse
from timeit import default_timer as timer
import logging
from beem.blockchain import Blockchain
from beem.block import Block
from beem import Hive, Blurt, Steem
from beem.utils import parse_time
from beem.nodelist import NodeList
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def parse_args(args=None):
d = 'Show op type stats for either hive, blurt or steem.'
parser = argparse.ArgumentParser(description=d)
parser.add_argument('blockchain', type=str, nargs='?',
default=sys.stdin,
help='Blockchain (hive, blurt or steem)')
return parser.parse_args(args)
def main(args=None):
args = parse_args(args)
blockchain = args.blockchain
nodelist = NodeList()
nodelist.update_nodes(weights={"block": 1})
if blockchain == "hive" or blockchain is None:
max_batch_size = 50
threading = False
thread_num = 16
block_debug = 1000
nodes = nodelist.get_hive_nodes()
blk_inst = Hive(node=nodes, num_retries=3, num_retries_call=3, timeout=30)
elif blockchain == "blurt":
max_batch_size = None
threading = False
thread_num = 8
block_debug = 20
nodes = ["https://rpc.blurt.buzz/", "https://api.blurt.blog", "https://rpc.blurtworld.com", "https://rpc.blurtworld.com"]
blk_inst = Blurt(node=nodes, num_retries=3, num_retries_call=3, timeout=30)
elif blockchain == "steem":
max_batch_size = 50
threading = False
thread_num = 16
block_debug = 1000
nodes = nodelist.get_steem_nodes()
blk_inst = Steem(node=nodes, num_retries=3, num_retries_call=3, timeout=30)
else:
raise Exception("Wrong parameter, can be hive, blurt or steem")
print(blk_inst)
block_count = 0
total_ops = 0
total_trx = 0
duration_s = 60 * 60 * 1
blocksperday = int(duration_s / 3)
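    # Hive, Steem and Blurt all produce one block every 3 seconds, hence blocks ~= seconds / 3.
    # (Despite its name, blocksperday here covers duration_s = 1 hour, not a full day.)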
    blockchain = Blockchain(blockchain_instance=blk_inst)
current_block_num = blockchain.get_current_block_num()
last_block_id = current_block_num - blocksperday
last_block = Block(last_block_id, blockchain_instance=blk_inst)
stopTime = last_block.time() + timedelta(seconds=duration_s)
start = timer()
op_stats = {}
for entry in blockchain.blocks(start=last_block_id, max_batch_size=max_batch_size, threading=threading, thread_num=thread_num):
if "block" in entry:
block_time = parse_time(entry["block"]["timestamp"])
else:
block_time = entry["timestamp"]
if block_time > stopTime:
break
block_count += 1
if "block" in entry:
trxs = entry["block"]["transactions"]
else:
trxs = entry["transactions"]
for tx in trxs:
total_trx += 1
for op in tx["operations"]:
if "_operation" in op["type"]:
op_type = op["type"][:-10]
else:
op_type = op["type"]
if op_type in op_stats:
op_stats[op_type] += 1
else:
op_stats[op_type] = 1
total_ops += 1
ops_per_day = total_ops / block_count * blocksperday
if block_count % (block_debug) == 0:
print("%d blocks remaining... estimated ops per day: %.1f" % (blocksperday - block_count, ops_per_day))
duration = timer() - start
t = PrettyTable(["Type", "Count", "percentage"])
t.align = "l"
op_list = []
for o in op_stats:
op_list.append({"type": o, "n": op_stats[o], "perc": op_stats[o] / total_ops * 100})
op_list_sorted = sorted(op_list, key=lambda x: x['n'], reverse=True)
for op in op_list_sorted:
t.add_row([op["type"], op["n"], "%.2f %%" % op["perc"]])
print(t)
if __name__ == '__main__':
sys.exit(main())
| 34.672414 | 131 | 0.61462 | 517 | 4,022 | 4.555126 | 0.282398 | 0.025478 | 0.025478 | 0.020382 | 0.138429 | 0.126539 | 0.126539 | 0.126539 | 0.126539 | 0.126539 | 0 | 0.018531 | 0.275485 | 4,022 | 115 | 132 | 34.973913 | 0.789636 | 0 | 0 | 0.16 | 0 | 0 | 0.112631 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.11 | 0 | 0.14 | 0.03 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
627a01c4f879f37289cba66de0bb51c608bf5566 | 894 | py | Python | mt/cli/mt_cluster_reboot.py | iamwm/mrofiler | 36735090e4b6b54ee29aaa3bc7887438d023787d | [
"MIT"
] | null | null | null | mt/cli/mt_cluster_reboot.py | iamwm/mrofiler | 36735090e4b6b54ee29aaa3bc7887438d023787d | [
"MIT"
] | null | null | null | mt/cli/mt_cluster_reboot.py | iamwm/mrofiler | 36735090e4b6b54ee29aaa3bc7887438d023787d | [
"MIT"
] | null | null | null | import os.path
import click
from mt.conf.parser import refresh_global_config, global_config, refresh_mongo_cmd_lines
from mt.core.connector import ShardingCluster
from mt.operation.reboot.cluster import save_cmd_lines_of_shards, reboot_cluster_shards
@click.command()
@click.option("--conf", '-f', type=str, default='./mt.yaml', help="mt yaml config path", required=True)
def cluster_reboot(conf: str):
"""
reboot mongo cluster
"""
refresh_global_config(conf)
mongo_uri = global_config.get('mongo_cluster_config', {}).get('mongo_uri')
if not mongo_uri:
print('no available mongo uri')
exit(1)
c = ShardingCluster(mongo_uri)
save_cmd_lines_of_shards(c)
cmd_save_path = global_config.get('cmd_save_path')
refresh_mongo_cmd_lines(cmd_save_path)
reboot_cluster_shards(c)
if __name__ == '__main__':
cluster_reboot()
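# Example invocation (hypothetical config path):
#   python mt_cluster_reboot.py --conf /etc/mt/mt.yaml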
| 26.294118 | 103 | 0.734899 | 128 | 894 | 4.765625 | 0.367188 | 0.098361 | 0.054098 | 0.065574 | 0.065574 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001326 | 0.1566 | 894 | 33 | 104 | 27.090909 | 0.807692 | 0.022371 | 0 | 0 | 0 | 0 | 0.125874 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.25 | 0 | 0.3 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
627ae381d6f3f93a7e8e2674219028070a717094 | 3,559 | py | Python | discovery-provider/src/queries/get_balances.py | Tenderize/audius-protocol | aa15844e3f12812fe8aaa81e2cb6e5c5fa89ff51 | [
"Apache-2.0"
] | null | null | null | discovery-provider/src/queries/get_balances.py | Tenderize/audius-protocol | aa15844e3f12812fe8aaa81e2cb6e5c5fa89ff51 | [
"Apache-2.0"
] | null | null | null | discovery-provider/src/queries/get_balances.py | Tenderize/audius-protocol | aa15844e3f12812fe8aaa81e2cb6e5c5fa89ff51 | [
"Apache-2.0"
] | null | null | null | import logging
from datetime import datetime, timedelta
from typing import List
from redis import Redis
from sqlalchemy.orm.session import Session
from src.models import UserBalance
from src.solana.constants import WAUDIO_DECIMALS
logger = logging.getLogger(__name__)
# How stale of a zero user balance we tolerate before refreshing
BALANCE_REFRESH = 12 * 60 * 60
LAZY_REFRESH_REDIS_PREFIX = "USER_BALANCE_REFRESH_LAZY"
IMMEDIATE_REFRESH_REDIS_PREFIX = "USER_BALANCE_REFRESH_IMMEDIATE"
def does_user_balance_need_refresh(user_balance: UserBalance) -> bool:
"""Returns whether a given user_balance needs update.
Very heuristic-y:
- If we've never updated before (new balance entry), update now
- If the balance has not been updated in BALANCE_REFRESH seconds
"""
if user_balance.updated_at == user_balance.created_at:
return True
delta = timedelta(seconds=BALANCE_REFRESH)
needs_refresh = user_balance.updated_at < (datetime.now() - delta)
return needs_refresh
def enqueue_lazy_balance_refresh(redis: Redis, user_ids: List[int]):
# unsafe to call redis.sadd w/ empty array
if not user_ids:
return
redis.sadd(LAZY_REFRESH_REDIS_PREFIX, *user_ids)
def enqueue_immediate_balance_refresh(redis: Redis, user_ids: List[int]):
# unsafe to call redis.sadd w/ empty array
if not user_ids:
return
redis.sadd(IMMEDIATE_REFRESH_REDIS_PREFIX, *user_ids)
def get_balances(session: Session, redis: Redis, user_ids: List[int]):
"""Gets user balances.
Returns mapping { user_id: balance }
Enqueues in Redis user balances requiring refresh.
"""
# Find user balances
query: List[UserBalance] = (
(session.query(UserBalance)).filter(UserBalance.user_id.in_(user_ids)).all()
)
# Construct result dict from query result
result = {
user_balance.user_id: {
"owner_wallet_balance": user_balance.balance,
"associated_wallets_balance": user_balance.associated_wallets_balance,
"associated_sol_wallets_balance": user_balance.associated_sol_wallets_balance,
"waudio_balance": user_balance.waudio,
"total_balance": str(
int(user_balance.balance)
+ int(user_balance.associated_wallets_balance)
+ int(user_balance.associated_sol_wallets_balance)
* 10 ** WAUDIO_DECIMALS
+ int(user_balance.waudio) * 10 ** WAUDIO_DECIMALS
),
}
for user_balance in query
}
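    # Note: only the Solana-side amounts (associated SOL wallets and wAUDIO) are
    # scaled by 10**WAUDIO_DECIMALS above, so they share the same base unit as the
    # Ethereum-side balances before being summed into total_balance.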
# Find user_ids that don't yet have a balance
user_ids_set = set(user_ids)
fetched_user_ids_set = {x.user_id for x in query}
needs_balance_set = user_ids_set - fetched_user_ids_set
# Add new balances to result set
no_balance_dict = {
user_id: {
"owner_wallet_balance": "0",
"associated_wallets_balance": "0",
"associated_sol_wallets_balance": "0",
"total_balance": "0",
"waudio_balance": "0",
}
for user_id in needs_balance_set
}
result.update(no_balance_dict)
# Get old balances that need refresh
needs_refresh = [
user_balance.user_id
for user_balance in query
if does_user_balance_need_refresh(user_balance)
]
# Enqueue new balances to Redis refresh queue
# 1. All users who need a new balance
# 2. All users who need a balance refresh
enqueue_lazy_balance_refresh(redis, list(needs_balance_set) + needs_refresh)
return result
| 33.261682 | 90 | 0.691767 | 462 | 3,559 | 5.032468 | 0.255411 | 0.108817 | 0.030968 | 0.037849 | 0.373763 | 0.213333 | 0.115269 | 0.083441 | 0.083441 | 0.083441 | 0 | 0.006245 | 0.235178 | 3,559 | 106 | 91 | 33.575472 | 0.847906 | 0.210171 | 0 | 0.090909 | 0 | 0 | 0.096377 | 0.060507 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.106061 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
627bb066e8707f5c6b15cbdc15ff10ab92b17406 | 1,631 | py | Python | weallcode/views/programs.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 15 | 2019-05-04T00:24:00.000Z | 2021-08-21T16:34:05.000Z | weallcode/views/programs.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 73 | 2019-04-24T15:53:42.000Z | 2021-08-06T20:41:41.000Z | weallcode/views/programs.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 20 | 2019-04-26T20:13:08.000Z | 2021-06-21T14:53:21.000Z | from django.urls import reverse_lazy
from django.utils import timezone
from django.views.generic import TemplateView
from coderdojochi.models import Course, Session
from .common import DefaultMetaTags
class ProgramsView(DefaultMetaTags, TemplateView):
template_name = "weallcode/programs.html"
url = reverse_lazy("weallcode-programs")
title = f"Programs | We All Code"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# WEEKEND CLASSES
weekend_classes = (
Session.objects.filter(
is_active=True,
start_date__gte=timezone.now(),
course__course_type=Course.WEEKEND,
)
.order_by("start_date")
.prefetch_related("course", "location")
)
if not self.request.user.is_authenticated or not self.request.user.role == "mentor":
weekend_classes = weekend_classes.filter(is_public=True)
context["weekend_classes"] = weekend_classes
# SUMMER CAMP CLASSES
summer_camp_classes = (
Session.objects.filter(
is_active=True,
start_date__gte=timezone.now(),
course__course_type=Course.CAMP,
)
.order_by("start_date")
.prefetch_related("course", "location")
)
        if not self.request.user.is_authenticated or self.request.user.role != "mentor":
summer_camp_classes = summer_camp_classes.filter(is_public=True)
context["summer_camp_classes"] = summer_camp_classes
return context
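
    # Note: the two queryset blocks above are identical except for the course
    # type. A small helper along these lines (a sketch, not part of the
    # original view) would remove the duplication:
    def _upcoming_sessions(self, course_type):
        sessions = (
            Session.objects.filter(
                is_active=True,
                start_date__gte=timezone.now(),
                course__course_type=course_type,
            )
            .order_by("start_date")
            .prefetch_related("course", "location")
        )
        user = self.request.user
        if not user.is_authenticated or user.role != "mentor":
            sessions = sessions.filter(is_public=True)
        return sessions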
| 31.365385 | 92 | 0.638872 | 177 | 1,631 | 5.627119 | 0.372881 | 0.084337 | 0.10241 | 0.072289 | 0.542169 | 0.542169 | 0.389558 | 0.389558 | 0.389558 | 0.389558 | 0 | 0 | 0.272839 | 1,631 | 51 | 93 | 31.980392 | 0.839798 | 0.021459 | 0 | 0.333333 | 0 | 0 | 0.098556 | 0.014438 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.138889 | 0 | 0.305556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
627de92cfee1df9559460076e122d327ce5da796 | 14,957 | py | Python | upvote/gae/datastore/models/santa_test.py | farmersbusinessnetwork/upvote | a59653db40fabc43807e6f82cf68161fc50a7305 | [
"Apache-2.0"
] | null | null | null | upvote/gae/datastore/models/santa_test.py | farmersbusinessnetwork/upvote | a59653db40fabc43807e6f82cf68161fc50a7305 | [
"Apache-2.0"
] | 1 | 2018-11-06T20:51:36.000Z | 2018-11-06T20:51:36.000Z | upvote/gae/datastore/models/santa_test.py | farmersbusinessnetwork/upvote | a59653db40fabc43807e6f82cf68161fc50a7305 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for santa.py."""
import datetime
import mock
from google.appengine.ext import db
from google.appengine.ext import ndb
from upvote.gae import settings
from upvote.gae.datastore import test_utils
from upvote.gae.datastore import utils as datastore_utils
from upvote.gae.datastore.models import santa
from upvote.gae.lib.testing import basetest
from upvote.shared import constants
class SantaModelTest(basetest.UpvoteTestCase):
"""Test Santa Models."""
def setUp(self):
super(SantaModelTest, self).setUp()
self.santa_blockable = santa.SantaBlockable(
id='aaaabbbbccccdddd',
id_type=constants.ID_TYPE.SHA256,
blockable_hash='aaaabbbbccccdddd',
file_name='Mac.app',
publisher='Arple',
product_name='New Shiny',
version='2.0')
self.santa_certificate = santa.SantaCertificate(
id='mmmmnnnnoooopppp',
id_type=constants.ID_TYPE.SHA256,
blockable_hash='mmmmnnnnoooopppp',
file_name='MagicCert',
publisher='Total Legit CA',
version='7.0',
common_name='Trustee',
organization='Big Lucky',
organizational_unit='The Unit')
quarantine = santa.QuarantineMetadata(
data_url='http://notbad.com',
referer_url='http://sourceforge.com',
downloaded_dt=datetime.datetime.utcnow(),
agent_bundle_id='123456')
now = datetime.datetime.utcnow()
self.santa_event = santa.SantaEvent(
blockable_key=self.santa_blockable.key,
event_type=constants.EVENT_TYPE.ALLOW_BINARY,
file_name='Mac.app',
file_path='/',
executing_user='foo',
first_blocked_dt=now,
last_blocked_dt=now,
quarantine=quarantine)
self.santa_blockable.put()
self.santa_certificate.put()
self.santa_event.put()
self.PatchEnv(settings.ProdEnv, ENABLE_BIGQUERY_STREAMING=True)
class SantaBlockableTest(SantaModelTest):
def testToDict_ContainsOs(self):
with self.LoggedInUser():
the_dict = self.santa_blockable.to_dict()
self.assertEqual(
constants.PLATFORM.MACOS,
the_dict.get('operating_system_family', None))
def testToDict_ContainsIsVotingAllowed(self):
blockable = test_utils.CreateBlockable()
with self.LoggedInUser():
self.assertIn('is_voting_allowed', blockable.to_dict())
def testIsVotingAllowed_CertIsBlacklisted(self):
"""IsVotingAllowed() called on blockable signed by a blacklisted cert."""
blockable_cert = test_utils.CreateSantaBlockable()
blockable = test_utils.CreateSantaBlockable(cert_key=blockable_cert.key)
test_utils.CreateSantaRule(
blockable_cert.key,
rule_type=constants.RULE_TYPE.CERTIFICATE,
policy=constants.RULE_POLICY.BLACKLIST)
with self.LoggedInUser():
allowed, reason = blockable.IsVotingAllowed()
self.assertFalse(allowed)
self.assertIsNotNone(reason)
def testIsVotingAllowed_Admin_CertIsBlacklisted(self):
"""IsVotingAllowed() called on blockable signed by a blacklisted cert."""
blockable_cert = test_utils.CreateSantaBlockable()
blockable = test_utils.CreateSantaBlockable(cert_key=blockable_cert.key)
test_utils.CreateSantaRule(
blockable_cert.key,
rule_type=constants.RULE_TYPE.CERTIFICATE,
policy=constants.RULE_POLICY.BLACKLIST)
with self.LoggedInUser(admin=True):
_, reason = blockable.IsVotingAllowed()
# Ensure voting isn't disabled because of the blacklisted cert.
self.assertNotEqual(
constants.VOTING_PROHIBITED_REASONS.BLACKLISTED_CERT, reason)
def testIsVotingAllowed_CallTheSuper(self):
santa_blockable = test_utils.CreateSantaBlockable()
with self.LoggedInUser():
with mock.patch.object(
santa.base.Blockable, 'IsVotingAllowed') as mock_method:
santa_blockable.IsVotingAllowed()
self.assertTrue(mock_method.called)
def testChangeState_Success(self):
# Verify the SantaBlockable is in the default state of UNTRUSTED.
blockable = test_utils.CreateSantaBlockable()
blockable_hash = blockable.blockable_hash
blockable = santa.SantaBlockable.get_by_id(blockable_hash)
self.assertIsNotNone(blockable)
self.assertEqual(constants.STATE.UNTRUSTED, blockable.state)
# Note the state change timestamp.
old_state_change_dt = blockable.state_change_dt
# Change the state.
blockable.ChangeState(constants.STATE.BANNED)
# Reload, and verify the state change.
blockable = santa.SantaBlockable.get_by_id(blockable_hash)
self.assertIsNotNone(blockable)
self.assertEqual(constants.STATE.BANNED, blockable.state)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.BINARY)
# And the state change timestamp should be increased.
self.assertTrue(blockable.state_change_dt > old_state_change_dt)
def testChangeState_BinaryRowCreation_NoBlockableHash(self):
hashless_santa_blockable = santa.SantaBlockable(
id='aaaabbbbccccdddd',
id_type=constants.ID_TYPE.SHA256,
file_name='Whatever.app')
hashless_santa_blockable.ChangeState(constants.STATE.SUSPECT)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.BINARY)
def testResetState(self):
blockable = test_utils.CreateSantaBlockable(
state=constants.STATE.BANNED, flagged=True)
blockable.ResetState()
actual_binary = blockable.key.get()
self.assertEqual(actual_binary.state, constants.STATE.UNTRUSTED)
self.assertFalse(actual_binary.flagged)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.BINARY)
class SantaCertificateTest(basetest.UpvoteTestCase):
def setUp(self):
super(SantaCertificateTest, self).setUp()
self.PatchEnv(settings.ProdEnv, ENABLE_BIGQUERY_STREAMING=True)
def testChangeState(self):
# Verify the SantaCertificate is in the default state of UNTRUSTED.
cert = test_utils.CreateSantaCertificate()
blockable_hash = cert.blockable_hash
cert = santa.SantaCertificate.get_by_id(blockable_hash)
self.assertIsNotNone(cert)
self.assertEqual(constants.STATE.UNTRUSTED, cert.state)
# Note the state change timestamp.
old_state_change_dt = cert.state_change_dt
# Change the state.
cert.ChangeState(constants.STATE.BANNED)
# Reload, and verify the state change.
cert = santa.SantaCertificate.get_by_id(blockable_hash)
self.assertIsNotNone(cert)
self.assertEqual(constants.STATE.BANNED, cert.state)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.CERTIFICATE)
# And the state change timestamp should be increased.
self.assertTrue(cert.state_change_dt > old_state_change_dt)
def testResetState(self):
cert = test_utils.CreateSantaCertificate(
state=constants.STATE.BANNED, flagged=True)
cert.ResetState()
actual_cert = cert.key.get()
self.assertEqual(actual_cert.state, constants.STATE.UNTRUSTED)
self.assertFalse(actual_cert.flagged)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.CERTIFICATE)
class SantaEventTest(SantaModelTest):
def testRunByLocalAdminSantaEvent(self):
self.assertFalse(self.santa_event.run_by_local_admin)
self.santa_event.executing_user = constants.LOCAL_ADMIN.MACOS
self.assertTrue(self.santa_event.run_by_local_admin)
def testDedupeSantaEvent(self):
later_dt = (
self.santa_event.last_blocked_dt + datetime.timedelta(seconds=1))
later_event = datastore_utils.CopyEntity(
self.santa_event,
quarantine=None,
event_type=constants.EVENT_TYPE.BLOCK_CERTIFICATE,
last_blocked_dt=later_dt)
self.santa_event.Dedupe(later_event)
self.assertEqual(
constants.EVENT_TYPE.BLOCK_CERTIFICATE, self.santa_event.event_type)
self.assertIsNotNone(self.santa_event.quarantine)
def testDedupeSantaEvent_AddOldQuarantineData(self):
quarantine = self.santa_event.quarantine
self.santa_event.quarantine = None
self.santa_event.put()
earlier_dt = (
self.santa_event.first_blocked_dt - datetime.timedelta(seconds=1))
earlier_event = datastore_utils.CopyEntity(
self.santa_event,
quarantine=quarantine,
event_type=constants.EVENT_TYPE.BLOCK_CERTIFICATE,
first_blocked_dt=earlier_dt)
self.santa_event.Dedupe(earlier_event)
self.assertNotEqual(
constants.EVENT_TYPE.BLOCK_CERTIFICATE, self.santa_event.event_type)
self.assertIsNotNone(self.santa_event.quarantine)
def testDedupeSantaEvent_AddNewerQuarantineData(self):
new_quarantine = datastore_utils.CopyEntity(
self.santa_event.quarantine, data_url='http://3vil.com')
later_dt = (
self.santa_event.last_blocked_dt + datetime.timedelta(seconds=1))
later_event = datastore_utils.CopyEntity(
self.santa_event,
quarantine=new_quarantine,
last_blocked_dt=later_dt)
self.santa_event.Dedupe(later_event)
self.assertEqual(
'http://3vil.com', self.santa_event.quarantine.data_url)
def testGiantQuarantineUrl(self):
# Ensure URLs that exceed the NDB size limit for indexed properties (1500
# bytes) may be set on QuarantineMetadata URL fields.
self.santa_event.quarantine.data_url = 'http://3vil.com/' + 'a' * 1500
self.santa_event.put()
class SantaBundleTest(SantaModelTest):
def testIgnoreCalculateScoreBeforeUpload(self):
bundle = test_utils.CreateSantaBundle(uploaded_dt=None)
test_utils.CreateVote(bundle)
# Trigger the SantaBundle.score ComputedProperty calculation.
bundle.put()
    # The score should not reflect the real score until the bundle has been
    # uploaded.
self.assertEqual(0, bundle.key.get().score)
def testTranslatePropertyQuery_CertId(self):
field, val = 'cert_id', 'bar'
new_field, new_val = santa.SantaBundle.TranslatePropertyQuery(field, val)
self.assertEqual(val, ndb.Key(urlsafe=new_val).id())
self.assertEqual('main_cert_key', new_field)
def testTranslatePropertyQuery_CertId_NoQueryValue(self):
field, val = 'cert_id', None
new_field, new_val = santa.SantaBundle.TranslatePropertyQuery(field, val)
self.assertIsNone(new_val)
self.assertEqual('main_cert_key', new_field)
def testTranslatePropertyQuery_NotCertId(self):
pair = ('foo', 'bar')
self.assertEqual(pair, santa.SantaBundle.TranslatePropertyQuery(*pair))
def testIsVotingAllowed_BundleUpload(self):
bundle = test_utils.CreateSantaBundle(uploaded_dt=None)
self.assertFalse(bundle.has_been_uploaded)
allowed, reason = bundle.IsVotingAllowed()
self.assertFalse(allowed)
self.assertEqual(
constants.VOTING_PROHIBITED_REASONS.UPLOADING_BUNDLE, reason)
def testIsVotingAllowed_HasFlaggedBinary(self):
# First, create two unflagged binaries.
blockables = test_utils.CreateSantaBlockables(2)
bundle = test_utils.CreateSantaBundle(bundle_binaries=blockables)
with self.LoggedInUser():
allowed, reason = bundle.IsVotingAllowed()
self.assertTrue(allowed)
# Now flag one of the binaries.
blockables[0].flagged = True
blockables[0].put()
allowed, reason = bundle.IsVotingAllowed()
self.assertFalse(allowed)
self.assertEqual(
constants.VOTING_PROHIBITED_REASONS.FLAGGED_BINARY, reason)
def testIsVotingAllowed_HasFlaggedCert(self):
blockable = test_utils.CreateSantaBlockable(
cert_key=self.santa_certificate.key)
bundle = test_utils.CreateSantaBundle(bundle_binaries=[blockable])
with self.LoggedInUser():
allowed, reason = bundle.IsVotingAllowed()
self.assertTrue(allowed)
self.santa_certificate.flagged = True
self.santa_certificate.put()
allowed, reason = bundle.IsVotingAllowed()
self.assertFalse(allowed)
self.assertEqual(constants.VOTING_PROHIBITED_REASONS.FLAGGED_CERT, reason)
def testIsVotingAllowed_DisableHasFlaggedChecks(self):
blockables = test_utils.CreateSantaBlockables(26)
bundle = test_utils.CreateSantaBundle(bundle_binaries=blockables)
# Flag one of the binaries.
blockables[0].flagged = True
blockables[0].put()
with self.LoggedInUser():
# Ensure that the normal call succeeds in finding the flagged binary.
allowed, reason = bundle.IsVotingAllowed()
self.assertFalse(allowed)
self.assertEqual(
constants.VOTING_PROHIBITED_REASONS.FLAGGED_BINARY, reason)
# In a transaction, the 26 searched blockables should exceed the allowed
# limit of 25.
with self.assertRaises(db.BadRequestError):
ndb.transaction(
lambda: bundle.IsVotingAllowed(enable_flagged_checks=True), xg=True)
# With the checks disabled, IsVotingAllowed shouldn't raise an exception.
def Test():
allowed, reason = bundle.IsVotingAllowed(enable_flagged_checks=False)
self.assertTrue(allowed)
self.assertIsNone(reason)
ndb.transaction(Test, xg=True)
def testIsVotingAllowed_CallTheSuper(self):
bundle = test_utils.CreateSantaBundle()
with self.LoggedInUser():
with mock.patch.object(
santa.base.Blockable, 'IsVotingAllowed') as mock_method:
bundle.IsVotingAllowed()
self.assertTrue(mock_method.called)
def testToDict(self):
bundle = test_utils.CreateSantaBundle()
with self.LoggedInUser():
dict_ = bundle.to_dict()
self.assertTrue(dict_['has_been_uploaded'])
self.assertIsNone(dict_['cert_id'])
def testToDict_CertId(self):
blockable = test_utils.CreateSantaBlockable(
cert_key=self.santa_certificate.key)
bundle = test_utils.CreateSantaBundle(
main_cert_key=self.santa_certificate.key,
bundle_binaries=[blockable])
with self.LoggedInUser():
dict_ = bundle.to_dict()
self.assertTrue(dict_['has_been_uploaded'])
self.assertEqual(self.santa_certificate.key.id(), dict_['cert_id'])
def testPersistsStateChange(self):
bundle = test_utils.CreateSantaBundle(uploaded_dt=None)
bundle.ChangeState(constants.STATE.SUSPECT)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.BUNDLE)
def testResetsState(self):
bundle = test_utils.CreateSantaBundle(uploaded_dt=None)
bundle.ResetState()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.BUNDLE)
if __name__ == '__main__':
basetest.main()
| 34.463134 | 80 | 0.740122 | 1,687 | 14,957 | 6.369295 | 0.193242 | 0.031829 | 0.032573 | 0.022336 | 0.570777 | 0.516333 | 0.46496 | 0.411913 | 0.370033 | 0.324988 | 0 | 0.00427 | 0.170088 | 14,957 | 433 | 81 | 34.542725 | 0.861355 | 0.120679 | 0 | 0.457045 | 0 | 0 | 0.033903 | 0.001756 | 0.003436 | 0 | 0 | 0 | 0.202749 | 1 | 0.106529 | false | 0 | 0.034364 | 0 | 0.158076 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
627e64b69405a7769d3ed58922a938ab5170ca8a | 11,745 | py | Python | src/lagt/train/train.py | junyongyou/lagt_vqa | 11aeda111ec4d97980db1e60f7b66b481266d1f3 | [
"MIT"
] | null | null | null | src/lagt/train/train.py | junyongyou/lagt_vqa | 11aeda111ec4d97980db1e60f7b66b481266d1f3 | [
"MIT"
] | null | null | null | src/lagt/train/train.py | junyongyou/lagt_vqa | 11aeda111ec4d97980db1e60f7b66b481266d1f3 | [
"MIT"
] | null | null | null | import os
import tensorflow as tf
import numpy as np
import scipy
import glob
from tensorflow.keras.optimizers import Adam, SGD
from callbacks.callbacks import create_callbacks
from callbacks.warmup_cosine_decay_scheduler import WarmUpCosineDecayScheduler
from lagt.train.video_clip_feature_generator import VideoClipFeatureGenerator
from lagt.utils.gather_video_ids import gather_all_vids
from callbacks.evaluation_vq_generator import ModelEvaluationGeneratorVQ
from lagt.models.lagt_model import create_model
def check_args(args):
if 'result_folder' not in args:
exit('Result folder must be specified')
if 'meta_file' not in args:
exit('Meta file of videos and MOS must be specified')
if 'vids_meta' not in args:
args['vids_meta'] = None
if 'model_name' not in args:
args['model_name'] = 'lagt'
    # All three UGC chunk settings must be provided together; otherwise fall
    # back to None for the whole group.
    if ('ugc_chunk_pickle' not in args or 'ugc_chunk_folder' not in args
            or 'ugc_chunk_folder_flipped' not in args):
args['ugc_chunk_pickle'] = None
args['ugc_chunk_folder'] = None
args['ugc_chunk_folder_flipped'] = None
if 'database' not in args:
args['database'] = ['live', 'konvid', 'ugc']
if 'transformer_params' not in args:
args['transformer_params'] = [2, 64, 8, 256]
if 'dropout_rate' not in args:
args['dropout_rate'] = 0.1
if 'clip_length' not in args:
args['clip_length'] = 32
if 'epochs' not in args:
args['epochs'] = 400
if 'lr_base' not in args:
args['lr_base'] = 1e-3
if 'batch_size' not in args:
args['batch_size'] = 32
if 'lr_schedule' not in args:
args['lr_schedule'] = True
if 'multi_gpu' not in args:
args['multi_gpu'] = 0
if 'gpu' not in args:
args['gpu'] = 0
if 'do_finetune' not in args:
args['do_finetune'] = True
return args
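
# For reference, a minimal `args` dictionary exercising the defaults above
# (the paths are placeholders, not values from the original project):
EXAMPLE_ARGS = {
    'result_folder': '/tmp/lagt_results',    # required
    'meta_file': '/tmp/video_mos_meta.csv',  # required
    'batch_size': 16,                        # overrides the default of 32
    'lr_base': 5e-4,                         # overrides the default of 1e-3
}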
def identify_best_weights(result_folder, history, best_plcc):
    # history['plcc'] is a plain Python list, so convert it before comparing;
    # an empty index array means no epoch matched the best PLCC value.
    best_weights = np.where(np.array(history['plcc']) == best_plcc)
    if len(best_weights[0]) == 0:
        return None
pos = best_weights[0][0]
pos_loss = '{}_{:.4f}'.format(pos + 1, history['loss'][pos])
all_weights_files = glob.glob(os.path.join(result_folder, '*.h5'))
for all_weights_file in all_weights_files:
weight_file = os.path.basename(all_weights_file)
if weight_file.startswith(pos_loss):
best_weights_file = all_weights_file
return best_weights_file
return None
def remove_non_best_weights(result_folder, best_weights_files):
all_weights_files = glob.glob(os.path.join(result_folder, '*.h5'))
for all_weights_file in all_weights_files:
if all_weights_file not in best_weights_files:
os.remove(all_weights_file)
def evaluation_on_testset(model, vq_generator):
predictions = []
mos_scores = []
for i in range(vq_generator.__len__()):
features, score = vq_generator.__getitem__(i)
mos_scores.extend(score)
prediction = model(features)
predictions.extend(np.squeeze(prediction, 1))
PLCC = scipy.stats.pearsonr(mos_scores, predictions)[0]
SROCC = scipy.stats.spearmanr(mos_scores, predictions)[0]
RMSE = np.sqrt(np.mean(np.subtract(predictions, mos_scores) ** 2))
MAD = np.mean(np.abs(np.subtract(predictions, mos_scores)))
return PLCC, SROCC, RMSE, MAD
def train_main(args, train_vids=None, val_ids=None, test_ids=None):
"""
Main function to train LAGT-PHIQNet
:param args: arguments for training
:return: Max PLCC from the training
"""
args = check_args(args)
result_folder = args['result_folder']
if not os.path.exists(result_folder):
os.makedirs(result_folder)
result_file = os.path.join(result_folder, 'result.csv')
if os.path.exists(result_file):
rf = open(result_file, 'a')
else:
rf = open(result_file, 'w+')
model_name = args['model_name']
    if train_vids is None or val_ids is None:
# train and val videos will be randomly split based on random seed
train_vids, val_ids = gather_all_vids(all_vids_pkl=args['vids_meta'])
clip_length = args['clip_length']
model_name += '_clip_{}'.format(clip_length)
epochs = args['epochs']
# Model parameters
transformer_params = args['transformer_params']
dropout_rates = args['dropout_rate']
feature_length = 1280
if args['multi_gpu'] == 0:
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus[args['gpu']], 'GPU')
model = create_model(clip_length,
feature_length=feature_length,
transformer_params=transformer_params,
dropout_rate=dropout_rates)
else:
strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
with strategy.scope():
model = create_model(clip_length,
feature_length=feature_length,
transformer_params=transformer_params,
dropout_rate=dropout_rates)
model.summary()
optimizer = Adam(args['lr_base'])
loss = 'mse'
metrics = 'mae'
model.compile(loss=loss, optimizer=optimizer, metrics=[metrics])
# model.run_eagerly = True
train_generator = VideoClipFeatureGenerator(args['meta_file'],
train_vids,
batch_size=args['batch_size'],
clip_length=args['clip_length'],
random_ratio=0.25,
training=True,
ugc_chunk_pickle=args['ugc_chunk_pickle'],
ugc_chunk_folder=args['ugc_chunk_folder'],
ugc_chunk_folder_flipped=args['ugc_chunk_folder_flipped'],
database=args['database'])
val_generator = VideoClipFeatureGenerator(args['meta_file'],
val_ids,
batch_size=args['batch_size'],
clip_length=args['clip_length'],
random_ratio=0,
training=False,
ugc_chunk_pickle=args['ugc_chunk_pickle'],
ugc_chunk_folder=args['ugc_chunk_folder'],
ugc_chunk_folder_flipped=args['ugc_chunk_folder_flipped'],
database=args['database'])
test_generator = VideoClipFeatureGenerator(args['meta_file'],
test_ids,
batch_size=1,
clip_length=args['clip_length'],
random_ratio=0,
training=False,
ugc_chunk_pickle=args['ugc_chunk_pickle'],
ugc_chunk_folder=args['ugc_chunk_folder'],
ugc_chunk_folder_flipped=args['ugc_chunk_folder_flipped'],
database=args['database'])
evaluation_callback = ModelEvaluationGeneratorVQ(val_generator, None)
callbacks = create_callbacks(model_name,
result_folder,
evaluation_callback,
checkpoint=True,
early_stop=True,
metrics=metrics)
train_steps = train_generator.__len__()
if args['lr_schedule']:
warmup_epochs = 10
total_train_steps = epochs * train_steps
warmup_steps = warmup_epochs * train_steps
warmup_lr = WarmUpCosineDecayScheduler(learning_rate_base=args['lr_base'],
total_steps=total_train_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
# hold_base_rate_steps=0,
hold_base_rate_steps=10 * train_steps,
verbose=1)
callbacks.append(warmup_lr)
model_history = model.fit(
x=train_generator,
epochs=epochs,
steps_per_epoch=train_steps,
validation_data=val_generator,
validation_steps=val_generator.__len__(),
verbose=1,
shuffle=False,
callbacks=callbacks,
)
max_plcc_pretrain = np.max(model_history.history['plcc'])
info = 'Pretrain: epochs: {}, MAX PLCC: {}\n'.format(len(model_history.history['plcc']), max_plcc_pretrain)
rf.write(info)
print(info)
    best_weights_file = identify_best_weights(result_folder, model_history.history, callbacks[3].best)
    if not best_weights_file:
        # Bail out before pruning: passing [None] to remove_non_best_weights
        # would delete every checkpoint in the folder.
        return max_plcc_pretrain
    remove_non_best_weights(result_folder, [best_weights_file])
# do fine-tuning
if args['do_finetune'] and best_weights_file:
        if args['lr_schedule']:
            # Drop the pretraining warm-up scheduler; the fine-tuning one is
            # appended below. Without lr_schedule there is nothing to remove.
            del callbacks[-1]
model.load_weights(best_weights_file)
finetune_lr = 1e-5
if args['lr_schedule']:
warmup_lr_finetune = WarmUpCosineDecayScheduler(learning_rate_base=finetune_lr,
total_steps=total_train_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
hold_base_rate_steps=10 * train_steps,
verbose=1)
callbacks.append(warmup_lr_finetune)
finetune_optimizer = SGD(learning_rate=finetune_lr, momentum=0.9)
model.compile(loss=loss, optimizer=finetune_optimizer, metrics=[metrics])
finetune_model_history = model.fit(
x=train_generator,
epochs=epochs,
steps_per_epoch=train_steps,
validation_data=val_generator,
validation_steps=val_generator.__len__(),
verbose=1,
shuffle=False,
callbacks=callbacks,
)
max_plcc_finetune = np.max(finetune_model_history.history['plcc'])
info = 'Finetune: epochs: {}, MAX PLCC: {}\n'.format(len(finetune_model_history.history['plcc']),
max_plcc_finetune)
rf.write(info)
print(info)
best_weights_file_finetune = identify_best_weights(result_folder, finetune_model_history.history, callbacks[3].best)
if args['do_finetune']:
best_weights = best_weights_file_finetune if best_weights_file_finetune is not None else best_weights_file
else:
best_weights = best_weights_file
model.load_weights(best_weights)
plcc, srocc, rmse, mad = evaluation_on_testset(model, test_generator)
rf.write('Results on testset: PLCC: {}, SROCC: {}, RMSE: {}, MAD: {}\n'.format(plcc, srocc, rmse, mad))
rf.flush()
rf.close()
if args['do_finetune']:
return max([max_plcc_pretrain, max_plcc_finetune])
return max_plcc_pretrain
| 42.248201 | 124 | 0.570796 | 1,271 | 11,745 | 4.959087 | 0.1786 | 0.04363 | 0.025702 | 0.028875 | 0.418372 | 0.299699 | 0.27241 | 0.27241 | 0.24766 | 0.24766 | 0 | 0.008171 | 0.34355 | 11,745 | 277 | 125 | 42.400722 | 0.809339 | 0.021626 | 0 | 0.327434 | 0 | 0 | 0.095176 | 0.008375 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022124 | false | 0 | 0.053097 | 0 | 0.110619 | 0.00885 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
627ed75868124d40b439a0a818d5fc617648b9f2 | 2,464 | py | Python | generate_code/generate.py | ohlogic/py3dEngine | 94f920ebd90a35116e04b43fbf6ca29e5c5b5104 | [
"MIT"
] | 2 | 2020-11-21T08:14:17.000Z | 2021-02-28T07:57:40.000Z | generate_code/generate.py | luntan365/py3dEngine | 94f920ebd90a35116e04b43fbf6ca29e5c5b5104 | [
"MIT"
] | null | null | null | generate_code/generate.py | luntan365/py3dEngine | 94f920ebd90a35116e04b43fbf6ca29e5c5b5104 | [
"MIT"
] | 1 | 2019-12-11T11:44:01.000Z | 2019-12-11T11:44:01.000Z | #!/usr/bin/python3
import pyglet
from pyglet.gl import *
import sys
sys.path.insert(0, "./db")
from db import *
sys.path.append("./libs")
from objloader_dbload import * # without database, use the objloader.py
def generate_objs_list():
cur = db.cursor(MySQLdb.cursors.DictCursor)
cur.execute('SELECT * FROM worldmap_objects ORDER BY id ASC;')
objs = []
count = 0
for row in cur.fetchall():
objs.append(
OBJ100(
row['objectname'],
swapyz=True,
name=row['objectname'].split(".")[0],
id=str(row['id']),
rx=str(row['rx']),
ry=str(row['ry']),
tx=str(row['tx']),
ty=str(row['ty'])
)
)
return objs
def generate_obj_matrix(winpg):
cur = db.cursor(MySQLdb.cursors.DictCursor)
cur.execute('SELECT fixobj FROM worldmap_objects;')
str1 = ""
count = 0
for row in cur.fetchall():
glPushMatrix()
str1 = row['fixobj']
        if str1 is not None:
            # fixobj stores literal GL transform calls in the database,
            # e.g., glTranslate(10,0,0);glRotate(90, 1, 0, 0);glRotate(180, 0, 1, 0);
            exec(str1)
if winpg.selected_obj == count:
glTranslatef(float(winpg.oo.tx)/20., float(winpg.oo.ty)/20., - winpg.zpos)
glRotatef(float(winpg.oo.rx), 1, 0, 0)
glRotatef(float(winpg.oo.ry), 0, 1, 0)
else:
glTranslatef(float(winpg.map.objs[count].tx)/20., float(winpg.map.objs[count].ty)/20., - winpg.zpos)
glRotatef(float(winpg.map.objs[count].rx), 1, 0, 0)
glRotatef(float(winpg.map.objs[count].ry), 0, 1, 0)
str1 = "glCallList(winpg.map.objs["+str(count)+"].gl_list);"
exec ( str1 )
glPopMatrix()
count +=1
def update_rotate_vals(oo):
cur = db.cursor(MySQLdb.cursors.DictCursor)
    # Driver-side parameter binding avoids manual quoting and SQL injection.
    cur.execute('UPDATE worldmap_objects SET rx = %s WHERE id = %s;',
                (oo.rx, oo.id))
    cur.execute('UPDATE worldmap_objects SET ry = %s WHERE id = %s;',
                (oo.ry, oo.id))
db.commit()
def update_move_vals(oo):
cur = db.cursor(MySQLdb.cursors.DictCursor)
    cur.execute('UPDATE worldmap_objects SET tx = %s WHERE id = %s;', (oo.tx, oo.id))
    cur.execute('UPDATE worldmap_objects SET ty = %s WHERE id = %s;', (oo.ty, oo.id))
db.commit()
| 32 | 114 | 0.535714 | 318 | 2,464 | 4.097484 | 0.286164 | 0.061397 | 0.046048 | 0.055257 | 0.462011 | 0.394474 | 0.376055 | 0.251727 | 0.1934 | 0.115119 | 0 | 0.029206 | 0.305195 | 2,464 | 76 | 115 | 32.421053 | 0.731893 | 0.051948 | 0 | 0.2 | 0 | 0 | 0.16977 | 0.011525 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6280b1038ab5ed4324dbf65934a3e9ab71dae871 | 4,071 | py | Python | Base/BaseSimilarityMatrixRecommender.py | SamanFekri/BookRecommendation | 07dfa875154af39546cb263d4407339ce26d47e8 | [
"MIT"
] | null | null | null | Base/BaseSimilarityMatrixRecommender.py | SamanFekri/BookRecommendation | 07dfa875154af39546cb263d4407339ce26d47e8 | [
"MIT"
] | null | null | null | Base/BaseSimilarityMatrixRecommender.py | SamanFekri/BookRecommendation | 07dfa875154af39546cb263d4407339ce26d47e8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 16/09/2017
@author: Maurizio Ferrari Dacrema
"""
from Base.BaseRecommender import BaseRecommender
from Base.DataIO import DataIO
import numpy as np
class BaseSimilarityMatrixRecommender(BaseRecommender):
"""
    This class refers to a BaseRecommender KNN which uses a similarity matrix. It provides two functions to compute item scores,
    both for user-based and item-based models, as well as a function to save the W_sparse matrix.
"""
def __init__(self, URM_train, verbose=True):
super(BaseSimilarityMatrixRecommender, self).__init__(URM_train, verbose = verbose)
self._URM_train_format_checked = False
self._W_sparse_format_checked = False
def _check_format(self):
if not self._URM_train_format_checked:
if self.URM_train.getformat() != "csr":
self._print("PERFORMANCE ALERT compute_item_score: {} is not {}, this will significantly slow down the computation.".format("URM_train", "csr"))
self._URM_train_format_checked = True
if not self._W_sparse_format_checked:
if self.W_sparse.getformat() != "csr":
self._print("PERFORMANCE ALERT compute_item_score: {} is not {}, this will significantly slow down the computation.".format("W_sparse", "csr"))
self._W_sparse_format_checked = True
def save_model(self, folder_path, file_name = None):
if file_name is None:
file_name = self.RECOMMENDER_NAME
self._print("Saving model in file '{}'".format(folder_path + file_name))
data_dict_to_save = {"W_sparse": self.W_sparse}
dataIO = DataIO(folder_path=folder_path)
dataIO.save_data(file_name=file_name, data_dict_to_save = data_dict_to_save)
self._print("Saving complete")
#########################################################################################################
########## ##########
########## COMPUTE ITEM SCORES ##########
########## ##########
#########################################################################################################
class BaseItemSimilarityMatrixRecommender(BaseSimilarityMatrixRecommender):
def _compute_item_score(self, user_id_array, items_to_compute=None):
"""
URM_train and W_sparse must have the same format, CSR
:param user_id_array:
:param items_to_compute:
:return:
"""
self._check_format()
user_profile_array = self.URM_train[user_id_array]
if items_to_compute is not None:
item_scores = - np.ones((len(user_id_array), self.URM_train.shape[1]), dtype=np.float32)*np.inf
item_scores_all = user_profile_array.dot(self.W_sparse).toarray()
item_scores[:, items_to_compute] = item_scores_all[:, items_to_compute]
else:
item_scores = user_profile_array.dot(self.W_sparse).toarray()
return item_scores
class BaseUserSimilarityMatrixRecommender(BaseSimilarityMatrixRecommender):
def _compute_item_score(self, user_id_array, items_to_compute=None):
"""
URM_train and W_sparse must have the same format, CSR
:param user_id_array:
:param items_to_compute:
:return:
"""
self._check_format()
user_weights_array = self.W_sparse[user_id_array]
if items_to_compute is not None:
item_scores = - np.ones((len(user_id_array), self.URM_train.shape[1]), dtype=np.float32)*np.inf
item_scores_all = user_weights_array.dot(self.URM_train).toarray()
item_scores[:, items_to_compute] = item_scores_all[:, items_to_compute]
else:
item_scores = user_weights_array.dot(self.URM_train).toarray()
return item_scores
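
# Minimal usage sketch: a concrete recommender would normally learn W_sparse
# during fit(); here it is set by hand on toy data purely for illustration
# (assuming the base class can be instantiated directly).
if __name__ == "__main__":
    import scipy.sparse as sps

    URM_toy = sps.csr_matrix(np.array([[1, 0, 1],
                                       [0, 1, 0]], dtype=np.float32))
    recommender = BaseItemSimilarityMatrixRecommender(URM_toy)
    recommender.W_sparse = sps.identity(3, format="csr", dtype=np.float32)
    # With an identity similarity, the scores reproduce the URM rows.
    print(recommender._compute_item_score(user_id_array=[0, 1]))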
| 34.794872 | 160 | 0.594203 | 464 | 4,071 | 4.87069 | 0.247845 | 0.049558 | 0.053097 | 0.023894 | 0.569469 | 0.504425 | 0.484956 | 0.484956 | 0.431858 | 0.431858 | 0 | 0.005317 | 0.26087 | 4,071 | 116 | 161 | 35.094828 | 0.745763 | 0.162859 | 0 | 0.297872 | 0 | 0 | 0.09703 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.06383 | 0 | 0.276596 | 0.085106 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6280fcdac215b71cb9564a51a019770013764f5d | 415 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/nat/execute.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | 1 | 2022-01-16T10:00:24.000Z | 2022-01-16T10:00:24.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/nat/execute.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | null | null | null | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/nat/execute.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | null | null | null | """Execute CLI functions for nat"""
def execute_clear_nat_translation(device):
""" Clear All NAT Flows
Args:
device (`obj`): Device object
Returns:
None
Raises:
SubCommandFailure
"""
cmd = "clear ip nat translation *"
try:
out = device.execute(cmd)
except Exception as err:
raise Exception(err)
return out | 19.761905 | 42 | 0.554217 | 43 | 415 | 5.27907 | 0.651163 | 0.123348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.361446 | 415 | 21 | 43 | 19.761905 | 0.856604 | 0.337349 | 0 | 0 | 0 | 0 | 0.123223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62829bf26a943cc2c146d3867457df83f69214ef | 1,955 | py | Python | postgresqleu/util/crypto.py | pgeu/pgeu-system | 3286fcfb6bb424b6a9522f219f33f5a589613f6b | [
"MIT"
] | 11 | 2020-08-20T11:16:02.000Z | 2022-03-12T23:25:04.000Z | postgresqleu/util/crypto.py | pgeu/pgeu-system | 3286fcfb6bb424b6a9522f219f33f5a589613f6b | [
"MIT"
] | 71 | 2019-11-18T10:11:22.000Z | 2022-03-27T16:12:57.000Z | postgresqleu/util/crypto.py | pgeu/pgeu-system | 3286fcfb6bb424b6a9522f219f33f5a589613f6b | [
"MIT"
] | 18 | 2019-11-18T09:56:31.000Z | 2022-01-08T03:16:43.000Z | from django.core.exceptions import ValidationError
from Cryptodome.PublicKey import RSA
from Cryptodome.Hash import SHA256, SHA1
from Cryptodome.Signature import pkcs1_15
from Cryptodome.Util.number import long_to_bytes
import base64
def validate_pem_public_key(value):
try:
k = RSA.importKey(value)
if k.has_private():
raise ValidationError("This should be a public key, but contains a private key")
except ValidationError:
raise
except Exception as e:
raise ValidationError("Could not validate public key: {}".format(e))
def validate_pem_private_key(value):
try:
k = RSA.importKey(value)
if not k.has_private():
raise ValidationError("This should be a private key, but doesn't contain one")
except ValidationError:
raise
except Exception as e:
raise ValidationError("Could not validate private key: {}".format(e))
def generate_rsa_keypair(bits=2048):
key = RSA.generate(bits)
return (
key.export_key().decode('utf8'),
key.publickey().export_key().decode('utf8'),
)
def rsa_sign_string_sha256(privatekeystr, msg):
key = RSA.importKey(privatekeystr)
h = SHA256.new(msg.encode('ascii'))
sig = pkcs1_15.new(key).sign(h)
return sig
def rsa_verify_string_sha1(publickeystr, msg, sig):
key = RSA.importKey(publickeystr)
h = SHA1.new(msg)
try:
pkcs1_15.new(key).verify(h, sig)
return True
except ValueError:
# Raises ValueError if the signature is wrong
return False
def rsa_get_jwk_struct(publickeystr, kid):
key = RSA.importKey(publickeystr)
return {
'use': 'sig',
'alg': 'RS256',
'kty': 'RSA',
'kid': kid,
'n': base64.urlsafe_b64encode(long_to_bytes(key.publickey().n)).strip(b'=').decode(),
'e': base64.urlsafe_b64encode(long_to_bytes(key.publickey().e)).strip(b'=').decode(),
}
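
# Example round trip (a sketch; 'my-key-1' is an arbitrary key id). Note that
# rsa_verify_string_sha1 checks SHA-1 signatures over raw bytes, so it is a
# deliberately separate code path from the SHA-256 signer and will not verify
# the signature produced below.
if __name__ == "__main__":
    priv, pub = generate_rsa_keypair(bits=2048)
    sig = rsa_sign_string_sha256(priv, "hello")
    print("signature bytes:", len(sig))
    print(rsa_get_jwk_struct(pub, kid='my-key-1'))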
| 28.333333 | 93 | 0.661381 | 253 | 1,955 | 4.98419 | 0.355731 | 0.047581 | 0.02617 | 0.019033 | 0.317209 | 0.317209 | 0.317209 | 0.317209 | 0.196669 | 0.126883 | 0 | 0.02642 | 0.225575 | 1,955 | 68 | 94 | 28.75 | 0.806473 | 0.021995 | 0 | 0.245283 | 0 | 0 | 0.112565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.207547 | 0 | 0.415094 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6284431710a1c4f100e82140b23ae3002a6679c2 | 1,506 | py | Python | test/test_LightModel/test_light3d.py | franyancr/lenstronomy | 3a7b33512a474bf1796d23276d9028b580580cf1 | [
"MIT"
] | 1 | 2018-11-08T12:33:26.000Z | 2018-11-08T12:33:26.000Z | test/test_LightModel/test_light3d.py | franyancr/lenstronomy | 3a7b33512a474bf1796d23276d9028b580580cf1 | [
"MIT"
] | null | null | null | test/test_LightModel/test_light3d.py | franyancr/lenstronomy | 3a7b33512a474bf1796d23276d9028b580580cf1 | [
"MIT"
] | null | null | null | __author__ = 'sibirrer'
import pytest
import numpy.testing as npt
import numpy as np
import scipy.integrate as integrate
class TestNumerics(object):
"""
    tests that the 2d light profiles match the line-of-sight integral of the
    corresponding 3d light profiles
"""
def setup(self):
pass
def assert_integrals(self, Model, kwargs):
lightModel = Model()
r = 2.
out = integrate.quad(lambda x: 2 * lightModel.light_3d(np.sqrt(x ** 2 + r ** 2), **kwargs), 0, 100)
light_2d_num = out[0]
light_2d = lightModel.function(r, 0, **kwargs)
npt.assert_almost_equal(light_2d_num/light_2d, 1., decimal=1)
def test_PJaffe(self):
kwargs = {'amp': 1., 'Ra': 0.2, 'Rs': 2.}
from lenstronomy.LightModel.Profiles.p_jaffe import PJaffe as Model
self.assert_integrals(Model, kwargs)
def test_hernquist(self):
kwargs = {'amp': 1., 'Rs': 5.}
from lenstronomy.LightModel.Profiles.hernquist import Hernquist as Model
self.assert_integrals(Model, kwargs)
def test_gaussian(self):
from lenstronomy.LightModel.Profiles.gaussian import Gaussian as Model
kwargs = {'amp': 1. / 4., 'sigma_x': 2., 'sigma_y': 2.}
self.assert_integrals(Model, kwargs)
def test_power_law(self):
from lenstronomy.LightModel.Profiles.power_law import PowerLaw as Model
kwargs = {'amp': 2, 'gamma': 2, 'e1': 0, 'e2': 0}
self.assert_integrals(Model, kwargs)
if __name__ == '__main__':
    pytest.main(["-k", "TestNumerics"])  # match this module's test class
| 32.042553 | 107 | 0.643426 | 199 | 1,506 | 4.693467 | 0.38191 | 0.082441 | 0.107066 | 0.141328 | 0.245182 | 0.133833 | 0.133833 | 0.094218 | 0.094218 | 0 | 0 | 0.028596 | 0.233732 | 1,506 | 46 | 108 | 32.73913 | 0.780763 | 0.033865 | 0 | 0.121212 | 0 | 0 | 0.05073 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.181818 | false | 0.030303 | 0.242424 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
628512a91a458bf570a030bb18b03d6420c6f717 | 3,131 | py | Python | main.py | carrollpaul/snake | 41e9bffd0df079b56eefdb08aa969c65e9c331cc | [
"MIT"
] | null | null | null | main.py | carrollpaul/snake | 41e9bffd0df079b56eefdb08aa969c65e9c331cc | [
"MIT"
] | null | null | null | main.py | carrollpaul/snake | 41e9bffd0df079b56eefdb08aa969c65e9c331cc | [
"MIT"
] | null | null | null | import pygame, sys, random
from rich.traceback import install
install()
pygame.init()
class Snake():
def __init__(self):
self.body = []
        self.body.append(Segment(WIDTH // 2, HEIGHT // 2))  # Make first segment and add to body
self.x_change = 0
self.y_change = 0
self.len = 0
def update(self):
        old_head = self.body[-1].copy()  # Copy the current head (last element of the body list)
        new_head = self.body.pop(0)  # Recycle the tail (first element) as the new head
new_head.x = (old_head.x + self.x_change)
new_head.y = (old_head.y + self.y_change)
self.body.append(new_head)
def show(self):
# Make and draw every segment in the snake
for segment in self.body:
rect = [segment.x, segment.y, 10, 10]
pygame.draw.rect(screen, WHITE, rect)
def set_dir(self, x, y):
self.x_change = x
self.y_change = y
def grow(self):
self.len += 1
head = self.body[-1].copy()
self.body.append(head)
def eat(self, x, y):
        # body[-1] is the head (update() appends the new head at the end)
        if self.body[-1].x == x and self.body[-1].y == y:
print('food eaten')
self.grow()
return True
return False
class Segment():
def __init__(self, x, y):
self.x = x
self.y = y
def copy(self):
return Segment(self.x, self.y)
def get_food_location():
    # Pick a grid-aligned point strictly inside the play field; randrange
    # avoids the float arguments that randint would reject.
    x = random.randrange(0, WIDTH, 10)
    y = random.randrange(0, HEIGHT, 10)
return x, y
def put_food(x, y):
food = [x, y, 10, 10]
pygame.draw.rect(screen, BLUE, food)
def game_over():
x = snake.body[-1].x # X position of head
y = snake.body[-1].y # Y position of head
    # The head dies when it leaves the 0..WIDTH-10 / 0..HEIGHT-10 grid.
    if (x < 0 or x >= WIDTH or y < 0 or y >= HEIGHT):
return True
if snake.len > 1:
for segment in snake.body[:-1]:
# If the head coordinates equal a segemnt in the body's coordinates, they must be touching
if segment.x == x and segment.y == y:
return True
return False
# SETUP
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
WIDTH = 400
HEIGHT = 400
# Make screen
clock = pygame.time.Clock()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
snake = Snake()
food_x, food_y = get_food_location() # Starting food location
# GAME LOOP
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
snake.set_dir(-10, 0)
elif event.key == pygame.K_RIGHT:
snake.set_dir(10, 0)
elif event.key == pygame.K_DOWN:
snake.set_dir(0, 10)
elif event.key == pygame.K_UP:
snake.set_dir(0, -10)
if snake.eat(food_x, food_y):
food_x, food_y = get_food_location()
screen.fill(BLACK)
put_food(food_x, food_y)
snake.update()
snake.show()
pygame.display.flip()
if game_over():
print("GAME OVER")
break
pygame.time.wait(100)
sys.exit() | 26.533898 | 102 | 0.566273 | 470 | 3,131 | 3.668085 | 0.229787 | 0.046404 | 0.020882 | 0.023202 | 0.180394 | 0.096288 | 0.096288 | 0.038283 | 0.038283 | 0.038283 | 0 | 0.033906 | 0.31236 | 3,131 | 118 | 103 | 26.533898 | 0.766837 | 0.115937 | 0 | 0.098901 | 0 | 0 | 0.006892 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.120879 | false | 0 | 0.021978 | 0.010989 | 0.241758 | 0.021978 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6286a8c9b8bbaead08db875b0c8c178179332efb | 12,436 | py | Python | ao/parsers/extrinsic_metaparameters.py | darcyabjones/augustus_optimiser | 488cad3cbc81a2cc303f9bbd698e672a8dec1783 | [
"Apache-2.0"
] | null | null | null | ao/parsers/extrinsic_metaparameters.py | darcyabjones/augustus_optimiser | 488cad3cbc81a2cc303f9bbd698e672a8dec1783 | [
"Apache-2.0"
] | null | null | null | ao/parsers/extrinsic_metaparameters.py | darcyabjones/augustus_optimiser | 488cad3cbc81a2cc303f9bbd698e672a8dec1783 | [
"Apache-2.0"
] | null | null | null | from typing import Any
from typing import Dict, List, Tuple
from typing import Union
from ao.distributions import Distribution, DistributionIF
from ao.distributions import FloatConst
from ao.distributions import AOList
from ao.hints import HintConfigFactory
from ao.hints import SourceParameterFactory
from ao.hints import HintRowFactory, HintCellFactory
from ao.hints import HintKind
from ao.errors import DistParseError, ConfigParseError
from ao.parsers.distribution_functions import (
parse_float_dist, parse_float_list, parse_str_dist
)
def parser(config: Dict[Any, Any]) -> HintConfigFactory:
""" Parses a dictionary representation of the config file. """
sources = parse_sources(config.get("sources", None))
source_parameters = parse_source_parameters(
config.get("source_parameters", None)
)
weights: Dict[HintKind, Dict[str, DistributionIF]] = dict()
for kind, column, dist in parse_weights(config.get("weights", None)):
if kind in weights:
weights[kind][column] = dist
else:
weights[kind] = {column: dist}
source_weights = parse_source_weights(config.get("source_weights", None))
hint_rows = list()
for kind in HintKind:
kind_weights = weights.get(kind, {})
kind_source_weights = [v for k, v in source_weights if k == kind]
this_row = HintRowFactory(
kind,
bonus=kind_weights.get("bonus", FloatConst(1)),
malus=kind_weights.get("malus", FloatConst(1)),
local_malus=kind_weights.get("local_malus", FloatConst(1)),
cells=kind_source_weights
)
hint_rows.append(this_row)
return HintConfigFactory(sources, source_parameters, hint_rows)
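
# For orientation, a minimal config dictionary accepted by parser() might look
# like this (the source names and numbers are placeholders; the shapes mirror
# what the parse_* helpers below check for):
EXAMPLE_CONFIG = {
    "sources": ["M", "E"],
    "source_parameters": [{"source": "M", "parameter": "1group1gene"}],
    "weights": [{"kind": "exon", "malus": 0.9, "bonus": ["uniform", 1, 2]}],
    "source_weights": [
        {"kind": "exon", "source": "E", "weight": ["uniform", 0.5, 2]}
    ],
}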
def get_from_object(key: str, obj: Dict, table: str) -> Any:
""" Helper to get a field from an object or raise an error. """
val = obj.get(key, None)
if val is None:
raise ConfigParseError(
f"The required field '{key}' was missing from object {obj} in the "
"'{table}'."
)
return val
def get_kind_from_object(obj: Dict, table: Any) -> HintKind:
kind_str = get_from_object("kind", obj, table)
if not isinstance(kind_str, str):
raise ConfigParseError(
f"Error while parsing '{table}' object {obj}. "
"The field 'kind' must be a string."
)
try:
kind = HintKind.parse(kind_str)
except ValueError:
valid_kinds = str(list(map(str, HintKind)))
raise ConfigParseError(
f"Error while parsing '{table}' object {obj}. "
"The field 'kind' has an invalid value. "
f"Must be one of {valid_kinds}."
)
return kind
def try_catch_parse_dist(
dist_obj: Any,
kind: HintKind,
field: str,
table: str
) -> DistributionIF:
try:
this_dist = parse_float_dist(dist_obj)
except DistParseError as e:
raise ConfigParseError(
f"Error while parsing distribution for '{kind}' '{field}' "
f"in '{table}'. Input was {dist_obj}. "
f"{e.msg}"
)
return this_dist
def try_catch_parse_list(
dist_obj: Any,
kind: HintKind,
field: str,
table: str
) -> AOList[Union[int, float]]:
try:
this_dist = parse_float_list(dist_obj)
except DistParseError as e:
raise ConfigParseError(
f"Error while parsing distribution for '{kind}' '{field}' "
f"in '{table}'. Input was {dist_obj}. "
f"{e.msg}"
)
return this_dist
def parse_source_weights(
weights: Any
) -> List[Tuple[HintKind, HintCellFactory]]:
""" Parse the weight table from the config file. """
if weights is None or (isinstance(weights, list) and len(weights) == 0):
# We don't have any weights to optimize.
return []
elif not isinstance(weights, list):
raise ConfigParseError(
"The 'source_weights' section of the config file must be a "
f"list of objects. You provided '{weights}'."
)
offensive_elements = [w for w in weights if not isinstance(w, dict)]
if len(offensive_elements) > 0:
raise ConfigParseError(
"The 'source_weights' section of the config file must be a "
"list of objects. The following elements are not objects: "
f"{offensive_elements}."
)
out = list()
for weight in weights:
kind = get_kind_from_object(weight, "weights")
source = get_from_object("source", weight, "weights")
if "weight" in weight:
if ("weights" in weight) or ("boundaries" in weight):
raise ConfigParseError(
f"Error while parsing 'source_weights' for {kind} "
f"{source}. "
"The 'weight' and the 'weights'/'boundaries' fields are "
"mutually exclusive. Please remove the fields that you "
"don't intend to use."
)
this_dist: Distribution = try_catch_parse_dist(
weight["weight"],
kind,
"weight",
"source_weights"
)
this_list_dist: AOList[Union[int, float]] = AOList([this_dist])
out.append((kind, HintCellFactory(source, boni=this_list_dist)))
elif ("weights" in weight) and ("boundaries" in weight):
weights_list = weight["weights"]
weights_dists = try_catch_parse_list(
weights_list,
kind,
"weights",
"source_weights"
)
boundaries_list = weight["boundaries"]
boundaries_dists = try_catch_parse_list(
boundaries_list,
kind,
"boundaries",
"source_weights"
)
out.append((
kind,
HintCellFactory(source, boundaries_dists, weights_dists)
))
elif ("weights" in weight) or ("boundaries" in weight):
raise ConfigParseError(
f"Error while parsing '{kind}' '{source}' "
"in 'source_weights'. "
"When using the 'weights'/'boundaries' fields, both must be "
"provided. Please add the missing field or use the 'weight' "
"field if you are providing a single distribution."
)
else:
raise ConfigParseError(
"Error while parsing 'source_weights'. "
"Either the 'weight' or both the 'weights' and 'boundaries' "
"fields must be provided."
)
return out
def parse_weights(weights: Any) -> List[Tuple[HintKind, str, Distribution]]:
""" Parse the weight table from the config file.
Examples:
>>> parse_weights([])
[]
>>> parse_weights(None)
[]
>>> parse_weights([
... {"kind": "exon", "malus": 0.9, "bonus": ["uniform", 1, 2]},
... {"kind": "exonpart", "malus": 0.99, "local_malus": ["beta", 1, 1]},
... ])
[(HintKind.EXON, 'bonus', Uniform(FloatConst(1.0), FloatConst(2.0))),
(HintKind.EXON, 'malus', FloatConst(0.9)),
(HintKind.EXONPART, 'malus', FloatConst(0.99)),
(HintKind.EXONPART, 'local_malus', Beta(FloatConst(1.0), FloatConst(1.0)))]
"""
if weights is None or (isinstance(weights, list) and len(weights) == 0):
# We don't have any weights to optimize.
return []
elif not isinstance(weights, list):
raise ConfigParseError(
"The 'weights' section of the config file must be a list of "
f"objects. You provided '{weights}'."
)
offensive_elements = [w for w in weights if not isinstance(w, dict)]
if len(offensive_elements) > 0:
raise ConfigParseError(
"The 'weights' section of the config file must be a list of "
"objects. The following elements are not objects: "
f"{offensive_elements}."
)
out = list()
for weight in weights:
kind = get_kind_from_object(weight, "weights")
for column in ("bonus", "malus", "local_malus"):
if column not in weight:
continue
this_dist = try_catch_parse_dist(
weight[column],
kind,
column,
"weights"
)
out.append((kind, column, this_dist))
return out
def parse_sources(sources: Any) -> List[str]:
""" An interface that checks that the right types were given.
Examples:
>>> parse_sources(["one", "two", "three"])
['one', 'two', 'three']
>>> parse_sources([])
Traceback (most recent call last):
...
ConfigParseError: ...
>>> parse_sources("M")
Traceback (most recent call last):
...
ConfigParseError: ...
>>> parse_sources(["one", 2, "three"])
Traceback (most recent call last):
...
ConfigParseError: ...
"""
if sources is None or (isinstance(sources, list) and len(sources) == 0):
raise ConfigParseError(
"The 'sources' section of the config file was missing or empty. "
"We can't optimise hints when there are no hints configured."
)
elif not isinstance(sources, list):
raise ConfigParseError(
"The 'sources' section of the config file must be a list of "
f"strings. You provided {repr(sources)}."
)
offensive_elements = [v for v in sources if not isinstance(v, str)]
if len(offensive_elements) > 0:
raise ConfigParseError(
"The 'sources' section of the config file must be a list "
"of strings. The following elements are not strings: "
f"{offensive_elements}. Consider quoting them if they are "
"the sources you want."
)
# We know that they are all strings because offensive_elements is empty.
return sources
def parse_source_parameters(sps: Any) -> List[SourceParameterFactory]:
""" Checks that the right types were given and parses them to py objects.
This section is optional so None or empty lists are ok.
Expected input is of the form:
[{"source": str, "parameter": param}]"
Examples:
>>> parse_source_parameters([{"source": "M", "parameter": "1group1gene"}])
[SourceParameterFactory('M', StrConst('1group1gene'))]
>>> parse_source_parameters([])
[]
>>> parse_source_parameters(None)
[]
>>> parse_source_parameters({"source": "M", "parameter": "1group1gene"})
Traceback (most recent call last):
...
ConfigParseError: ...
>>> parse_source_parameters([
... {"source": "M",
... "parameter": ["choose", ["1group1gene", "individual_liability"]]}
... ])
[SourceParameterFactory('M', ChooseS([StrConst('1group1gene'),
StrConst('individual_liability')]))]
>>> parse_source_parameters([
... {"source": "M", "parameter": "1group1gene"},
... {"source": "E", "parameter": "individual_liability"}
... ])
[SourceParameterFactory('M', StrConst('1group1gene')),
SourceParameterFactory('E', StrConst('individual_liability'))]
"""
if sps is None:
return []
elif not isinstance(sps, list):
raise ConfigParseError(
"The 'source_parameters' section of the config file must be a "
"list of objects. "
"You provided {sps}."
)
offensive_elements = [sp for sp in sps if not isinstance(sp, dict)]
if len(offensive_elements) > 0:
raise ConfigParseError(
"The 'source_parameters' section of the config file must be a "
"list of objects. "
f"The following elements are not objects: {offensive_elements}."
)
out = list()
for sp in sps:
source = get_from_object("source", sp, "source_parameters")
param = get_from_object("parameter", sp, "source_parameters")
try:
param_dist = parse_str_dist(param)
except DistParseError as e:
raise ConfigParseError(
f"Error while parsing 'source_parameter' for {source}. "
f"{e.msg}"
)
out.append(SourceParameterFactory(source, param_dist))
return out
| 32.301299 | 79 | 0.588935 | 1,393 | 12,436 | 5.137114 | 0.142139 | 0.052823 | 0.0218 | 0.020961 | 0.431666 | 0.370878 | 0.346283 | 0.308832 | 0.280324 | 0.269704 | 0 | 0.0054 | 0.300096 | 12,436 | 384 | 80 | 32.385417 | 0.816751 | 0.187681 | 0 | 0.360656 | 0 | 0 | 0.251729 | 0.013019 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036885 | false | 0 | 0.04918 | 0 | 0.135246 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
628b4116ebd3ab43ca35cd11b53d299fb862b483 | 1,836 | py | Python | tests/test_game_elements.py | osmith93/str8ts | 41e7942bdd1c3c58c81f3a9b5109a8486eb48dae | [
"MIT"
] | null | null | null | tests/test_game_elements.py | osmith93/str8ts | 41e7942bdd1c3c58c81f3a9b5109a8486eb48dae | [
"MIT"
] | null | null | null | tests/test_game_elements.py | osmith93/str8ts | 41e7942bdd1c3c58c81f3a9b5109a8486eb48dae | [
"MIT"
] | null | null | null | import pytest
from board import Cell, Board
@pytest.fixture
def box():
new_box = Cell((5, 5), False, 9, Cell.EMPTY)
return new_box
class TestBox:
@staticmethod
def test_init():
box = Cell((5, 5), False, 9, Cell.EMPTY)
assert box.guesses == [1, 2, 3, 4, 5, 6, 7, 8, 9]
assert box.is_blocked == False
assert box.pos == (5, 5)
assert box.value == Cell.EMPTY
@staticmethod
def test_remove_guess(box):
for i in range(4):
box.remove_guess(i)
assert box.guesses == [4, 5, 6, 7, 8, 9]
@staticmethod
def test_remove_guess_set(box):
removal_set = {2, 5, 7, 9}
box.remove_guess_set(removal_set)
assert box.guesses == [1, 3, 4, 6, 8]
@staticmethod
def test_unique_guess_left(box):
assert box.unique_guess_left() == False
for i in range(9):
box.remove_guess(i)
        assert box.unique_guess_left() == True  # only the guess 9 remains
@staticmethod
def test_try_filling_unique_guess(box):
assert box.try_filling_unique_guess() == False
assert box.value == Cell.EMPTY
assert box.remove_guess_set({2, 3, 4, 5, 6, 7, 8, 9}) == True
assert box.try_filling_unique_guess() == True
assert box.value == 1
@pytest.fixture()
def empty_board():
new_board = Board(size=9)
    return new_board
class TestBoard:
@staticmethod
def test_init():
board = Board(9)
assert board.size == 9
assert len(board.all_pos) == 9 * 9
assert len(board.grid) == 9 * 9
@staticmethod
def test_load(empty_board):
board = Board(9)
with pytest.raises(FileNotFoundError):
board.load("/THIS/FILE/DOES/NOT/EXIST")
board.load("../data/board01.txt")
assert board.get_cell((0, 0)).is_blocked == True
| 26.608696 | 69 | 0.596405 | 261 | 1,836 | 4.030651 | 0.245211 | 0.102662 | 0.126426 | 0.011407 | 0.224335 | 0.123574 | 0.060837 | 0.060837 | 0 | 0 | 0 | 0.041572 | 0.279412 | 1,836 | 68 | 70 | 27 | 0.75359 | 0 | 0 | 0.272727 | 0 | 0 | 0.023965 | 0.013617 | 0 | 0 | 0 | 0 | 0.290909 | 1 | 0.163636 | false | 0 | 0.036364 | 0 | 0.290909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
628bb8a6ad64923ad5b31b785ce35b705ce6de6d | 8,261 | py | Python | multicanonical reweighting/multicanonical_getEta_tau.py | BenjaminWeiner/condensates-sequence | 6e28d7daa7f42f6b20bc3af08c2090167f9b5ef5 | [
"MIT"
] | null | null | null | multicanonical reweighting/multicanonical_getEta_tau.py | BenjaminWeiner/condensates-sequence | 6e28d7daa7f42f6b20bc3af08c2090167f9b5ef5 | [
"MIT"
] | null | null | null | multicanonical reweighting/multicanonical_getEta_tau.py | BenjaminWeiner/condensates-sequence | 6e28d7daa7f42f6b20bc3af08c2090167f9b5ef5 | [
"MIT"
] | null | null | null |
# given a histogram of p(E,N) at mu0,beta0, reweight it and return an estimate of p(N) at mu1,beta1
#however, we want (T-Tc)/Tc to be the same for all sequences, so we provide betaC=1/Tc
import sys
import numpy as np
import pandas as pd
import scipy.optimize
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# convention:
# beta0=temp of the simulation from which the current weighting was extracted (not meaningful for uniform)
# beta1=temp of the actual simulation being analyzed
# betaNew=temp of the simulation we want to do next, after reweighting
# betaNew=beta1+deltaBeta
#begin loading data
seq=sys.argv[1]
betaC=float(sys.argv[2])
unweighted=int(sys.argv[3])
beta0=float(sys.argv[4])
beta1=float(sys.argv[5])
#tau=(T-Tc)/Tc
tau=float(sys.argv[6])
tc=1/betaC
tNew=tc*(tau+1)
betaNew=1/tNew
linearD=30 #lattice size
j=0.05
length=24
if unweighted==1: #we're starting from an unweighted simulation
path='/tigress/bweiner/idp/grandCanonical/multicanonical_6.29.20/block_uniform/'
filename='writeRecord_multicanonical_etaUniform_'+seq+'_e'+str(beta1)+'_j'+str(j)+'_rep0.dat'
weightingFilename='preweighting_'+seq+'_uniform_beta1_'+str(beta1)+'.txt'
else: #we're loading a simulation where we've weighted H according to higher T histogram
path='/tigress/bweiner/idp/grandCanonical/multicanonical_6.29.20/block_tauStep1/'
filename='writeRecord_multicanonical_'+seq+'_beta0_'+str(beta0)+'_beta1_'+str(beta1)+'_j'+str(j)+'.dat'
weightingFilename='preweighting_'+seq+'_beta0_'+str(beta0)+'_beta1_'+str(beta1)+'.txt'
#load data
thisData=pd.read_csv(path+filename,sep=' ',header=None,names=['steps','beta','N','E'])
#find step where we reach final temp
cooledStep=0
cooledTuple=np.nonzero(thisData.iloc[:,1]==beta1)
if len(cooledTuple[0])>0:
    cooledStep=cooledTuple[0][0]
#thermalize
thermalizedData=thisData.iloc[cooledStep:]
# drop the first 20% as burn-in; copy() avoids pandas' SettingWithCopyWarning
thermalizedData=thermalizedData.iloc[int(len(thermalizedData)/5):].copy()
thermalizedData['w']=1 # uniform weights until reweighting assigns real ones
#load multicanonical weighting data. It will be just -1: -1 for uniform weighting
multicanonicalWeightingArray=np.loadtxt(path+weightingFilename,skiprows=9,delimiter=' ')
multicanonicalWeighting={}
for i in range(multicanonicalWeightingArray.shape[0]):
multicanonicalWeighting[int(multicanonicalWeightingArray[i,0])]=multicanonicalWeightingArray[i,1]
#load mu1 from preweighting file
weightingFileStream=open(path+weightingFilename,"r")
weightingLines=weightingFileStream.readlines()
mu1=float(weightingLines[7])
def getReweightedData(data,beta0,mu0,beta1,mu1,nWeighting):
dataCopy=data.copy()
weightArray=np.array([nWeighting[i] for i in data['N']])
weights=-(beta1-beta0)*data['E']+(beta1*mu1-beta0*mu0)*data['N']+np.log(weightArray)
dataCopy['w']=np.exp(weights)
return dataCopy
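# Reweighting identity implemented above (histogram reweighting): a sample
# recorded at (beta0, mu0) is carried to (beta1, mu1) with weight
#   w = exp(-(beta1 - beta0)*E + (beta1*mu1 - beta0*mu0)*N) * g(N)
# where g(N) is the stored multicanonical weighting. With beta0 == beta1 and
# mu0 == mu1 (as in the call just below) this reduces to w = g(N).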
#get data with multicanonical weighting removed
dataGCE=getReweightedData(thermalizedData,beta1,mu1,beta1,mu1,multicanonicalWeighting)
#given a pdf which isn't clearly separated, return the (pdf,mu1) at a new temp which has equal weights
#define equal weights using halfway between modes as threshold
# get dilute fraction by setting threshold at halfway between 2 peaks
def overlappingDiluteFraction(data):
prelimThreshold=500
binEdges=np.arange(np.min(data['N'])-0.5,np.max(data['N'])+0.5,1)
hist,_ = np.histogram(data['N'],weights=data['w'], density=True,bins=binEdges)
#find modes
countVector=binEdges[1:]-0.5
thresholdIndex=np.argwhere(abs(countVector-prelimThreshold)<10**-5)[0,0]
#in histogram bin index
mode1Index=np.argmax(hist[0:thresholdIndex])
mode2Index=np.argmax(hist[thresholdIndex:])+thresholdIndex
realThresholdIndex=int(round((mode1Index+mode2Index)/2))
dilute=hist[0:realThresholdIndex]
diluteFraction=np.sum(dilute)
return diluteFraction
#define a function mapping mu1 to dense fraction
def reweightedDiluteFraction(trialMu1,*paramTuple):
#unpack the tuple containing parameters
data,beta0,mu0,beta1=paramTuple
reweightedData=getReweightedData(data,beta0,mu0,beta1,trialMu1,multicanonicalWeighting)
reweightedFraction=overlappingDiluteFraction(reweightedData)
return reweightedFraction-0.5
def getEqualWeights_overlapping(data,beta0,mu0,beta1):
mu1=scipy.optimize.fsolve(reweightedDiluteFraction,mu0-0.005,args=(data,beta0,mu0,beta1))
solutionData=getReweightedData(data,beta0,mu0,beta1,mu1,multicanonicalWeighting)
return (mu1[0],solutionData)
#if you have data where the peaks overlap, just estimate means using the midway point between modes
def getMeans_overlapping(data):
binEdges=np.arange(np.min(data['N'])-0.5,np.max(data['N'])+0.5,1)
hist,_ = np.histogram(data['N'],weights=data['w'], density=True,bins=binEdges)
prelimThreshold=400
#find modes
countVector=binEdges[1:]-0.5
thresholdIndex=np.argwhere(abs(countVector-prelimThreshold)<10**-5)[0,0]
#in histogram bin index
mode1Index=np.argmax(hist[0:thresholdIndex])
mode2Index=np.argmax(hist[thresholdIndex:])+thresholdIndex
realThresholdIndex=int(round((mode1Index+mode2Index)/2))
diluteData=data[data['N']<=countVector[realThresholdIndex]]
denseData=data[data['N']>countVector[realThresholdIndex]]
diluteMean=np.average(diluteData['N'],weights=diluteData['w'])
denseMean=np.average(denseData['N'],weights=denseData['w'])
phi1=diluteMean*length/(linearD**3)
phi2=denseMean*length/(linearD**3)
return phi1,phi2
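# Volume-fraction conversion used just above: each polymer presumably occupies
# `length` (=24) lattice sites, so phi = <N> * length / linearD**3; e.g.
# <N> = 100 gives phi = 100*24/27000 ~ 0.089.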
#solve for equal weights at a new temperature, given overlapping distribution
solMu,solData=getEqualWeights_overlapping(dataGCE,beta1,mu1,betaNew)
solDiluteFraction=overlappingDiluteFraction(solData)
plt.hist(dataGCE['N'],weights=dataGCE['w'],bins=np.arange(10,1200,1),density=True,label='old')
plt.hist(solData['N'],weights=solData['w'],bins=np.arange(10,1200,1),density=True,label='new')
plt.legend()
plt.savefig('reweightingSolution_'+seq+'_beta1_'+str(beta1)+'_betaNew_'+str(betaNew)+'.png')
#convert dataframe to pdf
binEdges=np.arange(np.min(solData['N'])-0.5,np.max(solData['N'])+0.5,1)
H,_=np.histogram(solData['N'],weights=solData['w'],bins=binEdges,density=True)
countVector=binEdges[1:]-0.5
solPDF=np.array([countVector,H]).T
#smooth reweighting, truncate noisy tails
from scipy.signal import savgol_filter
smoothedData = savgol_filter(solPDF[:,1], 99, 3) # window size 99, polynomial order 3
pdfBoundaries=np.argwhere(smoothedData>0.001).flatten()
# plt.plot(solPDF[:,0],solPDF[:,1])
# plt.plot(solPDF[pdfBoundaries[0]:pdfBoundaries[-1],0],smoothedData[pdfBoundaries[0]:pdfBoundaries[-1]])
smoothPDF=np.array([solPDF[pdfBoundaries[0]:pdfBoundaries[-1],0],smoothedData[pdfBoundaries[0]:pdfBoundaries[-1]]]).T
# plt.figure()
# plt.plot(smoothPDF[:,0],smoothPDF[:,1])
#add uniform tails
minWeight=smoothPDF[0]
maxWeight=smoothPDF[-1]
maxNp=2*(linearD**3)/length
leftTail=np.array([np.arange(0,minWeight[0]),np.full((int(minWeight[0]),),minWeight[1])]).T
rightTail=np.array([np.arange(maxWeight[0]+1,maxNp+1),np.full((int(maxNp-maxWeight[0]),),maxWeight[1])]).T
finalWeightingPDF=np.concatenate((leftTail,smoothPDF,rightTail),axis=0)
# #write output file
distType='beta0_'+str(beta1)+'_beta1_'+str(betaNew)
filename='preweighting_'+seq+'_'+distType+'.txt'
with open(filename, 'w') as f:
f.write('beta0:\n')
f.write(str(beta1)+'\n')
f.write('mu0:\n')
f.write(str(mu1)+'\n')
f.write('beta1: \n')
f.write(str(betaNew)+'\n')
f.write('mu1:\n')
f.write(str(solMu)+'\n')
f.write('N, p(N):\n')
for i in range(len(finalWeightingPDF)):
f.write(str(int(finalWeightingPDF[i,0]))+" "+str(finalWeightingPDF[i,1])+'\n')
#uniform weighting
# maxNp=2*(linearD**3)/length
# uniformWeighting=np.array([np.arange(0,maxNp+1),np.full((int(maxNp)+1,),1)]).T
# distType='uniform'
# filename='preweighting_'+seq+'_'+distType+'.txt'
# with open(filename, 'w') as f:
# f.write('beta0:\n')
# f.write(str(0)+'\n')
# f.write('mu0:\n')
# f.write(str(0)+'\n')
# f.write('beta1: \n')
# f.write(str(0)+'\n')
# f.write('mu1:\n')
# f.write(str(solMu)+'\n')
# f.write('N, p(N):\n')
# for i in range(len(uniformWeighting)):
# f.write(str(int(uniformWeighting[i,0]))+" "+str(uniformWeighting[i,1])+'\n')
| 27.628763 | 117 | 0.735625 | 1,164 | 8,261 | 5.180412 | 0.25945 | 0.019901 | 0.018574 | 0.013267 | 0.30597 | 0.261028 | 0.233997 | 0.224378 | 0.20995 | 0.20995 | 0 | 0.034524 | 0.098898 | 8,261 | 298 | 118 | 27.721477 | 0.775524 | 0.277206 | 0 | 0.125 | 0 | 0 | 0.081205 | 0.035865 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.058333 | 0 | 0.141667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
628d7f2a8f984ca256bee17f5adcd350e0f25d8a | 2,517 | py | Python | src/pivx_hashlib.py | PIVX-Project/PET4L | 0019d21071722dad91bab39a16c766f8bb8e19ae | [
"MIT"
] | 9 | 2018-05-12T01:42:04.000Z | 2021-05-10T07:39:51.000Z | src/pivx_hashlib.py | PIVX-Project/PET4L | 0019d21071722dad91bab39a16c766f8bb8e19ae | [
"MIT"
] | 8 | 2020-01-13T18:55:05.000Z | 2021-11-15T11:10:16.000Z | src/pivx_hashlib.py | PIVX-Project/PET4L | 0019d21071722dad91bab39a16c766f8bb8e19ae | [
"MIT"
] | 7 | 2018-06-18T20:43:05.000Z | 2020-09-27T20:57:57.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2019 Random.Zebra (https://github.com/random-zebra/)
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
import bitcoin
import hashlib
from constants import WIF_PREFIX, MAGIC_BYTE, TESTNET_WIF_PREFIX, TESTNET_MAGIC_BYTE, \
STAKE_MAGIC_BYTE, TESTNET_STAKE_MAGIC_BYTE
from pivx_b58 import b58encode, b58decode
def double_sha256(data):
return hashlib.sha256(hashlib.sha256(data).digest()).digest()
def single_sha256(data):
return hashlib.sha256(data).digest()
def generate_privkey(isTestnet=False):
"""
Based on Andreas Antonopolous work from 'Mastering Bitcoin'.
"""
valid = False
privkey = 0
while not valid:
privkey = bitcoin.random_key()
decoded_private_key = bitcoin.decode_privkey(privkey, 'hex')
valid = 0 < decoded_private_key < bitcoin.N
return base58fromhex(privkey, isTestnet)
def base58fromhex(hexstr, isTestnet):
base58_secret = TESTNET_WIF_PREFIX if isTestnet else WIF_PREFIX
data = bytes([base58_secret]) + bytes.fromhex(hexstr)
checksum = bitcoin.bin_dbl_sha256(data)[0:4]
return b58encode(data + checksum)
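# Base58Check layout produced above (and consumed by wif_to_privkey below):
#   payload = version_byte || key_bytes
#   encoded = b58encode(payload || sha256(sha256(payload))[0:4])
# The trailing 4 bytes are the checksum that wif_to_privkey re-derives.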
def pubkey_to_address(pubkey, isTestnet=False, isCold=False):
pubkey_bin = bytes.fromhex(pubkey)
pkey_hash = bitcoin.bin_hash160(pubkey_bin)
return pubkeyhash_to_address(pkey_hash, isTestnet, isCold)
def pubkeyhash_to_address(pkey_hash, isTestnet=False, isCold=False):
if isCold:
base58_secret = TESTNET_STAKE_MAGIC_BYTE if isTestnet else STAKE_MAGIC_BYTE
else:
base58_secret = TESTNET_MAGIC_BYTE if isTestnet else MAGIC_BYTE
data = bytes([base58_secret]) + pkey_hash
checksum = bitcoin.bin_dbl_sha256(data)[0:4]
return b58encode(data + checksum)
def wif_to_privkey(string):
wif_compressed = 52 == len(string)
pvkeyencoded = b58decode(string).hex()
wifversion = pvkeyencoded[:2]
checksum = pvkeyencoded[-8:]
vs = bytes.fromhex(pvkeyencoded[:-8])
check = double_sha256(vs)[0:4]
if (wifversion == WIF_PREFIX.to_bytes(1, byteorder='big').hex() and checksum == check.hex()) \
or (wifversion == TESTNET_WIF_PREFIX.to_bytes(1, byteorder='big').hex() and checksum == check.hex()):
if wif_compressed:
privkey = pvkeyencoded[2:-10]
else:
privkey = pvkeyencoded[2:-8]
return privkey
else:
return None
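# A small usage sketch (not part of the original module; requires the same
# `bitcoin` and constants dependencies as above):
if __name__ == '__main__':
    wif = generate_privkey()          # Base58Check-encoded WIF string
    hex_key = wif_to_privkey(wif)     # back to raw hex, or None if corrupted
    print('WIF:', wif)
    print('hex:', hex_key)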
| 31.4625 | 113 | 0.70878 | 325 | 2,517 | 5.292308 | 0.335385 | 0.04186 | 0.032558 | 0.024419 | 0.236047 | 0.174419 | 0.132558 | 0.132558 | 0.132558 | 0.132558 | 0 | 0.040468 | 0.185141 | 2,517 | 79 | 114 | 31.860759 | 0.798147 | 0.124354 | 0 | 0.14 | 0 | 0 | 0.004121 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14 | false | 0 | 0.08 | 0.04 | 0.38 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
628dca7eafeadab9b0364001a2b436c3a726d50b | 2,997 | py | Python | Co-Simulation/Sumo/sumo-1.7.0/tools/build/history.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 4 | 2020-11-13T02:35:56.000Z | 2021-03-29T20:15:54.000Z | Co-Simulation/Sumo/sumo-1.7.0/tools/build/history.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 9 | 2020-12-09T02:12:39.000Z | 2021-02-18T00:15:28.000Z | Co-Simulation/Sumo/sumo-1.7.0/tools/build/history.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 1 | 2020-11-20T19:31:26.000Z | 2020-11-20T19:31:26.000Z | #!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2011-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file history.py
# @author Michael Behrisch
# @date 2014-06-21
"""
This script builds all sumo versions in a certain revision range
and tries to eliminate duplicates afterwards.
"""
from __future__ import absolute_import
import subprocess
import optparse
import shutil
import os
import sys
import traceback
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib # noqa
optParser = optparse.OptionParser()
optParser.add_option("-b", "--begin", default="v1_3_0", help="first revision to build")
optParser.add_option("-e", "--end", default="HEAD", help="last revision to build")
options, args = optParser.parse_args()
LOCK = "../history.lock"
if os.path.exists(LOCK):
sys.exit("History building is still locked!")
open(LOCK, 'w').close()
try:
subprocess.call(["git", "checkout", "-q", "master"])
subprocess.call(["git", "pull"])
commits = {}
for line in subprocess.check_output(["git", "log", "%s..%s" % (options.begin, options.end)]).splitlines():
if line.startswith("commit "):
h = line.split()[1]
commits[h] = sumolib.version.gitDescribe(h)
haveBuild = False
for h, desc in sorted(commits.items(), key=lambda x: x[1]):
if not os.path.exists('../bin%s' % desc):
ret = subprocess.call(["git", "checkout", "-q", h])
if ret != 0:
continue
os.chdir("build/cmake-build")
subprocess.call('make clean; make -j32', shell=True)
os.chdir("../..")
haveBuild = True
shutil.copytree('bin', '../bin%s' % desc,
ignore=shutil.ignore_patterns('Makefile*', '*.bat', '*.jar'))
subprocess.call('strip -R .note.gnu.build-id ../bin%s/*' % desc, shell=True)
subprocess.call("sed -i 's/%s/%s/' ../bin%s/*" % (desc, len(desc) * "0", desc), shell=True)
if haveBuild:
for line in subprocess.check_output('fdupes -1 -q ../binv*', shell=True).splitlines():
dups = line.split()
for d in dups[1:]:
subprocess.call('ln -sf %s %s' % (dups[0], d), shell=True)
subprocess.call(["git", "checkout", "-q", "master"])
except Exception:
traceback.print_exc()
os.remove(LOCK)
| 39.96 | 110 | 0.644311 | 416 | 2,997 | 4.598558 | 0.483173 | 0.058547 | 0.035546 | 0.039205 | 0.148458 | 0.118139 | 0 | 0 | 0 | 0 | 0 | 0.017105 | 0.2002 | 2,997 | 74 | 111 | 40.5 | 0.780976 | 0.295963 | 0 | 0.041667 | 0 | 0 | 0.181121 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
654e50df76dc184a8b8a4c0ed4391bfd96ba1623 | 960 | py | Python | test/retrievers_test/date_test.py | BrianPulfer/AuthorNameDisambiguation | abd65a83751425227bc6515407f3f2c4dff02c8f | [
"MIT"
] | 8 | 2019-06-30T12:58:52.000Z | 2022-03-23T14:16:11.000Z | test/retrievers_test/date_test.py | BrianPulfer/AuthorNameDisambiguation | abd65a83751425227bc6515407f3f2c4dff02c8f | [
"MIT"
] | null | null | null | test/retrievers_test/date_test.py | BrianPulfer/AuthorNameDisambiguation | abd65a83751425227bc6515407f3f2c4dff02c8f | [
"MIT"
] | 3 | 2019-10-28T02:34:09.000Z | 2021-09-18T19:05:32.000Z | import unittest
import datetime
from bs4 import BeautifulSoup
from main.retrievers import date
from main.eutilities import e_utilities
class TestDateRetriever(unittest.TestCase):
def test_find_date(self):
"""Tests that the date is correctly retrieved in method 'find_date()'"""
# Fetching an article
pmid = "20113659"
query = e_utilities.Query(any_terms=[pmid])
article = e_utilities.fetch(e_utilities.DATABASES.PubMed, query, rettype="xml")
soup = BeautifulSoup(article.content.decode('utf-8'), "xml")
        # Get two of the three available dates from the article
pubmed_date = date.find_date(soup)
medline_date = date.find_date(soup, pubstatus='medline')
# Checking that the retrieved dates are correct
self.assertEqual(datetime.datetime(2010, 2, 2), pubmed_date)
self.assertEqual(datetime.datetime(2010, 3, 3), medline_date)
if __name__ == '__main__':
unittest.main()
| 30 | 87 | 0.694792 | 121 | 960 | 5.330579 | 0.479339 | 0.062016 | 0.037209 | 0.049612 | 0.170543 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031496 | 0.20625 | 960 | 31 | 88 | 30.967742 | 0.814961 | 0.175 | 0 | 0 | 0 | 0 | 0.043367 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.058824 | false | 0 | 0.294118 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65521030b4b5b5b2f1530f24a6376be26e8b08db | 17,119 | py | Python | generate_model/covid19_configurable.py | aitoralmeida/lus_stratification | 8153a2dd4ddd49bac8c7d36269762ddd9207d72f | [
"Apache-2.0"
] | 1 | 2020-12-26T23:27:32.000Z | 2020-12-26T23:27:32.000Z | generate_model/covid19_configurable.py | aitoralmeida/lus_stratification | 8153a2dd4ddd49bac8c7d36269762ddd9207d72f | [
"Apache-2.0"
] | null | null | null | generate_model/covid19_configurable.py | aitoralmeida/lus_stratification | 8153a2dd4ddd49bac8c7d36269762ddd9207d72f | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import argparse
from sklearn import metrics
from random import shuffle, sample, seed
import tensorflow as tf
from tensorflow import keras
from tensorflow.random import set_seed
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input as preprocess_input_v1
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input as preprocess_input_v2
from tensorflow.keras.applications.efficientnet import *
#TODO add to args
TEST_SET_PATIENTS = ['04_','09_','18_','21_','27_','36_','38_','41_','51_','55_','59_','60_']
def create_sets(path, positive, negative, model_name, model_version, model, train_test_divide):
files_covid= os.listdir(path)
total_files = len(files_covid)
print ('Total files in disk:', total_files)
#randomize the files
shuffle(files_covid)
#find positive and negative files
print('*'*10)
print('Separating posititive and negative files...')
print('Positive token:', positive)
print('Negative token', negative)
positive_files = []
negative_files = []
for name in files_covid:
if negative in name:
negative_files.append(name)
elif positive in name:
positive_files.append(name)
total_positive = len(positive_files)
print ('Total positive files:', total_positive)
total_negative = len(negative_files)
print ('Total negative files:', total_negative)
#sanity check
print('>>>>>Sanity check...')
print ('Expected total files:', total_files)
print ('Total files positive+negative:', total_positive+total_negative)
#calculating splits
#train
total_train_pos = int(total_positive * train_test_divide)
total_train_neg = int(total_negative * train_test_divide)
print('*'*10)
print('Calculating splits...')
print('Training positive:', total_train_pos)
print('Training positive percentage:', float(total_train_pos/(total_train_pos+total_train_neg)))
print('Training negative:', total_train_neg)
print('Training negative percentage:', float(total_train_neg/(total_train_pos+total_train_neg)))
total_train = total_train_pos+total_train_neg
print('Training total:', total_train)
#val
test_pos = total_positive - total_train_pos
test_neg = total_negative - total_train_neg
test_total = test_pos + test_neg
print('Test positive:', test_pos)
print('Test positive percentage:', float(test_pos/test_total))
print('Test negative:', test_neg)
print('Test negative percentage:', float(test_neg/test_total))
print('Test total:', test_total)
#sanity check
print('>>>>>Sanity check...')
print('Target divide perecentage:', train_test_divide)
print('Train percentage', (float)(total_train/(total_train+test_total)))
print('Test percentage', (float)(test_total/(total_train+test_total)))
    print ('Expected total files:', total_files)
print ('Total files train+val:', total_train+test_total)
    # Correct up to this point
print('*'*10)
print('Loading file names...')
print('Total positive', len(positive_files))
print('Total negative', len(negative_files))
print('Expected train pos:', total_train_pos)
print('Expected train neg:', total_train_neg)
#train
train_positive_filenames = positive_files[:total_train_pos]
train_negative_filenames = negative_files[:total_train_neg]
train_files = train_positive_filenames + train_negative_filenames
#sanity check
print('>>>>>Sanity check...')
print('Expected train positive:', total_train_pos)
print('Actual train positive:', len(train_positive_filenames))
print('Expected train negative:', total_train_neg)
print('Actual train negative:', len(train_negative_filenames))
print('Expected train:', total_train)
print('Actual files in train_files:', len(train_files))
#val
val_positive_filenames = positive_files[total_train_pos:]
val_negative_filenames = negative_files[total_train_neg:]
val_files = val_positive_filenames + val_negative_filenames
#sanity check
print('>>>>>Sanity check...')
print('Expected val positive:', test_pos)
print('Actual val positive:', len(val_positive_filenames))
print('Expected val negative:', test_neg)
print('Actual val negative:', len(val_negative_filenames))
print('Expected val:', test_total)
print('Actual files in val_files:', len(val_files))
#train_files = positive_files[:total_train_pos] + negative_files[:total_train_neg]
#val_files = positive_files[total_train_pos:] + negative_files[total_train_neg:]
shuffle(train_files)
shuffle(val_files)
#loading images
print('Loading train and val images...')
# Train
print ('Processing training data...')
X_train = []
X_train_names = []
y_train = []
fail_train = []
file_processed = 0
for filename in train_files:
file_processed += 1
if file_processed % 300 == 0:
print('Processing ', file_processed, 'of', len(train_files))
if positive in filename:
y_train.append([1,0])
elif negative in filename:
y_train.append([0,1])
        else: #wrong filename: record it and skip so X and y stay aligned
            fail_train.append(filename)
            continue
img = image.load_img(path+filename, target_size=(224, 224, 3))
x = image.img_to_array(img)
if (model_name == "mobilenet"):
if (model_version == 'V1'):
x = preprocess_input_v1(x) #mobilenet v1
elif (model_version == 'V2'):
x = preprocess_input_v2(x) #mobilenet v2
X_train.append(x)
X_train_names.append(filename)
#sanity check
print('Sanity check...')
print('X_train total:', len(X_train))
print('y_train total:', len(y_train))
print('fail_train total:', len(fail_train))
print(fail_train)
#val
print ('Processing validation data...')
X_val = []
X_val_names = []
y_val = []
fail_val = []
file_processed = 0
for filename in val_files:
file_processed += 1
if file_processed % 300 == 0:
print('Processing ', file_processed, 'of', len(val_files))
if positive in filename:
y_val.append([1,0])
elif negative in filename:
y_val.append([0,1])
        else: #wrong filename: record it and skip so X and y stay aligned
            fail_val.append(filename)
            continue
img = image.load_img(path+filename, target_size=(224, 224, 3))
x = image.img_to_array(img)
if (model_name == "mobilenet"):
if (model_version == 'V1'):
x = preprocess_input_v1(x) #mobilenet v1
elif (model_version == 'V2'):
x = preprocess_input_v2(x) #mobilenet v2
X_val.append(x)
X_val_names.append(filename)
#sanity check
print('Sanity check...')
print('X_val total:', len(X_val))
print('y_val total:', len(y_val))
print('fail_val total:', len(fail_val))
print(fail_val)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_val = np.array(X_val)
y_val = np.array(y_val)
print('Shapes train')
print(X_train.shape)
print(y_train.shape)
print('Shapes val')
print(X_val.shape)
print(y_val.shape)
return X_train, y_train, X_train_names, X_val, y_val, X_val_names
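# Design note: create_sets() above splits at the frame level, so frames from
# one patient can land in both train and test. create_sets_by_patients() below
# holds out whole patients (TEST_SET_PATIENTS), which avoids that leakage and
# gives a more honest estimate of generalization to unseen patients.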
def create_sets_by_patients(path, positive, negative, model_name, model_version, model, train_test_divide):
files_covid= os.listdir(path)
total_files = len(files_covid)
print ('Total files in disk:', total_files)
train_files = []
val_files = []
for filename in files_covid:
if any(x in filename for x in TEST_SET_PATIENTS):
val_files.append(filename)
else:
train_files.append(filename)
print('Total train files:', len(train_files))
print('Total test files:', len(val_files))
#loading images
print('Loading train and val images...')
# Train
print ('Processing training data...')
X_train = []
X_train_names = []
y_train = []
fail_train = []
file_processed = 0
for filename in train_files:
file_processed += 1
if file_processed % 300 == 0:
print('Processing ', file_processed, 'of', len(train_files))
if positive in filename:
y_train.append([1,0])
elif negative in filename:
y_train.append([0,1])
        else: #wrong filename: record it and skip so X and y stay aligned
            fail_train.append(filename)
            continue
img = image.load_img(path+filename, target_size=(224, 224, 3))
x = image.img_to_array(img)
if (model_name == "mobilenet"):
if (model_version == 'V1'):
x = preprocess_input_v1(x) #mobilenet v1
elif (model_version == 'V2'):
x = preprocess_input_v2(x) #mobilenet v2
X_train.append(x)
X_train_names.append(filename)
#sanity check
print('Sanity check...')
print('X_train total:', len(X_train))
print('y_train total:', len(y_train))
print('fail_train total:', len(fail_train))
print(fail_train)
#val
print ('Processing validation data...')
X_val = []
X_val_names = []
y_val = []
fail_val = []
file_processed = 0
test_pos_total = 0
test_neg_total = 0
for filename in val_files:
file_processed += 1
if file_processed % 300 == 0:
print('Processing ', file_processed, 'of', len(val_files))
if positive in filename:
y_val.append([1,0])
test_pos_total += 1
elif negative in filename:
y_val.append([0,1])
test_neg_total += 1
        else: #wrong filename: record it and skip so X and y stay aligned
            fail_val.append(filename)
            continue
img = image.load_img(path+filename, target_size=(224, 224, 3))
x = image.img_to_array(img)
if (model_name == "mobilenet"):
if (model_version == 'V1'):
x = preprocess_input_v1(x) #mobilenet v1
elif (model_version == 'V2'):
x = preprocess_input_v2(x) #mobilenet v2
X_val.append(x)
X_val_names.append(filename)
#sanity check
print('Sanity check...')
print('X_val total:', len(X_val))
print('y_val total:', len(y_val))
print('fail_val total:', len(fail_val))
print(fail_val)
print('Test positive examples:', test_pos_total)
print((float)(test_pos_total/len(y_val)))
print('Test negative examples:', test_neg_total)
print((float)(test_neg_total/len(y_val)))
X_train = np.array(X_train)
y_train = np.array(y_train)
X_val = np.array(X_val)
y_val = np.array(y_val)
print('Shapes train')
print(X_train.shape)
print(y_train.shape)
print('Shapes val')
print(X_val.shape)
print(y_val.shape)
return X_train, y_train, X_train_names, X_val, y_val, X_val_names
if __name__ == '__main__':
# parsing arguments
parser = argparse.ArgumentParser()
parser.add_argument("--model",
type=str,
default='mobilenet',
nargs="?",
help="Model: mobilenet or efficientnet.")
parser.add_argument("--model_version",
type=str,
default='V1',
nargs="?",
help="Mobile net version: V1 or V2. Efficient net scaling: B0, B1, B2, B3, B4, B5, B6 or B7.")
parser.add_argument("--dataset_path",
type=str,
default='/lus_stratification/generate_model/croppedi2p0/',
nargs="?",
help="Dataset's absolute path")
parser.add_argument("--results_path",
type=str,
default='/lus_stratification/generate_model/results/',
nargs="?",
help="Results's absolute path")
parser.add_argument("--train_test_divide",
type=float,
default=0.75,
nargs="?",
help="Train test divide value between 0.0 and 1.0")
parser.add_argument("--epochs",
type=int,
default=10,
nargs="?",
help="Epochs value between 1 and infinite")
parser.add_argument("--batch_size",
type=int,
default=32,
nargs="?",
help="Batch size value")
parser.add_argument("--steps_per_epoch",
type=int,
default=300,
nargs="?",
help="Steps per epoch value")
parser.add_argument("--use_steps_per_epoch",
type=int,
default=0,
nargs="?",
help="Use steps per epoch value: 1 use, other not use. Default 0.")
parser.add_argument("--optimizer",
type=str,
default='adam',
nargs="?",
help="Optimizer")
parser.add_argument("--loss",
type=str,
default='binary_crossentropy',
nargs="?",
help="Loss")
parser.add_argument("--label_dataset_zero",
type=str,
default='N0',
nargs="?",
help="Label dataset 0: N0, B0, M0, S0, C0, P0.")
parser.add_argument("--label_dataset_one",
type=str,
default='N1',
nargs="?",
help="Label dataset 1: N1, B1, M1, S1, C1, P1.")
parser.add_argument("--strategy",
type=str,
default='combined',
nargs="?",
help="Create sets strategy: combined or by_patients.")
parser.add_argument("--random_seed",
type=int,
default=12345,
nargs="?",
help="Random seed for reproducible results")
args = parser.parse_args()
# reproducible results
    os.environ['PYTHONHASHSEED'] = '0'  # note: only fully effective if set before the interpreter starts
np.random.seed(args.random_seed)
seed(args.random_seed)
set_seed(args.random_seed)
# get the model without the denses
if (args.model == 'mobilenet'):
if (args.model_version == 'V1'):
base_model = MobileNet(weights='imagenet', include_top=False)
elif (args.model_version == 'V2'):
base_model = MobileNetV2(weights='imagenet', include_top=False)
    elif (args.model == 'efficientnet'):
        # Same behavior as before, written as a lookup instead of an if-chain.
        efficientnets = {
            'B0': EfficientNetB0, 'B1': EfficientNetB1, 'B2': EfficientNetB2,
            'B3': EfficientNetB3, 'B4': EfficientNetB4, 'B5': EfficientNetB5,
            'B6': EfficientNetB6, 'B7': EfficientNetB7,
        }
        base_model = efficientnets[args.model_version](weights='imagenet', include_top=False)
last_layer = base_model.layers[-1]
new_top_layer_global_avg_pooling = GlobalAveragePooling2D()(last_layer.output)
new_dense = Dense(1024, activation='relu')(new_top_layer_global_avg_pooling)
predictions = Dense(2, activation='softmax')(new_dense)
model = Model(base_model.input, predictions)
# we will only train the new denses for the baseline
for layer in base_model.layers:
layer.trainable = False
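    # (Optional follow-up, a sketch and not part of this baseline: once the new
    # head converges, one could unfreeze the top of the base model and recompile
    # with a small learning rate, e.g.
    #   for layer in base_model.layers[-20:]:
    #       layer.trainable = True
    #   model.compile(optimizer=keras.optimizers.Adam(1e-5), loss=args.loss, metrics=["accuracy"])
    # -20 is an illustrative depth, not a tuned value.)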
# compile model
model.compile(optimizer=args.optimizer, loss=args.loss, metrics = ["accuracy"])
# see model structure
model.summary()
# get the data
print('***** Load files...')
if args.strategy == 'combined':
X_train, y_train, X_train_names, X_val, y_val, X_val_names = create_sets(args.dataset_path,
args.label_dataset_zero,
args.label_dataset_one,
args.model,
args.model_version,
model,
args.train_test_divide)
elif args.strategy == 'by_patients':
X_train, y_train, X_train_names, X_val, y_val, X_val_names = create_sets_by_patients(args.dataset_path,
args.label_dataset_zero,
args.label_dataset_one,
args.model,
args.model_version,
model,
args.train_test_divide)
# fit model
if (args.use_steps_per_epoch == 1):
results = model.fit(X_train, y_train, epochs=args.epochs, steps_per_epoch=args.steps_per_epoch, batch_size=args.batch_size, validation_data=(X_val, y_val))
else:
results = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_data=(X_val, y_val))
print('#' * 40)
print("Finished! Saving model")
# save model
model.save(args.results_path + 'covid19_model_'
+ args.model + args.model_version + "_for_" + args.label_dataset_zero + "_" + args.label_dataset_one)
print('#' * 40)
print("Model saved!")
| 36.657388 | 161 | 0.639114 | 2,179 | 17,119 | 4.762276 | 0.112896 | 0.032765 | 0.02467 | 0.024092 | 0.566349 | 0.532235 | 0.485304 | 0.448299 | 0.419485 | 0.360412 | 0 | 0.016763 | 0.240318 | 17,119 | 466 | 162 | 36.736052 | 0.781161 | 0.044512 | 0 | 0.508816 | 0 | 0.002519 | 0.168475 | 0.006805 | 0 | 0 | 0 | 0.002146 | 0 | 1 | 0.005038 | false | 0 | 0.040302 | 0 | 0.050378 | 0.256927 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6552b431d0a21ee6bcdda606ae6e26c6a1ac5316 | 279 | py | Python | file/file.py | wutianlong/PythonStudy | 8d351c0d2186a849e3c8a8ac65194a0444045964 | [
"MIT"
] | null | null | null | file/file.py | wutianlong/PythonStudy | 8d351c0d2186a849e3c8a8ac65194a0444045964 | [
"MIT"
] | null | null | null | file/file.py | wutianlong/PythonStudy | 8d351c0d2186a849e3c8a8ac65194a0444045964 | [
"MIT"
] | null | null | null | file = open('/Users/wutianlong/PycharmProjects/python_demos/file/test_file.xml','w')
file.write('wutianlong')
file.write('\n')
file.write('yanruixue')
file.write('\n')
file.close()
# note: the write above used an absolute path while this read is relative, so
# run from that same directory (or reuse the absolute path)
file = open('test_file.xml','r')
for line in file.readlines():
    print(line, end='')  # each line already ends with '\n'
file.close()
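# Idiomatic alternative sketch: a context manager closes the file
# automatically, even if an exception is raised mid-read (same relative-path
# assumption as above):
with open('test_file.xml') as f:
    print(f.read())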
| 15.5 | 84 | 0.695341 | 41 | 279 | 4.658537 | 0.487805 | 0.188482 | 0.115183 | 0.146597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103943 | 279 | 17 | 85 | 16.411765 | 0.764 | 0 | 0 | 0.2 | 0 | 0 | 0.369176 | 0.232975 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
655581f5bd0eb485902742567139e09bb660dbba | 1,878 | py | Python | cogs/image.py | RuiL1904/ruibot-discord.py | 588406ef2dbebd7d976237f1d876054d641933f6 | [
"MIT"
] | null | null | null | cogs/image.py | RuiL1904/ruibot-discord.py | 588406ef2dbebd7d976237f1d876054d641933f6 | [
"MIT"
] | null | null | null | cogs/image.py | RuiL1904/ruibot-discord.py | 588406ef2dbebd7d976237f1d876054d641933f6 | [
"MIT"
] | null | null | null | import dotenv
import nextcord as discord
from nextcord.ext import commands
import aiohttp
# Load .env
dotenv.load_dotenv()
env_vars = dotenv.dotenv_values('data/.env')  # avoid shadowing the vars() builtin
client_id = env_vars['UNSPLASH']
# Load config
from config import config
color = config.color
class Image(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(name = 'image')
async def image(self, context, *, argument):
argument_url = argument.strip()
# API data extraction
async with aiohttp.ClientSession() as session:
async with session.get(f'https://api.unsplash.com/photos/random/?query={argument_url}&client_id={client_id}') as response:
                # Any non-200 status: API unavailable or the query failed
                if response.status != 200:
# Embed sent by the bot
embed = discord.Embed(
title = 'ERRO',
description = '```A API de imagens está desligada ou o teu argumento não se encontra registado...Contacta um @Developer```',
color = color
)
config.embed_completion(context, embed)
await context.reply(embed = embed)
else:
data = await response.json()
url = data['urls']['regular']
#Embed sent by the bot
embed = discord.Embed(
title = f'Encontrei esta imagem relacionada a {argument}',
color = color,
url = url
)
embed.set_image(url = url)
config.embed_completion(context, embed)
await context.reply(embed = embed)
def setup(client):
client.add_cog(Image(client))
| 31.830508 | 148 | 0.530884 | 194 | 1,878 | 5.061856 | 0.469072 | 0.02444 | 0.022403 | 0.028513 | 0.201629 | 0.201629 | 0.201629 | 0.201629 | 0.201629 | 0.1222 | 0 | 0.005168 | 0.381789 | 1,878 | 58 | 149 | 32.37931 | 0.840655 | 0.0623 | 0 | 0.157895 | 0 | 0.026316 | 0.155074 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.131579 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6558d15af9b291ba6ce61061588d40df72c0b452 | 863 | py | Python | test/test_blas.py | tskisner/so3g | 75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec | [
"MIT"
] | 5 | 2019-09-02T14:17:31.000Z | 2022-01-21T16:43:14.000Z | test/test_blas.py | tskisner/so3g | 75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec | [
"MIT"
] | 70 | 2019-05-16T23:42:40.000Z | 2022-03-23T14:35:35.000Z | test/test_blas.py | tskisner/so3g | 75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec | [
"MIT"
] | 2 | 2020-05-17T18:20:33.000Z | 2020-10-22T20:35:44.000Z | import unittest
import time
import numpy as np
import so3g
class TestBlas(unittest.TestCase):
def test_smoke(self):
"""Confirm that BLAS is linked in properly working by calling a
function that uses it.
"""
ndet, nfreq, nbin = 50, 100000, 2
nvec = 3
dtype = 'float32'
ft = np.ones((ndet, nfreq), dtype)
bins = np.zeros((nbin, 2), 'int32')
iD = np.ones((nbin, ndet), dtype) * 2
iV = np.zeros((nbin, ndet, nvec), dtype)
s, norm = 1., 1.2
# Need some bins.
bins[0] = [0, nfreq//2]
bins[1] = [nfreq//2, nfreq]
t0 = time.time()
so3g.nmat_detvecs_apply(ft, bins, iD, iV, s, norm)
print('Elapsed: %.6f' % (time.time() - t0))
self.assertNotEqual(ft[0,0], 1.)
if __name__ == '__main__':
unittest.main()
| 23.972222 | 71 | 0.538818 | 117 | 863 | 3.880342 | 0.529915 | 0.039648 | 0.048458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054329 | 0.317497 | 863 | 35 | 72 | 24.657143 | 0.716469 | 0.115875 | 0 | 0 | 0 | 0 | 0.044655 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.045455 | false | 0 | 0.181818 | 0 | 0.272727 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65597f267ec90cf9fb30944fc264cd68436759dc | 430 | py | Python | resources/views/images/payment/rename.py | daihiepnguyen/apple-store | eb32a2ffa107259c8e795e2f306e1bf928725cb0 | [
"MIT"
] | null | null | null | resources/views/images/payment/rename.py | daihiepnguyen/apple-store | eb32a2ffa107259c8e795e2f306e1bf928725cb0 | [
"MIT"
] | null | null | null | resources/views/images/payment/rename.py | daihiepnguyen/apple-store | eb32a2ffa107259c8e795e2f306e1bf928725cb0 | [
"MIT"
] | null | null | null | import os
# get all files in the current directory
files = os.listdir(os.getcwd())
# change all name files in the current directory
def change_name_file(files, name_input):
for i, file in enumerate(files):
        if not file.endswith('.py'):
new_name = name_input + str(i) + ".webp"
old_name = file
os.rename(old_name, new_name)
name_input = input("Enter the name of the file: ")
change_name_file(files, name_input)
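# Equivalent sketch with pathlib (an alternative, not used above; f-strings
# assume Python 3.6+; like the loop above, indices skip over .py files):
#   from pathlib import Path
#   for i, p in enumerate(Path.cwd().iterdir()):
#       if p.suffix != '.py':
#           p.rename(p.with_name(f'{name_input}{i}.webp'))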
| 25.294118 | 50 | 0.718605 | 71 | 430 | 4.183099 | 0.422535 | 0.121212 | 0.06734 | 0.114478 | 0.363636 | 0.188552 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169767 | 430 | 16 | 51 | 26.875 | 0.831933 | 0.197674 | 0 | 0 | 0 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
655b9f4d8a44f50e42ec60ec18ee0ea53a87ec83 | 1,590 | py | Python | examples/what/dither-image-what.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | 402 | 2018-10-30T21:59:53.000Z | 2022-03-24T20:43:54.000Z | examples/what/dither-image-what.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | 104 | 2018-11-01T10:55:45.000Z | 2022-02-26T13:35:17.000Z | examples/what/dither-image-what.py | fsargent/inky | 54684464b2f35bfd52208cdfb922c09685644181 | [
"MIT"
] | 108 | 2018-11-03T01:43:52.000Z | 2022-03-31T09:19:35.000Z | #!/usr/bin/env python3
import argparse
from PIL import Image
from inky import InkyWHAT
print("""Inky wHAT: Dither image
Converts and displays dithered images on Inky wHAT.
""")
# Command line arguments to set display type and colour, and enter your name
parser = argparse.ArgumentParser()
parser.add_argument('--colour', '-c', type=str, required=True, choices=["red", "black", "yellow"], help="ePaper display colour")
parser.add_argument('--image', '-i', type=str, required=True, help="Input image to be converted/displayed")
args = parser.parse_args()
colour = args.colour
img_file = args.image
# Set up the inky wHAT display and border colour
inky_display = InkyWHAT(colour)
inky_display.set_border(inky_display.WHITE)
# Open our image file that was passed in from the command line
img = Image.open(img_file)
# Get the width and height of the image
w, h = img.size
# Calculate the new height and width of the image
h_new = 300
w_new = int((float(w) / h) * h_new)
w_cropped = 400
# Resize the image with high-quality resampling
img = img.resize((w_new, h_new), resample=Image.LANCZOS)
# Calculate coordinates to crop image to 400 pixels wide
x0 = (w_new - w_cropped) / 2
x1 = x0 + w_cropped
y0 = 0
y1 = h_new
# Crop image
img = img.crop((x0, y0, x1, y1))
# Convert the image to use a white / black / red colour palette
pal_img = Image.new("P", (1, 1))
pal_img.putpalette((255, 255, 255, 0, 0, 0, 255, 0, 0) + (0, 0, 0) * 252)
img = img.convert("RGB").quantize(palette=pal_img)
# Display the final image on Inky wHAT
inky_display.set_image(img)
inky_display.show()
| 23.731343 | 128 | 0.718868 | 266 | 1,590 | 4.203008 | 0.409774 | 0.010733 | 0.010733 | 0.033989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034509 | 0.161635 | 1,590 | 66 | 129 | 24.090909 | 0.804201 | 0.315094 | 0 | 0 | 0 | 0 | 0.159555 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
655c317b2c5a0d6bf04b4e7d23c9743e35855785 | 12,887 | py | Python | engine/src/valet/engine/db_connect/db_apis/music.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | engine/src/valet/engine/db_connect/db_apis/music.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | engine/src/valet/engine/db_connect/db_apis/music.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-10-15T18:54:03.000Z | 2021-10-15T18:54:03.000Z | #
# -------------------------------------------------------------------------
# Copyright (c) 2019 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import base64
import json
import requests
from valet.utils.decryption import decrypt
class REST(object):
"""Helper class for REST operations."""
def __init__(self, hosts, port, path, timeout, retries,
userid, password, ns, logger):
"""Initializer. Accepts target host list, port, and path."""
self.hosts = hosts # List of IP or FQDNs
self.port = port # Port Number
self.path = path # Path starting with /
self.timeout = float(timeout) # REST request timeout in seconds
self.retries = retries # Retires before failing over to next Music server.
self.userid = userid
self.password = password
self.ns = ns
self.logger = logger # For logging
self.urls = []
for host in self.hosts:
# Must end without a slash
self.urls.append('http://%(host)s:%(port)s%(path)s' % {
'host': host,
'port': self.port,
'path': self.path,
})
def __headers(self, content_type='application/json'):
"""Returns HTTP request headers."""
headers = {
'ns': self.ns,
'accept': content_type,
'content-type': content_type,
'authorization': 'Basic %s' % base64.b64encode((self.userid + ':' + self.password).encode()).decode()
}
return headers
def request(self, method='get', content_type='application/json', path='/',
data=None, raise400=True):
""" Performs HTTP request """
if method not in ('post', 'get', 'put', 'delete'):
raise KeyError("Method must be: post, get, put, or delete.")
method_fn = getattr(requests, method)
if data:
data_json = json.dumps(data)
else:
data_json = None
response = None
timeout = False
err_message = ""
full_url = ""
for url in self.urls:
# Try each url in turn. First one to succeed wins.
full_url = url + path
for attempt in range(self.retries):
# Ignore the previous exception.
try:
my_headers = self.__headers(content_type)
                    for header_key in my_headers:
                        # Python 2 compatibility shim: coerce unicode header values to ascii bytes.
                        if (type(my_headers[header_key]).__name__ == 'unicode'):
                            my_headers[header_key] = my_headers[header_key].encode('ascii', 'ignore')
response = method_fn(full_url, data=data_json,
headers=my_headers,
timeout=self.timeout)
if raise400 or not response.status_code == 400:
response.raise_for_status()
return response
except requests.exceptions.Timeout as err:
                    err_message = str(err)  # the .message attribute is gone in Python 3
response = requests.Response()
response.url = full_url
if not timeout:
self.logger.warning("Music: %s Timeout" % url, errorCode='availability')
timeout = True
except requests.exceptions.RequestException as err:
                    err_message = str(err)  # the .message attribute is gone in Python 3
self.logger.debug("Music: %s Request Exception" % url)
self.logger.debug(" method = %s" % method)
self.logger.debug(" timeout = %s" % self.timeout)
self.logger.debug(" err = %s" % err)
self.logger.debug(" full url = %s" % full_url)
self.logger.debug(" request data = %s" % data_json)
self.logger.debug(" request headers = %s" % my_headers)
self.logger.debug(" status code = %s" % response.status_code)
self.logger.debug(" response = %s" % response.text)
self.logger.debug(" response headers = %s" % response.headers)
# If we get here, an exception was raised for every url,
# but we passed so we could try each endpoint. Raise status
# for the last attempt (for now) so that we report something.
if response is not None:
self.logger.debug("Music: Full Url: %s", full_url)
self.logger.debug("Music: %s ", err_message)
response.raise_for_status()
class Music(object):
"""Wrapper for Music API"""
def __init__(self, _config, _logger):
"""Initializer. Accepts a lock_timeout for atomic operations."""
self.logger = _logger
pw = decrypt(_config["engine"]["ek"],
_config["logging"]["lk"],
_config["db"]["dk"],
_config["music"]["password"])
kwargs = {
'hosts': _config["music"]["hosts"],
'port': _config["music"]["port"],
'path': _config["music"]["path"],
'timeout': _config["music"]["timeout"],
'retries': _config["music"]["retries"],
'userid': _config["music"]["userid"],
'password': pw,
'ns': _config["music"]["namespace"],
'logger': _logger,
}
self.rest = REST(**kwargs)
self.lock_names = []
self.lock_timeout = _config["music"]["lock_timeout"]
self.replication_factor = _config["music"]["replication_factor"]
@staticmethod
def __row_url_path(keyspace, table, pk_name=None, pk_value=None):
"""Returns a Music-compliant row URL path."""
path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
'keyspace': keyspace,
'table': table,
}
if pk_name and pk_value:
path += '?%s=%s' % (pk_name, pk_value)
return path
def create_keyspace(self, keyspace):
"""Creates a keyspace."""
data = {
'replicationInfo': {
# 'class': 'NetworkTopologyStrategy',
# 'dc1': self.replication_factor,
'class': 'SimpleStrategy',
'replication_factor': self.replication_factor,
},
'durabilityOfWrites': True,
'consistencyInfo': {
'type': 'eventual',
},
}
path = '/keyspaces/%s' % keyspace
response = self.rest.request(method='post', path=path, data=data)
return response.ok
def drop_keyspace(self, keyspace):
"""Drops a keyspace."""
data = {
'consistencyInfo': {
'type': 'eventual',
},
}
path = '/keyspaces/%s' % keyspace
response = self.rest.request(method='delete', path=path, data=data)
return response.ok
def create_table(self, keyspace, table, schema):
"""Creates a table."""
data = {
'fields': schema,
'consistencyInfo': {
'type': 'eventual',
},
}
self.logger.debug(data)
path = '/keyspaces/%(keyspace)s/tables/%(table)s' % {
'keyspace': keyspace,
'table': table,
}
response = self.rest.request(method='post', path=path, data=data)
return response.ok
def create_index(self, keyspace, table, index_field, index_name=None):
"""Creates an index for the referenced table."""
data = None
if index_name:
data = {
'index_name': index_name,
}
pstr = '/keyspaces/%(keyspace)s/tables/%(table)s/index/%(index_field)s'
path = pstr % {
'keyspace': keyspace,
'table': table,
'index_field': index_field,
}
response = self.rest.request(method='post', path=path, data=data)
return response.ok
def version(self):
"""Returns version string."""
path = '/version'
response = self.rest.request(method='get', content_type='text/plain', path=path)
return response.text
def create_lock(self, lock_name):
"""Returns the lock id. Use for acquiring and releasing."""
path = '/locks/create/%s' % lock_name
response = self.rest.request(method='post', path=path)
return json.loads(response.text)["lock"]["lock"]
def acquire_lock(self, lock_id):
"""Acquire a lock."""
path = '/locks/acquire/%s' % lock_id
response = self.rest.request(method='get', path=path, raise400=False)
return json.loads(response.text)["status"] == "SUCCESS"
def release_lock(self, lock_id):
"""Release a lock."""
path = '/locks/release/%s' % lock_id
response = self.rest.request(method='delete', path=path)
return response.ok
def delete_lock(self, lock_name):
"""Deletes a lock by name."""
path = '/locks/delete/%s' % lock_name
response = self.rest.request(method='delete', path=path, raise400=False)
return response.ok
def delete_all_locks(self):
"""Delete all locks created during the lifetime of this object."""
# TODO(JD): Shouldn't this really be part of internal cleanup?
# FIXME: It can be several API calls. Any way to do in one fell swoop?
for lock_name in self.lock_names:
self.delete_lock(lock_name)
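    # Typical lock lifecycle with the methods above (illustrative sketch only):
    #   lock_id = music.create_lock('my.lock')       # register and get an id
    #   if music.acquire_lock(lock_id):              # True on SUCCESS
    #       try:
    #           music.insert_atom(keyspace, table, values)   # critical section
    #       finally:
    #           music.release_lock(lock_id)
    #   music.delete_lock('my.lock')                 # cleanup is by name, not id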
def create_row(self, keyspace, table, values):
"""Create a row."""
# self.logger.debug("MUSIC: create_row "+ table)
data = {
'values': values,
'consistencyInfo': {
'type': 'eventual',
},
}
path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
'keyspace': keyspace,
'table': table,
}
response = self.rest.request(method='post', path=path, data=data)
return response.ok
def insert_atom(self, keyspace, table, values, name=None, value=None):
"""Atomic create/update row."""
data = {
'values': values,
'consistencyInfo': {
'type': 'atomic',
}
}
path = self.__row_url_path(keyspace, table, name, value)
method = 'post'
# self.logger.debug("MUSIC: Method: %s ", (method.upper()))
# self.logger.debug("MUSIC: Path: %s", (path))
# self.logger.debug("MUSIC: Data: %s", (data))
self.rest.request(method=method, path=path, data=data)
def update_row_eventually(self, keyspace, table, values):
"""Update a row. Not atomic."""
data = {
'values': values,
'consistencyInfo': {
'type': 'eventual',
},
}
path = self.__row_url_path(keyspace, table)
response = self.rest.request(method='post', path=path, data=data)
return response.ok
def delete_row_eventually(self, keyspace, table, pk_name, pk_value):
"""Delete a row. Not atomic."""
data = {
'consistencyInfo': {
'type': 'eventual',
},
}
path = self.__row_url_path(keyspace, table, pk_name, pk_value)
response = self.rest.request(method='delete', path=path, data=data)
return response.ok
def delete_atom(self, keyspace, table, pk_name, pk_value):
"""Atomic delete row."""
data = {
'consistencyInfo': {
'type': 'atomic',
}
}
path = self.__row_url_path(keyspace, table, pk_name, pk_value)
self.rest.request(method='delete', path=path, data=data)
def read_row(self, keyspace, table, pk_name, pk_value):
"""Read one row based on a primary key name/value."""
path = self.__row_url_path(keyspace, table, pk_name, pk_value)
response = self.rest.request(path=path)
return response.json()
def read_all_rows(self, keyspace, table):
"""Read all rows."""
return self.read_row(keyspace, table, pk_name=None, pk_value=None)
| 33.128535 | 113 | 0.540079 | 1,388 | 12,887 | 4.894092 | 0.20245 | 0.029442 | 0.037539 | 0.04328 | 0.29972 | 0.258501 | 0.248933 | 0.220521 | 0.164876 | 0.155013 | 0 | 0.003451 | 0.325367 | 12,887 | 388 | 114 | 33.213918 | 0.777893 | 0.179095 | 0 | 0.260331 | 0 | 0 | 0.133903 | 0.01843 | 0 | 0 | 0 | 0.002577 | 0 | 1 | 0.090909 | false | 0.020661 | 0.016529 | 0 | 0.18595 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
655ccb8c9c2d7eabfccc53316f02371cf7cc08dd | 1,302 | py | Python | 19_White-box-Cartoonization/03_integer_quantization.py | khanfarhan10/PINTO_model_zoo | 4cad2e506d8c0fb604aa7b5f84115a840ab59ba1 | [
"MIT"
] | 1,529 | 2019-12-11T13:36:23.000Z | 2022-03-31T18:38:27.000Z | 19_White-box-Cartoonization/03_integer_quantization.py | khanfarhan10/PINTO_model_zoo | 4cad2e506d8c0fb604aa7b5f84115a840ab59ba1 | [
"MIT"
] | 200 | 2020-01-06T09:24:42.000Z | 2022-03-31T17:29:08.000Z | 19_White-box-Cartoonization/03_integer_quantization.py | khanfarhan10/PINTO_model_zoo | 4cad2e506d8c0fb604aa7b5f84115a840ab59ba1 | [
"MIT"
] | 288 | 2020-02-21T14:56:02.000Z | 2022-03-30T03:00:35.000Z | ### tf-nightly-2.2.0.dev20200406
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
def representative_dataset_gen():
for data in raw_test_data.take(100):
image = data['image'].numpy()
image = tf.image.resize(image, (720, 720))
image = image / 127.5 - 1
image = image[np.newaxis,:,:,:]
yield [image]
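# The TFLite converter pulls samples from this generator during quantization to
# calibrate integer ranges for activations; here: 100 VOC images resized to the
# model's expected 720x720 input and scaled to [-1, 1].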
tf.compat.v1.enable_eager_execution()
raw_test_data, info = tfds.load(name="voc/2007", with_info=True, split="validation", data_dir="~/TFDS", download=False)
# Integer Quantization - Input/Output=float32
input_arrays=["input"]
output_arrays=['add_1']
size = 720
graph_def_file="export/white_box_cartoonization_freeze_graph.pb"
input_tensor={"input":[1,size,size,3]}
converter = tf.lite.TFLiteConverter.from_saved_model('./saved_model')
#converter = tf.lite.TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays, output_arrays,input_tensor)
converter.experimental_new_converter = True
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
tflite_quant_model = converter.convert()
with open('export/white_box_cartoonization_integer_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Integer Quantization complete! - white_box_cartoonization_integer_quant.tflite")
| 37.2 | 119 | 0.780338 | 183 | 1,302 | 5.289617 | 0.47541 | 0.065083 | 0.068182 | 0.057851 | 0.152893 | 0.082645 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 0.095238 | 1,302 | 34 | 120 | 38.294118 | 0.789474 | 0.140553 | 0 | 0 | 0 | 0 | 0.21204 | 0.12938 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.12 | 0 | 0.16 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
655cd9571ebcea7d5f3cf4a5934c66224e0e06c9 | 577 | py | Python | tests/test_parametric_components/test_PoloidalFieldCoilCase.py | RemDelaporteMathurin/paramak | 10552f1b89820dd0f7a08e4a126834877e3106b4 | [
"MIT"
] | null | null | null | tests/test_parametric_components/test_PoloidalFieldCoilCase.py | RemDelaporteMathurin/paramak | 10552f1b89820dd0f7a08e4a126834877e3106b4 | [
"MIT"
] | null | null | null | tests/test_parametric_components/test_PoloidalFieldCoilCase.py | RemDelaporteMathurin/paramak | 10552f1b89820dd0f7a08e4a126834877e3106b4 | [
"MIT"
] | null | null | null |
import paramak
import unittest
class test_PoloidalFieldCoilCase(unittest.TestCase):
def test_PoloidalFieldCoilCase_creation(self):
"""Creates a pf coil case using the PoloidalFieldCoilCase parametric
component and checks that a cadquery solid is created."""
test_shape = paramak.PoloidalFieldCoilCase(
casing_thickness=5,
coil_height=50,
coil_width=50,
center_point=(
1000,
500))
assert test_shape.solid is not None
assert test_shape.volume > 1000
| 27.47619 | 76 | 0.646447 | 61 | 577 | 5.95082 | 0.672131 | 0.07438 | 0.082645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039604 | 0.299827 | 577 | 20 | 77 | 28.85 | 0.858911 | 0.207972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6562921d35148ecda9aa66073dff1a2dbb7b306c | 4,364 | py | Python | tools/create_testing_volumes.py | erebe/dcos-commons | 7516c3e3b955ffbdc1e5a8718ad5bd78ff6ff4ea | [
"Apache-2.0"
] | 3 | 2018-12-16T04:34:37.000Z | 2019-01-15T22:38:54.000Z | tools/create_testing_volumes.py | erebe/dcos-commons | 7516c3e3b955ffbdc1e5a8718ad5bd78ff6ff4ea | [
"Apache-2.0"
] | 1 | 2019-07-05T19:59:18.000Z | 2019-07-05T19:59:18.000Z | tools/create_testing_volumes.py | erebe/dcos-commons | 7516c3e3b955ffbdc1e5a8718ad5bd78ff6ff4ea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import json
import os
import time
import dcos_launch
from dcos_test_utils import logger, helpers, ssh_client
# Here we create 4 MOUNT volumes on every agent, where the first two have no
# profile and their filesystems default to ext4, and the last two have the "xfs"
# profile and filesystem.
MOUNT_VOLUME_PROFILES = [None, None, "xfs", "xfs"]
MOUNT_VOLUME_SIZE_MB = 200
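# With the defaults above, every agent ends up with /dcos/volume0 .. /dcos/volume3,
# where the last two carry the "xfs" profile.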
def mount_volumes():
""" Will create 200MB partions on clusters launched by dcos-launch
"""
volume_script = """#!/bin/bash
set -e
if [ {dcos_mounts} ]; then
echo 'Volumes already exist, exiting early'
exit 0
fi
echo 'Stopping agent and clearing state...'
systemctl stop dcos-mesos-slave.service
cat /var/lib/dcos/mesos-resources || echo 'No resources file found'
ls -l /var/lib/mesos/slave/meta/slaves/latest || echo 'No latest agent symlink found'
rm -f /var/lib/dcos/mesos-resources
rm -f /var/lib/mesos/slave/meta/slaves/latest
losetup -a
""".format(
dcos_mounts=" -a ".join(["-e /dcos/volume{}".format(i) for i, _ in enumerate(MOUNT_VOLUME_PROFILES)])
)
for i, p in enumerate(MOUNT_VOLUME_PROFILES):
volume_script += """
if [ ! -e {loop_file} ]; then
echo 'Creating loopback device {loop_dev}...'
dd if=/dev/zero of={loop_file} bs=1M count={size_mb}
losetup {loop_dev} {loop_file}
mkfs -t {fs_type} {loop_dev}
losetup -d {loop_dev}
fi
if [ ! -e {dcos_mount} ]; then
echo 'Creating loopback volume {dcos_mount}...'
mkdir -p {dcos_mount}
echo \"{loop_file} {dcos_mount} auto loop 0 2\" | tee -a /etc/fstab
mount {dcos_mount}
fi
""".format(
size_mb=MOUNT_VOLUME_SIZE_MB,
dcos_mount="/dcos/volume{}".format(i),
loop_dev="/dev/loop{}".format(i),
loop_file="/root/volume{}.img".format(i),
fs_type=p or "ext4"
)
# To create profile mount volumes, we manually run `make_disk_resources.py`
# to generate disk resources, then parse the result and set the
# `disk.source.profile` field for each profile mount volume.
volume_script += """
echo 'Updating disk resources...'
export MESOS_WORK_DIR MESOS_RESOURCES
eval $(sed -E "s/^([A-Z_]+)=(.*)$/\\1='\\2'/" /opt/mesosphere/etc/mesos-slave-common) # Set up `MESOS_WORK_DIR`.
eval $(sed -E "s/^([A-Z_]+)=(.*)$/\\1='\\2'/" /opt/mesosphere/etc/mesos-slave) # Set up `MESOS_RESOURCES`.
/opt/mesosphere/bin/make_disk_resources.py /var/lib/dcos/mesos-resources
source /var/lib/dcos/mesos-resources
/opt/mesosphere/bin/python -c "
import json;
import os;
profiles = {profiles}
resources = json.loads(os.environ['MESOS_RESOURCES'])
for r in resources:
try:
disk_source = r['disk']['source']
disk_source['profile'] = profiles[disk_source['mount']['root']]
except KeyError:
pass
print('MESOS_RESOURCES=\\'' + json.dumps(resources) + '\\'')
" > /var/lib/dcos/mesos-resources
echo 'Restarting agent...'
systemctl restart dcos-mesos-slave.service
""".format(profiles={"/dcos/volume{}".format(i): p for i, p in enumerate(MOUNT_VOLUME_PROFILES) if p})
cluster_info_path = os.getenv("CLUSTER_INFO_PATH", "cluster_info.json")
if not os.path.exists(cluster_info_path):
raise Exception("No cluster info to work with!")
cluster_info_json = json.load(open(cluster_info_path))
launcher = dcos_launch.get_launcher(cluster_info_json)
description = launcher.describe()
ssh = launcher.get_ssh_client()
with ssh.tunnel(description["masters"][0]["public_ip"]) as t:
t.copy_file(helpers.session_tempfile(ssh.key), "ssh_key")
t.copy_file(helpers.session_tempfile(volume_script), "volume_script.sh")
t.command(["chmod", "600", "ssh_key"])
ssh_command = ["ssh", "-i", "ssh_key"] + ssh_client.SHARED_SSH_OPTS
scp_command = ["scp", "-i", "ssh_key"] + ssh_client.SHARED_SSH_OPTS
for private_agent in description["private_agents"]:
target = "{}@{}".format(ssh.user, private_agent["private_ip"])
t.command(scp_command + ["volume_script.sh", target + ":~/volume_script.sh"])
t.command(ssh_command + [target, "sudo", "bash", "volume_script.sh"])
# nasty hack until we add a better post-flight
time.sleep(60)
if __name__ == "__main__":
logger.setup(os.getenv("LOG_LEVEL", "DEBUG"))
mount_volumes()
| 35.193548 | 114 | 0.670486 | 628 | 4,364 | 4.47293 | 0.339172 | 0.044856 | 0.0178 | 0.0267 | 0.206479 | 0.138127 | 0.09612 | 0.073336 | 0.027768 | 0.027768 | 0 | 0.006704 | 0.179652 | 4,364 | 123 | 115 | 35.479675 | 0.777933 | 0.115949 | 0 | 0.077778 | 0 | 0.033333 | 0.551509 | 0.16155 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011111 | false | 0.011111 | 0.077778 | 0 | 0.088889 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65633362621641e719056033daf4428d291974d7 | 264 | py | Python | fedn/fedn/utils/checksum.py | eriks-aidotse/fedn | ab784e6ac45fd02be4532c9bbc8d5b8c75b62d51 | [
"Apache-2.0"
] | 75 | 2020-07-19T10:40:15.000Z | 2022-03-13T06:56:04.000Z | fedn/fedn/utils/checksum.py | eriks-aidotse/fedn | ab784e6ac45fd02be4532c9bbc8d5b8c75b62d51 | [
"Apache-2.0"
] | 124 | 2020-07-27T18:16:21.000Z | 2022-03-10T12:16:04.000Z | fedn/fedn/utils/checksum.py | eriks-aidotse/fedn | ab784e6ac45fd02be4532c9bbc8d5b8c75b62d51 | [
"Apache-2.0"
] | 28 | 2020-08-14T19:39:30.000Z | 2022-03-16T10:29:09.000Z | import hashlib
def md5(fname):
"""
:param fname:
:return:
"""
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
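# Usage sketch (hypothetical file path):
#   digest = md5('weights.npz')
#   # an empty file yields 'd41d8cd98f00b204e9800998ecf8427e'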
| 17.6 | 53 | 0.560606 | 35 | 264 | 4.142857 | 0.657143 | 0.144828 | 0.17931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048128 | 0.291667 | 264 | 14 | 54 | 18.857143 | 0.727273 | 0.083333 | 0 | 0 | 0 | 0 | 0.00905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6563ae0e4520f1e216d4eaf0137991fb80d170ea | 24,607 | py | Python | misc/scancode-fingerprint/tests/data/fingerprint/similarity_matching2.py | nexB/scancode-plugins-builtin | 3243913072a58dd62a72f52dce9b86273aa8d122 | [
"Apache-2.0"
] | 1 | 2020-06-24T16:03:52.000Z | 2020-06-24T16:03:52.000Z | misc/scancode-fingerprint/tests/data/fingerprint/similarity_matching2.py | nexB/scancode-plugins-builtin | 3243913072a58dd62a72f52dce9b86273aa8d122 | [
"Apache-2.0"
] | 1 | 2021-06-02T02:54:43.000Z | 2021-06-02T02:54:43.000Z | misc/scancode-fingerprint/tests/data/fingerprint/similarity_matching2.py | nexB/scancode-plugins-builtin | 3243913072a58dd62a72f52dce9b86273aa8d122 | [
"Apache-2.0"
] | null | null | null | import inspect
import cProfile
import pdb
import collections.abc
import traceback
import logging
from functools import partial
from os import makedirs, getcwd
from os.path import join, abspath, exists, isdir
import requests
from appdirs import user_data_dir
from pyprint.Printer import Printer
from coala_utils.decorators import (enforce_signature, classproperty,
get_public_members)
from coalib.bears.BEAR_KIND import BEAR_KIND
from coalib.output.printers.LogPrinter import LogPrinterMixin
from coalib.results.Result import Result
from coalib.results.TextPosition import ZeroOffsetError
from coalib.settings.FunctionMetadata import FunctionMetadata
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.settings.ConfigurationGathering import get_config_directory
from .meta import bearclass
def _setting_is_enabled(bear, key):
"""
Check setting key is in section.
:param bear: Bear object.
:param key: Setting key.
:return: ``True`` if setting value is ``True``. Setting object if
setting key is in section else ``False``.
"""
if not isinstance(bear, Bear):
raise ValueError(
'Positional argument bear is not an instance of Bear class.')
if key is None:
raise ValueError('No setting key passed.')
if key not in bear.section:
return False
try:
return bool(bear.section[key])
except ValueError:
pass
return bear.section[key]
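# Behaviour sketch (assuming a bear `b` whose section sets debug_bears = True):
#   _setting_is_enabled(b, key='debug_bears')  -> True
#   _setting_is_enabled(b, key='missing_key')  -> False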
def _is_debugged(bear):
"""
Check whether the bear is in debug mode according to its section-settings.
:param bear: Bear object.
:return: True if ``debug_bears`` is ``True`` or if bear name specified
in ``debug_bears`` setting match with the bear parameter.
"""
setting = _setting_is_enabled(bear, key='debug_bears')
if isinstance(setting, bool):
return setting
return bear.name.lower() in map(str.lower, setting)
def _is_profiled(bear):
"""
Check whether the bear is in profile mode according to its section-settings.
:param bear: Bear object.
:return: current working directory if ``profile`` is ``True``, False
if ``profile`` is ``False`` else return directory path
specified in ``profile``.
"""
setting = _setting_is_enabled(bear, key='profile')
if setting is True:
return getcwd()
if isinstance(setting, Setting):
return setting.value
return False
class Debugger(pdb.Pdb):
def __init__(self, bear, *args, **kwargs):
if not isinstance(bear, Bear):
raise ValueError('Positional argument bear is not an instance of '
'Bear class.')
super(Debugger, self).__init__(*args, **kwargs)
self.bear = bear
def do_quit(self, arg):
self.clear_all_breaks()
super().do_continue(arg)
return 1
do_q = do_quit
do_exit = do_quit
def do_settings(self, arg):
md = self.bear.get_metadata()
section_params_dict = md.create_params_from_section(
self.bear.section)
for param in md.non_optional_params:
self.message('%s = %r' % (param, section_params_dict[param]))
for param in md.optional_params:
self.message('%s = %r' % (param, section_params_dict[param] if
param in section_params_dict else
md.optional_params[param][2]))
return 1
def debug_run(func, dbg=None, *args, **kwargs):
dbg = Debugger() if dbg is None else dbg
bear_results = dbg.runcall(func, *args, **kwargs)
if isinstance(bear_results, collections.abc.Iterable):
results = []
iterator = iter(bear_results)
try:
while True:
result = dbg.runcall(next, iterator)
results.append(result)
except StopIteration:
return results
else:
return bear_results
class Bear(Printer, LogPrinterMixin, metaclass=bearclass):
"""
A bear contains the actual subroutine that is responsible for checking
source code for certain specifications. However it can actually do
whatever it wants with the files it gets. If you are missing some Result
type, feel free to contact us and/or help us extend the coalib.
This is the base class for every bear. If you want to write a bear, you
will probably want to look at the GlobalBear and LocalBear classes that
inherit from this class. In any case you'll want to overwrite at least the
run method. You can send debug/warning/error messages through the
debug(), warn(), err() functions. These will send the
appropriate messages so that they are outputted. Be aware that if you use
err(), you are expected to also terminate the bear run-through
immediately.
Settings are available at all times through self.section.
To indicate which languages your bear supports, just give it the
``LANGUAGES`` value which should be a set of string(s):
>>> from dependency_management.requirements.PackageRequirement import (
... PackageRequirement)
>>> from dependency_management.requirements.PipRequirement import (
... PipRequirement)
>>> class SomeBear(Bear):
... LANGUAGES = {'C', 'CPP','C#', 'D'}
To indicate the requirements of the bear, assign ``REQUIREMENTS`` a set
with instances of ``PackageRequirements``.
>>> class SomeBear(Bear):
... REQUIREMENTS = {
... PackageRequirement('pip', 'coala_decorators', '0.2.1')}
If your bear uses requirements from a manager we have a subclass from,
you can use the subclass, such as ``PipRequirement``, without specifying
manager:
>>> class SomeBear(Bear):
... REQUIREMENTS = {PipRequirement('coala_decorators', '0.2.1')}
To specify additional attributes to your bear, use the following:
>>> class SomeBear(Bear):
... AUTHORS = {'Jon Snow'}
... AUTHORS_EMAILS = {'jon_snow@gmail.com'}
... MAINTAINERS = {'Catelyn Stark'}
... MAINTAINERS_EMAILS = {'catelyn_stark@gmail.com'}
... LICENSE = 'AGPL-3.0'
... ASCIINEMA_URL = 'https://asciinema.org/a/80761'
If the maintainers are the same as the authors, they can be omitted:
>>> class SomeBear(Bear):
... AUTHORS = {'Jon Snow'}
... AUTHORS_EMAILS = {'jon_snow@gmail.com'}
>>> SomeBear.maintainers
{'Jon Snow'}
>>> SomeBear.maintainers_emails
{'jon_snow@gmail.com'}
If your bear needs to include local files, then specify it giving strings
containing relative file paths to the INCLUDE_LOCAL_FILES set:
>>> class SomeBear(Bear):
... INCLUDE_LOCAL_FILES = {'checkstyle.jar', 'google_checks.xml'}
To keep track easier of what a bear can do, simply tell it to the CAN_FIX
and the CAN_DETECT sets. Possible values:
>>> CAN_DETECT = {'Syntax', 'Formatting', 'Security', 'Complexity', 'Smell',
... 'Unused Code', 'Redundancy', 'Variable Misuse', 'Spelling',
... 'Memory Leak', 'Documentation', 'Duplication', 'Commented Code',
... 'Grammar', 'Missing Import', 'Unreachable Code', 'Undefined Element',
... 'Code Simplification', 'Statistics'}
>>> CAN_FIX = {'Syntax', ...}
Specifying something to CAN_FIX makes it obvious that it can be detected
too, so it may be omitted:
>>> class SomeBear(Bear):
... CAN_DETECT = {'Syntax', 'Security'}
... CAN_FIX = {'Redundancy'}
>>> list(sorted(SomeBear.can_detect))
['Redundancy', 'Security', 'Syntax']
Every bear has a data directory which is unique to that particular bear:
>>> class SomeBear(Bear): pass
>>> class SomeOtherBear(Bear): pass
>>> SomeBear.data_dir == SomeOtherBear.data_dir
False
BEAR_DEPS contains bear classes that are to be executed before this bear
gets executed. The results of these bears will then be passed to the
run method as a dict via the dependency_results argument. The dict
will have the name of the Bear as key and the list of its results as
results:
>>> class SomeBear(Bear): pass
>>> class SomeOtherBear(Bear):
... BEAR_DEPS = {SomeBear}
>>> SomeOtherBear.BEAR_DEPS
{<class 'coalib.bears.Bear.SomeBear'>}
Every bear resides in some directory which is specified by the
source_location attribute:
>>> class SomeBear(Bear): pass
>>> SomeBear.source_location
'...Bear.py'
Every linter bear makes use of an executable tool for its operations.
The SEE_MORE attribute provides a link to the main page of the linter
tool:
>>> class PyLintBear(Bear):
... SEE_MORE = 'https://www.pylint.org/'
>>> PyLintBear.SEE_MORE
'https://www.pylint.org/'
In the future, bears will not survive without aspects. aspects are defined
as part of the ``class`` statement's parameter list. According to the
classic ``CAN_DETECT`` and ``CAN_FIX`` attributes, aspects can either be
only ``'detect'``-able or also ``'fix'``-able:
>>> from coalib.bearlib.aspects.Metadata import CommitMessage
>>> class aspectsCommitBear(Bear, aspects={
... 'detect': [CommitMessage.Shortlog.ColonExistence],
... 'fix': [CommitMessage.Shortlog.TrailingPeriod],
... }, languages=['Python']):
... pass
>>> aspectsCommitBear.aspects['detect']
[<aspectclass 'Root.Metadata.CommitMessage.Shortlog.ColonExistence'>]
>>> aspectsCommitBear.aspects['fix']
[<aspectclass 'Root.Metadata.CommitMessage.Shortlog.TrailingPeriod'>]
To indicate the bear uses raw files, set ``USE_RAW_FILES`` to True:
>>> class RawFileBear(Bear):
... USE_RAW_FILES = True
>>> RawFileBear.USE_RAW_FILES
True
However if ``USE_RAW_FILES`` is enabled the Bear is in charge of managing
the file (opening the file, closing the file, reading the file, etc).
"""
LANGUAGES = set()
REQUIREMENTS = set()
AUTHORS = set()
AUTHORS_EMAILS = set()
MAINTAINERS = set()
MAINTAINERS_EMAILS = set()
PLATFORMS = {'any'}
LICENSE = ''
INCLUDE_LOCAL_FILES = set()
CAN_DETECT = set()
CAN_FIX = set()
ASCIINEMA_URL = ''
SEE_MORE = ''
BEAR_DEPS = set()
USE_RAW_FILES = False
@classproperty
def name(cls):
"""
:return: The name of the bear
"""
return cls.__name__
@classproperty
def can_detect(cls):
"""
:return: A set that contains everything a bear can detect, gathering
information from what it can fix too.
"""
return cls.CAN_DETECT | cls.CAN_FIX
@classproperty
def source_location(cls):
"""
:return: The file path where the bear was fetched from.
"""
return inspect.getfile(cls)
@classproperty
def maintainers(cls):
"""
:return: A set containing ``MAINTAINERS`` if specified, else takes
``AUTHORS`` by default.
"""
return cls.AUTHORS if cls.MAINTAINERS == set() else cls.MAINTAINERS
@classproperty
def maintainers_emails(cls):
"""
:return: A set containing ``MAINTAINERS_EMAILS`` if specified, else
takes ``AUTHORS_EMAILS`` by default.
"""
return (cls.AUTHORS_EMAILS if cls.MAINTAINERS_EMAILS == set()
else cls.MAINTAINERS_EMAILS)
@enforce_signature
def __init__(self,
section: Section,
message_queue,
timeout=0):
"""
Constructs a new bear.
:param section: The section object where bear settings are
contained.
:param message_queue: The queue object for messages. Can be ``None``.
:param timeout: The time the bear is allowed to run. To set no
time limit, use 0.
:raises TypeError: Raised when ``message_queue`` is no queue.
:raises RuntimeError: Raised when bear requirements are not fulfilled.
"""
Printer.__init__(self)
if message_queue is not None and not hasattr(message_queue, 'put'):
raise TypeError('message_queue has to be a Queue or None.')
self.section = section
self.message_queue = message_queue
self.timeout = timeout
self.debugger = _is_debugged(bear=self)
self.profile = _is_profiled(bear=self)
if self.profile and self.debugger:
raise ValueError(
'Cannot run debugger and profiler at the same time.')
self.setup_dependencies()
cp = type(self).check_prerequisites()
if cp is not True:
error_string = ('The bear ' + self.name +
' does not fulfill all requirements.')
if cp is not False:
error_string += ' ' + cp
self.err(error_string)
raise RuntimeError(error_string)
def _print(self, output, **kwargs):
self.debug(output)
def log_message(self, log_message, timestamp=None, **kwargs):
if self.message_queue is not None:
self.message_queue.put(log_message)
def run(self, *args, dependency_results=None, **kwargs):
raise NotImplementedError
def _dump_bear_profile_data(self, profiler):
filename = '{}_{}.prof'.format(self.section.name, self.name)
path = join(self.profile, filename)
if not isdir(self.profile):
try:
makedirs(self.profile)
except FileExistsError:
logging.error('File exists: {}'.format(self.profile))
raise SystemExit(2)
profiler.dump_stats(path)
def profile_run(self, *args, profiler=None, **kwargs):
profiler = cProfile.Profile() if profiler is None else profiler
bear_results = profiler.runcall(self.run, *args, **kwargs)
if isinstance(bear_results, collections.abc.Iterable):
results = []
iterator = iter(bear_results)
while True:
try:
result = profiler.runcall(next, iterator)
results.append(result)
except StopIteration:
break
else:
results = bear_results
self._dump_bear_profile_data(profiler)
return results
def run_bear_from_section(self, args, kwargs):
try:
# Don't get `language` setting from `section.contents`
if self.section.language and (
'language' in self.get_metadata()._optional_params or
'language' in self.get_metadata()._non_optional_params):
kwargs['language'] = self.section.language
kwargs.update(
self.get_metadata().create_params_from_section(self.section))
except ValueError as err:
self.warn('The bear {} cannot be executed.'.format(
self.name), str(err))
return
if self.debugger:
return debug_run(self.run, Debugger(bear=self), *args, **kwargs)
elif self.profile:
return self.profile_run(*args, **kwargs)
else:
return self.run(*args, **kwargs)
def execute(self, *args, debug=False, **kwargs):
name = self.name
try:
self.debug('Running bear {}...'.format(name))
# If `dependency_results` kwargs is defined but there are no
# dependency results (usually in Bear that has no dependency)
# delete the `dependency_results` kwargs, since most Bears don't
# define `dependency_results` kwargs in its `run()` function.
if ('dependency_results' in kwargs and
kwargs['dependency_results'] is None and
not self.BEAR_DEPS):
del kwargs['dependency_results']
# If it's already a list it won't change it
result = self.run_bear_from_section(args, kwargs)
return [] if result is None else list(result)
except (Exception, SystemExit) as exc:
if debug and not isinstance(exc, SystemExit):
raise
if isinstance(exc, ZeroOffsetError):
self.err('Bear {} violated one-based offset convention.'
.format(name), str(exc))
if (self.kind() == BEAR_KIND.LOCAL
and ('log_level' not in self.section
or self.section['log_level'].value != 'DEBUG')):
self.err('Bear {} failed to run on file {}. Take a look '
'at debug messages (`-V`) for further '
'information.'.format(name, args[0]))
elif ('log_level' not in self.section
or self.section['log_level'].value != 'DEBUG'):
self.err('Bear {} failed to run. Take a look '
'at debug messages (`-V`) for further '
'information.'.format(name))
self.debug(
'The bear {bear} raised an exception. If you are the author '
'of this bear, please make sure to catch all exceptions. If '
'not and this error annoys you, you might want to get in '
'contact with the author of this bear.\n\nTraceback '
'information is provided below:\n\n{traceback}'
'\n'.format(bear=name, traceback=traceback.format_exc()))
@staticmethod
def kind():
"""
:return: The kind of the bear
"""
raise NotImplementedError
@classmethod
def get_metadata(cls):
"""
:return: Metadata for the run function. However parameters like
``self`` or parameters implicitly used by coala (e.g.
filename for local bears) are already removed.
"""
return FunctionMetadata.from_function(
cls.run,
omit={'self', 'dependency_results', 'language'})
@classmethod
def __json__(cls):
"""
Override JSON export of ``Bear`` object.
"""
# json cannot serialize properties, so drop them
_dict = {key: value for key, value in get_public_members(cls).items()
if not isinstance(value, property)}
metadata = cls.get_metadata()
non_optional_params = metadata.non_optional_params
optional_params = metadata.optional_params
_dict['metadata'] = {
'desc': metadata.desc,
'non_optional_params': ({param: non_optional_params[param][0]}
for param in non_optional_params),
'optional_params': ({param: optional_params[param][0]}
for param in optional_params)}
if hasattr(cls, 'languages'):
_dict['languages'] = (str(language) for language in cls.languages)
return _dict
@classmethod
def missing_dependencies(cls, lst):
"""
Checks if the given list contains all dependencies.
:param lst: A list of all already resolved bear classes (not
instances).
:return: A set of missing dependencies.
"""
return set(cls.BEAR_DEPS) - set(lst)
@classmethod
def get_non_optional_settings(cls, recurse=True):
"""
This method has to determine which settings are needed by this bear.
The user will be prompted for needed settings that are not available
in the settings file so don't include settings where a default value
would do.
Note: This function also queries settings from bear dependencies in
recursive manner. Though circular dependency chains are a challenge to
achieve, this function would never return on them!
:param recurse: Get the settings recursively from its dependencies.
:return: A dictionary of needed settings as keys and a tuple of
help text and annotation as values.
"""
non_optional_settings = {}
if recurse:
for dependency in cls.BEAR_DEPS:
non_optional_settings.update(
dependency.get_non_optional_settings())
non_optional_settings.update(cls.get_metadata().non_optional_params)
return non_optional_settings
@staticmethod
def setup_dependencies():
"""
This is a user defined function that can download and set up
dependencies (via download_cached_file or arbitrary other means) in an
OS independent way.
"""
@classmethod
def check_prerequisites(cls):
"""
Checks whether needed runtime prerequisites of the bear are satisfied.
This function gets executed at construction.
Section value requirements shall be checked inside the ``run`` method.
>>> from dependency_management.requirements.PipRequirement import (
... PipRequirement)
>>> class SomeBear(Bear):
... REQUIREMENTS = {PipRequirement('pip')}
>>> SomeBear.check_prerequisites()
True
>>> class SomeOtherBear(Bear):
... REQUIREMENTS = {PipRequirement('really_bad_package')}
>>> SomeOtherBear.check_prerequisites()
'really_bad_package is not installed. You can install it using ...'
>>> class anotherBear(Bear):
... REQUIREMENTS = {PipRequirement('bad_package', '0.0.1')}
>>> anotherBear.check_prerequisites()
'bad_package 0.0.1 is not installed. You can install it using ...'
:return: True if prerequisites are satisfied, else False or a string
that serves a more detailed description of what's missing.
"""
for requirement in cls.REQUIREMENTS:
if not requirement.is_installed():
return str(requirement) + ' is not installed. You can ' + (
'install it using ') + (
' '.join(requirement.install_command()))
return True
def get_config_dir(self):
"""
Gives the directory where the configuration file is.
:return: Directory of the config file.
"""
return get_config_directory(self.section)
def download_cached_file(self, url, filename):
"""
Downloads the file if needed and caches it for the next time. If a
download happens, the user will be informed.
Take a sane simple bear:
>>> from queue import Queue
>>> bear = Bear(Section("a section"), Queue())
We can now carelessly query for a neat file that doesn't exist yet:
>>> from os import remove
>>> if exists(join(bear.data_dir, "a_file")):
... remove(join(bear.data_dir, "a_file"))
>>> file = bear.download_cached_file("https://github.com/", "a_file")
If we download it again, it'll be much faster as no download occurs:
>>> newfile = bear.download_cached_file("https://github.com/", "a_file")
>>> newfile == file
True
:param url: The URL to download the file from.
:param filename: The filename it should get, e.g. "test.txt".
:return: A full path to the file ready for you to use!
"""
filename = join(self.data_dir, filename)
if exists(filename):
return filename
self.info('Downloading {filename!r} for bear {bearname} from {url}.'
.format(filename=filename, bearname=self.name, url=url))
response = requests.get(url, stream=True, timeout=20)
response.raise_for_status()
with open(filename, 'wb') as file:
for chunk in response.iter_content(chunk_size=16 * 1024):
file.write(chunk)
return filename
@classproperty
def data_dir(cls):
"""
Returns a directory that may be used by the bear to store stuff. Every
bear has its own directory, dependent on its name.
"""
data_dir = abspath(join(user_data_dir('coala-bears'), cls.name))
makedirs(data_dir, exist_ok=True)
return data_dir
@property
def new_result(self):
"""
Returns a partial for creating a result with this bear already bound.
"""
return partial(Result.from_values, self) | 36.61756 | 80 | 0.614622 | 2,906 | 24,607 | 5.097041 | 0.204405 | 0.008034 | 0.012625 | 0.006751 | 0.177761 | 0.127869 | 0.109101 | 0.095598 | 0.081015 | 0.075479 | 0 | 0.002063 | 0.290893 | 24,607 | 672 | 81 | 36.61756 | 0.846811 | 0.432885 | 0 | 0.168919 | 0 | 0 | 0.096834 | 0.001679 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10473 | false | 0.006757 | 0.074324 | 0 | 0.358108 | 0.010135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6564231dc490aa50a267d1b178c44416188487d5 | 445 | py | Python | crypto.py | karuu6/blockr | 174c6cb1d4275603f11af0b3aa19f3a05256184b | [
"MIT"
] | 1 | 2021-12-22T01:30:47.000Z | 2021-12-22T01:30:47.000Z | crypto.py | karuu6/blockr | 174c6cb1d4275603f11af0b3aa19f3a05256184b | [
"MIT"
] | null | null | null | crypto.py | karuu6/blockr | 174c6cb1d4275603f11af0b3aa19f3a05256184b | [
"MIT"
] | null | null | null | from hashlib import sha256
from datetime import datetime
_ts = lambda: str(datetime.now())
def hash(d):
h = sha256()
h.update(d)
return h.hexdigest()
def file_hash(fname):
with open(fname, 'rb') as f:
h = sha256()
for chunk in iter(lambda: f.read(4096), b''):
h.update(chunk)
return h.hexdigest()
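# Usage sketch (hypothetical filename):
#   file_hash('block.dat')  # hex SHA-256 digest of the file contents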
# crappy encryption for
# proof of concept
def xor(d):
b=bytearray(d)
for i in range(len(b)):
b[i] ^= 0x69
return bytes(b) | 17.8 | 46 | 0.67191 | 75 | 445 | 3.906667 | 0.56 | 0.047782 | 0.109215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043716 | 0.177528 | 445 | 25 | 47 | 17.8 | 0.756831 | 0.08764 | 0 | 0.222222 | 0 | 0 | 0.004951 | 0 | 0 | 0 | 0.009901 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.111111 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6565c709724d9195649f74910cef4b57c952487b | 9,875 | py | Python | python/chromium/cr.py | asankah/stonesthrow | e207640910fa5cdae0165b94ade639a42e3cc44b | [
"Apache-2.0"
] | null | null | null | python/chromium/cr.py | asankah/stonesthrow | e207640910fa5cdae0165b94ade639a42e3cc44b | [
"Apache-2.0"
] | null | null | null | python/chromium/cr.py | asankah/stonesthrow | e207640910fa5cdae0165b94ade639a42e3cc44b | [
"Apache-2.0"
] | null | null | null | import stonesthrow
import argparse
import os
import platform
import shutil
import subprocess
import time
def Argument(*args, **kwargs):
"""Decorator Argument adds an annotation to a function indicating
which argparse arguments are required by the underlying command.
Usage:
@Argument('--hello', '-H', help='Hello world!')
def foo_Command():
pass
... will annotate `foo` with an argument that's equivalent to what's produced
by argparse.add_argument().
"""
def wrap(func):
if 'option_arguments' not in vars(func):
vars(func)['option_arguments'] = []
vars(func)['option_arguments'].append((args, kwargs))
return func
return wrap
def CommandNeedsSource(func):
"""Decorator @CommandNeedsSource annotates a command handler indicating
that the command requires an up-to-date source checkout. This means that
the builder host will check out the revision matching the request and
perform a `gclient sync` if necessary along with preparing the build
directory.
Usage:
@CommandNeedsSource
def foo_Command():
pass
"""
vars(func)['needs_source'] = True
return func
def InvokeMb(options, *args):
if len(args) == 0 or not isinstance(options, stonesthrow.Options):
raise ValueError('first argument should be an Options object')
if platform.system() == 'Windows':
mb_tool = os.path.join(options.source_path, 'tools', 'mb', 'mb.bat')
else:
mb_tool = os.path.join(options.source_path, 'tools', 'mb', 'mb.py')
command = [
mb_tool, args[0], '-c', options.mb_config, '-g', options.goma_path,
options.build_path
] + list(args[1:])
stonesthrow.CheckCall(command, cwd=options.source_path)
def IsGomaRunning(options, goma_status_cmd):
output = stonesthrow.CheckOutput(goma_status_cmd)
for line in output.splitlines():
line = line.strip()
if line.startswith('compiler proxy '
) and ' status: ' in line and line.endswith('ok'):
return True
return False
def EnsureGoma(options):
if platform.system() == 'Windows':
attempted_to_start_goma = False
for x in range(5):
goma_ctl = os.path.join(options.goma_path, 'goma_ctl.bat')
if IsGomaRunning(options,
['cmd.exe', '/c', goma_ctl, 'status']):
return True
if not attempted_to_start_goma:
attempted_to_start_goma = True
command = ['cmd.exe', '/c', goma_ctl, 'ensure_start']
# Don't wait for completion.
subprocess.Popen(command, shell=True)
time.sleep(1)
stonesthrow.Error('timed out while attempting to start Goma')
return False
# On Posix
goma_ctl = os.path.join(options.goma_path, 'goma_ctl.py')
if IsGomaRunning(options, ['python', goma_ctl, 'status']):
return True
stonesthrow.CheckCall(['python', goma_ctl, 'ensure_start'])
return True
def GetNinjaCommand(options):
command = ['ninja']
if options.max_build_jobs != 0:
command += ['-j', str(options.max_build_jobs)]
command += ['-C', options.build_path]
return command
def _BuildTargetFromCommand(options, command):
if os.path.isabs(command):
return None
if os.path.dirname(command) in ['', '.']:
return os.path.basename(command)
return None
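# Illustrative behaviour (opts is any Options object):
#   _BuildTargetFromCommand(opts, 'chrome')      -> 'chrome'
#   _BuildTargetFromCommand(opts, '/usr/bin/gn') -> None  (absolute paths are not targets)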
class Commands:
"""Defines the commands that will be invoked for a Chromium repository.
Any method that ends with the suffix '_Command' defines a new subcommand.
Arguments and source requirements for the subcommand are specified via the
@Argument and @CommandNeedsSource decorators.
A method defining a subcommand receives a single argument, which is the
result of calling argparse.ArgumentParser.parse_args() containing the
parsed arguments.
"""
def Prepare_Command(self, options):
"""prepare build directory."""
InvokeMb(options, 'gen')
@CommandNeedsSource
@Argument(
'targets',
nargs=argparse.REMAINDER,
metavar='TARGETS',
help='targets to build')
def Build_Command(self, options):
"""build specified targets."""
if len(options.targets) == 0:
stonesthrow.Error('no targets specified')
return
# If Goma fails, don't try to run a build. A suitable error should
# already have been presented.
if not EnsureGoma(options):
return
stonesthrow.CheckCall(
GetNinjaCommand(options) + options.targets, cwd=options.build_path)
@Argument(
'targets',
nargs=argparse.REMAINDER,
metavar='TARGETS',
help='targets to clean')
def Clean_Command(self, options):
"""clean specified targets."""
stonesthrow.CheckCall(
GetNinjaCommand(options) + ['-t', 'clean'] + options.targets,
cwd=options.build_path)
@Argument('--source', action='store_true', help='clobber source')
@Argument('--output', '-o', action='store_true', help='clobber output')
@Argument('--force', '-f', action='store_true', help='force')
def Clobber_Command(self, options):
"""clobber output directory."""
if not (options.source or options.output):
stonesthrow.Info('need to specify either --source or --output')
return
if options.source:
force_flag = ['--force'] if options.force else []
stonesthrow.CheckCall(['git', 'clean'] + force_flag)
if options.output:
if options.force:
shutil.rmtree(options.build_path)
InvokeMb(options, 'gen')
else:
stonesthrow.Info(
'will remove everything in {}'.format(options.build_path))
@Argument(
'--build',
action='store_true',
dest='build',
help='build dependencies')
@Argument(
'args',
nargs=argparse.REMAINDER,
metavar="ARGUMENTS",
help="command to run")
def Run_Command(self, options):
"""runs a command."""
if len(options.args) == 0:
stonesthrow.Error('no arguments specified')
return
if options.build:
build_target = _BuildTargetFromCommand(options, options.args[0])
if build_target is not None:
ninja_command = GetNinjaCommand(options)
stonesthrow.CheckCall(ninja_command + [build_target])
stonesthrow.CheckCall(options.args, cwd=options.build_path)
def RebaseUpdate_Command(self, options):
"""runs 'git rebase-update'."""
clank_dir = os.path.join(options.source_path, "clank")
if os.path.exists(clank_dir):
stonesthrow.CheckCall(
['git', 'checkout', 'origin/master'], cwd=clank_dir)
stonesthrow.CheckCall(
['git', 'pull', 'origin', 'master'], cwd=clank_dir)
chrome_dir = options.source_path
stonesthrow.CheckCall(
['git', 'checkout', 'origin/master'], cwd=chrome_dir)
stonesthrow.CheckCall(
['git', 'pull', 'origin', 'master'], cwd=chrome_dir)
stonesthrow.CheckCall(['gclient', 'sync'], cwd=chrome_dir)
stonesthrow.CheckCall(['git', 'clean', '-f'], cwd=chrome_dir)
stonesthrow.CheckCall(
['git', 'rebase-update', '--no-fetch', '--keep-going'],
cwd=chrome_dir)
@CommandNeedsSource
def Sync_Command(self, options):
"""Run 'gclient sync'"""
stonesthrow.CheckCall(['gclient', 'sync'], cwd=options.source_path, shell=True)
def ConfigureFlags(config):
parser = argparse.ArgumentParser(
description='Chromium platform specific subcommands')
subparsers = parser.add_subparsers()
c = Commands()
for name in dir(c):
if not name.endswith('_Command'):
continue
value = getattr(c, name)
command = name[:-len('_Command')].lower()
doc = value.__doc__
subparser = subparsers.add_parser(command, help=doc)
subparser.set_defaults(method=value)
if hasattr(value, 'option_arguments'):
for args, kwargs in value.option_arguments:
subparser.add_argument(*args, **kwargs)
return parser
def NeedsSource(options):
if not hasattr(options, 'method'):
raise ValueError('invalid command')
return hasattr(options.method,
'needs_source') and options.method.needs_source
def _GetCommandDescriptor(command_name, command):
doc = command.__doc__.splitlines()
description = doc[0]
depends_on_source = hasattr(command,
'needs_source') and command.needs_source
usage = '\n'.join(doc[2:])
if hasattr(command, 'option_arguments'):
usage += "\nOptions:\n"
for args, kwargs in command.option_arguments:
usage += """{flags} :
{help}
""".format(flags=', '.join(args),
help=kwargs.get("help", "(no description given)"))
return {
"name": [command_name],
"description": description,
"usage": usage,
"depends_on_source": depends_on_source,
"visible": True
}
def ListCommands(options):
commands = []
c = Commands()
for name in dir(c):
if not name.endswith('_Command'):
continue
commands.append(
_GetCommandDescriptor(name[:-len('_Command')].lower(),
getattr(c, name)))
return commands
def NotifyEvent(options):
pass
def Run(options):
if not hasattr(options, 'method'):
raise ValueError('invalid command')
return options.method(options)
| 30.478395 | 87 | 0.612456 | 1,087 | 9,875 | 5.449862 | 0.24563 | 0.050641 | 0.021269 | 0.014348 | 0.174038 | 0.142809 | 0.126435 | 0.104997 | 0.089804 | 0.089804 | 0 | 0.001524 | 0.269266 | 9,875 | 323 | 88 | 30.572755 | 0.819429 | 0.141671 | 0 | 0.247619 | 0 | 0 | 0.132485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.004762 | 0.033333 | 0 | 0.242857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65673a332a9a38fe7aa96a26446698421315ef1b | 7,452 | py | Python | main.py | Brazzers211/Telegram_bots | 3996d379f627ccef9cf753bf4e7e7e3a96f92782 | [
"BSL-1.0"
] | null | null | null | main.py | Brazzers211/Telegram_bots | 3996d379f627ccef9cf753bf4e7e7e3a96f92782 | [
"BSL-1.0"
] | null | null | null | main.py | Brazzers211/Telegram_bots | 3996d379f627ccef9cf753bf4e7e7e3a96f92782 | [
"BSL-1.0"
] | null | null | null | import os
from datetime import datetime, timedelta
from aiogram import Bot, Dispatcher, executor, types
from aiogram.types import ReplyKeyboardRemove, ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton, ContentType
button_hi = KeyboardButton('Сливы')
button_hi2 = KeyboardButton('Архивы')
button_hi3 = KeyboardButton('Приватка')
greet_kb = ReplyKeyboardMarkup(resize_keyboard=True)
greet_kb.add(button_hi,button_hi2,button_hi3)
Arhivi = KeyboardButton('Школьницы')
Arhivi2 = KeyboardButton('Милфы')
menu = KeyboardButton('Главное меню')
Arhivis = ReplyKeyboardMarkup(resize_keyboard=True)
Arhivis.add(Arhivi,Arhivi2,menu)
Privatka = KeyboardButton('Купить')
Otzivi = KeyboardButton('Отзывы')
menu = KeyboardButton('Главное меню')
press = ReplyKeyboardMarkup(resize_keyboard=True)
press.add(Privatka,Otzivi,menu)
Pokypka_arhiv = KeyboardButton('Купить Архив со Шк')
menu = KeyboardButton('Главное меню')
arif = ReplyKeyboardMarkup(resize_keyboard=True)
arif.add(Pokypka_arhiv,menu)
Pokypka_arhiv2 = KeyboardButton('Купить Архив со Милфами')
menu = KeyboardButton('Главное меню')
arif2 = ReplyKeyboardMarkup(resize_keyboard=True)
arif2.add(Pokypka_arhiv2,menu)
sub_inline_markup = InlineKeyboardMarkup(row_width=1)
ssilka_oplata = InlineKeyboardButton(text="Месяц приватки 250р", callback_data="submonth")
sub_inline_markup.insert(ssilka_oplata)
sub_inline_shkool_markup = InlineKeyboardMarkup(row_width=1)
ssilka_oplata_shkool = InlineKeyboardButton(text="Покупка архива школьниц", callback_data="shkool")
sub_inline_shkool_markup.insert(ssilka_oplata_shkool)
sub_inline_milfs_markup = InlineKeyboardMarkup(row_width=1)
ssilka_oplata_milfs = InlineKeyboardButton(text="Покупка архива милф", callback_data="milfs")
sub_inline_milfs_markup.insert(ssilka_oplata_milfs)
TOKEN_KASSA = "381764678:TEST:33651"
API_TOKEN = "5220046644:AAEHt4ki_jK3mH8S0ds8-2UNxfSqJ5xZCtw"
bot = Bot(token=API_TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def process_start_command(message: types.Message):
await message.answer("Привет, я бот приват Сливов @slivu97\n<◇>~•~ОБЬЯВЛЕНИЕ~•~<◇>\nБот в ранем доступе! прошу не покупать архивы\nПотратите деньги в пустую.\n<◇>~•~ОБЬЯВЛЕНИЕ~•~<◇>\nВыбери что тебя интересует",reply_markup=greet_kb)
@dp.message_handler(text=['Главное меню'])
async def glav_menu(message: types.Message):
await message.answer("~~~~Вы_Перешли_В~~~~\n~~~главное меню~~~",reply_markup=greet_kb)
@dp.message_handler(text=['Сливы'])
async def nashi_slivu(message: types.Message):
await message.answer("Наши сливы\n№1 - @slivu97\n№2 - @slivu96")
@dp.message_handler(text=['Архивы'])
async def arhifs_menu(message: types.Message):
await message.answer("Есть архивы вот такие:\n№1-Школьницы\n№2-Милфы",reply_markup=Arhivis)
@dp.message_handler(text=['Приватка'])
async def privatochka(message: types.Message):
await message.answer("Вход в приватку стоит 250р\nУспей пока цены не поднялись",reply_markup=press)
@dp.message_handler(text=['Школьницы'])
async def shk(message: types.Message):
await message.answer("Архив со шк 1гб Цена 500р", reply_markup=arif)
@dp.message_handler(text=['Милфы'])
async def mifs(message: types.Message):
await message.answer("Архив со Милфами 1гб Цена 250р",reply_markup=arif2)
@dp.message_handler(text=['Купить Архив со Шк'])
async def arhif_shk(message: types.Message):
await message.answer("◇ВОТ ИНЛАЙН КНОПКА ДЛЯ ОПЛАТЫ◇", reply_markup=sub_inline_shkool_markup)
@dp.message_handler(text=['Купить Архив со Милфами'])
async def arhif_mifs(message: types.Message):
await message.answer("◇ВОТ ИНЛАЙН КНОПКА ДЛЯ ОПЛАТЫ◇", reply_markup=sub_inline_milfs_markup)
@dp.message_handler(text=['Рефиральная Ссылка'])
async def ref_silka(message: types.Message):
await message.answer("Ещё не создана")
@dp.message_handler(text=['Купить'])
async def process_h2_command(message: types.Message):
await message.answer("◇ВОТ ИНЛАЙН КНОПКА ДЛЯ ОПЛАТЫ◇", reply_markup=sub_inline_markup)
@dp.message_handler(text=['Отзывы'])
async def procss_hi2_command(message: types.Message):
await message.answer("Лови")
await bot.send_photo(message.chat.id,photo=open('1.jpg', 'rb'))
await bot.send_photo(message.chat.id,photo=open('2.jpg', 'rb'))
await bot.send_photo(message.chat.id,photo=open('3.jpg', 'rb'))
@dp.message_handler(text=['Пополнить'])
async def esli_kupil_privat(message: types.Message):
await message.answer("◇ВОТ ИНЛАЙН КНОПКА ДЛЯ ОПЛАТЫ◇", reply_markup=sub_inline_markup)
@dp.callback_query_handler(text=['shkool'])
async def submonth(callp: types.CallbackQuery):
await bot.delete_message(callp.from_user.id, callp.message.message_id)
await bot.send_invoice(chat_id=callp.from_user.id, title="Покупка архива школьниц", description="Покупка архива со школьницами", payload="month_shkool", provider_token=TOKEN_KASSA, currency="RUB", start_parameter="bot_shk",prices=[{"label": "Руб", "amount": 50000}])
@dp.pre_checkout_query_handler()
async def process_pre_checkout_query(pre_checkout_query: types.PreCheckoutQuery):
await bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True)
@dp.message_handler(content_types=ContentType.SUCCESSFUL_PAYMENT)
async def process_pay_shk(message: types.Message):
if message.successful_payment.invoice_payload == "month_shkool":
await message.answer(f"Держи ссылку\nhttps://mega.nz/folder/vaYHlYTJ#jlFyV7T2uq3-J4X1nenP1A")
@dp.callback_query_handler(text=['milfs'])
async def milfs_arhiv(calls: types.CallbackQuery):
await bot.delete_message(calls.from_user.id, calls.message.message_id)
await bot.send_invoice(chat_id=calls.from_user.id, title="Покупка архива милфами", description="Покупка архива с милфами", payload="month_filfs", provider_token=TOKEN_KASSA, currency="RUB", start_parameter="bot_milfs",prices=[{"label": "Руб", "amount": 25000}])
@dp.pre_checkout_query_handler()
async def process_pre_checkout_query(pre_checkout_query: types.PreCheckoutQuery):
await bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True)
@dp.message_handler(content_types=ContentType.SUCCESSFUL_PAYMENT)
async def process_pay_milf(message: types.Message):
if message.successful_payment.invoice_payload == "month_filfs":
await message.answer(f"Держи ссылку\nНа милф")
@dp.callback_query_handler(text=['submonth'])
async def submonth(call: types.CallbackQuery):
await bot.delete_message(call.from_user.id, call.message.message_id)
await bot.send_invoice(chat_id=call.from_user.id, title="Покупка привата", description="Покупка доступа к приват каналу", payload="month_sub", provider_token=TOKEN_KASSA, currency="RUB", start_parameter="test_bots",prices=[{"label": "Руб", "amount": 25000}])
@dp.pre_checkout_query_handler()
async def process_pre_checkout_query(pre_checkout_query: types.PreCheckoutQuery):
await bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True)
@dp.message_handler(content_types=ContentType.SUCCESSFUL_PAYMENT)
async def process_pay(message: types.Message):
if message.successful_payment.invoice_payload == "month_sub":
chat_id = -1001554743391
expire_date = datetime.now()
timedelta(days=1)
link = await bot.create_chat_invite_link(chat_id, expire_date.timestamp, 1)
await message.answer(f"Держи ссылку\n{link.invite_link}")
if __name__ == '__main__':
executor.start_polling(dp, skip_updates=True)
| 46.867925 | 270 | 0.784085 | 1,022 | 7,452 | 5.518591 | 0.231898 | 0.031206 | 0.04539 | 0.055319 | 0.495213 | 0.464362 | 0.392731 | 0.322163 | 0.284574 | 0.237766 | 0 | 0.016267 | 0.092593 | 7,452 | 158 | 271 | 47.164557 | 0.814256 | 0 | 0 | 0.14876 | 0 | 0.008264 | 0.191224 | 0.02818 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.033058 | 0 | 0.033058 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
656865e34d7cc98e4e8cdf2ae8330745097b481a | 2,909 | py | Python | cos_distance.py | ChiangYintso/cabbage-search | 67d5a2ac6f90b97f4fdf402c8651ae98e779faa1 | [
"Apache-2.0"
] | null | null | null | cos_distance.py | ChiangYintso/cabbage-search | 67d5a2ac6f90b97f4fdf402c8651ae98e779faa1 | [
"Apache-2.0"
] | null | null | null | cos_distance.py | ChiangYintso/cabbage-search | 67d5a2ac6f90b97f4fdf402c8651ae98e779faa1 | [
"Apache-2.0"
] | null | null | null | import sys
from typing import IO, DefaultDict, List, KeysView
from collections import defaultdict
from math import log10
import os
import numpy as np
def get_term_frequencies_n(file: IO) -> DefaultDict[str, int]:
result: DefaultDict[str, int] = defaultdict(int)
lines = file.readlines()
for line in lines:
terms = line.split('/')
for term in terms:
result[term] += 1
return result
def get_tf_l(tf_n: int) -> float:
return 1 + log10(tf_n)
def get_idf_t(N: int, df: np.array) -> np.array:
return np.log10(N / df)
def get_tf_vec(term_doc: DefaultDict[str, int], terms: KeysView[str]) -> np.array:
arr = np.zeros(len(terms), dtype=float)
for i, k in enumerate(terms):
arr[i] = term_doc[k]
return arr
def get_df_n(prefix: str, docs: List[str], term: str) -> int:
count = 0
for docpath in docs:
with open(f'{prefix}{docpath}', encoding='utf-8') as f:
text = ''.join(f.readlines())
if term in text:
count += 1
return count
def get_df_list_n(prefix: str, docs, terms):
arr = np.zeros(len(terms), dtype=float)
for i, term in enumerate(terms):
arr[i] = get_df_n(prefix, docs, term)
return arr
def normalise(idf_vec: np.array) -> np.array:
return idf_vec / np.sqrt(sum(idf_vec * idf_vec))
def cos_dis(v1: np.array, v2: np.array) -> float:
return np.sum(v1 * v2) / (
np.sum(v1 * v1) ** 0.5 * np.sum(v2 * v2) ** 0.5)
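# Quick sanity check (toy vectors, not corpus data):
#   cos_dis(np.array([1.0, 0.0, 1.0]), np.array([1.0, 1.0, 0.0]))  # -> 0.5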
if __name__ == '__main__':
if len(sys.argv) != 4:
print('usage: python cos_distance.py <prefix> <doc_id1> <doc_id2>')
exit(-1)
docs = os.listdir(f'build/{sys.argv[1]}_terms')
N = len(docs)
term_path1 = f'build/{sys.argv[1]}_terms/{sys.argv[1]}_{"%05d" % int(sys.argv[2])}.txt'
term_path2 = f'build/{sys.argv[1]}_terms/{sys.argv[1]}_{"%05d" % int(sys.argv[3])}.txt'
tf1_n: DefaultDict[str, int]
tf2_n: DefaultDict[str, int]
terms: KeysView[str]
with open(term_path1, encoding='utf-8') as f1, open(term_path2, encoding='utf-8') as f2:
tf1_n = get_term_frequencies_n(f1)
tf2_n = get_term_frequencies_n(f2)
terms = tf1_n.keys() or tf2_n.keys()
tf_vec_1 = get_tf_vec(tf1_n, terms)
tf_vec_2 = get_tf_vec(tf2_n, terms)
df_vec: np.array = get_df_list_n(f'build/{sys.argv[1]}_terms/', docs, terms)
idf_vec: np.array = get_idf_t(N, df_vec)
idf_vec = normalise(idf_vec)
tfxidf_vec1 = tf_vec_1 * idf_vec
tfxidf_vec2 = tf_vec_2 * idf_vec
cos_similarity = cos_dis(tfxidf_vec1, tfxidf_vec2)
print('similarity: ', cos_similarity)
for i in range(1, 3):
print(f'doc{i}: ')
with open(f'build/{sys.argv[1]}_article/{sys.argv[1]}_{"%05d" % int(sys.argv[i + 1])}.txt',
encoding='utf-8') as f:
for line in f.readlines():
print(line)
print()
| 30.302083 | 99 | 0.613269 | 475 | 2,909 | 3.56 | 0.216842 | 0.049675 | 0.037847 | 0.038439 | 0.258427 | 0.161443 | 0.099941 | 0.087522 | 0.087522 | 0.087522 | 0 | 0.032824 | 0.235476 | 2,909 | 95 | 100 | 30.621053 | 0.727518 | 0 | 0 | 0.055556 | 0 | 0.041667 | 0.135442 | 0.081128 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.083333 | 0.055556 | 0.305556 | 0.069444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65691ba2dea8b985665b55f0e8b4ea929b0974e7 | 1,498 | py | Python | sublimeText3/Packages/SublimeCodeIntel/libs/langinfo_mozilla.py | MoAnsir/dot_file_2017 | 5f67ef8f430416c82322ab7e7e001548936454ff | [
"MIT"
] | 2 | 2018-04-24T10:02:26.000Z | 2019-06-02T13:53:31.000Z | Data/Packages/SublimeCodeIntel/libs/langinfo_mozilla.py | Maxize/Sublime_Text_3 | be620476b49f9a6ce2ca2cfe825c4e142e7e82b9 | [
"Apache-2.0"
] | 1 | 2016-02-10T09:50:09.000Z | 2016-02-10T09:50:09.000Z | Packages/SublimeCodeIntel/libs/langinfo_mozilla.py | prisis/sublime-text-packages | 99ae8a5496613e27a75e5bd91723549b21476e60 | [
"MIT"
] | 2 | 2019-04-11T04:13:02.000Z | 2019-06-02T13:53:33.000Z | # Copyright (c) 2009 ActiveState Software Inc.
# See the file LICENSE.txt for licensing information.
"""LangInfo definitions for languages coming out of the Mozilla project and
that don't logically fit in the other `langinfo_*.py` files.
"""
import re
from langinfo import LangInfo
class StringPropertiesLangInfo(LangInfo):
"""A properties file commonly used in the Mozilla project with
`nsIStringBundleService`.
Note: The Java world also uses ".properties".
http://java.sun.com/docs/books/tutorial/i18n/resbundle/propfile.html
This looks to be the same format. I'm guessing that Mozilla's use
of the extension for string bundles is derived from this.
"""
name = "String Properties"
conforms_to_bases = ["Text"]
exts = [".properties"]
class ChromeManifestLangInfo(LangInfo):
"""A Mozilla chrome manifest file."""
name = "Chrome Manifest"
conforms_to_bases = ["Text"]
# Can't claim ".manifest" extension, because ".manifest" XML files are
# common on Windows for UAC.
filename_patterns = [
"chrome.manifest",
# These for Komodo's benefit:
"chrome.p.manifest", # Suggested usage by 'koext' tool.
"devbuild.manifest", # Komodo: in common usage
]
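# An illustrative line from such a manifest (hypothetical package name):
#   content mypackage chrome/content/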
class XPTLangInfo(LangInfo):
"""Mozilla XPCOM Type info file.
XPT files are the result of compiling .idl files with "xpidl".
http://www.mozilla.org/scriptable/typelib_tools.html
"""
name = "XPT"
exts = [".xpt"]
| 31.208333 | 76 | 0.686248 | 191 | 1,498 | 5.34555 | 0.612565 | 0.041136 | 0.033301 | 0.037218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005089 | 0.212951 | 1,498 | 47 | 77 | 31.87234 | 0.860899 | 0.61482 | 0 | 0.117647 | 0 | 0 | 0.209393 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.764706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
656ba63d398a274aa1f455b7c397d16440a59635 | 3,463 | py | Python | torch_agents/main.py | cogment/cogment-verse | b7e3eddac57021ec77912a5d38e09f8202dc352f | [
"Apache-2.0"
] | 23 | 2021-10-01T01:33:15.000Z | 2022-03-10T18:18:50.000Z | torch_agents/main.py | cogment/cogment-verse | b7e3eddac57021ec77912a5d38e09f8202dc352f | [
"Apache-2.0"
] | 35 | 2021-11-06T04:37:07.000Z | 2022-03-18T18:05:28.000Z | torch_agents/main.py | cogment/cogment-verse | b7e3eddac57021ec77912a5d38e09f8202dc352f | [
"Apache-2.0"
] | 4 | 2021-12-14T15:24:50.000Z | 2022-01-17T11:06:34.000Z | # Copyright 2021 AI Redefined Inc. <dev+cogment@ai-r.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import logging
import os
import sys
import torch.multiprocessing as mp
from dotenv import load_dotenv
import cogment
from cogment_verse import RunContext
from cogment_verse_torch_agents.hive_adapter.hive_agent_adapter import HiveAgentAdapter
from cogment_verse_torch_agents.muzero.adapter import MuZeroAgentAdapter
from cogment_verse_torch_agents.simple_a2c.simple_a2c_agent import SimpleA2CAgentAdapter
from cogment_verse_torch_agents.simple_bc import SimpleBCAgentAdapter
from cogment_verse_torch_agents.hf_sb3.sb3_adapter import SimpleSB3AgentAdapter
from cogment_verse_torch_agents.selfplay_td3.selfplay_agent import SelfPlayAgentAdapter
import cog_settings
load_dotenv()
PORT = int(os.getenv("COGMENT_VERSE_TORCH_AGENTS_PORT", "9000"))
PROMETHEUS_PORT = int(os.getenv("COGMENT_VERSE_TORCH_AGENTS_PROMETHEUS_PORT", "8000"))
TRIAL_DATASTORE_ENDPOINT = os.getenv("COGMENT_VERSE_TRIAL_DATASTORE_ENDPOINT")
MODEL_REGISTRY_ENDPOINT = os.getenv("COGMENT_VERSE_MODEL_REGISTRY_ENDPOINT")
ORCHESTRATOR_ENDPOINT = os.getenv("COGMENT_VERSE_ORCHESTRATOR_ENDPOINT")
ACTOR_ENDPOINTS = json.loads(os.getenv("COGMENT_VERSE_ACTOR_ENDPOINTS"))
ENVIRONMENT_ENDPOINTS = json.loads(os.getenv("COGMENT_VERSE_ENVIRONMENT_ENDPOINTS"))
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
async def main():
context = RunContext(
cog_settings=cog_settings,
user_id="cogment_verse_torch_agents",
services_endpoints={
"orchestrator": ORCHESTRATOR_ENDPOINT,
"trial_datastore": TRIAL_DATASTORE_ENDPOINT,
"model_registry": MODEL_REGISTRY_ENDPOINT,
**ACTOR_ENDPOINTS,
**ENVIRONMENT_ENDPOINTS,
},
)
hive_adapter = HiveAgentAdapter()
hive_adapter.register_implementations(context)
simple_a2c_adapter = SimpleA2CAgentAdapter()
simple_a2c_adapter.register_implementations(context)
muzero_adapter = MuZeroAgentAdapter()
muzero_adapter.register_implementations(context)
simple_bc_adapter = SimpleBCAgentAdapter()
simple_bc_adapter.register_implementations(context)
simple_sb3_adapter = SimpleSB3AgentAdapter()
simple_sb3_adapter.register_implementations(context)
selfplay_td3_adapter = SelfPlayAgentAdapter()
selfplay_td3_adapter.register_implementations(context)
log.info(f"Torch agents service starts on {PORT}...")
await context.serve_all_registered(cogment.ServedEndpoint(port=PORT), prometheus_port=PROMETHEUS_PORT)
if __name__ == "__main__":
mp.set_start_method("spawn")
# suggested fix https://github.com/pytorch/pytorch/issues/67864
mp.set_sharing_strategy("file_system")
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
except KeyboardInterrupt:
log.error("process interrupted")
sys.exit(-1)
| 35.701031 | 106 | 0.788334 | 426 | 3,463 | 6.093897 | 0.396714 | 0.069337 | 0.058937 | 0.079738 | 0.234592 | 0.083975 | 0.058552 | 0.029276 | 0 | 0 | 0 | 0.012383 | 0.137164 | 3,463 | 96 | 107 | 36.072917 | 0.856426 | 0.184233 | 0 | 0 | 0 | 0 | 0.144128 | 0.097153 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.262295 | 0 | 0.262295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
656d7c0cc21ead4737999b83cadb0aa51032a106 | 3,273 | py | Python | tests/fixtures/hypothesis_state.py | timdev/terraform.py | ff74e5cd80d1a9f17292421be8facf1a1eeb8b69 | [
"Apache-2.0"
] | 269 | 2017-04-07T22:11:14.000Z | 2022-03-11T17:35:14.000Z | tests/fixtures/hypothesis_state.py | timdev/terraform.py | ff74e5cd80d1a9f17292421be8facf1a1eeb8b69 | [
"Apache-2.0"
] | 46 | 2015-06-07T18:31:22.000Z | 2017-04-05T19:21:23.000Z | tests/fixtures/hypothesis_state.py | timdev/terraform.py | ff74e5cd80d1a9f17292421be8facf1a1eeb8b69 | [
"Apache-2.0"
] | 74 | 2017-04-06T22:08:11.000Z | 2022-03-02T19:11:42.000Z | # -*- coding: utf-8 -*-
import random
import hypothesis.strategies as st
from string import ascii_letters, ascii_lowercase, digits
aws_region_list = [
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2',
'ap-south-1',
'ap-northeast-1',
'ap-northeast-2',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'eu-west-1',
'eu-west-2']
# this is preparing for more backend types
def get_be_config_st(be):
backend_dict = {
's3': s3_backend_config_st}
return backend_dict[be]


@st.composite
def lineage_st(draw):
    """Hypothesis strategy for generating UUID-like lineage strings."""
    hex_alphabet = list('abcdef0123456789')
    # Five hex segments of lengths 8-4-4-4-12, joined by dashes.
    parts = [
        draw(st.text(alphabet=hex_alphabet, min_size=size, max_size=size))
        for size in (8, 4, 4, 4, 12)
    ]
    return '-'.join(parts)


@st.composite
def s3_bucket_name_st(draw):
    """Hypothesis strategy for s3 bucket names.

    http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
    """
    char1 = draw(st.text(
        alphabet=list(ascii_lowercase + digits),
        min_size=1,
        max_size=1))
    middle = draw(st.text(
        alphabet=list(ascii_lowercase + digits + '.-'),
        min_size=4,
        max_size=61).filter(lambda x: '..' not in x and '.-' not in x and '-.' not in x))
    # Bucket names must end in a lowercase letter or digit, not a dot.
    endchar = draw(st.text(
        alphabet=list(ascii_lowercase + digits),
        min_size=1,
        max_size=1))
    return '{}{}{}'.format(char1, middle, endchar)


@st.composite
def s3_backend_config_st(draw):
    """Hypothesis strategy for s3 backend configuration."""
    s3_be_dict = {
        'bucket': draw(s3_bucket_name_st()),
        'encrypt': draw(st.sampled_from(['true', 'false'])),
        'key': draw(st.text(
            alphabet=list(ascii_letters + digits + '!-_.*\'()/'),
            min_size=1,
            max_size=1024).filter(lambda x: not x.startswith('/'))),
        'region': draw(st.sampled_from(aws_region_list))}
    # Draw the optional profile from the strategy itself; using the stdlib
    # `random` module here would make examples non-reproducible under Hypothesis.
    if draw(st.booleans()):
        s3_be_dict['profile'] = 'testawsprofile'
    return s3_be_dict


@st.composite
def remote_init_st(draw):
    """Hypothesis strategy to generate terraform remote init state."""
    be_type = draw(st.sampled_from(['s3']))
    ri_dict = {
        "version": 3,
        "serial": 0,
        "lineage": draw(lineage_st()),
        "backend": {
            "type": be_type,
            "config": draw(get_be_config_st(be_type)()),
            "hash": draw(st.text(alphabet=list(digits), min_size=18, max_size=18))
        },
        "modules": [
            {
                "path": [
                    "root"
                ],
                "outputs": {},
                "resources": {},
                "depends_on": []
            }
        ]
    }
    return ri_dict
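

# Illustrative usage (not part of the original fixtures). `.example()` is a
# Hypothesis convenience for interactive exploration; inside a test you would
# use `@given(remote_init_st())` instead.
if __name__ == '__main__':
    state = remote_init_st().example()
    print(state['lineage'], state['backend']['config']['bucket'])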
| 27.504202 | 107 | 0.56187 | 398 | 3,273 | 4.444724 | 0.301508 | 0.044093 | 0.056529 | 0.101752 | 0.358395 | 0.309214 | 0.249293 | 0.235726 | 0.18485 | 0.18485 | 0 | 0.046422 | 0.282615 | 3,273 | 118 | 108 | 27.737288 | 0.706985 | 0.113352 | 0 | 0.221053 | 0 | 0 | 0.134636 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.031579 | 0 | 0.136842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
657284ed9c4485732cadeaeccd94e6cc08471af4 | 1,539 | py | Python | model.py | JoshVarty/Reacher | cab41484aaaeeae177cc625c3697d7e7cd80c2ed | [
"MIT"
] | null | null | null | model.py | JoshVarty/Reacher | cab41484aaaeeae177cc625c3697d7e7cd80c2ed | [
"MIT"
] | null | null | null | model.py | JoshVarty/Reacher | cab41484aaaeeae177cc625c3697d7e7cd80c2ed | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F


class FCNetwork(nn.Module):
    def __init__(self, state_size, action_size, output_gate=None):
        super(FCNetwork, self).__init__()
        self.fc1 = nn.Linear(state_size, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, action_size)
        self.output_gate = output_gate

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        if self.output_gate is not None:
            x = self.output_gate(x)
        return x


class ActorCriticNetwork(nn.Module):
    def __init__(self, state_size, action_size):
        super(ActorCriticNetwork, self).__init__()
        self.actor = FCNetwork(state_size, action_size, torch.tanh)  # F.tanh is deprecated
        self.critic = FCNetwork(state_size, 1)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Register std as a plain Parameter; calling .to() on a Parameter returns
        # an ordinary tensor and silently drops it from the module's parameters.
        # self.to(self.device) below moves it along with the rest of the model.
        self.std = nn.Parameter(torch.ones(1, action_size))
        self.to(self.device)

    def forward(self, state, action=None):
        # state = torch.Tensor(state).to(self.device)
        # Get action
        a = self.actor(state)
        distribution = torch.distributions.Normal(a, self.std)
        if action is None:
            action = distribution.sample()
        log_prob = distribution.log_prob(action)
        log_prob = torch.sum(log_prob, dim=1, keepdim=True)
        # Get value from critic
        value = self.critic(state)
        return action, log_prob, value
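

# Minimal smoke test (illustrative; the 33-dim state / 4-dim action sizes are
# assumed from the Unity Reacher environment, they are not defined in this file).
if __name__ == '__main__':
    net = ActorCriticNetwork(state_size=33, action_size=4)
    states = torch.randn(5, 33).to(net.device)
    action, log_prob, value = net(states)
    print(action.shape, log_prob.shape, value.shape)  # (5, 4) (5, 1) (5, 1)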
| 29.037736 | 84 | 0.630279 | 211 | 1,539 | 4.421801 | 0.293839 | 0.048232 | 0.048232 | 0.061093 | 0.113612 | 0.081458 | 0.081458 | 0.081458 | 0.081458 | 0 | 0 | 0.01918 | 0.254711 | 1,539 | 52 | 85 | 29.596154 | 0.794246 | 0.048083 | 0 | 0 | 0 | 0 | 0.006156 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.088235 | 0 | 0.323529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6573cc6ddcf9357e299f821257400c5ffa75021c | 8,965 | py | Python | train.py | frgfm/drlnd-p2-continuous-control | 0c3c9f60426b1f9c58833c8af9d4b1dfbc57c7a3 | [
"MIT"
] | null | null | null | train.py | frgfm/drlnd-p2-continuous-control | 0c3c9f60426b1f9c58833c8af9d4b1dfbc57c7a3 | [
"MIT"
] | null | null | null | train.py | frgfm/drlnd-p2-continuous-control | 0c3c9f60426b1f9c58833c8af9d4b1dfbc57c7a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import os
import torch
import numpy as np
from pathlib import Path
from collections import deque
import matplotlib.pyplot as plt
from tqdm import tqdm, trange
from unityagents import UnityEnvironment
from agent import Agent


def set_seed(seed):
    """Set the seed for pseudo-random number generations

    Args:
        seed (int): seed to set for reproducibility
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
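

# Quick reproducibility check (illustrative, not part of the original script):
#   set_seed(42); a = torch.rand(3)
#   set_seed(42); b = torch.rand(3)
#   assert torch.equal(a, b)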


def train_agent(agent, env, n_episodes=600, max_t=1000, success_thresh=30.):
    """Agent training function

    Args:
        agent: agent to train
        env: environment to interact with
        n_episodes (int, optional): maximum number of training episodes
        max_t (int, optional): maximum number of timesteps per episode
        success_thresh (float, optional): minimum running average score to consider environment solved

    Returns:
        scores (list<float>): scores of each episode
    """
    scores = []
    # Last 100 episodes' scores
    scores_window = deque(maxlen=100)
    brain_name = env.brain_names[0]
    success = False

    tqdm_range = trange(1, n_episodes + 1)
    for i_episode in tqdm_range:
        # reset the environment
        env_info = env.reset(train_mode=True)[brain_name]
        # get the current state
        states = env_info.vector_observations
        agent.reset()
        # initialize the score
        _scores = np.zeros(len(env_info.agents))
        for _ in range(max_t):
            actions = agent.act(states, add_noise=True)
            # Perform action in the environment
            env_info = env.step(actions)[brain_name]
            # Get next state, reward and completion boolean
            next_states = env_info.vector_observations
            rewards = env_info.rewards
            dones = env_info.local_done
            # Agent step
            for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
                agent.step(state, action, reward, next_state, done)
            # Update episode score
            states = next_states
            _scores += rewards
            if np.any(dones):
                break
        # Save most recent score
        scores.append(np.mean(_scores))
        scores_window.append(scores[-1])
        # Console output
        running_mean = np.mean(scores_window)
        tqdm_range.set_postfix(raw_score=scores[-1], avg_score=running_mean)
        if i_episode % 100 == 0:
            tqdm.write(f'Episode {i_episode}/{n_episodes}\tavg score: {running_mean:.2f}')
        if (not success) and running_mean >= success_thresh:
            tqdm.write(f'Solved in {i_episode:d} episodes!\tavg score: {running_mean:.2f}')
            success = True

    return scores


def plot_scores(scores, running_window_size=100, success_thresh=30.):
    """Plot the score statistics over training episodes

    Args:
        scores (list<float>): scores obtained at each episode
        running_window_size (int, optional): number of episodes used for the moving window
        success_thresh (float): minimum score to consider the environment as solved
    """
    # plot the scores
    plt.plot(np.arange(len(scores)), scores, zorder=1)
    # Running average scores
    ra_scores, rm_scores = [], []
    success_x, success_y = None, None
    success = False
    for idx in range(len(scores)):
        ra_score = np.mean(scores[max(0, idx - running_window_size + 1): idx + 1])
        ra_scores.append(ra_score)
        rm_scores.append(np.median(scores[max(0, idx - running_window_size + 1): idx + 1]))
        if (not success) and ra_score > success_thresh:
            success_x, success_y = idx + 1, ra_score
            success = True
    plt.plot(np.arange(len(scores)), ra_scores, zorder=2)
    plt.plot(np.arange(len(scores)), rm_scores, zorder=3)
    plt.axhline(y=success_thresh, color='r', linestyle='--', zorder=4)
    if success_x and success_y:
        plt.scatter(success_x, success_y, color='r', zorder=5)
    # Legend
    plt.grid(True, linestyle='dotted')
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    legends = ['Raw score', 'Running average score', 'Running median score']
    if success_x and success_y:
        legends.append('Success episode')
    plt.legend(legends, loc='upper right')
    plt.title('DDPG training scores')
    plt.show()


def main(args):
    if args.deterministic:
        set_seed(42)

    env = UnityEnvironment(file_name=args.env_path, no_graphics=args.no_graphics)
    # get the default brain
    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]
    env_info = env.reset(train_mode=True)[brain_name]
    # number of agents in the environment
    print('Number of agents:', len(env_info.agents))
    # number of actions
    action_size = brain.vector_action_space_size
    print('Number of actions:', action_size)
    # examine the state space
    state = env_info.vector_observations[0]
    print('States look like:', state)
    state_size = len(state)
    print('States have length:', state_size)

    agent = Agent(state_size, action_size,
                  train=True,
                  device=args.device,
                  buffer_size=args.buffer_size,
                  batch_size=args.batch_size,
                  lr=args.lr, lr_steps=args.lr_steps, lr_gamma=args.lr_gamma,
                  gamma=args.gamma, tau=args.tau,
                  noise_mean=args.noise_mean, noise_theta=args.noise_theta,
                  noise_sigma=args.noise_sigma,
                  grad_clip=args.grad_clip)

    scores = train_agent(agent, env,
                         n_episodes=args.episodes)

    output_folder = Path(args.output)
    if not output_folder.is_dir():
        output_folder.mkdir(parents=True)

    # Save model
    torch.save(agent.actor_local.state_dict(), output_folder.joinpath('actor_model.pt'))
    torch.save(agent.critic_local.state_dict(), output_folder.joinpath('critic_model.pt'))

    env.close()

    # Plot results
    fig = plt.figure()
    plot_scores(scores, running_window_size=100, success_thresh=args.success_threshold)
    fig.savefig(output_folder.joinpath('training_scores.png'), transparent=True)
if __name__ == "__main__":
import argparse
# Environment
parser = argparse.ArgumentParser(description='Reacher arm agent training',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--no-graphics", dest="no_graphics",
help="Should graphical environment be disabled",
action="store_true")
# Input / Output
parser.add_argument('--env-path', default='./Reacher_Linux/Reacher.x86_64',
help='path to executable unity environment')
parser.add_argument('--output', default='./outputs', type=str, help='output folder')
parser.add_argument('--success-threshold', default=30., type=float,
help='minimum running average score over last 100 episodes to consider environment solved')
# Device
parser.add_argument('--device', default=None, help='device')
parser.add_argument("--deterministic", dest="deterministic",
help="should the training be performed in deterministic mode",
action="store_true")
# Loader
parser.add_argument('-b', '--batch-size', default=128, type=int, help='batch size')
parser.add_argument('--buffer-size', default=1e5, type=int, help='replay buffer size')
# Optimizer
parser.add_argument('--lr', default=5e-4, type=float, help='learning rate')
parser.add_argument('--episodes', default=600, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--lr-steps', default=100 * 1000, type=int, help='number of steps between each scheduler step')
parser.add_argument('--lr-gamma', default=0.5, type=float, help='LR multiplier applied at each scheduler step')
parser.add_argument('--gamma', default=0.9, type=float, help='discount factor')
parser.add_argument('--tau', default=5e-4, type=float, help='for soft update of target parameters')
parser.add_argument('--noise-mean', default=0., type=float, help='mean of Ornstein-Uhlenbeck noise')
parser.add_argument('--noise-theta', default=0.15, type=float, help='theta parameter Ornstein-Uhlenbeck noise')
parser.add_argument('--noise-sigma', default=0.12, type=float, help='sigma parameter of Ornstein-Uhlenbeck noise')
parser.add_argument('--grad-clip', default=0., type=float, help='gradient clipping')
args = parser.parse_args()
main(args)
| 40.201794 | 119 | 0.657669 | 1,166 | 8,965 | 4.897942 | 0.248714 | 0.028366 | 0.053581 | 0.013133 | 0.208195 | 0.16547 | 0.075118 | 0.042374 | 0.042374 | 0.012257 | 0 | 0.013213 | 0.23179 | 8,965 | 222 | 120 | 40.382883 | 0.81603 | 0.14512 | 0 | 0.083333 | 0 | 0 | 0.167527 | 0.007814 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.076389 | 0 | 0.111111 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65750f8d694a9b5f1b3f3a8ae5af2a15e115d86f | 2,236 | py | Python | AlphaZero_Gomoku-master/play.py | Dancy679/- | 3d4c507cace8df7c566fce080d7ba4bf8838b4ba | [
"MIT"
] | null | null | null | AlphaZero_Gomoku-master/play.py | Dancy679/- | 3d4c507cace8df7c566fce080d7ba4bf8838b4ba | [
"MIT"
] | 2 | 2019-01-11T10:47:50.000Z | 2019-01-12T02:00:30.000Z | AlphaZero_Gomoku-master/play.py | Dancy679/- | 3d4c507cace8df7c566fce080d7ba4bf8838b4ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function

from game import Board, Game
from policy_value_net import PolicyValueNet
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
import pickle
import threading
import _thread as thread
from Graphic import Graphic


class myThread(threading.Thread):
    def __init__(self, graphic):
        threading.Thread.__init__(self)
        self.graphic = graphic

    def run(self):
        self.graphic.run()


class Human(object):
    def __init__(self, graphic):
        self.player = None
        self.graphic = graphic

    def set_player_ind(self, p):
        self.player = p

    def get_action(self, board):
        try:
            # location = input("Your move: ")
            print("it is your turn")
            location = self.graphic.input()
            print(location)
            if isinstance(location, str):
                location = [int(n, 10) for n in location.split(",")]
            move = board.location_to_move(location)
        except Exception:
            move = -1
        if move == -1 or move not in board.availables:
            print("invalid move")
            move = self.get_action(board)
        return move

    def __str__(self):
        return "Human {}".format(self.player)


def run():
    n = 5
    width, height = 10, 10
    model_file = 'current_policy.model'
    try:
        board = Board(width=width, height=height, n_in_row=n)
        game = Game(board)
        graphic = Graphic()
        # graphic.run()
        # thread1 = threading.Thread(target=graphic.run, args=())
        best_policy = PolicyValueNet(width, height, model_file='./model/' + model_file)
        mcts_player = MCTSPlayer(best_policy.policy_value_fn, c_puct=5, n_playout=1200)
        human = Human(graphic)
        # set start_player=0 for human first
        thread2 = threading.Thread(target=game.start_play, args=(human, mcts_player, graphic, 1, 1))
        # game.start_play(human, mcts_player, graphic, start_player=0, is_shown=1)
        thread2.daemon = True
        thread2.start()
        graphic.run()
    except KeyboardInterrupt:
        print('\n\rquit')


if __name__ == '__main__':
    run()
| 29.038961 | 100 | 0.623435 | 276 | 2,236 | 4.82971 | 0.34058 | 0.049512 | 0.016504 | 0.027007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01721 | 0.272361 | 2,236 | 76 | 101 | 29.421053 | 0.80209 | 0.103309 | 0 | 0.105263 | 0 | 0 | 0.039039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122807 | false | 0 | 0.157895 | 0.017544 | 0.350877 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
657606914bc6eaf8e4d2dacaeddd1996f18d23e1 | 12,967 | py | Python | luffy/models/layers/attention.py | Fei-Wang/dl-pytorch | a7672603e2de7824d0ff7e97b69dedad3fd9d476 | [
"MIT"
] | null | null | null | luffy/models/layers/attention.py | Fei-Wang/dl-pytorch | a7672603e2de7824d0ff7e97b69dedad3fd9d476 | [
"MIT"
] | null | null | null | luffy/models/layers/attention.py | Fei-Wang/dl-pytorch | a7672603e2de7824d0ff7e97b69dedad3fd9d476 | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn, einsum
from torch.nn.modules.utils import _pair
from .mlp import MLP
__all__ = ['Attention', 'WindowAttention', 'WindowAttentionV2', 'DeformableAttention', 'MultiQueryAttention']


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, dim_head=None, drop=0.):
        super().__init__()
        dim_head = dim_head or dim // num_heads
        self.num_heads = num_heads
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * num_heads

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
        self.drop = nn.Dropout(drop)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(drop))

    def forward(self, x, mask=None):
        q, k, v = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(t, 'b n (nh d) -> b nh n d', nh=self.num_heads), (q, k, v))

        q = q * self.scale
        sim = einsum('b h i d, b h j d -> b h i j', q, k)  # h means nh

        # TODO: other mask types?
        if mask is not None:
            b, _, n, _ = sim.shape  # sim is square over the token dimension
            assert mask.shape == (b, n, n), 'mask has incorrect dimensions'
            sim.masked_fill_(~mask, -torch.finfo(sim.dtype).max)

        attn = sim.softmax(dim=-1)
        attn = self.drop(attn)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b nh n d -> b n (nh d)')
        return self.to_out(out)


class WindowAttention(nn.Module):
    @staticmethod
    def double_step_seq(step1, len1, step2, len2):
        seq1 = torch.arange(0, step1 * len1, step1)
        seq2 = torch.arange(0, step2 * len2, step2)
        return (seq1[:, None] + seq2[None, :]).reshape(1, -1)
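
    # Worked example for double_step_seq above: double_step_seq(3, 2, 1, 2)
    # gives seq1 = [0, 3] and seq2 = [0, 1], whose broadcast sum [[0, 1], [3, 4]]
    # is reshaped to [[0, 1, 3, 4]]; __init__ below combines two such index
    # sequences into the window's pairwise relative-position index table.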
    def __init__(self, dim, window_size, num_heads, dim_head=None, drop=0.):
        super().__init__()
        dim_head = dim_head or dim // num_heads
        self.num_heads = num_heads
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * num_heads

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
        self.drop = nn.Dropout(drop)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(drop))

        wh, ww = _pair(window_size)
        self.ws = wh * ww
        self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * wh - 1) * (2 * ww - 1), num_heads))
        rel_index_coords = self.double_step_seq(2 * ww - 1, wh, 1, ww)
        relative_position_index = rel_index_coords + rel_index_coords.T
        relative_position_index = relative_position_index.flip(1)
        relative_position_index = rearrange(relative_position_index, 'ws1 ws2 -> (ws1 ws2)')
        self.register_buffer("relative_position_index", relative_position_index)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        q, k, v = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(t, 'b n (nh d) -> b nh n d', nh=self.num_heads), (q, k, v))

        q = q * self.scale
        sim = einsum('b h i d, b h j d -> b h i j', q, k)

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index]
        relative_position_bias = rearrange(relative_position_bias, '(ws1 ws2) n -> 1 n ws1 ws2', ws1=self.ws)
        sim = sim + relative_position_bias

        if mask is not None:
            sim = rearrange(sim, '(b nw) nh n1 n2 -> b nw nh n1 n2', nw=mask.shape[0])
            mask = rearrange(mask, 'nw ws1 ws2 -> 1 nw 1 ws1 ws2')
            sim = sim + mask
            sim = rearrange(sim, 'b nw nh n1 n2 -> (b nw) nh n1 n2')

        attn = sim.softmax(dim=-1)
        attn = self.drop(attn)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b nh n d -> b n (nh d)')
        return self.to_out(out)


class WindowAttentionV2(nn.Module):
    def __init__(self, dim, window_size, num_heads, dim_head=None, drop=0., meta_hidden_dim=384):
        super().__init__()
        dim_head = dim_head or dim // num_heads
        wh, ww = _pair(window_size)
        self.num_heads = num_heads
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * num_heads

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
        self.drop = nn.Dropout(drop)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(drop))

        # meta network for positional encodings
        self.meta_mlp = MLP(2, meta_hidden_dim, num_heads)
        self.register_parameter("tau", torch.nn.Parameter(torch.ones((1, num_heads, 1, 1))))

        coordinates = torch.cartesian_prod(torch.arange(wh), torch.arange(ww))
        relative_coordinates = coordinates[:, None, :] - coordinates[None, :, :]
        relative_coordinates_log = torch.sign(relative_coordinates) * torch.log(1.0 + relative_coordinates.abs())
        self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False)

    def forward(self, x, mask=None):
        q, k, v = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(t, 'b n (nh d) -> b nh n d', nh=self.num_heads), (q, k, v))

        # compute attention map with scaled cosine attention
        q, k = F.normalize(q, dim=-1), F.normalize(k, dim=-1)
        sim = einsum('b h i d, b h j d -> b h i j', q, k)
        sim = sim * self.tau

        relative_position_bias = self.meta_mlp(self.relative_coordinates_log)
        relative_position_bias = rearrange(relative_position_bias, 'ws1 ws2 nh -> 1 nh ws1 ws2')
        sim = sim + relative_position_bias

        if mask is not None:
            sim = rearrange(sim, '(b nw) nh n1 n2 -> b nw nh n1 n2', nw=mask.shape[0])
            mask = rearrange(mask, 'nw ws1 ws2 -> 1 nw 1 ws1 ws2')
            sim = sim + mask
            sim = rearrange(sim, 'b nw nh n1 n2 -> (b nw) nh n1 n2')

        attn = sim.softmax(dim=-1)
        attn = self.drop(attn)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b nh n d -> b n (nh d)')
        return self.to_out(out)


class DeformableAttention(nn.Module):
    def __init__(self, dim, input_resolution, offset_kernel_size, offset_range_factor=2, num_heads=8, dim_head=None,
                 drop=0., dim_group=128):
        super().__init__()
        dim_head = dim_head or dim // num_heads
        self.num_heads = num_heads
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * num_heads

        self.h, self.w = input_resolution
        self.dim_group = dim_group
        self.num_groups = dim // dim_group
        self.offset_range_factor = offset_range_factor

        self.conv_offset = nn.Sequential(
            nn.Conv2d(dim_group, dim_group, offset_kernel_size, 1, offset_kernel_size // 2, groups=dim_group),
            Rearrange('B dg h w -> B h w dg'),
            nn.LayerNorm(dim_group),
            nn.GELU(),
            nn.Linear(dim_group, 2, bias=False)
        )

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.drop = nn.Dropout(drop)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(drop))

        self.rpe_table = nn.Parameter(torch.zeros(self.num_heads, self.h * 2 - 1, self.w * 2 - 1))

        reference = self.get_reference(self.h, self.w)
        self.register_buffer('reference', reference)

    @staticmethod
    def get_reference(h, w):
        ref_y, ref_x = torch.meshgrid(
            torch.linspace(0.5, h - 0.5, h),
            torch.linspace(0.5, w - 0.5, w))
        ref = torch.stack((ref_y, ref_x), -1)
        ref[..., 1].div_(w).mul_(2).sub_(1)
        ref[..., 0].div_(h).mul_(2).sub_(1)
        return ref

    def resample(self, x):
        x = rearrange(x, 'b (h w) (ng dg) -> (b ng) dg h w', dg=self.dim_group, h=self.h)
        offset = self.conv_offset(x)

        if self.offset_range_factor > 0:
            # build the per-axis range on the same device as the offsets, so
            # the module also works for CUDA inputs
            offset_range = torch.tensor([1.0 / self.h, 1.0 / self.w], device=offset.device).reshape(1, 1, 1, 2)
            offset = offset.tanh().mul(offset_range).mul(self.offset_range_factor)

        if self.offset_range_factor >= 0:
            pos = offset + self.reference
        else:
            pos = (offset + self.reference).tanh()

        x_sampled = F.grid_sample(
            input=x,
            grid=pos[..., (1, 0)],  # y, x -> x, y
            mode='bilinear', align_corners=True)  # B, dg, h, w
        x_sampled = rearrange(x_sampled, '(b ng) dg h w -> b (h w) (ng dg)', ng=self.num_groups)

        rpe_bias = repeat(self.rpe_table, '(ng nhg) H W -> (b ng) nhg H W', b=x.shape[0] // self.num_groups,
                          ng=self.num_groups)
        q_grid = repeat(self.reference, 'h w p -> B (h w) 1 p', B=x.shape[0])
        pos = rearrange(pos, 'B h w p -> B 1 (h w) p')
        displacement = (q_grid - pos).mul(0.5)
        attn_bias = F.grid_sample(
            input=rpe_bias,
            grid=displacement[..., (1, 0)],
            mode='bilinear', align_corners=True
        )  # B, nhg, L, L with L = h * w
        attn_bias = rearrange(attn_bias, '(b ng) nhg L1 L2 -> b (ng nhg) L1 L2', ng=self.num_groups)
        return x_sampled, attn_bias

    def forward(self, x):
        q = self.to_q(x)
        x_sampled, attn_bias = self.resample(q)
        k, v = self.to_kv(x_sampled).chunk(2, dim=-1)
        q, k, v = map(lambda t: rearrange(t, 'b n (nh d) -> b nh n d', nh=self.num_heads), (q, k, v))

        q = q * self.scale
        sim = einsum('b h i d, b h j d -> b h i j', q, k)  # h means nh
        sim = sim + attn_bias

        attn = sim.softmax(dim=-1)
        attn = self.drop(attn)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b nh n d -> b n (nh d)')
        return self.to_out(out)


class RotaryEmbedding(nn.Module):
    """Rotary positional embedding.

    `RoFormer: Enhanced Transformer with Rotary Position Embedding
    <https://arxiv.org/abs/2104.09864>`_"""

    def __init__(self, dim):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, max_seq_len):
        # build the position indices on the same device as the buffer so the
        # einsum below also works when the module lives on the GPU
        seq = torch.arange(max_seq_len, dtype=self.inv_freq.dtype, device=self.inv_freq.device)
        freqs = einsum("i , j -> i j", seq, self.inv_freq)
        return torch.cat((freqs, freqs), dim=-1)


class MultiQueryAttention(nn.Module):
    def __init__(self, dim, dim_head=None, num_heads=8, drop=0.):
        super().__init__()
        dim_head = dim_head or dim // num_heads
        self.num_heads = num_heads
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * num_heads

        # multi-query attention: many query heads, one shared key/value head
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, dim_head * 2, bias=False)
        self.drop = nn.Dropout(drop)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(drop))

        self.rotary_emb = RotaryEmbedding(dim_head)
        # for caching causal mask and rotary embeddings
        self.register_buffer("mask", None, persistent=False)
        self.register_buffer("pos_emb", None, persistent=False)

    def get_rotary_embedding(self, n):
        if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
            return self.pos_emb[:n]

        pos_emb = self.rotary_emb(n)
        self.register_buffer("pos_emb", pos_emb, persistent=False)
        return pos_emb

    @staticmethod
    def rotate_half(x):
        x = rearrange(x, "... (j d) -> ... j d", j=2)
        x1, x2 = x.unbind(dim=-2)
        return torch.cat((-x2, x1), dim=-1)

    def apply_rotary_pos_emb(self, pos, t):
        return (t * pos.cos()) + (self.rotate_half(t) * pos.sin())

    def get_mask(self, n, device):
        if self.mask is not None and self.mask.shape[-1] >= n:
            return self.mask[:n, :n]

        # build the causal mask on the requested device so it can be applied
        # to GPU-resident similarity scores
        mask = torch.ones((n, n), dtype=torch.bool, device=device).triu(1)
        self.register_buffer("mask", mask, persistent=False)
        return mask

    def forward(self, x):
        q = self.to_q(x)
        k, v = self.to_kv(x).chunk(2, dim=-1)
        q = rearrange(q, "b n (nh d) -> b nh n d", nh=self.num_heads)

        positions = self.get_rotary_embedding(x.shape[1])
        q, k = map(lambda t: self.apply_rotary_pos_emb(positions, t), (q, k))

        q = q * self.scale
        sim = einsum('b h i d, b j d -> b h i j', q, k)  # h means nh

        causal_mask = self.get_mask(x.shape[1], x.device)
        sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
        sim = sim - sim.amax(dim=-1, keepdim=True)
        attn = sim.softmax(dim=-1)
        attn = self.drop(attn)

        out = einsum('b h i j, b j d -> b h i d', attn, v)
        out = rearrange(out, 'b nh n d -> b n (nh d)')
        return self.to_out(out)
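

# Illustrative smoke test (the shapes are assumed for demonstration and are
# not part of the original module): standard multi-head attention and causal
# multi-query attention over a batch of random token embeddings.
if __name__ == '__main__':
    x = torch.randn(2, 16, 128)             # (batch, tokens, dim)
    attn = Attention(dim=128, num_heads=8)
    mqa = MultiQueryAttention(dim=128, num_heads=8)
    print(attn(x).shape, mqa(x).shape)      # both: torch.Size([2, 16, 128])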
| 41.03481 | 116 | 0.587954 | 2,054 | 12,967 | 3.539435 | 0.11295 | 0.037414 | 0.008253 | 0.006878 | 0.490784 | 0.443191 | 0.386382 | 0.386382 | 0.381706 | 0.359972 | 0 | 0.021864 | 0.273386 | 12,967 | 315 | 117 | 41.165079 | 0.749735 | 0.039176 | 0 | 0.436214 | 0 | 0 | 0.094789 | 0.003792 | 0 | 0 | 0 | 0.003175 | 0.004115 | 1 | 0.078189 | false | 0 | 0.028807 | 0.004115 | 0.193416 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65769a171ea27b1e27bc572c82c16b4cfa9bb4de | 32,669 | py | Python | python/ossid/scripts/online_learning.py | r-pad/OSSID_code | b80d429a3fa4464a69a78dc2112d52b4f05d0dfe | [
"MIT"
] | 1 | 2022-03-30T01:43:41.000Z | 2022-03-30T01:43:41.000Z | python/ossid/scripts/online_learning.py | r-pad/OSSID_code | b80d429a3fa4464a69a78dc2112d52b4f05d0dfe | [
"MIT"
] | null | null | null | python/ossid/scripts/online_learning.py | r-pad/OSSID_code | b80d429a3fa4464a69a78dc2112d52b4f05d0dfe | [
"MIT"
] | 1 | 2022-03-30T04:42:29.000Z | 2022-03-30T04:42:29.000Z | import open3d as o3d
import os, sys
import argparse
import random
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import torch
import pickle
import time
from pathlib import Path
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import Dataset, DataLoader
from ossid.models.dtoid import DtoidNet
from ossid.models.maskrcnn import MaskRCNN
from ossid.datasets import getDataloaders
from ossid.datasets.utils import collate_fn
from ossid.utils import expandBox, dict_to, to_np, move_to
from ossid.utils.bop_utils import saveResultsBop
from ossid.utils.zephyr_utils import networkInference
from ossid.config import OSSID_CKPT_ROOT, OSSID_DATA_ROOT, BOP_RESULTS_FOLDER, OSSID_RESULT_ROOT, BOP_DATASETS_ROOT, OSSID_DET_ROOT
from ossid.utils.detection import saveLmoYcbvGT, evalFinetuneResults
from zephyr.datasets.score_dataset import ScoreDataset
from zephyr.models.pointnet2 import PointNet2SSG
from zephyr.options import getOptions, checkArgs
from zephyr.utils import depth2cloud, meta2K, K2meta, projectPointsUv
from zephyr.utils.metrics import add, adi
from zephyr.utils.bop_dataset import BopDataset, BopDatasetArgs
from zephyr.utils.halcon_wrapper import PPFModel
from zephyr.utils.renderer import Renderer, blend
from zephyr.utils.icp import icpRefinement
from zephyr.constants import OBJECT_DIAMETERES
from zephyr.data_util import hypoShiftYcbv2BopBatch, modelPointsShiftYcbv2Bop, modelShiftBopYcbv
from zephyr.full_pipeline.model_featurization import FeatureModel
from zephyr.full_pipeline.scene_featurization import featurizeScene
from bop_toolkit_lib.visibility import estimate_visib_mask_gt
from bop_toolkit_lib.misc import ensure_dir, depth_im_to_dist_im_fast
import faulthandler
faulthandler.enable()


def makeFolder(folder):
    if not os.path.exists(folder):
        os.makedirs(folder)


def getFeaturizedModels(dataset):
    from zephyr.full_pipeline.options import getOptions
    parser = getOptions()
    args = parser.parse_args([])

    args.bop_root = dataset.bop_root
    args.dataset_name = dataset.dataset_name
    args.grid_dir_name = "grid"
    args.sampled_model_dir_name = "model_pc"
    args.grid_indices_path = os.path.join(args.bop_root, args.dataset_name, args.grid_dir_name, "verts_grid_0.npy")

    dataset.dataset_camera["fx"] = dataset.dataset_camera['K'][0, 0]
    dataset.dataset_camera["fy"] = dataset.dataset_camera['K'][1, 1]
    dataset.dataset_camera["cx"] = dataset.dataset_camera['K'][0, 2]
    dataset.dataset_camera["cy"] = dataset.dataset_camera['K'][1, 2]

    featured_objects = {}
    for obj_id in dataset.obj_ids:
        is_sym = obj_id in dataset.sym_obj_ids
        obj = FeatureModel(dataset.dataset_root, is_sym, args, create_index=True)
        obj.construct(obj_id, dataset.getObjPath(obj_id), dataset.dataset_camera)
        featured_objects[obj_id] = obj

    return featured_objects


def main(main_args):
    torch.manual_seed(42)
    np.random.seed(42)
    random.seed(42)

    DATASET_NAME = main_args.dataset_name
    DTOID_CONFIDENT_THRESHOLD = 0.5
    ZEPHYR_CONFIDENT_THRESHOLD = 20
    SAVE_ROOT = OSSID_RESULT_ROOT

    assert not (main_args.ignore_dtoid_mask and main_args.always_dtoid_mask)

    makeFolder(SAVE_ROOT)
    makeFolder(BOP_RESULTS_FOLDER)

    next_finetune_number = main_args.finetune_interval

    '''Initialize the trained DTOID model'''
    # Use the DTOID network
    if main_args.dtoid_weights_path is not None:
        ckpt_v = int(main_args.dtoid_weights_path.split("/")[-2].split("_")[1][1:])
        ckpt_path = Path(main_args.dtoid_weights_path)
        conf_path = ckpt_path.parent.parent / ("config_v%d.yaml" % ckpt_v)
    elif DATASET_NAME == 'lmo':
        conf_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_conf_lmo.yaml")
        if main_args.use_offline_model:
            ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_transductive_lmo.ckpt")
        else:
            ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained.ckpt")
    elif DATASET_NAME == 'ycbv':
        conf_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_conf_ycbv.yaml")
        if main_args.use_offline_model:
            ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_transductive_ycbv.ckpt")
        else:
            ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained.ckpt")

    ossid_args = OmegaConf.load(conf_path)

    # Override arguments with user-provided directories
    ossid_args.dataset.bop_root = BOP_DATASETS_ROOT
    ossid_args.model.pretrained_dtoid_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained_original.pth.tar")
    if DATASET_NAME == 'ycbv':
        ossid_args.dataset.grid_root = os.path.join(OSSID_DATA_ROOT, "templates_YCBV_BOP")
        ossid_args.dataset.zephyr_result_path = os.path.join(OSSID_DATA_ROOT, "test_ycbv_boptest_zephyr_result_unseen.pkl")
    elif DATASET_NAME == 'lmo':
        ossid_args.dataset.grid_root = os.path.join(OSSID_DATA_ROOT, "templates_LMO_DTOID")
        ossid_args.dataset.zephyr_result_path = os.path.join(OSSID_DATA_ROOT, "lmo_boptest_zephyr_result.pkl")

    # Use the DTOID provided by the original authors (https://github.com/jpmerc/DTOID)
    # This model was also trained on YCB-V objects, and thus can only be used to evaluate on LM-O.
    ossid_args.model.use_pretrained_dtoid = main_args.use_pretrained_dtoid

    ossid_args.dataset.test_dataset_name = main_args.dataset_name
    ossid_args.dataset.train_dataset_name = main_args.dataset_name

    # Keep all the zephyr results for the training set
    ossid_args.dataset.zephyr_filter_key = None
    ossid_args.dataset.zephyr_results_percent = 1

    # use more templates for training
    ossid_args.dataset.train_local_template_sample_from = 10

    if main_args.n_local_test is not None:
        ossid_args.dataset.n_local_test = main_args.n_local_test
    elif main_args.use_pretrained_dtoid:  # If their weights are used
        ossid_args.dataset.n_local_test = 160
    else:  # If our weights are used
        ossid_args.dataset.n_local_test = 10
    print("Number of local templates =", ossid_args.dataset.n_local_test)

    train_loader, valid_loader, test_loader = getDataloaders(ossid_args)
    # Sort the test loader
    test_loader.dataset.sortTargets(reverse=main_args.backward)

    ModelClass = DtoidNet
    model = DtoidNet(ossid_args)

    if main_args.use_pretrained_dtoid:
        # DTOID weights provided by the authors will be loaded
        print("Loading DTOID weights provided by the original authors")
    elif ckpt_path is not None:
        print("Loading DTOID Model weights from", ckpt_path)
        ckpt = torch.load(ckpt_path)
        model.load_state_dict(ckpt['state_dict'])

    initial_state_dict = model.state_dict()
    model = model.to(0)
    model = model.eval()

    '''Initialize the trained Zephyr model'''
    if DATASET_NAME == 'lmo':
        CKPT_PATH = os.path.join(OSSID_CKPT_ROOT, "final_lmo.ckpt")  # The path to the checkpoint
        USE_ICP = False  # Not using ICP for LMO dataset, as it only uses PPF hypotheses, which are already after ICP processing.
        MODEL_DATA_TPATH = os.path.join(OSSID_DATA_ROOT, "zephyr_model_data", "lmo", "model_cloud_{:02d}.npz")  # path template to the sampled point cloud
        INCONST_RATIO_TH = 100
    elif DATASET_NAME == 'ycbv':
        if main_args.test_seen:
            CKPT_PATH_FOR_ODD = os.path.join(OSSID_CKPT_ROOT, "final_ycbv.ckpt")
            CKPT_PATH_FOR_EVEN = os.path.join(OSSID_CKPT_ROOT, "final_ycbv_valodd.ckpt")
        else:
            CKPT_PATH_FOR_ODD = os.path.join(OSSID_CKPT_ROOT, "final_ycbv_valodd.ckpt")
            CKPT_PATH_FOR_EVEN = os.path.join(OSSID_CKPT_ROOT, "final_ycbv.ckpt")
        USE_ICP = True  # using ICP for the YCB-V dataset
        MODEL_DATA_TPATH = os.path.join(OSSID_DATA_ROOT, "zephyr_model_data", "ycbv", "model_cloud_{:02d}.npz")  # path template to the sampled point cloud
        INCONST_RATIO_TH = 10

    '''Set up the arguments for the model'''
    parser = getOptions()
    zephyr_args = parser.parse_args([])

    # Model-related
    zephyr_args.model_name = "pn2"
    zephyr_args.dataset = "HSVD_diff_uv_norm"
    zephyr_args.no_valid_proj = True
    zephyr_args.no_valid_depth = True
    zephyr_args.inconst_ratio_th = INCONST_RATIO_TH

    # Dataset-related
    zephyr_args.dataset_root = [""]
    zephyr_args.dataset_name = [DATASET_NAME]
    # zephyr_args.resume_path = CKPT_PATH
    zephyr_args.test_dataset = True

    '''Initialize pytorch dataloader and model'''
    # dataloader is only needed for the getPointNetData() function
    # zephyr_loader = getDataloader(zephyr_args)[0]
    zephyr_dataset = ScoreDataset([], "", DATASET_NAME, zephyr_args, mode='test')
    zephyr_args.dim_point = zephyr_dataset.dim_point
    zephyr_args.unseen_oids = []
    zephyr_args.extra_bottleneck_dim = 0

    if main_args.dataset_name == "ycbv":
        zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
        zephyr_ckpt = torch.load(CKPT_PATH_FOR_ODD)
        zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
        zephyr_model = zephyr_model.to(0).eval()
        zephyr_model_for_odd = zephyr_model

        zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
        zephyr_ckpt = torch.load(CKPT_PATH_FOR_EVEN)
        zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
        zephyr_model = zephyr_model.to(0).eval()
        zephyr_model_for_even = zephyr_model
    else:
        zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
        zephyr_ckpt = torch.load(CKPT_PATH)
        zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
        zephyr_model = zephyr_model.to(0).eval()

    '''Initialize the BOP dataset'''
    # Set up the options
    bop_args = BopDatasetArgs(
        bop_root=BOP_DATASETS_ROOT,
        dataset_name=DATASET_NAME,
        model_type=None,
        split_name="bop_test",  # This indicates we want to use the testing set defined in BOP challenge (different than original test set)
        split="test",
        split_type=None,
        ppf_results_file=None,
        skip=1,  # Iterate over all test samples, with no skipping
    )
    bop_dataset = BopDataset(bop_args)
    print("Length of the test dataset:", len(bop_dataset))

    '''Load the zephyr results'''
    zephyr_results = pickle.load(open(ossid_args.dataset.zephyr_result_path, 'rb'))
    zephyr_results = {(r['obj_id'], r['scene_id'], r['im_id']): r for r in zephyr_results}

    # Extract the training dataset from the training loader
    train_dtoid_bop_dataset = train_loader.dataset
    train_dtoid_bop_dataset.clearTargets()
    # Recover from the training/validation split on zephyr results
    train_dtoid_bop_dataset.zephyr_results = zephyr_results

    '''optimizer for dtoid model'''
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=1e-4,
        weight_decay=1e-6,
        amsgrad=True
    )

    '''Test the DTOID model before finetuning'''
    if main_args.raw_dtoid:
        print("Testing the DTOID model before finetuning")
        test_results = testDtoidModel(model, test_loader)

        save_path = os.path.join(SAVE_ROOT, "before_finetune_dtoid_results_%s.pkl" % main_args.exp_name)
        print("Saving results to", save_path)
        pickle.dump({
            "test_results": test_results,
            "main_args": main_args,
        }, open(save_path, 'wb'))

        df = pd.DataFrame.from_dict(test_results)
        print("DTOID mean IoU:", df['dtoid_iou'].mean())
        print("DTOID Valid IoU recall", (df['dtoid_iou'] > 0.5).astype(float).mean())
        return 0

    if main_args.use_sift_hypos:
        # Initialize the featured model for YCB-V dataset
        featured_objects = getFeaturizedModels(bop_dataset)

    '''main loop'''
    test_results = []
    finetune_logs = []
    renderers = {}

    # Create the surface model (PPF training stage)
    print("Creating PPF models using Halcon")
    ppf_models = {}
    for obj_id in bop_dataset.obj_ids:
        full_model_path = bop_dataset.model_tpath.format(obj_id=obj_id)
        if DATASET_NAME == 'ycbv':
            ppf_models[obj_id] = PPFModel(full_model_path, ModelSamplingDist=0.03)
        else:
            ppf_models[obj_id] = PPFModel(full_model_path)

    # Preloading all model data
    print("Preloading all model data")
    model_data_all = {}
    for obj_id in bop_dataset.obj_ids:
        # Load the information of the model point cloud from the pre-processed dataset
        model_data_path = MODEL_DATA_TPATH.format(obj_id)
        model_data = np.load(model_data_path)
        model_points, model_colors, model_normals = model_data['model_points'], model_data['model_colors'], model_data['model_normals']
        model_data_all[obj_id] = (model_points, model_colors, model_normals)
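
    # Online-learning loop, once per test image: 1) run DTOID detection;
    # 2) if the detection is confident, restrict the pose search (PPF/SIFT
    # hypotheses scored by Zephyr) to the detected region, otherwise fall back
    # to the stored full-image Zephyr result; 3) if Zephyr is confident, keep
    # its predicted visible mask as a pseudo-label and periodically finetune
    # DTOID on the accumulated pseudo-labels.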
    # The batch is the data for the DTOID dataset
    for iteration, batch in tqdm(enumerate(test_loader), total=len(test_loader)):
        obj_id, scene_id, im_id = batch['obj_id'].item(), batch['scene_id'].item(), batch['im_id'].item()
        zr = zephyr_results[(obj_id, scene_id, im_id)]

        # Get the full mesh model provided by LineMOD dataset
        full_model_path = bop_dataset.model_tpath.format(obj_id=obj_id)

        # Get the raw data from the bop dataset, preparing for zephyr inference
        bop_data = bop_dataset.getDataByIds(obj_id, scene_id, im_id)
        # Extract the data from the bop datapoint
        img, depth, scene_camera = bop_data['img'], bop_data['depth'], bop_data['scene_camera']
        scene_meta = bop_data['scene_meta']
        mat_gt = bop_data['mat_gt']
        cam_K = np.asarray(scene_camera['cam_K']).reshape((3, 3))

        # Load the information of the model point cloud from the pre-processed dataset
        model_points, model_colors, model_normals = model_data_all[obj_id]

        # Get the proper error function according to whether the object is symmetric or not
        is_sym = obj_id in bop_dataset.sym_obj_ids
        if main_args.fast:
            err_func = add
        else:
            err_func = adi if is_sym else add

        # DTOID inference first
        dict_to(batch, 0)
        with torch.no_grad():
            model = model.eval()
            t1 = time.time()
            out = model.forwardTestTime(batch)
            time_dtoid = time.time() - t1

        final_bbox = to_np(out['final_bbox'][0])
        final_score = to_np(out['final_score'][0])
        dtoid_iou = to_np(out['seg_IoU'])
        dtoid_pred_mask = to_np(out['segmentation'][0, 0])
        dtoid_confident = final_score[0] > DTOID_CONFIDENT_THRESHOLD

        use_dtoid_mask = False
        if main_args.ignore_dtoid_mask:
            use_dtoid_mask = False
        elif main_args.always_dtoid_mask:
            use_dtoid_mask = True
        else:
            use_dtoid_mask = dtoid_confident
        if iteration < main_args.finetune_warmup:
            use_dtoid_mask = False

        if not use_dtoid_mask:
            # Run zephyr on the whole image
            # Here we just get the stored zephyr results
            zephyr_score = zr['score']
            zephyr_mask = zr['pred_mask_visib']
            zephyr_pose = zr['pred_pose']

            pred_pose = to_np(zephyr_pose)
            pred_score = zephyr_score
            time_ppf = None
            time_sift = None
            time_zephyr = None
            time_icp = None
        else:
            # Take the prediction and run zephyr on the predicted mask
            # Get the mask according to dtoid detection results
            if main_args.use_dtoid_segmask:
                dtoid_mask = dtoid_pred_mask > 0.5
                if dtoid_mask.sum() <= 25:  # too few points
                    dtoid_mask = np.ones_like(dtoid_mask)
            else:
                dtoid_mask = np.zeros_like(depth)
                expand_ratio = 1.2
                img_h, img_w = depth.shape
                for i, (bbox, score) in enumerate(zip(final_bbox, final_score)):
                    # if main_args.always_dtoid_mask:
                    #     if i >= 1 and (dtoid_mask * (depth > 0).astype(int)).sum() > 0:
                    #         continue
                    # If the good boxes are already used and the mask is not empty
                    if score < 0.5 and (dtoid_mask * (depth > 0).astype(int)).sum() > 0:
                        continue
                    # Expand the detection bbox a bit
                    x1, y1, x2, y2 = bbox
                    x1, y1, x2, y2 = expandBox(x1, y1, x2, y2, img_h, img_w, expand_ratio)
                    dtoid_mask[int(y1):int(y2), int(x1):int(x2)] = 1
                # if dtoid_mask.sum() <= 50:  # too few points
                #     dtoid_mask = np.ones_like(dtoid_mask)

            time_sift = 0
            # Get pose hypotheses
            if DATASET_NAME == 'ycbv':
                ppf_model = ppf_models[obj_id]
                # Run the PPF algorithm on the detected region
                scene_pc = depth2cloud(depth, np.logical_and(dtoid_mask, depth > 0), cam_K)
                # poses_ppf, scores_ppf, time_ppf = ppf_model.find_surface_model(scene_pc * 1000.0)  # The wrapper requires the input to be in millimeters
                poses_ppf, scores_ppf, time_ppf = ppf_model.find_surface_model(scene_pc * 1000.0, DensePoseRefinement='false', SceneSamplingDist=0.03, RefPtRate=0.2)  # The wrapper requires the input to be in millimeters
                poses_ppf[:, :3, 3] = poses_ppf[:, :3, 3] / 1000.0  # Convert from millimeters to meters
                poses_all = poses_ppf

                if main_args.use_sift_hypos:
                    t1 = time.time()
                    # Compute pose hypotheses from SIFT feature matches
                    try:
                        keypoints, features, cloud, frames = featurizeScene(img, depth_im_to_dist_im_fast(depth, cam_K), dtoid_mask, scene_meta, [11], [11])
                    except ValueError:
                        # The mask is too small to get any SIFT features
                        time_sift = None
                        poses_sift = np.stack([np.eye(4) for _ in range(20)], axis=0)
                        poses_all = np.concatenate([poses_sift, poses_all], axis=0)
                    else:
                        '''Match to corresponding object'''
                        poses_sift, match_aux = featured_objects[obj_id].match(features, frames, mat_gt)
                        time_sift = time.time() - t1
                        poses_all = np.concatenate([poses_sift, poses_all], axis=0)

                # Shift the model points from YCB-V dataset to BOP
                model_points = modelPointsShiftYcbv2Bop(model_points, obj_id)
            else:
                ppf_model = ppf_models[obj_id]
                # Run the PPF algorithm on the detected region
                scene_pc = depth2cloud(depth, np.logical_and(dtoid_mask, depth > 0), cam_K)
                poses_ppf, scores_ppf, time_ppf = ppf_model.find_surface_model(scene_pc * 1000.0)  # The wrapper requires the input to be in millimeters
                poses_ppf[:, :3, 3] = poses_ppf[:, :3, 3] / 1000.0  # Convert from millimeters to meters
                poses_all = poses_ppf

            # Recompute the per-point error for newly-estimated poses
            pp_err = np.asarray([err_func(mat[:3, :3], mat[:3, 3], mat_gt[:3, :3], mat_gt[:3, 3], model_points) for mat in poses_all])

            # Run zephyr
            data_for_zephyr = {
                "img": img, "depth": depth, "cam_K": cam_K,
                "model_colors": model_colors, "model_points": model_points, "model_normals": model_normals,
                "pose_hypos": poses_all, 'pp_err': pp_err
            }
            if main_args.dataset_name == 'ycbv':
                # Handle two models for YCB-V
                zephyr_model = zephyr_model_for_even if obj_id % 2 == 0 else zephyr_model_for_odd
            poses_zephyr, scores_zephyr, pp_err, uv_original, time_zephyr = networkInference(zephyr_model, zephyr_dataset, data_for_zephyr, return_time=True)
            pred_score = scores_zephyr.max().item()
            pred_idx = scores_zephyr.argmax()
            pred_pose = poses_zephyr[pred_idx]
            pred_err = pp_err[pred_idx].item()

            # Run ICP as a post-processing step
            time_icp = 0
            if USE_ICP:
                uv_original = to_np(uv_original)
                t1 = time.time()
                pred_pose, _ = icpRefinement(
                    depth, uv_original[pred_idx],
                    pred_pose, cam_K, model_points, inpaint_depth=False, icp_max_dist=0.01
                )
                time_icp = time.time() - t1

        # (Re)compute the final pose error against the ground truth
        pred_err = err_func(pred_pose[:3, :3], pred_pose[:3, 3], mat_gt[:3, :3], mat_gt[:3, 3], model_points)
        # Render the object to get predicted color and depth
        if obj_id not in renderers:
            renderer = Renderer(K2meta(cam_K))
            renderer.addObject(obj_id, full_model_path, pose=pred_pose, mm2m=True, simplify=main_args.fast)
            renderers[obj_id] = renderer
        else:
            renderer = renderers[obj_id]
            renderer.obj_nodes[obj_id].matrix = pred_pose
        pred_color, pred_depth = renderer.render(depth_only=True)

        # Compute the IoU metrics
        pred_mask = pred_depth > 0
        gt_mask = bop_data['mask_gt'] > 0
        gt_mask_visib = bop_data['mask_gt_visib'] > 0
        pred_mask_visib = estimate_visib_mask_gt(depth, pred_depth, 15 / 1000.)

        # finetune DTOID after every certain number of datapoints are added
        if main_args.use_oracle_gt:
            zephyr_confident = True
        else:
            zephyr_confident = pred_score > ZEPHYR_CONFIDENT_THRESHOLD

        finetune = False
        time_finetune = 0
        if not main_args.no_finetune and zephyr_confident:
            # Add the datapoint into the finetuning dataset
            train_dtoid_bop_dataset.addTarget(obj_id, scene_id, im_id)
            if main_args.use_oracle_gt:
                train_dtoid_bop_dataset.updateZephyrMask(obj_id, scene_id, im_id, gt_mask_visib, pred_score)
            else:
                train_dtoid_bop_dataset.updateZephyrMask(obj_id, scene_id, im_id, pred_mask_visib, pred_score)

            if len(train_dtoid_bop_dataset) == next_finetune_number:
                finetune = True
                if main_args.finetune_reset:
                    print("Resetting the DTOID weights, and the optimizer")
                    model.load_state_dict(initial_state_dict)
                    optimizer = torch.optim.Adam(
                        model.parameters(),
                        lr=1e-4,
                        weight_decay=1e-6,
                        amsgrad=True
                    )

                print("Starting finetuning DTOID at iteration %d" % iteration)
                t1 = time.time()
                model, train_logs = finetuneDtoid(model, train_dtoid_bop_dataset, optimizer, epochs=main_args.finetune_epochs, batch_size=main_args.finetune_batch_size)
                time_finetune = time.time() - t1

                if main_args.save_each:
                    # save the model weights immediately after finetuning
                    model_save_folder = os.path.join(SAVE_ROOT, main_args.exp_name)
                    makeFolder(model_save_folder)
                    model_save_path = os.path.join(model_save_folder, "epoch_%d.ckpt" % iteration)
                    print("Saving the current model at", model_save_path)
                    torch.save({
                        "iteration": iteration,
                        "model_state_dict": model.state_dict(),
                        "conf": ossid_args,
                    }, model_save_path)

                finetune_logs.append(train_logs)
                if main_args.non_cum:
                    print("Clearing finetuning targets")
                    train_dtoid_bop_dataset.clearTargets()
                    next_finetune_number = main_args.finetune_interval
                else:
                    next_finetune_number = next_finetune_number + main_args.finetune_interval

        iou = np.logical_and(pred_mask, gt_mask).sum().astype(float) / np.logical_or(pred_mask, gt_mask).sum().astype(float)
        iou_visib = np.logical_and(pred_mask_visib, gt_mask_visib).sum().astype(float) / np.logical_or(pred_mask_visib, gt_mask_visib).sum().astype(float)
        result = {}
        result['obj_id'] = obj_id
        result['scene_id'] = scene_id
        result['im_id'] = im_id

        result['dtoid_confident'] = dtoid_confident
        result['zephyr_confident'] = zephyr_confident
        result['use_dtoid_mask'] = use_dtoid_mask
        result['finetune'] = finetune

        result['dtoid_iou'] = dtoid_iou
        result['dtoid_pred_mask'] = dtoid_pred_mask
        result['dtoid_bbox'] = final_bbox
        result['dtoid_score'] = final_score

        result['pred_pose'] = to_np(pred_pose)
        result['pred_score'] = pred_score
        result['pred_err'] = pred_err
        result['pred_add01d'] = float(pred_err < 0.1 * OBJECT_DIAMETERES[DATASET_NAME][obj_id])
        result['pred_mask'] = pred_mask
        result['pred_mask_visib'] = pred_mask_visib
        result['pred_iou'] = iou
        result['pred_iou_visib'] = iou_visib

        result['time_dtoid'] = time_dtoid
        result['time_ppf'] = time_ppf
        result['time_sift'] = time_sift
        result['time_zephyr'] = time_zephyr
        result['time_icp'] = time_icp
        result['time_finetune'] = time_finetune

        test_results.append(result)

    save_path = os.path.join(SAVE_ROOT, "results_%s.pkl" % main_args.exp_name)
    print("Saving results to", save_path)
    pickle.dump({
        "test_results": test_results,
        "main_args": main_args,
        "finetune_logs": finetune_logs,
        "final_state_dict": model.state_dict(),
    }, open(save_path, 'wb'))

    print("Saving results in BOP format")
    saveResultsBop(
        test_results,
        BOP_RESULTS_FOLDER, "online-%s" % main_args.exp_name,
        main_args.dataset_name, pose_key='pred_pose', score_key='pred_score',
        run_eval_script=True,
    )

    df = pd.DataFrame.from_dict(test_results)
    print("DTOID mean IoU:", df['dtoid_iou'].mean())
    print("DTOID Valid IoU recall", (df['dtoid_iou'] > 0.5).astype(float).mean())
    print("Zephyr Valid IoU recall", (df['pred_iou_visib'] > 0.5).astype(float).mean())

    '''Evaluate the results in terms of Detection mAP'''
    tmp_root = Path(OSSID_DET_ROOT)
    saveLmoYcbvGT(tmp_root, bop_root=BOP_DATASETS_ROOT)
    evalFinetuneResults(save_path, DATASET_NAME, tmp_root)


def testDtoidModel(model, test_loader):
    '''
    A function performing a test epoch on the test_loader
    '''
    test_results = []
    for batch in tqdm(test_loader):
        obj_id, scene_id, im_id = batch['obj_id'].item(), batch['scene_id'].item(), batch['im_id'].item()
        dict_to(batch, 0)
        with torch.no_grad():
            model = model.eval()
            out = model.forwardTestTime(batch)

        final_bbox = to_np(out['final_bbox'][0])
        final_score = to_np(out['final_score'][0])
        dtoid_iou = to_np(out['seg_IoU'])
        dtoid_pred_mask = to_np(out['segmentation'][0, 0])

        result = {}
        result['obj_id'] = obj_id
        result['scene_id'] = scene_id
        result['im_id'] = im_id
        result['dtoid_bbox'] = final_bbox
        result['dtoid_score'] = final_score
        result['dtoid_iou'] = dtoid_iou
        result['dtoid_pred_mask'] = dtoid_pred_mask
        result['gt_bbox'] = to_np(batch['bbox_gt'][0, 0, :4])
        test_results.append(result)

    return test_results


def finetuneDtoid(model, train_dataset, optimizer, epochs=1, batch_size=8):
    train_loader = DataLoader(
        train_dataset, batch_size=batch_size, num_workers=8, collate_fn=collate_fn,
        shuffle=True, pin_memory=True
    )
    model = model.train()

    train_logs = []
    for epoch in range(epochs):
        epoch_logs = []
        for batch in tqdm(train_loader):
            batch = move_to(batch, 0)
            if type(model) is MaskRCNN:
                out = model(*batch)
            else:
                out = model(batch)
            loss = out['loss']

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            epoch_logs.append({
                "train_loss": loss.item()
            })
        train_logs.append(epoch_logs)

    return model, train_logs
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Arguments for test-time training')
parser.add_argument("--dataset_name", type=str, default='lmo', choices=['lmo', 'ycbv'], help="The name of the dataset to be used")
parser.add_argument("--exp_name", type=str, default="exp", help="The name of the experiement to be appended to the saved result file")
# About which initial weight to use
# By default, the DTOID pretrained on the render dataset will be loaded as the initial weight
parser.add_argument("--use_offline_model", action="store_true", help="If set, the DTOID model already finetuned offline will be loaded. ")
parser.add_argument("--use_pretrained_dtoid", action="store_true", help="If True, the DTOID model provided by the authors will be used. ")
parser.add_argument("--dtoid_weights_path", type=str, default=None, help="If not None, DTOID weights will be loaded from this path and override all other arguments. ")
parser.add_argument("--n_local_test", type=int, default=None, help="If not None, this value will be used as the number of local templates used for inference. ")
parser.add_argument("--use_dtoid_segmask", action="store_true", help="If set, the segmentation mask by DTOID will be used. Otherwise the bbox will be used. ")
parser.add_argument("--ignore_dtoid_mask", action="store_true", help="If set, the zephyr will not be run on the DTOID mask, but on the entire image. ")
parser.add_argument("--always_dtoid_mask", action="store_true", help="If set, the confidence filtering by DTOID scores will be turned off. ")
parser.add_argument("--use_oracle_gt", action="store_true", help="If set, the ground truth mask and box instead of zephyr's will be used to finetune the DTOID. ")
parser.add_argument("--use_sift_hypos", action="store_true", help="If set, the pose hypotheses will also be estimated from SIFT feature matches. ")
parser.add_argument("--test_seen", action="store_true", help="If set, the models trained on the same object set will be use for testing. ")
parser.add_argument("--backward", action="store_true", help="If set, the images will be sorted in the backward image ID order. ")
parser.add_argument("--use_maskrcnn", action="store_true", help='If set, the mask rcnn model will be used')
# Detailed parameters for finetuning
parser.add_argument("--finetune_interval", type=int, default=8, help="Finetuning will happen after every this number of finetuning datapoints are added")
parser.add_argument("--finetune_warmup", type=int, default=0, help="Finetuning will happen only after this number of datapoints are added")
parser.add_argument("--finetune_epochs", type=int, default=1, help="The epochs of training at each time DTOID is finetuned")
parser.add_argument("--finetune_reset", action="store_true", help="If set, before each finetuning, the network will be reset to the initial weights")
parser.add_argument("--finetune_batch_size", type=int, default=8, help="The batch size for finetuning. ")
parser.add_argument("--non_cum", action="store_true", help="If set, the finetuning example will be cleared after finetuning. ")
parser.add_argument("--save_each", action="store_true", help="If set, the weights of the model will be saved after each finetuning function")
# About how to test the DTOID
# By default, the DTOID will be finetuned and tested gradually
parser.add_argument("--raw_dtoid", action="store_true", help="If True, the DTOID model before finetuning will be tested")
parser.add_argument("--no_finetune", action="store_true", help="If set, the DTOID will not be finetuned. This will be a test script for DTOID+Zephyr")
parser.add_argument("--fast", action="store_true", help="If set, the script will be run at a fast mode. (only add will be used)")
args = parser.parse_args()
main(args) | 45.373611 | 218 | 0.659065 | 4,432 | 32,669 | 4.58213 | 0.130866 | 0.019697 | 0.020091 | 0.013295 | 0.366506 | 0.30131 | 0.269106 | 0.240398 | 0.223557 | 0.210065 | 0 | 0.009642 | 0.247635 | 32,669 | 720 | 219 | 45.373611 | 0.816591 | 0.105452 | 0 | 0.260116 | 0 | 0.001927 | 0.150813 | 0.012808 | 0 | 0 | 0 | 0 | 0.001927 | 1 | 0.009634 | false | 0.001927 | 0.077071 | 0 | 0.094412 | 0.036609 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
657783fd719850dfbb10411576f26607211a0d98 | 2,754 | py | Python | microscopium/screens/image_xpress.py | DragaDoncila/microscopium | 46cdebc29ce530366bb2c1b4b9ce8c4f10fc700a | [
"BSD-3-Clause"
] | null | null | null | microscopium/screens/image_xpress.py | DragaDoncila/microscopium | 46cdebc29ce530366bb2c1b4b9ce8c4f10fc700a | [
"BSD-3-Clause"
] | null | null | null | microscopium/screens/image_xpress.py | DragaDoncila/microscopium | 46cdebc29ce530366bb2c1b4b9ce8c4f10fc700a | [
"BSD-3-Clause"
] | null | null | null | """Feature computations and other functions for Image Xpress screens
"""
import os
import collections as coll
import re
from .. import features as feat
from .. import _util
def ix_semantic_filename(fn):
"""Split an ImageXpress filename into its annotated components.
Parameters
----------
fn : string
A filename from the ImageXpress high-content screening system.
Returns
-------
semantic : collections.OrderedDict {string: string}
A dictionary mapping the different components of the filename.
"""
keys = ['directory', 'prefix', 'plate', 'well', 'field', 'channel',
'suffix']
directory, fn = os.path.split(fn)
fn, suffix = fn.split('.', 1)
fn_regex = re.search(r'([^\W_]{0,})(?:_{0,})([A-P]\d+)_s(\d+)_w(\d{1})',
fn)
# finds last set of contiguous digits after underscore
dir_regex = re.search(r'_(\d+)(?!.*_(\d+))', directory)
prefix, well, field, channel = map(lambda x: fn_regex.group(x),
range(1, 5))
plate = int(dir_regex.group(1))
    values = [directory, prefix, plate, well,
_util.int_or_none(field), _util.int_or_none(channel), suffix]
semantic = coll.OrderedDict(zip(keys, values))
# molecular devices 1-index their channel and field values,
# subtract 1 if these fields exist
for key in ["channel", "field"]:
if semantic[key] is not None:
semantic[key] -= 1
return semantic
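# Worked example (derived from the parsing rules above; the trailing hash in the
# filename is arbitrary):
#   ix_semantic_filename("./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif")
# yields directory "./Week1_22123", prefix "", plate 22123, well "G10",
# field 1 and channel 0 (both 0-indexed after the adjustment above), and suffix "tif".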
def filename2coord(fn):
"""Obtain (plate, well) coordinates from a filename.
Parameters
----------
fn : string
        The input filename. This must include a directory with the
        plate number, as these aren't coded in IX files.
Returns
-------
    coord : (int, string) tuple
        The (plate, well) coordinates of the image.
Examples
--------
>>> fn = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
>>> filename2coord(fn)
(22123, 'G10')
"""
sem = ix_semantic_filename(fn)
return sem["plate"], sem["well"]
def filename2id(fn):
"""Get a mongo ID, string representation of (plate, well), from filename.
Parameters
----------
fn : string
Filename of a standard Image Xpress screen image. This must include
the directory with plate number.
Returns
-------
id_ : string
The mongo ID.
Examples
--------
>>> fn = "./Week4_27481/Week1_22123/G10_s2_w11C3B9BCC-"\
"E48F-4C2F-9D31-8F46D8B5B972.tif"
>>> filename2id(fn)
'22123-G10'
"""
from .myores import key2mongo
id_ = key2mongo(filename2coord(fn))
return id_
feature_map = feat.default_feature_map
| 27 | 78 | 0.608206 | 338 | 2,754 | 4.85503 | 0.405325 | 0.027422 | 0.032907 | 0.024375 | 0.063376 | 0.063376 | 0.063376 | 0.063376 | 0.063376 | 0.063376 | 0 | 0.045366 | 0.255628 | 2,754 | 101 | 79 | 27.267327 | 0.755122 | 0.492738 | 0 | 0 | 0 | 0.032258 | 0.106436 | 0.038779 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.193548 | 0 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
657bc170d03e396a0fa4e3f22dcddd2461b71869 | 763 | py | Python | chexnet.py | skaravind/Python-Whatsapp-Chatbot | a1f37cbd7e3e0cae7620454fe06ad263c796d32f | [
"MIT"
] | 98 | 2018-07-10T10:54:38.000Z | 2021-11-15T08:09:13.000Z | chexnet.py | skaravind/Python-Whatsapp-Chatbot | a1f37cbd7e3e0cae7620454fe06ad263c796d32f | [
"MIT"
] | 11 | 2018-07-10T10:54:29.000Z | 2022-03-12T00:00:27.000Z | chexnet.py | skaravind/Python-Whatsapp-Chatbot | a1f37cbd7e3e0cae7620454fe06ad263c796d32f | [
"MIT"
] | 25 | 2018-07-22T21:22:23.000Z | 2021-06-06T15:26:53.000Z | from keras.applications.densenet import DenseNet121, preprocess_input, decode_predictions
from PIL import Image
import numpy as np
print("importing model")
model = DenseNet121(weights='weights.h5', classes=14)
print("done.")
classes=['Atelectasis','Cardiomegaly','Effusion','Infiltration','Mass','Nodule',
'Pneumonia','Pneumothorax','Consolidation','Edema','Emphysema','Fibrosis','Pleural_Thickening','Hernia']
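# Example usage of predict() below (hypothetical image path; the output is
# illustrative only, not a real model prediction):
#   predict("chest_xray.png")  # -> e.g. ["Effusion", "Mass", "Nodule"]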
def predict(path):
img = Image.open(path).resize((224,224))
x = np.array(img)
if len(x.shape) == 2:
x = np.stack([x]*3,2)
else:
pass
x = (x-x.mean())/x.std()
x = np.expand_dims(x, axis=0)
preds = model.predict(x)
print("Model's top 3 predicted:")
top3 = np.argsort(-preds[0])[:3]
return [classes[i] for i in top3] | 30.52 | 104 | 0.699869 | 112 | 763 | 4.732143 | 0.642857 | 0.022642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035503 | 0.114024 | 763 | 25 | 105 | 30.52 | 0.748521 | 0 | 0 | 0 | 0 | 0 | 0.244764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.045455 | 0.181818 | 0 | 0.272727 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
657c47f2931e8d89d4c73394c1716a2801879905 | 4,680 | py | Python | hacGpsPoints.py | ddzeko/hac-enc-transform-excel | 573721a5004b50cae834202781e348e399180518 | [
"MIT-0"
] | null | null | null | hacGpsPoints.py | ddzeko/hac-enc-transform-excel | 573721a5004b50cae834202781e348e399180518 | [
"MIT-0"
] | null | null | null | hacGpsPoints.py | ddzeko/hac-enc-transform-excel | 573721a5004b50cae834202781e348e399180518 | [
"MIT-0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author Copyright (c) 2022 Damir Dzeko Antic
# @version 0.1.1
# @lastUpdate 2022-02-02
# ChangeLog:
# - can be tested with: python3 -m unittest hacGpsPoints.py
import sys
try:
assert (sys.version_info.major == 3 and sys.version_info.minor >= 7), "Python version must be 3.7 or newer"
except Exception as e:
print (e)
sys.exit(1)
from os import environ
import re
import csv
import unittest
# enumeration for columnar formats we know how to deal with
class CSV_Dialect_local(csv.Dialect):
delimiter = ';'
doublequote = True
quotechar = '"'
lineterminator = '\n'
quoting = csv.QUOTE_ALL
csv.register_dialect('local', CSV_Dialect_local)
# check if debugging requested via environment variable DEBUG
try:
DEBUG = int(environ.get('DEBUG'))
except:
DEBUG = 0
class Dict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class HAC_gpsPoints(dict):
"""
is this class a singleton or it just deviates from the usual object-oriented design
anyway, do not attempt to create multiple instances
"""
hac_gpsPoints = dict()
# called to check if key exists in registry
@classmethod
def lookup(classObj, mjesto):
return classObj.hac_gpsPoints[mjesto] if mjesto in classObj.hac_gpsPoints else None
@classmethod
def loadFromCsvFile(classObj, fn):
""" read in the GPS points of entries and exits to Croatian Highways (HAC) road network
"""
fn_match = re.match('(?i)^.*\.csv$', fn)
if not fn_match:
raise HAC_gpsPoints_Error(f'Not our file name: "{fn}"')
with open(fn, 'r', encoding='utf-8', newline=None) as file:
_1st_line = file.readline().rstrip('\n')
if _1st_line == r'"Naplatna postaja HAC";"GPS pin ulaza";"GPS pin izlaza"':
dialect = csv.get_dialect('local')
has_header = True
else:
raise HAC_gpsPoints_Error(f'Format of the file "{fn}" not recognized:\n {repr(_1st_line)}')
file.seek(0) # Rewind.
reader = csv.reader(file, dialect)
if has_header:
next(reader) # Skip header row.
for row in reader:
try:
mjesto,gps_ulaz,gps_izlaz,*tail = row
except Exception as e:
raise HAC_gpsPoints_Error(f'In line {reader.line_num}, something strange: {row}\n {str(e)}')
if len(mjesto) == 0 or mjesto.startswith('#'): # row commented out
continue
hac_topo = HAC_gpsPoints(mjesto)
if gps_ulaz:
hac_topo.setPoint('ulaz', *re.split(', ?', gps_ulaz))
if gps_izlaz:
hac_topo.setPoint('izlaz', *re.split(', ?', gps_izlaz))
# called to create an object and register it
def __init__(self, mjesto):
dict.__init__(self)
self._key = mjesto
HAC_gpsPoints.hac_gpsPoints[self._key] = self
# called to create a named point coordinates
def setPoint(self, pointName, lon, lat):
self[pointName] = Dict({'lon': format(float(lon), '.6f'), 'lat': format(float(lat), '.6f')})
def getPoint(self, pointName):
return [ self[pointName].lon, self[pointName].lat ] if pointName in self else None
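# Round-trip sketch (hypothetical entry; setPoint formats values to six decimals):
#   p = HAC_gpsPoints('Demo'); p.setPoint('ulaz', '44.8914751', '15.1901423')
#   p.getPoint('ulaz')          # -> ['44.891475', '15.190142']
#   HAC_gpsPoints.lookup('Demo') is p  # the constructor registers each instance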
class HAC_gpsPoints_Error(Exception):
"""Exception raised for errors while loading GPS Points.
Attributes:
message -- explanation of the error
"""
    def __init__(self, message=f'Failed to load GPS Points at "{__name__}"'):
self.message = message
super().__init__(self.message)
def __str__(self):
return f'{self.message}'
class TestHacGpsPoints(unittest.TestCase):
def setUp(self):
self.hac_gpsPoints = HAC_gpsPoints.loadFromCsvFile('autocesta_ulazi_izlazi.csv')
def test_hac_gps_point_1(self):
actual = HAC_gpsPoints.lookup('Otočac')
expected = {"ulaz": {"lon": "44.891475", "lat": "15.190142"}, "izlaz": {"lon": "44.892615", "lat": "15.188661"}}
self.assertEqual(actual, expected)
def test_hac_gps_point_2(self):
actual = HAC_gpsPoints.lookup('Dugobabe') # 'Vučevica' would actually work (on the full HAC data set)
expected = None
self.assertEqual(actual, expected)
def main():
raise HAC_gpsPoints_Error(f'{__file__} should not be run as stand-alone program')
if __name__ == '__main__':
sys.exit(main())
| 30.993377 | 120 | 0.61859 | 592 | 4,680 | 4.690878 | 0.412162 | 0.069139 | 0.030609 | 0.031689 | 0.088225 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019276 | 0.268376 | 4,680 | 150 | 121 | 31.2 | 0.791764 | 0.197222 | 0 | 0.103448 | 0 | 0 | 0.141116 | 0.007042 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.126437 | false | 0 | 0.057471 | 0.034483 | 0.37931 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65809003f23a13e0c6398f4e572bea38b6de1c31 | 1,044 | py | Python | thumbor/compatibility/result_storage.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | 1 | 2021-12-24T02:01:52.000Z | 2021-12-24T02:01:52.000Z | thumbor/compatibility/result_storage.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | 2 | 2022-03-17T06:53:16.000Z | 2022-03-31T19:42:00.000Z | thumbor/compatibility/result_storage.py | bear8421/thumbor | 00a0c44d44b8fa5f06c38deee7123793addda404 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import thumbor.result_storages as storages
from thumbor.compatibility import compatibility_get
class Storage(storages.BaseStorage):
@property
def storage(self):
storage = self.context.modules.compatibility_legacy_result_storage
if storage is None:
raise RuntimeError(
"The 'COMPATIBILITY_LEGACY_RESULT_STORAGE' configuration should point "
"to a valid result storage when using compatibility result storage."
)
return storage
async def put(self, image_bytes):
if self.storage is None:
return
return self.storage.put(image_bytes)
async def get(self):
if self.storage is None:
return
return await compatibility_get(func=self.storage.get)
| 29.828571 | 87 | 0.681992 | 125 | 1,044 | 5.608 | 0.512 | 0.078459 | 0.055635 | 0.091298 | 0.088445 | 0.088445 | 0.088445 | 0 | 0 | 0 | 0 | 0.006281 | 0.237548 | 1,044 | 34 | 88 | 30.705882 | 0.874372 | 0.225096 | 0 | 0.2 | 0 | 0 | 0.168329 | 0.046135 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6580ec17b33f5f4a1260c6ea2581724c9ac0d05d | 3,058 | py | Python | primehub/extras/devlab.py | InfuseAI/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 10 | 2021-09-13T23:14:22.000Z | 2022-02-06T06:07:40.000Z | primehub/extras/devlab.py | KellenJohn/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 4 | 2021-08-10T03:10:27.000Z | 2021-12-16T02:11:50.000Z | primehub/extras/devlab.py | KellenJohn/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 1 | 2021-12-21T11:59:51.000Z | 2021-12-21T11:59:51.000Z | import json
import sys
import time
from tempfile import mkstemp
from primehub import Helpful, cmd, has_data_from_stdin, Module
from primehub.utils.permission import ask_for_permission
class DevLab(Helpful, Module):
@cmd(name='submit-case', description='submit use case')
def read_from_stdin_or_file(self, *args, **kwargs):
"""
primehub devlab submit-case abc -f -xd <<EOF
{
"instanceType": "cpu-1",
"image": "base-notebook",
"displayName": "test",
"command": "echo \"test1\"\necho \"test2\"",
}
EOF
:param args:
:param kwargs:
:return:
"""
if has_data_from_stdin():
print("".join(sys.stdin.readlines()))
print(args, kwargs)
@cmd(name='cmd', description='show internal commands')
def print_register_table(self):
from primehub.utils.decorators import show_debug_info
show_debug_info()
@cmd(name='test-query', description='test-graphql')
def test_query(self):
query = """
{
me {
effectiveGroups {
id
name
displayName
}
}
}
"""
results = self.request({}, query)
return results['data']['me']['effectiveGroups']
@ask_for_permission
@cmd(name='say-yes', description='show case for @ask_for_permission')
def ask_for_permission(self, **kwargs):
from datetime import datetime
s = str(datetime.now())
with open("ask_for_permission.txt", "w") as fh:
fh.write(s)
fh.write("\n")
return dict(message='create a file [ask_for_permission.txt] having the current datetime content')
@cmd(name='regression-job-logs', description='regression')
def regression(self):
instance_type = [x for x in self.primehub.instancetypes.list() if "gpu" not in x][0]['id']
scripts = r'bash -c "for i in {1..5}; do date; sleep 1; done"'
job_spec = dict(instanceType=instance_type, image="base-notebook", displayName="test-from-cli", command=scripts)
fd, path = mkstemp(".json")
with open(path, "w") as fh:
fh.write(json.dumps(job_spec))
my_job = self.primehub.jobs.submit(file=path)
my_id = my_job['id']
print("job id:", my_job['id'])
last_state = None
while True:
p = self.primehub.jobs.get(my_id)['phase']
if last_state is None:
last_state = p
if p != last_state:
print("Job Phase: {} -> {}".format(last_state, p))
last_state = p
if last_state == 'Running':
break
if last_state == 'Succeeded':
break
time.sleep(1)
print("Logs:")
for g in self.primehub.jobs.logs(my_id, follow=True):
print(g)
def help_description(self):
return "dev-lab is used to the primehub-python-sdk development and testing"
| 30.58 | 120 | 0.565402 | 367 | 3,058 | 4.580381 | 0.39782 | 0.042832 | 0.057109 | 0.019036 | 0.05235 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003784 | 0.308699 | 3,058 | 99 | 121 | 30.888889 | 0.791391 | 0.098103 | 0 | 0.057971 | 0 | 0 | 0.231587 | 0.016856 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.115942 | 0.014493 | 0.26087 | 0.101449 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
658102426bdbb54d73275227e6aca5af62d9af3b | 4,255 | py | Python | dol/__init__.py | noelpuru/djongo-optimistic-lock | 7eeedc8e637c7edaf739956f63234d7e107a168b | [
"BSD-2-Clause"
] | null | null | null | dol/__init__.py | noelpuru/djongo-optimistic-lock | 7eeedc8e637c7edaf739956f63234d7e107a168b | [
"BSD-2-Clause"
] | null | null | null | dol/__init__.py | noelpuru/djongo-optimistic-lock | 7eeedc8e637c7edaf739956f63234d7e107a168b | [
"BSD-2-Clause"
] | null | null | null | from djongo import models
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.contrib.admin.widgets import AdminIntegerFieldWidget
class ConcurrentUpdate(Exception):
"""
Raised when a model can not be saved due to a concurrent update.
"""
class ReadonlyInput(forms.TextInput):
"""
A HiddenInput would be perfect for version fields, but hidden
inputs leave ugly empty rows in the admin. The version must
be submitted, of course, to be checked, so we can't just use
ModelAdmin.readonly_fields.
Pending Django ticket #11277, this displays the version in an
uneditable input so there's no empty row in the admin table.
https://code.djangoproject.com/ticket/11277
"""
def __init__(self, *args, **kwargs):
super(ReadonlyInput, self).__init__(*args, **kwargs)
# just readonly, because disabled won't submit the value
self.attrs['readonly'] = 'readonly'
class VersionField(models.PositiveIntegerField):
"""
An integer field to track versions. Every time the model is saved,
it is incremented by one.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('default', 0)
super(VersionField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
widget = kwargs.get('widget')
if widget:
if issubclass(widget, AdminIntegerFieldWidget):
widget = ReadonlyInput()
else:
widget = forms.HiddenInput
kwargs['widget'] = widget
return super(VersionField, self).formfield(**kwargs)
class VersionedMixin(object):
"""
Model mixin implementing version checking during saving.
When a concurrent update is detected, saving is aborted and
ConcurrentUpdate will be raised.
"""
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
version_field = self.get_version_field()
# _do_update is called once for each model in the inheritance
# hierarchy. We only care about the model with the version field.
if version_field.model != base_qs.model:
return super(VersionedMixin, self)._do_update(
base_qs, using, pk_val, values, update_fields, forced_update)
if version_field.attname in self.get_deferred_fields():
# With a deferred VersionField, it's not possible to do any
# sensible concurrency checking, so throw an error. The
# other option would be to treat deferring the VersionField
# the same as excluding it from `update_fields` -- a way to
# bypass checking altogether.
raise RuntimeError("It doesn't make sense to save a model with a deferred VersionField")
# pre_save may or may not have been called at this point, based on if
# version_field is in update_fields. Since we need to reliably know the
# old version, we can't increment there.
old_version = version_field.value_from_object(self)
# so increment it here instead. Now old_version is reliable.
for i, value_tuple in enumerate(values):
if isinstance(value_tuple[0], VersionField):
assert old_version == value_tuple[2]
values[i] = (
value_tuple[0],
value_tuple[1],
value_tuple[2] + 1,
)
setattr(self, version_field.attname, old_version + 1)
updated = super(VersionedMixin, self)._do_update(
base_qs=base_qs.filter(**{version_field.attname: old_version}),
using=using,
pk_val=pk_val,
values=values,
update_fields=update_fields if values else None, # Make sure base_qs is always checked
forced_update=forced_update
)
if not updated and base_qs.filter(pk=pk_val).exists():
raise ConcurrentUpdate
return updated
def get_version_field(self):
for field in self._meta.fields:
if isinstance(field, VersionField):
return field
raise ImproperlyConfigured(
'VersionedMixin models must have a VersionField')
| 37.991071 | 100 | 0.651704 | 525 | 4,255 | 5.139048 | 0.371429 | 0.044477 | 0.011119 | 0.011119 | 0.096368 | 0.059303 | 0.059303 | 0.034099 | 0.034099 | 0.034099 | 0 | 0.005829 | 0.274266 | 4,255 | 111 | 101 | 38.333333 | 0.867876 | 0.32785 | 0 | 0.035088 | 0 | 0 | 0.053435 | 0 | 0 | 0 | 0 | 0 | 0.017544 | 1 | 0.087719 | false | 0 | 0.070175 | 0 | 0.298246 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6581d3ca6895f2fe44370613688f4e7ccf2f4b0f | 756 | py | Python | pgumosru/utils.py | olekhov/meshconverter | 6ad56537da3163b8b12113b2bcedd1ab2888318f | [
"Unlicense"
] | 1 | 2018-09-24T11:54:12.000Z | 2018-09-24T11:54:12.000Z | utils.py | yasukuf/dnevnik | 48651f2ab936c2388098f62a1692e2380cd233fc | [
"MIT"
] | null | null | null | utils.py | yasukuf/dnevnik | 48651f2ab936c2388098f62a1692e2380cd233fc | [
"MIT"
] | null | null | null | #!env python3
# -*- coding: utf-8 -*-
import requests
def my_get_post(f, url, **kwargs):
""" Try to GET or POST up to maxtries times.
If it fails - raise the exception.
Used to counter bogus pgu.mos.ru responses.
    Sometimes it does not work on the first (and second) connection. """
    maxtries = 5
    attempt = 0
    havedata = False
    # print("request:", url)
    while attempt < maxtries:
        try:
            r = f(url, allow_redirects=False, **kwargs)
            return r
        except Exception as e:
            print(e)
            attempt += 1
    raise ConnectionError("Can not connect")  # raising a bare string is a TypeError in Python 3
def print_dict(d):
""" Pretty dictionary printer. For debugging """
    for key, value in d.items():
        print(f"[{key}]=[{value}]")
| 24.387097 | 74 | 0.584656 | 102 | 756 | 4.294118 | 0.696078 | 0.018265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009311 | 0.289683 | 756 | 30 | 75 | 25.2 | 0.806331 | 0.382275 | 0 | 0 | 0 | 0 | 0.045558 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0 | 0.25 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6587f834a44bd5efb57ed4142f69cb5a9ed6b0fa | 3,472 | py | Python | python/01_app_crawler/myapp.py | jjmoo/daily | fb8cf0e64606a2a76a6141bb0e9ccd143c30f07c | [
"MIT"
] | 1 | 2020-03-27T16:42:02.000Z | 2020-03-27T16:42:02.000Z | python/01_app_crawler/myapp.py | jjmoo/daily | fb8cf0e64606a2a76a6141bb0e9ccd143c30f07c | [
"MIT"
] | null | null | null | python/01_app_crawler/myapp.py | jjmoo/daily | fb8cf0e64606a2a76a6141bb0e9ccd143c30f07c | [
"MIT"
] | null | null | null | import re
from urllib import parse
url_pattern = re.compile(r'href="(\S*?(union\.htm\?|category\.htm\?|detail\.htm\?)\S*?)"')
category_pattern = re.compile(r'category\.htm\?orgame=(\d+)(&categoryId=(\d+))?')
app_list_pattern = re.compile(r'appList\.htm\?orgame=(\d+)&categoryId=(\d+).*?&pageContext=(\d+)')
list_count_pattern = re.compile(r'"count":(\d+)')
list_json_pattern = re.compile(r'"pkgName":"(\S+?)"')
apk_pkg_pattern = re.compile(r'detail\.htm\?apkName=((\w|\.)+)')
apk_name_pattern = re.compile(r'appname="(\S+)"')
apk_category_pattern = re.compile(r'class="det-type-link".*?>(.*?)<')
apk_download_pattern = re.compile(r'data-apkUrl="(.*?\.apk)')
category_child_format = 'https://sj.qq.com/myapp/cate/appList.htm?orgame=%d&categoryId=%d&pageSize=50&pageContext=%d'
app_detail_url_format = 'https://android.myapp.com/myapp/detail.htm?apkName=%s'
def get_seed_url():
return 'https://sj.qq.com/myapp/index.htm'
def parse_target(url, content):
child_list = parse_common_child(url, content)
child_list += parse_category_child(url, content)
pkg, category, name, download = parse_app_detail(url, content)
return pkg, category, name, download, child_list
def combine_url(base, url):
return parse.urljoin(base, url)
def parse_common_child(url, content):
matcher_list = url_pattern.findall(content)
child_list = []
if len(matcher_list) > 0:
for matcher in matcher_list:
child = combine_url(url, matcher[0])
if child not in child_list:
child_list.append(child)
return child_list
def parse_category_child(url, content):
category_child = []
if 'category.htm?' in url:
matcher = category_pattern.search(url)
if matcher:
orgame = int(matcher.group(1))
category_id = 0
if matcher.group(3) is not None:
category_id = int(matcher.group(3))
category_child.append(category_child_format % (orgame, category_id, 0))
if 'appList.htm?' in url:
matcher = app_list_pattern.search(url)
if matcher:
orgame = int(matcher.group(1))
category_id = int(matcher.group(2))
context = int(matcher.group(3))
cnt, child_list = parse_app_list(url, content)
category_child += child_list
category_child.append(category_child_format % (orgame, category_id, context + cnt))
return category_child
def parse_app_list(_, content):
cnt = 0
child_list = []
matcher = list_count_pattern.search(content)
if matcher:
cnt = int(matcher.group(1))
matcher_list = list_json_pattern.findall(content)
if len(matcher_list) > 0:
for pkg_matcher in matcher_list:
child_list.append(app_detail_url_format % pkg_matcher)
return cnt, child_list
def parse_app_detail(url, content):
pkg = None
category = None
name = None
download = None
if 'detail.htm?' in url:
matcher = apk_pkg_pattern.search(url)
if matcher:
pkg = matcher.group(1)
matcher = apk_category_pattern.search(content)
if matcher:
category = matcher.group(1)
matcher = apk_name_pattern.search(content)
if matcher:
name = matcher.group(1)
matcher = apk_download_pattern.search(content)
if matcher:
download = matcher.group(1)
return pkg, category, name, download
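# Sketch of how these pieces compose (the package name is hypothetical; `content`
# is the fetched HTML that a crawler loop would supply):
#   url = app_detail_url_format % 'com.example.app'
#   pkg, category, name, download, children = parse_target(url, content)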
| 34.72 | 117 | 0.643721 | 452 | 3,472 | 4.732301 | 0.174779 | 0.050491 | 0.067321 | 0.041141 | 0.42777 | 0.148668 | 0.103787 | 0.103787 | 0.103787 | 0.053296 | 0 | 0.007003 | 0.218606 | 3,472 | 99 | 118 | 35.070707 | 0.781423 | 0 | 0 | 0.160494 | 0 | 0.012346 | 0.152362 | 0.081797 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08642 | false | 0 | 0.024691 | 0.024691 | 0.197531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
658864644d0120c30d936609437956852a7c9872 | 1,933 | py | Python | hamming-code/matrix.py | RobinvdGriend/hamming-code | e699f90b43e0a0653cd88726f4e0920ffb6ba26d | [
"MIT"
] | 1 | 2017-05-30T08:07:42.000Z | 2017-05-30T08:07:42.000Z | hamming-code/matrix.py | RobinvdGriend/hamming-code | e699f90b43e0a0653cd88726f4e0920ffb6ba26d | [
"MIT"
] | 1 | 2017-06-08T10:12:09.000Z | 2017-06-08T10:12:09.000Z | hamming-code/matrix.py | RobinvdGriend/hamming-code | e699f90b43e0a0653cd88726f4e0920ffb6ba26d | [
"MIT"
] | null | null | null | '''
Het doel van deze comment is obscuur
en qua geheugengebruik is het duur
je vraagt je vast af, maar waarom?
Het antwoord is, heel saai, daarom
'''
class Matrix:
def __init__(self, values):
self.values = values
'''returns transpose'''
def transpose(self):
new_matrix = []
for i in range(len(self.values[0])):
new_row = []
for j, row in enumerate(self.values):
new_row.append(self.values[j][i])
new_matrix.append(new_row)
return Matrix(new_matrix)
'''
takes two matrices A and B as arguments and returns AB. Overrides the @
operator
'''
def __matmul__(self, other):
new_matrix = []
for i, row in enumerate(self.values):
new_row = []
for k in range(len(other.values[0])):
counter = 0
for j, element in enumerate(row):
counter += element * other.values[j][k]
new_row.append(counter)
new_matrix.append(new_row)
return Matrix(new_matrix)
''' adds two matrices '''
def __add__(self, other):
new_matrix = []
for i, row in enumerate(self.values):
new_row = []
for j, element in enumerate(row):
new_element = element + other.values[i][j]
new_row.append(new_element)
new_matrix.append(new_row)
return Matrix(new_matrix)
''' takes modulo two of each entry in the matrix'''
def getbinary(self):
new_matrix = []
for row in self.values:
new_row = []
for element in row:
new_element = element % 2
new_row.append(new_element)
new_matrix.append(new_row)
return Matrix(new_matrix)
def __str__(self):
return str(self.values)
def __repr__(self):
return str(self.values)
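'''minimal usage sketch (example values chosen for illustration): multiply a
2x2 matrix by a column vector, then reduce mod 2 as Hamming codes require'''
if __name__ == '__main__':
    m = Matrix([[1, 0], [1, 1]])
    v = Matrix([[1], [1]])
    print(m @ v)                # [[1], [2]]
    print((m @ v).getbinary())  # [[1], [0]] after taking each entry mod 2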
| 27.225352 | 75 | 0.55613 | 241 | 1,933 | 4.261411 | 0.282158 | 0.105161 | 0.046738 | 0.062317 | 0.461538 | 0.398247 | 0.352483 | 0.323272 | 0.323272 | 0.282376 | 0 | 0.003162 | 0.345577 | 1,933 | 70 | 76 | 27.614286 | 0.808696 | 0.072944 | 0 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159091 | false | 0 | 0 | 0.045455 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65898f25be370e61dd3bba430ffc17ee10f02bee | 2,567 | py | Python | snippets/test_nametable-from-filename.py | thundernixon/fontbakery | cff658dd7385179915b4770c42d9ae3ab22fa25d | [
"Apache-2.0"
] | 351 | 2015-01-12T09:27:03.000Z | 2022-03-24T14:37:56.000Z | snippets/test_nametable-from-filename.py | thundernixon/fontbakery | cff658dd7385179915b4770c42d9ae3ab22fa25d | [
"Apache-2.0"
] | 2,308 | 2015-01-07T10:49:14.000Z | 2022-03-31T22:55:21.000Z | snippets/test_nametable-from-filename.py | thundernixon/fontbakery | cff658dd7385179915b4770c42d9ae3ab22fa25d | [
"Apache-2.0"
] | 89 | 2015-03-02T17:31:04.000Z | 2022-03-16T13:18:59.000Z | #!/usr/bin/env python
# Copyright 2013,2016 The Font Bakery Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import unittest
import os
from fontTools.ttLib import TTFont
script = __import__("fontbakery-nametable-from-filename")
class NameTableFromTTFName(unittest.TestCase):
def _font_renaming(self, f_path):
"""The test fonts have been generated from Glyphsapp and conform
to the googlefonts nametable spec. The test should pass if the new
nametable matches the test font's name table."""
fonts_paths = [os.path.join(f_path, f) for f in os.listdir(f_path)
if '.ttf' in f]
for font_path in fonts_paths:
font = TTFont(font_path)
old_nametable = font['name']
new_nametable = script.nametable_from_filename(font_path)
for field in script.REQUIRED_FIELDS:
if old_nametable.getName(*field):
enc = old_nametable.getName(*field).getEncoding()
self.assertEqual(
str(old_nametable.getName(*field)).decode(enc),
str(new_nametable.getName(*field)).decode(enc),
)
def test_nunito_renaming(self):
"""Nunito Chosen because it has another family Nunito Heavy and a lot
of weights"""
f_path = os.path.join('data', 'test', 'nunito')
self._font_renaming(f_path)
def test_cabin_renaming(self):
"""Cabin chosen because it has a seperate Condensed family"""
f_path = os.path.join('data', 'test', 'cabin')
self._font_renaming(f_path)
def test_glyphsapp_family_sans_export(self):
"""The ultimate test. Can this naming tool repoduce Google Font's
Naming schema.
Source repo here: https://github.com/davelab6/glyphs-export"""
f_path = os.path.join('data', 'test', 'familysans')
self._font_renaming(f_path)
if __name__ == '__main__':
unittest.main()
| 39.492308 | 77 | 0.664199 | 346 | 2,567 | 4.780347 | 0.442197 | 0.027207 | 0.024184 | 0.043531 | 0.124547 | 0.075574 | 0.075574 | 0 | 0 | 0 | 0 | 0.006704 | 0.244644 | 2,567 | 64 | 78 | 40.109375 | 0.846313 | 0.431243 | 0 | 0.1 | 0 | 0 | 0.068642 | 0.024566 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
658bd11b2c735afb8273225d50914f15550d689c | 2,047 | py | Python | src/frama_use_case.py | hsu1943/frama | cc1fcf08c1b30ee4aef1a70f399e1119c54f8d8a | [
"MIT"
] | 17 | 2018-04-18T16:43:15.000Z | 2022-03-24T13:59:01.000Z | src/frama_use_case.py | hsu1943/frama | cc1fcf08c1b30ee4aef1a70f399e1119c54f8d8a | [
"MIT"
] | 1 | 2020-12-16T15:43:51.000Z | 2020-12-16T15:43:51.000Z | src/frama_use_case.py | hsu1943/frama | cc1fcf08c1b30ee4aef1a70f399e1119c54f8d8a | [
"MIT"
] | 5 | 2018-04-27T06:38:11.000Z | 2021-06-07T19:51:02.000Z | # Copyright (c) 2017 Ioannis Athanasiadis(supernlogn)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from frama_numpy_performance import frama_perf
import numpy as np
from matplotlib import pyplot as plt
# Create input; the user can define their own input
Length = 10000
x = np.linspace(0,10 * np.pi, Length)
Price = 2 * np.sin(x)
Price[int(Length/2):Length] += 3
Noise = 0.2 * np.random.randn(Price.shape[0]) # white noise
InputPrice = Price + Noise
batch = 100
Filt = frama_perf(InputPrice, batch)
# plot the result to figure out the difference
# between it (Filt) and the desired outcome (Price)
fig, (ax1, ax2) = plt.subplots(1,2, sharex=True, sharey=True)
ax1.plot(Price, label='real price', linewidth=3.0)
ax1.plot(Filt, label='estimated price', linewidth=1.0)
leg1 = ax1.legend()
ax2.plot(InputPrice, label='price + noise', linewidth=3.0)
ax2.plot(Filt, label='estimated price', linewidth=1.0)
leg2 = ax2.legend()
fig.suptitle('FRAMA under 10% noise and batch = 100')
# plt.savefig('../images/estimation_example5.png')
plt.show()
| 40.94 | 80 | 0.756229 | 322 | 2,047 | 4.791925 | 0.509317 | 0.057032 | 0.01685 | 0.028516 | 0.049255 | 0.049255 | 0.049255 | 0.049255 | 0 | 0 | 0 | 0.027089 | 0.152418 | 2,047 | 49 | 81 | 41.77551 | 0.862248 | 0.620909 | 0 | 0 | 0 | 0 | 0.119522 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
658ce0600a81623bca72e8f2d79eced3b9a890e6 | 14,610 | py | Python | bottler/core.py | bugout-dev/unim-bottler | 98ced31a49c3adc6c29631678efbbcac8ec55f85 | [
"Apache-2.0"
] | 4 | 2022-01-25T23:35:49.000Z | 2022-01-30T23:28:11.000Z | bottler/core.py | bugout-dev/unim-bottler | 98ced31a49c3adc6c29631678efbbcac8ec55f85 | [
"Apache-2.0"
] | null | null | null | bottler/core.py | bugout-dev/unim-bottler | 98ced31a49c3adc6c29631678efbbcac8ec55f85 | [
"Apache-2.0"
] | 1 | 2022-02-02T18:29:31.000Z | 2022-02-02T18:29:31.000Z | """
Generic diamond functionality for Moonstream contracts.
"""
import argparse
import json
import os
import sys
from typing import Any, Dict, List, Optional, Set
from brownie import network
from bottler.MockErc20 import MockErc20
from bottler.MockTerminus import MockTerminus
from . import (
abi,
Diamond,
DiamondCutFacet,
DiamondLoupeFacet,
BottlerFacet,
BottlerInitializer,
OwnershipFacet,
)
FACETS: Dict[str, Any] = {
"DiamondCutFacet": DiamondCutFacet,
"DiamondLoupeFacet": DiamondLoupeFacet,
"OwnershipFacet": OwnershipFacet,
"BottlerFacet": BottlerFacet,
}
FACET_PRECEDENCE: List[str] = [
"DiamondCutFacet",
"OwnershipFacet",
"DiamondLoupeFacet",
"BottlerFacet",
]
FACET_ACTIONS: Dict[str, int] = {"add": 0, "replace": 1, "remove": 2}
ZERO_ADDRESS = "0x0000000000000000000000000000000000000000"
def facet_cut(
diamond_address: str,
facet_name: str,
facet_address: str,
action: str,
transaction_config: Dict[str, Any],
initializer_address: str = ZERO_ADDRESS,
ignore_methods: Optional[List[str]] = None,
ignore_selectors: Optional[List[str]] = None,
) -> Any:
"""
Cuts the given facet onto the given Diamond contract.
Resolves selectors in the precedence order defined by FACET_PRECEDENCE (highest precedence first).
"""
assert (
facet_name in FACETS
), f"Invalid facet: {facet_name}. Choices: {','.join(FACETS)}."
assert (
action in FACET_ACTIONS
), f"Invalid cut action: {action}. Choices: {','.join(FACET_ACTIONS)}."
if ignore_methods is None:
ignore_methods = []
if ignore_selectors is None:
ignore_selectors = []
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
abis = abi.project_abis(project_dir)
reserved_selectors: Set[str] = set()
for facet in FACET_PRECEDENCE:
if facet == facet_name:
break
facet_abi = abis.get(facet, [])
for item in facet_abi:
if item["type"] == "function":
reserved_selectors.add(abi.encode_function_signature(item))
facet_function_selectors: List[str] = []
facet_abi = abis.get(facet_name, [])
for item in facet_abi:
if item["type"] == "function":
if item["name"] not in ignore_methods:
function_selector = abi.encode_function_signature(item)
if (
function_selector not in reserved_selectors
and function_selector not in ignore_selectors
):
facet_function_selectors.append(function_selector)
target_address = facet_address
if FACET_ACTIONS[action] == 2:
target_address = ZERO_ADDRESS
diamond_cut_action = [
target_address,
FACET_ACTIONS[action],
facet_function_selectors,
]
calldata = b""
if facet_name == "BottlerFacet":
if initializer_address != ZERO_ADDRESS and action != "remove":
bottler_initializer = BottlerInitializer.BottlerInitializer(
initializer_address
)
calldata = bottler_initializer.contract.init.encode_input()
diamond = DiamondCutFacet.DiamondCutFacet(diamond_address)
transaction = diamond.diamond_cut(
[diamond_cut_action], initializer_address, calldata, transaction_config
)
return transaction
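# Example call (hypothetical addresses; transaction_config is the usual brownie
# transaction dict, e.g. {"from": account}):
#   facet_cut(diamond_address, "OwnershipFacet", facet_address, "add", {"from": account})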
def gogogo(owner_address: str, transaction_config: Dict[str, Any]) -> Dict[str, Any]:
"""
Deploy diamond along with all its basic facets and attach those facets to the diamond.
Returns addresses of all the deployed contracts with the contract names as keys.
"""
result: Dict[str, Any] = {}
try:
diamond_cut_facet = DiamondCutFacet.DiamondCutFacet(None)
diamond_cut_facet.deploy(transaction_config)
except Exception as e:
print(e)
result["error"] = "Failed to deploy DiamondCutFacet"
return result
result["DiamondCutFacet"] = diamond_cut_facet.address
try:
diamond = Diamond.Diamond(None)
diamond.deploy(owner_address, diamond_cut_facet.address, transaction_config)
except Exception as e:
print(e)
result["error"] = "Failed to deploy Diamond"
return result
result["Diamond"] = diamond.address
try:
diamond_loupe_facet = DiamondLoupeFacet.DiamondLoupeFacet(None)
diamond_loupe_facet.deploy(transaction_config)
except Exception as e:
print(e)
result["error"] = "Failed to deploy DiamondLoupeFacet"
return result
result["DiamondLoupeFacet"] = diamond_loupe_facet.address
try:
ownership_facet = OwnershipFacet.OwnershipFacet(None)
ownership_facet.deploy(transaction_config)
except Exception as e:
print(e)
result["error"] = "Failed to deploy OwnershipFacet"
return result
result["OwnershipFacet"] = ownership_facet.address
try:
bottler_initializer = BottlerInitializer.BottlerInitializer(None)
bottler_initializer.deploy(transaction_config)
except Exception as e:
print(e)
result["error"] = "Failed to deploy BottlerInitializer"
return result
result["BottlerInitializer"] = bottler_initializer.address
try:
bottler_facet = BottlerFacet.BottlerFacet(None)
bottler_facet.deploy(transaction_config)
except Exception as e:
print(e)
result["error"] = "Failed to deploy BottlerFacet"
return result
result["BottlerFacet"] = bottler_facet.address
result["attached"] = []
try:
facet_cut(
diamond.address,
"DiamondLoupeFacet",
diamond_loupe_facet.address,
"add",
transaction_config,
)
except Exception as e:
print(e)
result["error"] = "Failed to attach DiamondLoupeFacet"
return result
result["attached"].append("DiamondLoupeFacet")
try:
facet_cut(
diamond.address,
"OwnershipFacet",
ownership_facet.address,
"add",
transaction_config,
)
except Exception as e:
print(e)
result["error"] = "Failed to attach OwnershipFacet"
return result
result["attached"].append("OwnershipFacet")
try:
facet_cut(
diamond.address,
"BottlerFacet",
bottler_facet.address,
"add",
transaction_config,
initializer_address=bottler_initializer.address,
)
except Exception as e:
print(e)
result["error"] = "Failed to attach BottlerFacet"
return result
result["attached"].append("BottlerFacet")
return result
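# On success, `result` maps each contract name ("DiamondCutFacet", "Diamond",
# "DiamondLoupeFacet", "OwnershipFacet", "BottlerInitializer", "BottlerFacet")
# to its deployed address plus an "attached" list of the facets cut into the
# diamond; on failure it carries an "error" message and any partial addresses.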
def release_the_kraken(
owner_address: str,
terminus_address: str,
unicorn_milk_address: str,
raw_full_bottle_prices: List[int],
transaction_config: Dict[str, Any],
) -> Dict[str, Any]:
result: Dict[str, Any] = {}
terminus = MockTerminus(terminus_address)
terminus_payment_token_address = terminus.payment_token()
terminus_payment_token = MockErc20(terminus_payment_token_address)
print("Checking if you have enough terminus payment tokens")
balance = terminus_payment_token.balance_of(owner_address)
print("Your terminus payment token balance is:", balance)
if balance < terminus.pool_base_price() * 6:
raise Exception("You don't have enough tokens to pay for the pools")
print("Running gogogo")
gogogo_result = gogogo(owner_address, transaction_config)
if "error" in gogogo_result:
print(gogogo_result)
print("Do you want to continue anyway? (y/n)")
if input() != "y":
raise Exception(gogogo_result["error"])
result["gogogo"] = gogogo_result
bottler = BottlerFacet.BottlerFacet(result["gogogo"]["Diamond"])
print("Creating terminus pools:")
current_id = terminus.total_pools() + 1
print("Approving the payment token for terminus")
terminus_payment_token.approve(terminus.address, 2 ** 256 - 1, transaction_config)
print("Creating pools:")
for i in range(1, 7):
terminus.create_pool_v1(2 ** 256 - 1, True, True, transaction_config)
result["pools"] = {
"empty": [current_id, current_id + 1, current_id + 2],
"full": [current_id + 3, current_id + 4, current_id + 5],
}
print("Transferring pools control to the bottler contract")
for pool_id in result["pools"]["empty"]:
terminus.set_pool_controller(
pool_id,
bottler.address,
transaction_config,
)
for pool_id in result["pools"]["full"]:
terminus.set_pool_controller(
pool_id,
bottler.address,
transaction_config,
)
bottler.set_up(unicorn_milk_address, terminus_address, transaction_config)
bottler.set_bottle_capacities([5000, 2000, 500], transaction_config)
bottler.set_empty_bottle_pool_ids(result["pools"]["empty"], transaction_config)
bottler.set_full_bottle_pool_ids(result["pools"]["full"], transaction_config)
full_bottle_prices = raw_full_bottle_prices
bottler.set_full_bottle_prices(full_bottle_prices, transaction_config)
return result
def handle_facet_cut(args: argparse.Namespace) -> None:
network.connect(args.network)
diamond_address = args.address
action = args.action
facet_name = args.facet_name
facet_address = args.facet_address
transaction_config = Diamond.get_transaction_config(args)
facet_cut(
diamond_address,
facet_name,
facet_address,
action,
transaction_config,
initializer_address=args.initializer_address,
ignore_methods=args.ignore_methods,
ignore_selectors=args.ignore_selectors,
)
def handle_gogogo(args: argparse.Namespace) -> None:
network.connect(args.network)
owner_address = args.owner
transaction_config = Diamond.get_transaction_config(args)
result = gogogo(owner_address, transaction_config)
if args.outfile is not None:
with args.outfile:
json.dump(result, args.outfile)
json.dump(result, sys.stdout, indent=4)
def handle_release_the_kraken(args: argparse.Namespace) -> None:
network.connect(args.network)
owner_address = args.owner
transaction_config = Diamond.get_transaction_config(args)
result = release_the_kraken(
owner_address,
args.terminus,
args.unicorn_milk,
args.full_bottle_prices,
transaction_config,
)
if args.outfile is not None:
with args.outfile:
json.dump(result, args.outfile)
json.dump(result, sys.stdout, indent=4)
def generate_cli() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="CLI to manage Moonstream DAO diamond contracts",
)
parser.set_defaults(func=lambda _: parser.print_help())
subcommands = parser.add_subparsers()
Diamond_parser = Diamond.generate_cli()
subcommands.add_parser("diamond", parents=[Diamond_parser], add_help=False)
facet_cut_parser = subcommands.add_parser("facet-cut")
Diamond.add_default_arguments(facet_cut_parser, transact=True)
facet_cut_parser.add_argument(
"--facet-name",
required=True,
choices=FACETS,
help="Name of facet to cut into or out of diamond",
)
facet_cut_parser.add_argument(
"--facet-address",
required=False,
default=ZERO_ADDRESS,
help=f"Address of deployed facet (default: {ZERO_ADDRESS})",
)
facet_cut_parser.add_argument(
"--action",
required=True,
choices=FACET_ACTIONS,
help="Diamond cut action to take on entire facet",
)
facet_cut_parser.add_argument(
"--initializer-address",
default=ZERO_ADDRESS,
help=f"Address of contract to run as initializer after cut (default: {ZERO_ADDRESS})",
)
facet_cut_parser.add_argument(
"--ignore-methods",
nargs="+",
help="Names of methods to ignore when cutting a facet onto or off of the diamond",
)
facet_cut_parser.add_argument(
"--ignore-selectors",
nargs="+",
help="Method selectors to ignore when cutting a facet onto or off of the diamond",
)
facet_cut_parser.set_defaults(func=handle_facet_cut)
gogogo_parser = subcommands.add_parser("gogogo")
Diamond.add_default_arguments(gogogo_parser, transact=True)
gogogo_parser.add_argument(
"--owner", required=True, help="Address of owner of diamond proxy"
)
gogogo_parser.add_argument(
"-o",
"--outfile",
type=argparse.FileType("w"),
default=None,
help="(Optional) file to write deployed addresses to",
)
gogogo_parser.set_defaults(func=handle_gogogo)
DiamondCutFacet_parser = DiamondCutFacet.generate_cli()
subcommands.add_parser(
"diamond-cut", parents=[DiamondCutFacet_parser], add_help=False
)
DiamondLoupeFacet_parser = DiamondLoupeFacet.generate_cli()
subcommands.add_parser(
"diamond-loupe", parents=[DiamondLoupeFacet_parser], add_help=False
)
OwnershipFacet_parser = OwnershipFacet.generate_cli()
subcommands.add_parser("ownership", parents=[OwnershipFacet_parser], add_help=False)
release_the_kraken_parser = subcommands.add_parser("release-the-kraken")
Diamond.add_default_arguments(release_the_kraken_parser, transact=True)
release_the_kraken_parser.add_argument(
"--owner", required=True, help="Address of owner of diamond proxy"
)
release_the_kraken_parser.add_argument(
"-o",
"--outfile",
type=argparse.FileType("w"),
default=None,
help="(Optional) file to write deployed addresses to",
)
release_the_kraken_parser.add_argument(
"--terminus",
required=True,
help="Address of terminus contract",
)
release_the_kraken_parser.add_argument(
"--unicorn-milk",
required=True,
help="Address of unicorn milk contract",
)
release_the_kraken_parser.add_argument(
"--full-bottle-prices",
nargs="+",
type=int,
required=True,
help="Full bottle prices",
)
release_the_kraken_parser.set_defaults(func=handle_release_the_kraken)
return parser
def main() -> None:
parser = generate_cli()
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
| 31.085106 | 102 | 0.665845 | 1,618 | 14,610 | 5.786156 | 0.146477 | 0.058107 | 0.022217 | 0.017304 | 0.356441 | 0.276223 | 0.239265 | 0.212027 | 0.189596 | 0.182119 | 0 | 0.007558 | 0.239288 | 14,610 | 469 | 103 | 31.151386 | 0.834803 | 0.025873 | 0 | 0.337662 | 0 | 0 | 0.153391 | 0.006274 | 0 | 0 | 0.002961 | 0 | 0.005195 | 1 | 0.020779 | false | 0 | 0.023377 | 0 | 0.077922 | 0.049351 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
658f044ff524eb679f5bdb84ef2ffd0506f1a311 | 4,445 | py | Python | Python_files/MackayTL.py | shuaib7860/Trophic-Analysis-Toolbox | dc5c9d4d1f4acb19889ea57b94dbb4e5b2c68cfb | [
"MIT"
] | 1 | 2021-05-20T13:48:07.000Z | 2021-05-20T13:48:07.000Z | Python_files/MackayTL.py | shuaib7860/Trophic-Analysis-Toolbox | dc5c9d4d1f4acb19889ea57b94dbb4e5b2c68cfb | [
"MIT"
] | null | null | null | Python_files/MackayTL.py | shuaib7860/Trophic-Analysis-Toolbox | dc5c9d4d1f4acb19889ea57b94dbb4e5b2c68cfb | [
"MIT"
] | null | null | null | from scipy.sparse.linalg import spsolve, cg
from scipy.sparse import diags, lil_matrix
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
# The functions below can handle sparse matrices, so they are a much more memory-efficient
# implementation of the previous functions and can handle much larger networks than before.
def get_exact_trophic_levels(G, weight=None):
G2 = G.to_undirected(reciprocal=False,as_view=True)
if nx.is_connected(G2):
B = nx.adj_matrix(G, weight=weight)
in_deg = B.sum(axis=0).A1
out_deg = B.sum(axis=1).A1
v = in_deg - out_deg
w = in_deg + out_deg
L = diags(w, 0) - (B + B.transpose())
        L[0, 0] = 0
h = spsolve(L, v)
h = h - h.min()
return h
else:
print('Network must be weakly connected')
def get_exact_trophic_coherence(G, weight=None):
    h = get_exact_trophic_levels(G, weight=weight)
B = nx.adj_matrix(G, weight=weight)
H = lil_matrix(B.shape, dtype=float)
for i, j in zip(B.nonzero()[0], B.nonzero()[1]):
H[i,j] = h[j] - h[i] - 1
H2 = (H.tocsr()).power(2)
F_0 = (B.multiply(H2)).sum() / B.sum()
return F_0, h
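# Hand-checkable example: for the chain graph 1 -> 2 -> 3, v = in - out = [-1, 0, 1]
# and w = in + out = [1, 2, 1]; solving gives h = [0, 1, 2], every edge satisfies
# h[j] - h[i] - 1 = 0, and so the trophic incoherence F_0 is exactly 0.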
# Using an iterative linear solver instead of spsolve is much more efficient but not as accurate in terms of the exact solution; the overall ranking is still correct
def get_trophic_levels(G, weight=None):
G2 = G.to_undirected(reciprocal=False,as_view=True)
if nx.is_connected(G2):
B = nx.adj_matrix(G, weight=weight)
in_deg = B.sum(axis=0).A1
out_deg = B.sum(axis=1).A1
v = in_deg - out_deg
w = in_deg + out_deg
L = diags(w, 0) - (B + B.transpose())
        L[0, 0] = 0
        h, info = cg(L, v)  # scipy's cg returns a (solution, info) pair
h = h - h.min()
return h
else:
print('Network must be weakly connected')
def get_trophic_coherence(G, weight=None):
h = get_trophic_levels(G, weight=weight)
B = nx.adj_matrix(G, weight=weight)
H = lil_matrix(B.shape, dtype=float)
for i, j in zip(B.nonzero()[0], B.nonzero()[1]):
H[i,j] = h[j] - h[i] - 1
H2 = (H.tocsr()).power(2)
F_0 = (B.multiply(H2)).sum() / B.sum()
return F_0, h
G_semi_coherent=nx.DiGraph()
G_semi_coherent.add_edges_from([(1,4),(1,5),(2,5),(3,5),(3,6),(4,7),(5,7),(5,8),(5,9),
(6,9),(1,7),(2,8),(3,9),(1,8),(2,7),(2,9)])
G_coherent=nx.DiGraph()
G_coherent.add_edges_from([(1,4),(1,5),(2,5),(3,5),(3,6),(4,7),(5,7),(5,8),(5,9),(6,9)])
G_incoherent=nx.DiGraph()
G_incoherent.add_edges_from([(1,2), (2,3),(3,1)])
# Calculating MacKay Trophic levels for coherent network and then visualising the network with the trophic level controlling the coordinates of the nodes in the plot
G=G_coherent
F,h=get_trophic_coherence(G)
pos = nx.spring_layout(G)
trophic_levels = {}
for i, node in enumerate(G.nodes):
trophic_levels[node] = h[i]
for node in G_coherent.nodes:
pos[node][1] = trophic_levels[node]
plt.figure(figsize = (5,5))
nx.draw(G, pos=pos);
nx.draw_networkx_labels(G, pos=pos);
print(f'F_0 coherent: {round(F,4)}')
print(f'h: {h}')
G=G_semi_coherent
F,h=get_trophic_coherence(G)
pos = nx.spring_layout(G)
trophic_levels = {}
for i, node in enumerate(G.nodes):
trophic_levels[node] = h[i]
for node in G.nodes:
pos[node][1] = trophic_levels[node]
plt.figure(figsize = (5,5))
nx.draw(G, pos=pos);
nx.draw_networkx_labels(G, pos=pos);
print(f'F_0 semi-coherent: {round(F,4)}')
print(f'h: {np.round(h, 3)}')
G=G_incoherent
F,h=get_trophic_coherence(G)
pos = nx.spring_layout(G)
plt.figure(figsize = (5,5))
nx.draw(G, pos=pos);
nx.draw_networkx_labels(G, pos=pos);
print(f'F_0 incoherent: {round(F,4)}')
print(f'h: {h}')
#Visualisation of graphs where h is the trophic levels
troph_positions = {}
for i in range (len(h)):
troph_positions[i]= [np.random.random(),h[i]]
pos= troph_positions
fig, ax = plt.subplots(figsize=(5, 10))
#networkx drawing call
nx.draw(G, pos, node_size=40, node_color='b', ax=ax)
# turn the axis on
ax.set_axis_on()
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
plt.ylabel('Trophic Level')
plt.xlabel('Trophic incoherence = ' + "{:.2f}".format(F))
ax.xaxis.set_major_formatter(plt.NullFormatter())
plt.xticks([], [])
| 26.939394 | 197 | 0.637345 | 775 | 4,445 | 3.530323 | 0.24129 | 0.057018 | 0.028509 | 0.02924 | 0.566886 | 0.565789 | 0.565789 | 0.54386 | 0.54386 | 0.54386 | 0 | 0.032532 | 0.204724 | 4,445 | 164 | 198 | 27.103659 | 0.741443 | 0.152531 | 0 | 0.650943 | 0 | 0 | 0.058025 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.04717 | 0 | 0.122642 | 0.075472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
658f1cbd54692481d065e281fe35d01257d47bf6 | 2,285 | py | Python | bayesfit/plot_Marginals.py | LunkRat/bayesfit | aaef3ba013e3ebaf596c2c26baf88b1112b5f73a | [
"Apache-2.0"
] | 44 | 2017-10-03T20:22:04.000Z | 2022-03-16T23:15:19.000Z | bayesfit/plot_Marginals.py | hoechenberger/bayesfit | cc76e474dfc402c81dd9a85f31ed886350c4f491 | [
"Apache-2.0"
] | 8 | 2018-09-24T16:57:36.000Z | 2021-09-22T18:24:13.000Z | bayesfit/plot_Marginals.py | hoechenberger/bayesfit | cc76e474dfc402c81dd9a85f31ed886350c4f491 | [
"Apache-2.0"
] | 9 | 2017-11-11T22:48:03.000Z | 2020-10-22T16:02:29.000Z | """
*******************************************************
*
* plot_marginals - PLOT MARGINALS
*
* License: Apache 2.0
* Written by: Michael Slugocki
* Created on: September 10, 2018
* Last updated: September 18, 2018
*
*******************************************************
"""
#################################################################
# IMPORT MODULES
#################################################################
import numpy as np
import matplotlib.pyplot as plt
#################################################################
# PLOT MARGINAL DISTRIBUTION
#################################################################
def plot_marginals(metrics):
"""Plots marginal distributions for each parameter of
the fitted model.
Keyword arguments:
metrics -- contain important metrics about fitted model (dictionary)
"""
# Generate basic plot of marginal distributions
fig, axes = plt.subplots(2, 2,
subplot_kw=dict(polar=False),
figsize = (7,6))
# Scale parameter
axes[0,0].set_xlabel('Scale')
axes[0,1].set_xlabel('Slope')
axes[1,0].set_xlabel('Gamma')
axes[1,1].set_xlabel('Lambda')
# Loop through and plot marginals that exist
counter = 0
idx = np.array([[0,0], [0,1], [1,0], [1,1]])
for keys in ['scale', 'slope', 'gamma', 'lambda']:
axes[idx[counter,0],idx[counter,1]].set_ylabel('Probability')
if metrics['Marginals'][keys] is not np.nan and metrics['Marginals'][keys].size > 1:
axes[idx[counter,0],idx[counter,1]].plot(metrics['Marginals_X'][keys],
metrics['Marginals'][keys],
lw=3,
color='#5998ff')
axes[idx[counter,0],idx[counter,1]].fill_between(metrics['Marginals_X'][keys],
metrics['Marginals'][keys], color='#5998ff', alpha = .4)
elif metrics['Marginals'][keys].size == 1:
axes[idx[counter,0],idx[counter,1]].text(0.5,0.5, "None",
horizontalalignment='center',
verticalalignment='center',
transform=axes[idx[counter,0],idx[counter,1]].transAxes)
# Update counter
counter += 1
plt.tight_layout()
plt.show()
| 37.459016 | 92 | 0.492341 | 237 | 2,285 | 4.696203 | 0.438819 | 0.089847 | 0.059299 | 0.067385 | 0.2354 | 0.2354 | 0.2354 | 0.091644 | 0.091644 | 0.091644 | 0 | 0.034949 | 0.223632 | 2,285 | 60 | 93 | 38.083333 | 0.592446 | 0.267834 | 0 | 0 | 0 | 0 | 0.10917 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65926c65b5e8d6760db46d5fb76dd06a2368c7d3 | 719 | py | Python | addons/osfstorage/settings/defaults.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | addons/osfstorage/settings/defaults.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | 18 | 2020-03-24T15:26:02.000Z | 2022-03-08T21:30:39.000Z | addons/osfstorage/settings/defaults.py | kounoAkihiro/SV-RDM-OSF | 76fb0c739f4cdabf03b5bfd2bc63d83b1c2d4796 | [
"Apache-2.0"
] | 1 | 2021-10-04T21:16:56.000Z | 2021-10-04T21:16:56.000Z | # encoding: utf-8
import importlib
import os
import logging
from website import settings
logger = logging.getLogger(__name__)
WATERBUTLER_CREDENTIALS = {
'storage': {}
}
WATERBUTLER_SETTINGS = {
'storage': {
'provider': 'filesystem',
'folder': os.path.join(settings.BASE_PATH, 'osfstoragecache'),
}
}
WATERBUTLER_RESOURCE = 'folder'
DISK_SAVING_MODE = settings.DISK_SAVING_MODE
try:
mod = importlib.import_module('.{}'.format(settings.MIGRATION_ENV), package='addons.osfstorage.settings')
globals().update({k: getattr(mod, k) for k in dir(mod)})
except Exception as ex:
    logger.warning('No migration settings loaded for OSFStorage, falling back to local dev. {}'.format(ex))
| 23.193548 | 109 | 0.710709 | 86 | 719 | 5.77907 | 0.639535 | 0.060362 | 0.056338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001658 | 0.161335 | 719 | 30 | 110 | 23.966667 | 0.822554 | 0.020862 | 0 | 0 | 0 | 0 | 0.230769 | 0.037037 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.238095 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6592b40854f5333124ca2d0056c96093e90ecafa | 5,719 | py | Python | ex23 - A - measure water (fastest way).py | neong83/algorithm_practices | 9a33854f47c24f8aa117c196493b21aeb4c11eb5 | [
"MIT"
] | null | null | null | ex23 - A - measure water (fastest way).py | neong83/algorithm_practices | 9a33854f47c24f8aa117c196493b21aeb4c11eb5 | [
"MIT"
] | null | null | null | ex23 - A - measure water (fastest way).py | neong83/algorithm_practices | 9a33854f47c24f8aa117c196493b21aeb4c11eb5 | [
"MIT"
] | null | null | null | """
There are 3 buckets available for user to measure water with, 3 L, 5 L, and 8 L.
None of them has any indicater / marks to tell current level of water inside the bucket.
We had filled up 8 L bucket with waters and would like split it equally between 2 people
"""
from dataclasses import dataclass
from collections import deque
class Bucket:
def __init__(self, capacity, water=0):
self.capacity = capacity
self.water = water
def __str__(self):
return f"{self.capacity}L bucket with {self.water} water"
def __eq__(self, other):
return self.capacity == other.capacity and self.water == other.water
def is_full(self):
return self.capacity == self.water
def has_water(self):
return self.water > 0
def remain_capacity(self):
return self.capacity - self.water
def add_water(self, water):
available_space = self.remain_capacity()
if available_space > 0:
self.water += water if available_space > water else available_space
def dump_water(self, water):
if self.water >= water:
self.water -= water
else:
self.water = 0
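
# Quick illustration (added comment; not in the original): add_water caps at
# capacity and dump_water floors at zero, e.g.
#
#     >>> b = Bucket(capacity=5)
#     >>> b.add_water(8)   # only 5 L fits
#     >>> b.water
#     5
#     >>> b.dump_water(7)  # cannot go below empty
#     >>> b.water
#     0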
@dataclass
class Action:
poll_from: int
add_to: int
amount_of_water: int
def __repr__(self):
return f"poll from: {self.poll_from}L, add to: {self.add_to}L, water amount: {self.amount_of_water}"
class BucketState:
    def __init__(self, actions: list = None):
        self.three_L_bucket = Bucket(capacity=3)
        self.five_L_bucket = Bucket(capacity=5)
        self.eight_L_bucket = Bucket(capacity=8, water=8)
        # avoid the shared-mutable-default pitfall of `actions=[]`
        self.actions = actions if actions is not None else []
def __iter__(self):
yield self.eight_L_bucket
yield self.five_L_bucket
yield self.three_L_bucket
def get_bucket_from_size(self, size):
if size == 3:
return self.three_L_bucket
elif size == 5:
return self.five_L_bucket
else:
return self.eight_L_bucket
def get_current_state(self):
return (
self.eight_L_bucket.water,
self.five_L_bucket.water,
self.three_L_bucket.water,
)
def resume_state(self):
for current_action in self.actions:
poll_from = self.get_bucket_from_size(current_action.poll_from)
add_to = self.get_bucket_from_size(current_action.add_to)
poll_from.dump_water(current_action.amount_of_water)
add_to.add_water(current_action.amount_of_water)
def is_final_state(self):
return (
self.eight_L_bucket.water == 4
and self.five_L_bucket.water == 4
and self.three_L_bucket.water == 0
)
def set_action(self, poll_from, add_to, amount_of_water):
self.actions.append(Action(poll_from, add_to, amount_of_water))
def take_action(self, poll_from_bucket_capacity: int, add_to_bucket_capacity: int):
poll_from = self.get_bucket_from_size(poll_from_bucket_capacity)
add_to = self.get_bucket_from_size(add_to_bucket_capacity)
if poll_from.has_water() and not add_to.is_full():
dump_water = (
add_to.remain_capacity()
if poll_from.water > add_to.remain_capacity()
else poll_from.water
)
add_to.add_water(dump_water)
poll_from.dump_water(dump_water)
self.set_action(
poll_from_bucket_capacity, add_to_bucket_capacity, dump_water
)
def explore_next_move(current_state: BucketState):
for poll_from_bucket in current_state:
for add_to_bucket in current_state:
if (
not poll_from_bucket == add_to_bucket
and poll_from_bucket.has_water()
and not add_to_bucket.is_full()
):
new_bucket_state = BucketState(list(current_state.actions))
new_bucket_state.resume_state()
new_bucket_state.take_action(
poll_from_bucket.capacity, add_to_bucket.capacity
)
yield new_bucket_state
def search_solution(bucket_state: BucketState):
visited_status = set()
queue = deque()
queue.append(bucket_state)
# trying to use BFS to find the shortest solution to handle this problem
while queue:
current_state = queue.popleft()
latest_status = current_state.get_current_state()
if latest_status not in visited_status:
for next_move in explore_next_move(current_state):
queue.append(next_move)
if next_move.is_final_state():
return next_move
visited_status.add(latest_status)
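
# Added note: every pour has equal cost, so breadth-first search over the
# (8L, 5L, 3L) water-level states finds a solution with the fewest pours;
# visited_status keeps already-expanded states from being revisited.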
def get_water_quantity_for_visual_validation(buckets, action):
def get_index_by_capacity(capacity):
switcher = {8: 0, 5: 1, 3: 2}
return switcher.get(capacity, "Invalid capacity")
poll_from = get_index_by_capacity(action.poll_from)
add_to = get_index_by_capacity(action.add_to)
buckets[poll_from] -= action.amount_of_water
buckets[add_to] += action.amount_of_water
if __name__ == "__main__":
bucket_state = BucketState()
solution = search_solution(bucket_state)
if solution:
visual_validation_bucket = [8, 0, 0]
print(f"water start with 8L, 5L, 3L -> '{visual_validation_bucket}'")
for action in solution.actions:
get_water_quality_for_visual_validation(visual_validation_bucket, action)
print(
f"poll {action.amount_of_water}L of water from '{action.poll_from}L bucket' to '{action.add_to}L bucket' -> {visual_validation_bucket}"
)
| 32.129213 | 151 | 0.647491 | 769 | 5,719 | 4.469441 | 0.163849 | 0.055863 | 0.034041 | 0.023276 | 0.274367 | 0.186209 | 0.122782 | 0.048298 | 0.027349 | 0 | 0 | 0.007479 | 0.275223 | 5,719 | 177 | 152 | 32.310734 | 0.821713 | 0.057877 | 0 | 0.031008 | 0 | 0.015504 | 0.065428 | 0.018773 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162791 | false | 0 | 0.015504 | 0.062016 | 0.325581 | 0.015504 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65990e56a42206e02e3c8e812fdea0fd3f8a87e5 | 1,337 | py | Python | pi_grpc_streaming/server.py | singmiya/RPC | 9b41fa512ad2d6f4aed56c1a714464daad050b8e | [
"MIT"
] | 1 | 2020-02-24T08:23:17.000Z | 2020-02-24T08:23:17.000Z | pi_grpc_streaming/server.py | singmiya/RPC | 9b41fa512ad2d6f4aed56c1a714464daad050b8e | [
"MIT"
] | null | null | null | pi_grpc_streaming/server.py | singmiya/RPC | 9b41fa512ad2d6f4aed56c1a714464daad050b8e | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import random
import math
import grpc
import time
from concurrent import futures
import pi_pb2_grpc
import pi_pb2
class PiCalculatorServicer(pi_pb2_grpc.PiCalculatorServicer):
def Calc(self, request_iterator, context):
        # request_iterator is an iterator; each item is one request in the stream
for request in request_iterator:
            # if request.n < 0:
            #     context.set_code(grpc.StatusCode.INVALID_ARGUMENT)  # invalid argument
            #     context.set_details('request number should be positive')  # detailed error description
            #     yield pi_pb2.PiResponse()
            # Respond with only ~50% probability:
            # if random.randint(0, 1) == 1:
            #     continue
s = 0.0
for i in range(request.n):
s += 1.0 / (2 * i + 1) / (2 * i + 1)
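            # Added note: the series sum over odd k of 1/k**2 converges to
            # pi**2 / 8, so sqrt(8 * s) below approaches pi as request.n grows.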
            # the response is a generator; one response is yielded per request
context.set_code(grpc.StatusCode.OK)
yield pi_pb2.PiResponse(n=i, value=math.sqrt(8 * s))
def main():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
servicer = PiCalculatorServicer()
pi_pb2_grpc.add_PiCalculatorServicer_to_server(servicer, server)
server.add_insecure_port('localhost:8080')
server.start()
try:
time.sleep(1000)
except KeyboardInterrupt:
        # Server.stop() requires a grace-period argument
        server.stop(0)
if __name__ == '__main__':
main()
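
# Hedged companion sketch (added; not part of the original file): a streaming
# client for the service above. It assumes the .proto defines a PiRequest
# message with an integer field `n` and a bidirectionally streaming Calc
# method, matching the servicer's use of request_iterator / yield.
#
#     def run_client():
#         channel = grpc.insecure_channel('localhost:8080')
#         stub = pi_pb2_grpc.PiCalculatorStub(channel)
#         requests = (pi_pb2.PiRequest(n=n) for n in (10, 100, 1000))
#         for response in stub.Calc(requests):
#             print(f'n={response.n} pi~={response.value}')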
| 27.854167 | 83 | 0.611818 | 156 | 1,337 | 5.057692 | 0.512821 | 0.038023 | 0.034221 | 0.073511 | 0.070976 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033508 | 0.285714 | 1,337 | 47 | 84 | 28.446809 | 0.79267 | 0.239342 | 0 | 0 | 0 | 0 | 0.021912 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.259259 | 0 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65997f7011f355b493b6c2e0985b511f9a718a36 | 4,604 | py | Python | temp_time_series/create_mock_index.py | pramodbiligiri/datahub | 892adbcf330a9c7c687a293dd3edeca9fa0e2fd8 | [
"Apache-2.0"
] | 3,586 | 2020-01-27T11:09:57.000Z | 2022-03-15T16:13:30.000Z | temp_time_series/create_mock_index.py | iamduyu/datahub | 4c33124e8f5582749877e30ac2b0c0c1bfa06f42 | [
"Apache-2.0"
] | 1,678 | 2020-01-27T20:51:01.000Z | 2022-03-15T15:22:02.000Z | temp_time_series/create_mock_index.py | iamduyu/datahub | 4c33124e8f5582749877e30ac2b0c0c1bfa06f42 | [
"Apache-2.0"
] | 924 | 2020-01-28T20:10:50.000Z | 2022-03-15T10:01:23.000Z | #!/usr/bin/env python3
import os
from datetime import datetime
from typing import Optional, Generator, Tuple
# import hashlib
HOUR_IN_MS = 3600000
DAY_IN_MS = 86400000
START_DAY_IN_MS = int(datetime.now().timestamp() * 1000) - 5 * DAY_IN_MS
CounterType = Optional[int]
NameType = Optional[str]
IndexRowType = Tuple[
NameType,
CounterType,
CounterType,
NameType,
CounterType,
CounterType,
CounterType,
CounterType,
CounterType,
CounterType,
]
def day(n: int) -> int:
return START_DAY_IN_MS + n * DAY_IN_MS
class MockIndexGenerator:
INDEX_NAME = "mock_dataset_stats_aspect_v1"
INDEX_FIELD_NAMES = [
"urn",
"rowCount",
"columnCount",
"columnStats.key",
"columnStats.numNull",
"eventTimestampMillis",
"eventGranularity",
"partitionSpec.parition",
"partitionSpec.timeWindow.startTimeMillis",
"partitionSpec.timeWindow.granulatiry",
]
INDEX_FIELD_TYPES = [
"keyword",
"long",
"long",
"keyword",
"long",
"date",
"long",
"keyword",
"date",
"long",
]
def __init__(self, start_days_in_ms, num_recs, num_cols):
self._start_days_in_ms = start_days_in_ms
self._num_recs = num_recs
self._num_cols = num_cols
self._stat_num_rows_start = 10000
self._stat_num_cols_start = 50
self._stat_num_nulls = 100
def _get_num_rows(self, i: int):
return self._stat_num_rows_start + (100 * i)
def _get_num_cols(self, i: int):
return self._stat_num_cols_start + i
def _get_num_nulls(self, i: int, c: int):
return self._stat_num_nulls + c + (10 * i)
def _get_event_time_ms(self, i: int):
return self._start_days_in_ms + (i * HOUR_IN_MS)
@staticmethod
def _get_index_row_json(row: IndexRowType) -> str:
return ",".join(
[
f'"{field}" : "{value}"'
for field, value in zip(MockIndexGenerator.INDEX_FIELD_NAMES, row)
if value is not None
]
)
def get_records(self) -> Generator[IndexRowType, None, None]:
for i in range(self._num_recs):
# emit one table record
yield self._get_index_row_json((
"table_1",
self._get_num_rows(i),
self._get_num_cols(i),
None,
None,
self._get_event_time_ms(i),
HOUR_IN_MS,
None,
None,
None)
)
# emit one record per column
for c in range(self._num_cols):
yield self._get_index_row_json((
f"table_1",
None,
None,
f"col_{c}",
self._get_num_nulls(i, c),
self._get_event_time_ms(i),
HOUR_IN_MS,
None,
None,
None)
)
@staticmethod
def get_props_json() -> str:
return ",".join(
[
f'"{field}" : {{ "type" : "{type}" }}'
for field, type in zip(
MockIndexGenerator.INDEX_FIELD_NAMES,
MockIndexGenerator.INDEX_FIELD_TYPES,
)
]
)
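
# Illustration (added): MockIndexGenerator.get_props_json() produces mapping
# entries such as
#     "urn" : { "type" : "keyword" }, "rowCount" : { "type" : "long" }
# which gen_index_schema() below splices into the index-creation payload.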
def gen_index_schema() -> None:
properties_json = MockIndexGenerator.get_props_json()
index_schema_gen_cmd = (
f"curl -v -XPUT http://localhost:9200/{MockIndexGenerator.INDEX_NAME} -H 'Content-Type: application/json' -d '"
+ """
{
"settings":{},
"mappings":{
"properties":{ """
+ f"{properties_json}"
+ """
}
}
}'"""
)
print(index_schema_gen_cmd)
os.system(index_schema_gen_cmd)
def populate_index_data() -> None:
for id, row in enumerate(
MockIndexGenerator(START_DAY_IN_MS, 100, 20).get_records()
):
# id = hashlib.md5(row.encode("utf-8")).hexdigest()
index_row_gen_command = (
f"curl -v -XPUT http://localhost:9200/{MockIndexGenerator.INDEX_NAME}/_doc/{id} "
+ "-H 'Content-Type: application/json' -d '{ "
+ f"{row}"
+ " }'"
)
print(index_row_gen_command)
os.system(index_row_gen_command)
def generate() -> None:
#gen_index_schema()
populate_index_data()
if __name__ == "__main__":
generate()
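
# Example follow-up (added; not part of the original script): once generate()
# has populated the index, documents can be spot-checked with a query such as
# the one below (host/port match the curl commands above):
#
#     curl -XGET 'http://localhost:9200/mock_dataset_stats_aspect_v1/_search?size=3&pretty' \
#          -H 'Content-Type: application/json' \
#          -d '{ "query": { "term": { "urn": "table_1" } } }'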
| 26.308571 | 119 | 0.535187 | 496 | 4,604 | 4.604839 | 0.272177 | 0.024518 | 0.018389 | 0.022767 | 0.294221 | 0.182137 | 0.103328 | 0.081436 | 0.081436 | 0.081436 | 0 | 0.018262 | 0.357732 | 4,604 | 174 | 120 | 26.45977 | 0.754143 | 0.033232 | 0 | 0.293706 | 0 | 0.006993 | 0.167379 | 0.028346 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083916 | false | 0 | 0.020979 | 0.048951 | 0.181818 | 0.013986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
659a1ba7704155cb39734758e4538c59e70d01ec | 2,400 | py | Python | resources/app/test.py | fraserdale/-Nike-order-tracker-win32-x64 | e3444918cee6d314402cf80038785a461cf050ed | [
"MIT"
] | 1 | 2018-07-01T10:52:33.000Z | 2018-07-01T10:52:33.000Z | resources/app/test.py | fraserdale/-Nike-order-tracker-win32-x64 | e3444918cee6d314402cf80038785a461cf050ed | [
"MIT"
] | null | null | null | resources/app/test.py | fraserdale/-Nike-order-tracker-win32-x64 | e3444918cee6d314402cf80038785a461cf050ed | [
"MIT"
] | 2 | 2018-05-23T06:48:48.000Z | 2018-07-01T10:52:34.000Z | from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
##############################################
orders = []
emails = []
locale = "gb"
##############################################
# strip() guards against a trailing newline breaking the region check below
locale = open(dir_path + "/locale.txt", "r").read().strip()
open(dir_path + "/output.txt","w").write("")
entries = open(dir_path + "/input.txt", "r").read().replace("\n", ":").split(":")
entries = [e for e in entries if e]  # drop empty tokens from trailing newlines
for x in range(0, len(entries), 2):
    orders.append(entries[x])
    emails.append(entries[x + 1])
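
# For reference (added note, inferred from the parsing above): input.txt holds
# one "order_number:email" pair per line, e.g. (hypothetical values)
#
#     C00123456789:buyer@example.com
#     C00987654321:other@example.com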
if locale.lower() == "gb":
url = "https://secure-store.nike.com/gb/en_gb/orders/?loginForm&orderId="
elif locale.lower() == "us":
url = "https://secure-store.nike.com/us/en_us/orders/?loginForm&orderId="
else:
print("region not correct. try again")
quit()
for i in range(len(orders)):
orderNo = orders[i]
email = emails[i]
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome(chrome_options=options)
driver.get(url + orderNo)
time.sleep(2)
driver.find_element_by_id('guestsEmail').send_keys(email)
buttons = driver.find_elements_by_xpath("//*[contains(text(), 'Submit')]")
for btn in buttons:
btn.click()
try:
WebDriverWait(driver, 3).until(
EC.visibility_of_element_located((By.CLASS_NAME, "status")))
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
time.sleep(1)
status = soup.find_all("div", class_="status")[0].text
status = status.strip('\n')
if status == "PREPARING TO SHIP":
edd = (((soup.find_all("div", class_="edd")[0]).text).strip())[-10:]
open(dir_path + "/output.txt","a").write("Order: " + orderNo + " | Status: " + status + " : Estimated Delivery: " + edd + "\n")
print("writing")
else:
open(dir_path + "/output.txt","a").write("Order: " + orderNo + " | Status: " + status + "\n")
print("writing")
    except TimeoutException:
        open(dir_path + "/output.txt","a").write("Not able to login, wrong email + orderID or wrong region. us or gb only")
    finally:
        # close each headless Chrome instance so they don't accumulate
        driver.quit()
| 34.782609 | 139 | 0.613333 | 304 | 2,400 | 4.736842 | 0.424342 | 0.034028 | 0.045833 | 0.047222 | 0.163889 | 0.123611 | 0.0875 | 0.069444 | 0.069444 | 0.069444 | 0 | 0.005621 | 0.184583 | 2,400 | 68 | 140 | 35.294118 | 0.730199 | 0 | 0 | 0.074074 | 0 | 0 | 0.20902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.148148 | 0 | 0.148148 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
659aa84c2e419e1b8c24bf99dfa92340a5977f5c | 237 | py | Python | atcoder/abc206C_swappable.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 31 | 2020-05-13T01:07:55.000Z | 2021-07-13T07:53:26.000Z | atcoder/abc206C_swappable.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 10 | 2020-05-20T07:22:09.000Z | 2021-07-19T03:52:13.000Z | atcoder/abc206C_swappable.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 14 | 2020-05-11T05:58:36.000Z | 2021-12-07T03:20:43.000Z | # uninhm
# https://atcoder.jp/contests/abc206/tasks/abc206_c
# implementation
from collections import Counter
n = int(input())
a = list(map(int, input().split()))
c = Counter(a)
ans = 0
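# For each element, count how many others hold a different value; every
# unordered pair is counted twice, hence the final division by 2.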
for i in a:
ans += (n - c[i])
print(ans//2)
| 15.8 | 51 | 0.649789 | 39 | 237 | 3.923077 | 0.692308 | 0.104575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040609 | 0.168776 | 237 | 14 | 52 | 16.928571 | 0.736041 | 0.299578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
659aee2562f07a9599374a85c36675f12a623e89 | 67,465 | py | Python | psyneulink/core/components/ports/parameterport.py | JeshuaT/PsyNeuLink | 912f691028e848659055430f37b6c15273c762f1 | [
"Apache-2.0"
] | null | null | null | psyneulink/core/components/ports/parameterport.py | JeshuaT/PsyNeuLink | 912f691028e848659055430f37b6c15273c762f1 | [
"Apache-2.0"
] | 101 | 2021-01-21T04:25:00.000Z | 2022-03-30T08:52:41.000Z | psyneulink/core/components/ports/parameterport.py | JeshuaT/PsyNeuLink | 912f691028e848659055430f37b6c15273c762f1 | [
"Apache-2.0"
] | null | null | null | # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ************************************** ParameterPort ******************************************************
"""
Contents
--------
* `ParameterPort_Overview`
* `ParameterPort_Creation`
- `ParameterPort_Specification`
* `ParameterPort_Structure`
* `ParameterPort_Execution`
* `ParameterPort_Class_Reference`
.. _ParameterPort_Overview:
Overview
--------
ParameterPorts belong to either a `Mechanism <Mechanism>` or a `Projection <Projection>`. A ParameterPort is created
to represent each `modulable parameter <ParameterPort_Modulable_Parameters>` of the `Mechanism
<Mechanism>` or `Projection <Projection>`, as well as those of the Component's `function <Component_Function>` and
any of its secondary functions (e.g. `TransferMechanism.integrator_function`). A
ParameterPort provides the current value of the parameter it represents during any relevant computations, and serves as
an interface for parameter modulation.
A ParameterPort can receive one or more `ControlProjections <ControlProjection>` and/or `LearningProjections
<LearningProjection>` that modify the value returned by the ParameterPort according to the ParameterPort's
`function <ParameterPort.function>`. The Projections received by a ParameterPort are listed in its `mod_afferents
<ParameterPort.mod_afferents>` attribute.
When the Mechanism or Projection to which a ParameterPort belongs executes, that component and its function use the
ParameterPort's value -- not the parameter attribute's value -- for any computation. A ParameterPort's corresponding
attribute on the Mechanism, Projection, or Function to which it belongs (i.e. MyTransferMech.function.gain),
stores the "base value" of that parameter. The base value of a parameter is the variable of the ParameterPort's
function. The base value can be viewed or changed at any time through this attribute.
The ParameterPort value is available on the ParameterPort itself, as well as the mod_name attribute of the Mechanism
or Projection to which it belongs (i.e. MyTransferMech.mod_gain would return the value of the "gain" ParameterPort
of the MyTransferMech mechanism.)
.. note::
Either of these options for looking up the value of the ParameterPort will return the ParameterPort value that
was used during the most recent execution. This means that if the value of MyTransferMech.function.gain (the
base value) is updated after execution #1, the base value will change immediately, but the ParameterPort value (and
MyTransferMech.mod_gain) will not be computed again until execution #2.
As a result, if either MyTransferMech.mod_gain or MyTransferMech.parameter_ports["gain"].value is viewed in between
execution #1 and execution #2, it will return the gain ParameterPort value that was used during execution 1.
.. _ParameterPort_Creation:
Creating a ParameterPort
-------------------------
ParameterPorts are created automatically when the `Mechanism <Mechanism>` or `Projection <Projection>` to which they
belong is created. The `owner <Port.owner>` of a ParameterPort must be a `Mechanism <Mechanism>` or `MappingProjection`
(the initialization of a ParameterPort cannot be `deferred <Port_Deferred_Initialization>`). One ParameterPort is
created for each modulable Parameter of its owner, as well as for each modulable Parameter of the owner's
`function <Component.function>` or secondary functions (modulable
Parameters of a Component
are listed in its `Parameters` class, and have the attribute
`modulable <Parameter.modulable>` set to True.)
Each ParameterPort is created using the value specified for the corresponding parameter, as described below.
The ParameterPorts for the parameters of a Mechanism or Projection are listed in its `parameter_ports
<Mechanism_Base.parameter_ports>` attribute.
COMMENT:
FOR DEVELOPERS: The instantiation of ParameterPorts for all of the `user_params` of a Component can be
suppressed if a *PARAMETER_PORTS* entry is included and set to `NotImplemented` in the
default of its class Parameters; the instantiation of a ParameterPort
for an individual parameter in user_params can be suppressed by including it in
exclude_from_parameter_ports for the class (or one of its parent classes)
(see LearningProjection and EVCControlMechanism for examples, and `note
<ParameterPorts_Suppression>` below for additional information about how
to suppress creation of a ParameterPort for individual parameters. This should be done
for any parameter than can take a value or a string that is a keyword as its specification
(i.e., of the arg for the parameter in the Component's constructor) but should not have a
ParameterPort (e.g., input_port and output_port), as otherwise the
specification will be interpreted as a numeric parameter (in the case of a value) or
a parameter of the keyword's type, a ParameterPort will be created, and then it's value,
rather than the parameter's actual value, will be returned when the parameter is accessed
using "dot notation" (this is because the getter for an attribute's property first checks
to see if there is a ParameterPort for that attribute and, if so, returns the value of the
ParameterPort).
COMMENT
.. _ParameterPort_Specification:
*Specifying Parameters*
~~~~~~~~~~~~~~~~~~~~~~~
Parameters can be specified in one of several places:
* In the **argument** of the constructor for the `Component <Component>` to which the parameter belongs
(see `Component_Structural_Attributes` for additional details).
..
* In a *parameter specification dictionary* assigned to the **params** argument in the constructor for the
Component to which the parameter belongs, or any place else the value of a parameter can be specified.
The entry for each parameter must use the name of the parameter (or a corresponding keyword) as its key,
and the parameter's specification as its value (see `examples <ParameterPort_Specification_Examples>` below).
Parameters for a Component's `function <Component.function>` can be specified in an entry with the key
*FUNCTION_PARAMS*, and a value that is itself a parameter specification dictionary containing an entry for
each of the function's parameters to be specified. When a value is assigned to a parameter in a specification
dictionary, it overrides any value assigned to the argument for the parameter in the Component's constructor.
..
* By direct assignment to the Component's attribute for the parameter
(see `below <ParameterPort_Modulable_Parameters>`).
..
* In the **runtime_params** argument of a call to a Composition's `Run` method
.. _ParameterPort_Value_Specification:
The specification of the initial value of a parameter can take any of the following forms:
.. _ParameterPort_Value_Assignment:
* **Value** -- this must be a valid value for the parameter. It creates a default ParameterPort,
assigns the parameter's default value as the ParameterPort's `value <ParameterPort.value>`,
and assigns the parameter's name as the name of the ParameterPort.
* **ParameterPort reference** -- this must refer to an existing **ParameterPort** object; its name must be the
name of a parameter of the owner or of the owner's `function <Component.function>`, and its value must be a valid
one for the parameter.
.. note::
This capability is provided for generality and potential
future use, but its current use is not advised.
.. _ParameterPort_Modulatory_Specification:
* **Modulatory specification** -- this can be an existing `ControlSignal` or `ControlProjection`,
a `LearningSignal` or `LearningProjection`, a constructor or the class name for any of these, or the
keywords *CONTROL*, *CONTROL_PROJECTION*, *LEARNING*, or *LEARNING_PROJECTION*. Any of these create a default
ParameterPort, assign the parameter's default value as the ParameterPort's `value <ParameterPort.value>`,
and assign the parameter's name as the name of the ParameterPort. They also create and/or assign the
corresponding ModulatorySignal and ModulatoryProjection, and assign the ParameterPort as the
ModulatoryProjection's `receiver <Projection_Base.receiver>`. If the ModulatorySignal and/or
ModulatoryProjection already exist, their value(s) must be valid one(s) for the parameter. Note that only
Control and Learning Modulatory components can be assigned to a ParameterPort (Gating components cannot be
used -- they can only be assigned to `InputPorts <InputPort>` and `OutputPorts <OutputPort>`).
.. _ParameterPort_Tuple_Specification:
* **2-item tuple:** *(<value>, <Modulatory specification>)* -- this creates a default ParameterPort, uses the value
specification (1st item) as parameter's `value assignment <ParameterPort_Value_Assignment>`, and assigns the
parameter's name as the name of the ParameterPort. The Modulatory specification (2nd item) is used as the
ParameterPort's `modulatory assignment <ParameterPort_Modulatory_Specification>`, and the ParameterPort
is assigned as the `receiver <Projection_Base.receiver>` for the corresponding `ModulatoryProjection
<ModulatoryProjection>`.
.. note::
Currently, the `function <Component.function>` of a Component, although it can be specified as a parameter
value, cannot be assigned a `ModulatorySignal <ModulatorySignal>` or modified in the **runtime_params**
argument of a call to a Mechanism's `execute <Mechanism_Base.execute>` method. This may change in the future.
The value specified for a parameter (either explicitly or by default) is assigned to an attribute of the Component or
of the Component's `function <Mechanism_Base.function>` to which the parameter belongs. The attribute has the same
name as the parameter, and can be referenced using standard Python attribute ("dot") notation; for example, the value
of a parameter named *param* is assigned to an attribute named ``param`` that can be referenced as
``my_component.param``). The parameter's value is assigned as the **default value** for the ParameterPort.
.. _ParameterPorts_Suppression:
.. note::
If the value of a parameter is specified as `NotImplemented`, or any non-numeric value that is not one of those
listed above, then no ParameterPort is created and the parameter cannot be modified by a `ModulatorySignal
<ModulatorySignal>` or in the **runtime_params** argument of a call to a Mechanism's `execute
<Mechanism_Base.execute>` method.
.. _ParameterPort_Specification_Examples:
*Examples*
~~~~~~~~~~
In the following example, a Mechanism is created by specifying two of its parameters, as well as its
`function <Component.function>` and two of that function's parameters, each using a different specification format::
>>> import psyneulink as pnl
>>> my_mechanism = pnl.RecurrentTransferMechanism(
... size=5,
... noise=pnl.ControlSignal(),
... function=pnl.Logistic(
... gain=(0.5, pnl.ControlSignal),
... bias=(1.0, pnl.ControlSignal(modulation=pnl.ADDITIVE))))
COMMENT:
If assigning a default ControlSignal makes the noise value the same as the
default noise value, why are we using a ControlSignal here??
COMMENT
The first argument of the constructor for the Mechanism specifies its `size <Component.size>` parameter by
directly assigning a value to it. The second specifies the `noise <RecurrentTransferMechanism.noise>` parameter
by assigning a default `ControlSignal`; this will use the default value of the
`noise <RecurrentTransferMechanism.noise>` attribute. The **function** argument is specified using the constructor for
a `Logistic` function, that specifies two of its parameters. The `gain <Logistic.gain>` parameter
is specified using a tuple, the first item of which is the value to be assigned, and the second specifies
a default `ControlSignal`. The `bias <Logistic.bias>` parameter is also specified using a tuple,
in this case with a constructor for the ControlSignal that specifies its `modulation <ModulatorySignal.modulation>`
parameter.
In the following example, a `MappingProjection` is created, and its
`matrix <MappingProjection.MappingProjection.matrix>` parameter is assigned a random weight matrix (using a
`matrix keyword <Matrix_Keywords>`) and `LearningSignal`::
>>> my_input_mechanism = pnl.TransferMechanism()
>>> my_output_mechanism = pnl.TransferMechanism()
>>> my_mapping_projection = pnl.MappingProjection(sender=my_input_mechanism,
... receiver=my_output_mechanism,
... matrix=(pnl.RANDOM_CONNECTIVITY_MATRIX,
... pnl.LearningSignal))
.. note::
The `matrix <MappingProjection.MappingProjection.matrix>` parameter belongs to the MappingProjection's `function
<Projection_Base.function>`; however, since it has only one standard function, its arguments are available in the
constructor for the Projection (see `here <User_Modifiable_Parameters>` for a more detailed explanation).
The example below shows how to specify the parameters in the first example using a parameter specification dictionary::
>>> my_mechanism = pnl.RecurrentTransferMechanism(
... noise=5,
... params={pnl.NOISE: pnl.CONTROL,
... pnl.FUNCTION: pnl.Logistic,
... pnl.FUNCTION_PARAMS:{
... pnl.GAIN:(0.5,pnl.ControlSignal),
... pnl.BIAS:(1.0,pnl.ControlSignal(modulation=pnl.ADDITIVE))}})
There are several things to note here.
First, the parameter specification dictionary must be assigned to the **params** argument of the constructor. Note that
if the parameter is specified in a parameter specification dictionary, the key for the parameter must be a string that
is the same as the name of parameter (i.e., identical to how it appears as an arg in the constructor; as is shown
for **noise** in the example), or using a keyword that resolves to such a string (as shown for *NOISE* in the
example).
Second, both methods for specifying a parameter -- directly in an argument for the parameter, or in an entry of a
parameter specification dictionary -- can be used within the same constructor.
If a particular parameter is specified in both ways (as is the case for **noise** in the example), the value in the
parameter specification dictionary takes priority (i.e., it is the value that will be assigned to the parameter).
Finally, the keyword *FUNCTION_PARAMS* can be used in a parameter specification dictionary to specify
parameters of the Component's `function <Component.function>`, as shown for the **gain** and **bias** parameters of
the Logistic function in the example.
The example below shows how to access ParameterPort values vs base values, and demonstrates their differences:
>>> my_transfer_mechanism = pnl.TransferMechanism(
... noise=5.0,
... function=pnl.Linear(slope=2.0))
>>> assert my_transfer_mechanism.noise.base == 5.0
>>> assert my_transfer_mechanism.mod_noise == [5.0]
>>> assert my_transfer_mechanism.function.slope.base == 2.0
>>> assert my_transfer_mechanism.mod_slope == [2.0]
Notice that the noise attribute, which stores the base value for the noise ParameterPort of my_transfer_mechanism, is
on my_transfer_mechanism, while the slope attribute, which stores the base value for the slope ParameterPort of
my_transfer_mechanism, is on my_transfer_mechanism's function. However, mod_noise and mod_slope are both properties on
my_transfer_mechanism.
>>> my_transfer_mechanism.noise.base = 4.0
>>> my_transfer_mechanism.function.slope.base = 1.0
>>> assert my_transfer_mechanism.noise.base == 4.0
>>> assert my_transfer_mechanism.mod_noise == [5.0]
>>> assert my_transfer_mechanism.function.slope.base == 1.0
>>> assert my_transfer_mechanism.mod_slope == [2.0]
When the base values of noise and slope are updated, we can inspect these attributes immediately and observe that they
have changed. We do not observe a change in mod_noise or mod_slope because the ParameterPort value will not update
until the mechanism executes.
>>> my_transfer_mechanism.execute([10.0])
array([[14.]])
>>> assert my_transfer_mechanism.noise.base == 4.0
>>> assert my_transfer_mechanism.mod_noise == [4.0]
>>> assert my_transfer_mechanism.function.slope.base == 1.0
>>> assert my_transfer_mechanism.mod_slope == 1.0
Now that the mechanism has executed, we can see that each ParameterPort evaluated its function with the base value,
producing a modulated noise value of 4.0 and a modulated slope value of 1.0. These values were used by
my_transfer_mechanism and its Linear function when the mechanism executed.
.. _ParameterPort_Structure:
Structure
---------
Every ParameterPort is owned by a `Mechanism <Mechanism>` or `MappingProjection`. It can receive one or more
`ControlProjections <ControlProjection>` or `LearningProjections <LearningProjection>`, that are listed in its
`mod_afferents <ParameterPort.mod_afferents>` attribute. A ParameterPort cannot receive
`PathwayProjections <PathwayProjection>` or `GatingProjections <GatingProjection>`. When the ParameterPort is
updated (i.e., its owner is executed), it uses the values of its ControlProjections and LearningProjections to
determine whether and how to modify its parameter's attribute value, which is then assigned as the ParameterPort's
`value <ParameterPort.value>` (see `ParameterPort_Execution` for addition details). ParameterPorts have the
following core attributes:
* `variable <ParameterPort.variable>` - the parameter's attribute value; that is, the value assigned to the
attribute for the parameter of the ParameterPort's owner; it can be thought of as the parameter's "base" value.
It is used by its `function <ParameterPort.function>` to determine the ParameterPort's
`value <ParameterPort.value>`. It must match the format (the number and type of elements) of the parameter's
attribute value.
* `mod_afferents <ParameterPort.mod_afferents>` - lists the `ModulatoryProjections <ModulationProjection>` received
by the ParameterPort. These specify either modify the ParameterPort's `function <ParameterPort.function>`, or
directly assign the `value <ParameterPort.value>` of the ParameterPort itself (see `ModulatorySignals_Modulation`).
* `function <ParameterPort.function>` - takes the parameter's attribute value as its input, modifies it under the
influence of any `ModulatoryProjections` it receives (listed in `mod_afferents <ParameterPort.mod_afferents>`,
and assigns the result as the ParameterPort's `value <ParameterPort.value>` which is used as the parameter's
"actual" value.
* `value <ParameterPort.value>` - the result of `function <ParameterPort.function>`; used by the ParameterPort's
owner as the value of the parameter for which the ParameterPort is responsible.
.. _ParameterPort_Modulable_Parameters:
All of the modulable parameters of a Component -- that is, for which it has ParameterPorts --
are listed in its `Parameters` class, and have the attribute
`modulable <Parameter.modulable>` set to True. The
ParameterPorts for a Mechanism or Projection are listed in its `parameter_ports <Mechanism_Base.parameter_ports>`
attribute, which is also read-only.
An initial value can be assigned to a parameter in the corresponding argument of the constructor for the Component
(see `above <ParameterPort_Value_Specification>`. Parameter values can also be modified by a assigning a value to
the corresponding attribute.
The parameters of a Component's function can be modified by assigning a value to the corresponding attribute of the
Component's `function <Component.function>` attribute (e.g., ``myMechanism.function.my_parameter``).
See `Mechanism_ParameterPorts` for additional information.
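
For example (an illustrative sketch added here; it assumes a `TransferMechanism` using its default `Linear`
function, whose `slope` parameter is modulable)::

    >>> my_mech = pnl.TransferMechanism()
    >>> 'slope' in my_mech.parameter_ports.names
    True
    >>> my_mech.function.slope.base = 3.0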
.. _ParameterPort_Execution:
Execution
---------
A ParameterPort cannot be executed directly. It is executed when the Component to which it belongs is executed.
When this occurs, the ParameterPort executes any `ModulatoryProjections` it receives, the values of which
modulate parameters of the ParameterPort's `function <ParameterPort.function>`. The ParameterPort then calls
its `function <ParameterPort.function>` and the result is assigned as its `value <ParameterPort.value>`. The
ParameterPort's `value <ParameterPort.value>` is used as the value of the corresponding parameter by the Component,
or by its own `function <Component.function>`.
.. note::
It is important to note the distinction between the `function <ParameterPort.function>` of a ParameterPort,
and the `function <Component.function>` of the Component to which it belongs. The former is used to determine the
value of a parameter used by the latter (see `figure <ModulatorySignal_Anatomy_Figure>`, and `Port_Execution` for
additional details).
.. _ParameterPort_Class_Reference:
Class Reference
---------------
"""
import collections
import inspect
import operator
import types
import warnings
from copy import deepcopy
import numpy as np
import typecheck as tc
from psyneulink.core.components.component import Component, parameter_keywords
from psyneulink.core.components.functions.function import get_param_value_for_keyword
from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import ModulatorySignal
from psyneulink.core.components.ports.port import PortError, Port_Base, _instantiate_port, port_type_keywords
from psyneulink.core.components.shellclasses import Mechanism, Projection, Function
from psyneulink.core.globals.context import ContextFlags
from psyneulink.core.globals.keywords import \
CONTEXT, CONTROL_PROJECTION, CONTROL_SIGNAL, CONTROL_SIGNALS, FUNCTION, FUNCTION_PARAMS, \
LEARNING_SIGNAL, LEARNING_SIGNALS, MECHANISM, NAME, PARAMETER_PORT, PARAMETER_PORT_PARAMS, PATHWAY_PROJECTION, \
PROJECTION, PROJECTIONS, PROJECTION_TYPE, REFERENCE_VALUE, SENDER, VALUE
from psyneulink.core.globals.parameters import ParameterBase, ParameterAlias, SharedParameter
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.core.globals.utilities \
import ContentAddressableList, ReadOnlyOrderedDict, is_iterable, is_numeric, is_value_spec, iscompatible, \
is_instance_or_subclass, UtilitiesError, gen_friendly_comma_str
__all__ = [
'ParameterPort', 'ParameterPortError', 'port_type_keywords',
]
# note: set.update() returns None, so the result must not be re-assigned
port_type_keywords = port_type_keywords | {PARAMETER_PORT}
class ParameterPortList(ContentAddressableList):
separator = '-'
legal_key_type_strings = ContentAddressableList.legal_key_type_strings + ['Parameter']
_owner_port_suffix = 'self'
def __init__(
self,
component_type,
key=None,
list=None,
name=None,
owner=None,
**kwargs
):
# cache, Parameter keys added when creating Ports, others upon lookup
self.parameter_mapping = {}
self.owner = owner
super().__init__(component_type, key, list, name, **kwargs)
def __contains__(self, item):
try:
return super().__contains__(item)
except ParameterPortError:
return False
def __getitem__(self, key):
try:
return self.parameter_mapping[key]
except KeyError:
pass
try:
return super().__getitem__(key)
except TypeError as e:
# ContentAddressableList throws TypeError when key/index lookup fails
names = self._get_possible_port_names(key)
possible_ports = set()
for name in names:
try:
r = super().__getitem__(name)
possible_ports.add(r)
except TypeError:
pass
if len(possible_ports) == 0:
raise e from None
elif len(possible_ports) == 1:
res = next(iter(possible_ports))
else:
raise ParameterPortError(
f'Multiple ParameterPorts for {key} exist. Did you want'
f' {gen_friendly_comma_str(sorted([p.name for p in possible_ports]))}?'
) from None
except UtilitiesError as e:
# ContentAddressableList throws UtilitiesError if key is not an int
# or string. handle only Parameter key here
if not isinstance(key, ParameterBase):
raise e from None
try:
final_source = key.final_source
except AttributeError:
final_source = key
try:
res = self.parameter_mapping[final_source]
except KeyError:
try:
raise ParameterPortError(
f'No ParameterPort corresponds to {key._owner._owner}'
f'.parameters.{key.name}'
) from None
except AttributeError:
raise e from None
if res is not None:
self.parameter_mapping[key] = res
return res
def __delitem__(self, key):
main_port = self[key]
rem_mapping_keys = set()
for m, port in self.parameter_mapping.items():
if port is main_port:
rem_mapping_keys.add(m)
for m in rem_mapping_keys:
del self.parameter_mapping[m]
del self.data[self.data.index(main_port)]
def _get_possible_port_names(self, param_name):
"""
Returns:
a list of possible parameter port names to check if
*param_name* is actually an alias or alias-with-suffix
(e.g. "leak" is an alias of "integration_rate", and
"leak__integrator_function" should refer to
"integration_rate__integrator_function")
"""
unsuffixed_name = ParameterPortList._get_base_name(param_name)
if unsuffixed_name == param_name:
# all possible function-suffixed names
names = sorted(
[
p.name for p in self.owner.parameters
if is_instance_or_subclass(p.default_value, Function)
]
+ [self._owner_port_suffix]
)
# put 'function' at beginning
try:
function_index = names.index(FUNCTION)
names = (
[names[function_index]]
+ names[0:function_index]
+ names[function_index + 1:]
)
except ValueError:
pass
names = [self._get_explicit_name(param_name, name) for name in names]
else:
names = []
# try to get a Parameter that corresponds to param_name, which
# can have a "shared parameter suffix" that disambiguates which
# desired port it refers to if there are multiple
try:
param = getattr(self.owner.parameters, param_name)
except AttributeError:
try:
param = getattr(self.owner.parameters, unsuffixed_name)
except AttributeError:
return names
# if it's a shared parameter with identical name, there are no
# other aliases we need to add
try:
source_name = param.source.name
except AttributeError:
return names
if source_name != param.name:
if unsuffixed_name == param_name:
# basic alias, e.g. "leak" -> "integration_rate"
names.append(source_name)
else:
# alias with suffix, e.g. "leak__function"
# -> "integration_rate__function"
suffix = ParameterPortList._get_suffix(param_name)
names.append(
ParameterPortList._get_explicit_name(source_name, suffix)
)
if isinstance(param, ParameterAlias):
# alias to another alias or a shared parameter
# e.g. leak -> integration_rate -> rate
names.extend(self._get_possible_port_names(source_name))
else:
# e.g. integration_rate__integrator_function
# -> rate__integrator_function
names.append(
ParameterPortList._get_explicit_name(
source_name,
param.attribute_name
)
)
return names
@classmethod
def _get_explicit_name(cls, port_name, parameter_name=None):
return f'{port_name}{cls.separator}{parameter_name}'
@classmethod
def _get_base_name(cls, explicit_name):
try:
return explicit_name.split(cls.separator)[0]
except IndexError:
return explicit_name
@classmethod
def _get_suffix(cls, explicit_name):
try:
return explicit_name.split(cls.separator)[1]
except IndexError:
return ''
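
    # Added note: with separator '-', _get_explicit_name('integration_rate',
    # 'integrator_function') returns 'integration_rate-integrator_function';
    # _get_base_name and _get_suffix recover the two halves of such a name.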
class ParameterPortError(Exception):
def __init__(self, error_value):
self.error_value = error_value
def __str__(self):
return repr(self.error_value)
class ParameterPort(Port_Base):
"""
    ParameterPort(                                    \
        owner,                                        \
        reference_value=None,                         \
        function=LinearCombination(operation=PRODUCT))

Subclass of `Port <Port>` that represents and possibly modifies the parameter of a `Mechanism <Mechanism>`,
`Projection <Projection>`, or its `Function`. See `Port_Class_Reference` for additional arguments and attributes.
COMMENT:
PortRegistry
-------------
All ParameterPorts are registered in PortRegistry, which maintains an entry for the subclass,
a count for all instances of it, and a dictionary of those instances
COMMENT
Arguments
---------
owner : Mechanism or MappingProjection
the `Mechanism <Mechanism>` or `MappingProjection` to which to which the ParameterPort belongs; it must be
specified or determinable from the context in which the ParameterPort is created (the initialization of a
ParameterPort cannot be `deferred <Port_Deferred_Initialization>`. The owner of a ParameterPort
for the parameter of a `function <Component.function>` should be specified as the Mechanism or Projection to
which the function belongs.
reference_value : number, list or np.ndarray
specifies the default value of the parameter for which the ParameterPort is responsible.
variable : number, list or np.ndarray
specifies the parameter's initial value and attribute value — that is, the value of the attribute of the
ParameterPort's owner or its `function <Component.function>` assigned to the parameter.
function : Function or method : default LinearCombination(operation=SUM)
specifies the function used to convert the parameter's attribute value (same as the ParameterPort's
`variable <ParameterPort.variable>`) to the ParameterPort's `value <ParameterPort.value>`.
Attributes
----------
mod_afferents : Optional[List[Projection]]
a list of the `ModulatoryProjection <ModulatoryProjection>` that project to the ParameterPort (i.e.,
for which it is a `receiver <Projection_Base.receiver>`); these can be `ControlProjection(s)
<ControlProjection>` and/or `LearningProjection(s) <LearningProjection>`, but not `GatingProjection
<GatingProjection>`. The `value <ModulatoryProjection_Base.value>` of each must match the format
(number and types of elements) of the ParameterPort's `variable <ParameterPort.variable>`.
variable : number, list or np.ndarray
the parameter's attribute value — that is, the value of the attribute of the
ParameterPort's owner or its `function <Component.function>` assigned to the parameter.
function : Function : default Linear
converts the parameter's attribute value (same as the ParameterPort's `variable <ParameterPort.variable>`)
to the ParameterPort's `value <ParameterPort.value>`, under the influence of any
`ModulatoryProjections <ModulatoryProjection>` received by the ParameterPort (and listed in its
`mod_afferents <ParameterPort.mod_afferents>` attribute. The result is assigned as the ParameterPort's
`value <ParameterPort>`.
value : number, List[number] or np.ndarray
the result returned by the ParameterPort's `function <ParameterPort.function>`, and used by the
ParameterPort's owner or its `function <Component.function>` as the value of the parameter for which the
        ParameterPort is responsible. Note that this is not necessarily the same as the parameter's attribute value
(that is, the value of the owner's attribute for the parameter), since the ParameterPort's
`function <ParameterPort.function>` may modify the latter under the influence of its
`mod_afferents <ParameterPort.mod_afferents>`.
"""
#region CLASS ATTRIBUTES
componentType = PARAMETER_PORT
paramsType = PARAMETER_PORT_PARAMS
portAttributes = Port_Base.portAttributes
connectsWith = [CONTROL_SIGNAL, LEARNING_SIGNAL]
connectsWithAttribute = [CONTROL_SIGNALS, LEARNING_SIGNALS]
projectionSocket = SENDER
modulators = [CONTROL_SIGNAL, LEARNING_SIGNAL]
canReceive = modulators
projection_type = CONTROL_PROJECTION
classPreferenceLevel = PreferenceLevel.TYPE
# Any preferences specified below will override those specified in TYPE_DEFAULT_PREFERENCES
# Note: only need to specify setting; level will be assigned to TYPE automatically
# classPreferences = {
# PREFERENCE_SET_NAME: 'ParameterPortCustomClassPreferences',
# PREFERENCE_KEYWORD<pref>: <setting>...}
#endregion
    @tc.typecheck
def __init__(self,
owner,
reference_value=None,
variable=None,
size=None,
function=None,
projections=None,
params=None,
name=None,
parameter_name=None,
prefs:is_pref_set=None,
**kwargs):
# If context is not COMPONENT or CONSTRUCTOR, raise exception
context = kwargs.pop(CONTEXT, None)
if context is None:
raise ParameterPortError(f"Contructor for {self.__class__.__name__} cannot be called directly"
f"(context: {context}")
# FIX: UPDATED TO INCLUDE LEARNING [CHANGE THIS TO INTEGRATOR FUNCTION??]
# # Reassign default for MATRIX param of MappingProjection
# if isinstance(owner, MappingProjection) and name is MATRIX:
# function = LinearCombination(operation=SUM)
self.reference_value = reference_value
# Validate sender (as variable) and params
# Note: pass name of Mechanism (to override assignment of componentName in super.__init__)
super(ParameterPort, self).__init__(owner,
variable=variable,
size=size,
projections=projections,
function=function,
params=params,
name=name,
prefs=prefs,
context=context)
def _validate_against_reference_value(self, reference_value):
"""Validate that value of the Port is compatible with the reference_value
reference_value is the value of the parameter to which the ParameterPort is assigned
"""
if reference_value is not None and not iscompatible(np.squeeze(reference_value), np.squeeze(self.defaults.value)):
name = self.name or ""
raise ParameterPortError("Value specified for {} {} of {} ({}) is not compatible "
"with its expected format ({})".
format(name, self.componentName, self.owner.name, self.defaults.value, reference_value))
def _instantiate_projections(self, projections, context=None):
"""Instantiate Projections specified in PROJECTIONS entry of params arg of Port's constructor
Disallow any PathwayProjections
Call _instantiate_projections_to_port to assign ModulatoryProjections to .mod_afferents
"""
# MODIFIED 7/8/17
# FIX: THIS SHOULD ALSO LOOK FOR OTHER FORMS OF SPECIFICATION
# FIX: OF A PathwayProjection (E.G., TARGET PORT OR MECHANISM)
from psyneulink.core.components.projections.pathway.pathwayprojection import PathwayProjection_Base
pathway_projections = [proj for proj in projections if isinstance(proj, PathwayProjection_Base)]
if pathway_projections:
pathway_proj_names = []
for proj in pathway_projections:
pathway_proj_names.append(proj.name + ' ')
raise PortError("{} not allowed for {}: {}".
format(PathwayProjection_Base.__self__.__name__,
self.__class__.__name__,
pathway_proj_names))
self._instantiate_projections_to_port(projections=projections, context=context)
def _check_for_duplicate_projections(self, projection):
"""Check if projection is redundant with one in mod_afferents of ParameterPort
Check for any instantiated projection in mod_afferents with the same sender as projection
or one in deferred_init status with sender specification that is the same type as projection.
Returns redundant Projection if found, otherwise False.
"""
duplicate = next(iter([proj for proj in self.mod_afferents
if ((proj.sender == projection.sender and proj != projection)
or (proj.initialization_status == ContextFlags.DEFERRED_INIT
and proj._init_args[SENDER] == type(projection.sender)))]), None)
        if duplicate and (self.verbosePref or self.owner.verbosePref):
from psyneulink.core.components.projections.projection import Projection
warnings.warn(f'{Projection.__name__} from {projection.sender.name} {projection.sender.__class__.__name__}'
f' of {projection.sender.owner.name} to {self.name} {self.__class__.__name__} of '
f'{self.owner.name} already exists; will ignore additional one specified ({projection.name}).')
return duplicate
@tc.typecheck
def _parse_port_specific_specs(self, owner, port_dict, port_specific_spec):
"""Get connections specified in a ParameterPort specification tuple
Tuple specification can be:
(port_spec, projections)
Assumes that port_spec has already been extracted and used by _parse_port_spec
Returns params dict with PROJECTIONS entries if any of these was specified.
"""
from psyneulink.core.components.projections.projection import _parse_connection_specs, _is_projection_spec
params_dict = {}
port_spec = port_specific_spec
if isinstance(port_specific_spec, dict):
return None, port_specific_spec
elif isinstance(port_specific_spec, tuple):
tuple_spec = port_specific_spec
# GET PORT_SPEC (PARAM VALUE) AND ASSIGN PROJECTIONS_SPEC **********************************************
# 2-item tuple specification
if len(tuple_spec) == 2:
# 1st item is a value, so treat as Port spec (and return to _parse_port_spec to be parsed)
# and treat 2nd item as Projection specification
if is_numeric(tuple_spec[0]):
port_spec = tuple_spec[0]
reference_value = port_dict[REFERENCE_VALUE]
# Assign value so sender_dim is skipped below
# (actual assignment is made in _parse_port_spec)
if reference_value is None:
port_dict[REFERENCE_VALUE]=port_spec
elif not iscompatible(port_spec, reference_value):
raise PortError("Value in first item of 2-item tuple specification for {} of {} ({}) "
"is not compatible with its {} ({})".
format(ParameterPort.__name__, owner.name, port_spec,
REFERENCE_VALUE, reference_value))
projections_spec = tuple_spec[1]
elif _is_projection_spec(tuple_spec[0], include_matrix_spec=True):
port_spec, projections_spec = tuple_spec
# Tuple is Projection specification that is used to specify the Port,
else:
# return None in port_spec to suppress further, recursive parsing of it in _parse_port_spec
port_spec = None
if tuple_spec[0] != self:
# If 1st item is not the current port (self), treat as part of the projection specification
projections_spec = tuple_spec
else:
# Otherwise, just use 2nd item as projection spec
port_spec = None
projections_spec = tuple_spec[1]
# 3- or 4-item tuple specification
elif len(tuple_spec) in {3,4}:
# Tuple is projection specification that is used to specify the Port,
# so return None in port_spec to suppress further, recursive parsing of it in _parse_port_spec
port_spec = None
# Reduce to 2-item tuple Projection specification
projection_item = tuple_spec[3] if len(tuple_spec)==4 else None
projections_spec = (tuple_spec[0],projection_item)
# GET PROJECTIONS IF SPECIFIED *************************************************************************
try:
projections_spec
except UnboundLocalError:
pass
else:
try:
params_dict[PROJECTIONS] = _parse_connection_specs(self,
owner=owner,
connections=projections_spec)
# Parse the value of all of the Projections to get/validate parameter value
from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection
from psyneulink.core.components.projections.modulatory.learningprojection import LearningProjection
for projection_spec in params_dict[PROJECTIONS]:
if port_dict[REFERENCE_VALUE] is None:
# FIX: - PUTTING THIS HERE IS A HACK...
# FIX: MOVE TO _parse_port_spec UNDER PROCESSING OF ProjectionTuple SPEC
# FIX: USING _get_port_for_socket
# from psyneulink.core.components.projections.projection import _parse_projection_spec
# defaults.value?
mod_signal_value = projection_spec.port.value \
if isinstance(projection_spec.port, Port_Base) else None
mod_projection = projection_spec.projection
if isinstance(mod_projection, dict):
if mod_projection[PROJECTION_TYPE] not in {ControlProjection, LearningProjection}:
raise ParameterPortError("PROGRAM ERROR: {} other than {} or {} ({}) found "
"in specification tuple for {} param of {}".
format(Projection.__name__,
ControlProjection.__name__,
LearningProjection.__name__,
mod_projection, port_dict[NAME], owner.name))
elif VALUE in mod_projection:
mod_proj_value = mod_projection[VALUE]
else:
mod_proj_value = None
elif isinstance(mod_projection, Projection):
if not isinstance(mod_projection, (ControlProjection, LearningProjection)):
raise ParameterPortError("PROGRAM ERROR: {} other than {} or {} ({}) found "
"in specification tuple for {} param of {}".
format(Projection.__name__,
ControlProjection.__name__,
LearningProjection.__name__,
mod_projection, port_dict[NAME], owner.name))
elif mod_projection.initialization_status == ContextFlags.DEFERRED_INIT:
continue
mod_proj_value = mod_projection.defaults.value
else:
raise ParameterPortError("Unrecognized Projection specification for {} of {} ({})".
format(self.name, owner.name, projection_spec))
# FIX: 11/25/17 THIS IS A MESS: CHECK WHAT IT'S ACTUALLY DOING
# If ModulatoryProjection's value is not specified, try to assign one
if mod_proj_value is None:
# If not specified for Port, assign that
if VALUE not in port_dict or port_dict[VALUE] is None:
port_dict[VALUE] = mod_signal_value
# If value has been assigned, make sure value is the same for ModulatorySignal
elif port_dict[VALUE] != mod_signal_value:
# If the values differ, assign None so that Port's default is used
port_dict[VALUE] = None
# No need to check any more ModulatoryProjections
break
else:
port_dict[VALUE] = mod_proj_value
except ParameterPortError:
raise ParameterPortError("Tuple specification in {} specification dictionary "
"for {} ({}) is not a recognized specification for one or more "
"{}s, {}s, or {}s that project to it".
format(ParameterPort.__name__,
owner.name,
projections_spec,
Mechanism.__name__,
ModulatorySignal.__name__,
Projection.__name__))
elif port_specific_spec is not None:
raise ParameterPortError("PROGRAM ERROR: Expected tuple or dict for {}-specific params but, got: {}".
format(self.__class__.__name__, port_specific_spec))
return port_spec, params_dict
@staticmethod
def _get_port_function_value(owner, function, variable):
"""Return parameter variable (since ParameterPort's function never changes the form of its variable"""
return variable
def _get_variable_from_projections(self, context=None):
"""
Get backing-field ("base") value of param of function of Mechanism to which the ParameterPort belongs.
"""
# FIX 3/6/19: source does not yet seem to have been assigned to owner.function
return self.source._get(context)
@property
def pathway_projections(self):
raise ParameterPortError("PROGRAM ERROR: Attempt to access {} for {}; {}s do not have {}s".
format(PATHWAY_PROJECTION, self.name, PARAMETER_PORT, PATHWAY_PROJECTION))
@pathway_projections.setter
def pathway_projections(self, value):
raise ParameterPortError("PROGRAM ERROR: Attempt to assign {} to {}; {}s cannot accept {}s".
format(PATHWAY_PROJECTION, self.name, PARAMETER_PORT, PATHWAY_PROJECTION))
def _instantiate_parameter_ports(owner, function=None, context=None):
"""Call _instantiate_parameter_port for all modulable parameters to instantiate ParameterPorts for them
If owner.parameter_port is None or False:
- no ParameterPorts will be instantiated.
Otherwise, instantiate ParameterPort for each modulable parameter
:param function:
"""
# TBI / IMPLEMENT: use specs to implement ParameterPorts below
owner._parameter_ports = ParameterPortList(
component_type=ParameterPort,
name=owner.name + '.parameter_ports',
owner=owner,
)
# Check that all ParameterPorts for owner have not been explicitly suppressed
try:
if owner.parameter_ports is NotImplemented:
return
except KeyError:
# PARAMETER_PORTS not specified at all, so OK to continue and construct them
pass
# Instantiate ParameterPort for each modulable Parameter on
# function and owner. function is first because in some
# cases a Parameter will be specified on both, and the function's
# values/defaults should take precedence
def skip_parameter_port(parameter):
return (
isinstance(parameter, (ParameterAlias, SharedParameter))
or parameter.name in owner.exclude_from_parameter_ports
or not parameter.modulable
)
port_parameters = collections.defaultdict(set)
port_aliases = set()
owner_ports = set()
# function may be a custom function not yet parsed to a UDF
# function may also be a Function class, in which case parameter
# ports are still created for the modulable Parameters
for p in owner.parameters:
func = p.default_value
if (
not p.reference
and is_instance_or_subclass(func, Function)
and not isinstance(p, (ParameterAlias, SharedParameter))
):
for func_param in func.parameters:
if not skip_parameter_port(func_param):
port_parameters[func_param.name].add(p.name)
if isinstance(p, ParameterAlias):
port_aliases.add(p.name)
if not skip_parameter_port(p):
owner_ports.add(p.name)
for parameter_port_name in port_parameters:
if (
len(port_parameters[parameter_port_name]) > 1
or parameter_port_name in port_aliases
or parameter_port_name in owner_ports
):
add_suffix = True
else:
add_suffix = False
for corresponding_parameter_component_name in port_parameters[parameter_port_name]:
corresponding_parameter_component = getattr(
owner.parameters,
corresponding_parameter_component_name
)._get(context)
p = getattr(
corresponding_parameter_component.parameters,
parameter_port_name
)
# .function is not finalized yet, because this happens before
# _instantiate_function
if corresponding_parameter_component_name == FUNCTION:
source = operator.attrgetter(f'{FUNCTION}.parameters.{p.name}')
else:
source = p
# use Shared/FunctionParameter value as fallback
try:
value = owner.initial_shared_parameters[corresponding_parameter_component_name][p.name]
except (KeyError, TypeError):
value = None
# if parameter value on actual Parameter was specified or there is
# no Shared/FunctionParameter value, use the actual Parameter default
if p._user_specified or value is None:
if p.spec is not None:
value = p.spec
else:
value = p.default_value
if add_suffix:
explicit_name = ParameterPortList._get_explicit_name(
p.name,
corresponding_parameter_component_name
)
else:
explicit_name = p.name
_instantiate_parameter_port(
owner,
p.name,
value,
context=context,
function=corresponding_parameter_component,
source=source,
explicit_name=explicit_name
)
for p in owner.parameters:
if not skip_parameter_port(p):
if (
p.name in port_parameters
or p.name in port_aliases
):
explicit_name = ParameterPortList._get_explicit_name(
p.name,
ParameterPortList._owner_port_suffix
)
else:
explicit_name = p.name
if p.spec is not None:
value = p.spec
else:
value = p.default_value
_instantiate_parameter_port(
owner,
p.name,
value,
context=context,
function=function,
source=p,
explicit_name=explicit_name,
)
owner.parameter_ports.sort(key=lambda port: port.name)
def _instantiate_parameter_port(
owner,
param_name,
param_value,
context,
function=None,
source=None,
explicit_name=None
):
"""Call _instantiate_port for allowable params, to instantiate a ParameterPort for it
Include ones in function.parameters
Exclude if it is a:
ParameterPort that already exists
non-numeric value (including NotImplemented, False or True)
unless it is:
a tuple (could be one specifying Modulatory Component)
a dict with the name FUNCTION_PARAMS (otherwise exclude)
function or method
IMPLEMENTATION NOTE: FUNCTION_RUNTIME_PARAM_NOT_SUPPORTED
(this is because self.defaults.function could be a class rather than a bound method;
i.e., not yet instantiated; could be rectified by assignment in _instantiate_function)
# FIX: UPDATE WITH MODULATION_MODS
# FIX: CHANGE TO IntegratorFunction Function ONCE LearningProjection MODULATES ParameterPort Function:
If param_name is FUNCTION_PARAMS and param is a matrix (presumably for a MappingProjection)
modify ParameterPort's function to be LinearCombination (rather than Linear, which is the default)
"""
from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import _is_modulatory_spec
from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base
def _get_tuple_for_single_item_modulatory_spec(obj, name, value):
"""Return (<default param value>, <modulatory spec>) for modulatory spec
"""
try:
param_default_value = getattr(obj.defaults, name)
# Only assign default value if it is not None
if param_default_value is not None:
return (param_default_value, value)
else:
return value
except AttributeError:
raise ParameterPortError("Unrecognized specification for {} paramater of {} ({})".
format(param_name, owner.name, param_value))
if explicit_name is None:
explicit_name = param_name
# EXCLUSIONS:
# # Skip if ParameterPort already exists
# if param_name in owner.ParameterPorts:
# return
if param_value is NotImplemented:
return
# Allow numerics but omit booleans (which are treated by is_numeric as numerical)
if is_numeric(param_value) and not isinstance(param_value, bool):
pass
# Only allow a FUNCTION_PARAMS dict
elif isinstance(param_value, (ReadOnlyOrderedDict, dict)) and param_name == FUNCTION_PARAMS:
pass
# Allow ModulatoryProjection
elif isinstance(param_value, Projection):
if isinstance(param_value, ModulatoryProjection_Base):
pass
else:
return
# Allow Projection class
elif inspect.isclass(param_value) and issubclass(param_value, Projection):
if issubclass(param_value, ModulatoryProjection_Base):
pass
else:
return
elif _is_modulatory_spec(param_value, include_matrix_spec=False) and not isinstance(param_value, tuple):
# If parameter is a single Modulatory specification (e.g., ControlSignal, or CONTROL, etc.)
# try to place it in a tuple (for interpretation by _parse_port_spec) using default value as 1st item
# (note: exclude matrix since it is allowed as a value specification but not a projection reference)
try:
param_value = _get_tuple_for_single_item_modulatory_spec(function, param_name, param_value)
except ParameterPortError:
param_value = _get_tuple_for_single_item_modulatory_spec(owner, param_name, param_value)
# Allow tuples (could be spec that includes a Projection or Modulation)
elif isinstance(param_value, tuple):
# # FIX: EXTRACT VALUE HERE (AS IN Component.__init__?? [4/18/17]
# param_value = owner._get_param_value_from_tuple(param_value)
pass
# Allow if it is a keyword for a parameter
elif isinstance(param_value, str) and param_value in parameter_keywords:
pass
# Exclude function (see docstring above)
elif param_name == FUNCTION:
return
# (7/19/17 CW) added this if statement below while adding `hetero` and `auto` and AutoAssociativeProjections: this
# allows `hetero` to be specified as a matrix, while still generating a ParameterPort
elif isinstance(param_value, (np.ndarray, np.matrix)):
pass
# allow function parameters
elif param_name in function.parameters.names():
pass
# Exclude all others
else:
return
# Assign ParameterPorts to Component for parameters of its function (function_params), except for ones that are:
# - another component
# - a function or method
# - have a value of None (see IMPLEMENTATION_NOTE below)
# - they have the same name as another parameter of the component (raise exception for this)
# IMPLEMENTATION NOTE:
# The following is necessary since, if ANY parameters of a function are specified, entries are made
# in the FUNCTION_PARAMS dict of its owner for ALL of the function's params; however, their values
# will be set to None (and there may not be a way to determine a
# default; e.g., the length of the array for the weights or exponents params for LinearCombination).
# Therefore, None will be passed as the reference_value, which will cause validation of the
# ParameterPort's function (in _instantiate_function()) to fail.
# Current solution is to simply not instantiate a ParameterPort for any function_param that has
# not been explicitly specified
if param_value is None:
return
if not _is_legal_param_value(owner, param_value):
return
elif (_is_modulatory_spec(param_value, include_matrix_spec=False)
and not isinstance(param_value, tuple)):
# If parameter is a single Modulatory specification (e.g., ControlSignal, or CONTROL, etc.)
# try to place it in a tuple (for interpretation by _parse_port_spec) using default value as 1st item
# (note: exclude matrix since it is allowed as a value specification vs. a projection reference)
try:
param_value = _get_tuple_for_single_item_modulatory_spec(
function,
param_name,
param_value
)
except ParameterPortError:
param_value = _get_tuple_for_single_item_modulatory_spec(
owner,
param_name,
param_value
)
# # FIX: 10/3/17 - ??MOVE THIS TO _parse_port_specific_specs ----------------
# # Use param_value as constraint
# # IMPLEMENTATION NOTE: need to copy, since _instantiate_port() calls _parse_port_value()
# # for constraints before port_spec, which moves items to subdictionaries,
# # which would make them inaccessible to the subsequent parse of port_spec
from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import ModulatorySignal
from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base
if (
is_iterable(param_value)
and any(isinstance(item, (ModulatorySignal, ModulatoryProjection_Base, ModulatoryMechanism_Base)) for item in param_value)
):
reference_value = param_value
else:
reference_value = deepcopy(param_value)
# Assign parameterPort for function_param to the component
port = _instantiate_port(
owner=owner,
port_type=ParameterPort,
name=explicit_name,
port_spec=param_value,
reference_value=reference_value,
reference_value_name=param_name,
params=None,
context=context
)
if port:
owner._parameter_ports[explicit_name] = port
# will be parsed on assignment of function
# FIX: if the function is manually changed after assignment,
# FIX: the source will remain pointing to the original Function
port.source = source
# if the source parameter is not added here, we can't reference
# a ParameterPort by Parameter
owner.parameter_ports.parameter_mapping[source] = port
return port
def _is_legal_param_value(owner, value):
from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import _is_control_spec
from psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism import _is_gating_spec
# LEGAL PARAMETER VALUES:
# # lists, arrays or numeric values
if is_value_spec(value):
return True
# tuple, first item of which is a legal parameter value
# note: this excludes (param_name, Mechanism) tuples used to specify a ParameterPort
# (e.g., if specified for the control_signals param of ControlMechanism)
if isinstance(value, tuple):
if _is_legal_param_value(owner, value[0]):
return True
if isinstance(value, dict) and VALUE in value:
return True
if _is_control_spec(value) or _is_gating_spec(value):
return True
# keyword that resolves to one of the above
if get_param_value_for_keyword(owner, value) is not None:
return True
# Assignment of ParameterPort for Component objects, function or method are not currently supported
if isinstance(value, (types.FunctionType, types.MethodType, Component)):
return False
# Fall through: anything unrecognized is not a legal parameter value (previously an implicit None)
return False
def _get_parameter_port(sender_owner, sender_type, param_name, component):
"""Return ParameterPort for named parameter of a Mechanism requested by owner
"""
# Validate that component is a Mechanism or Projection
if not isinstance(component, (Mechanism, Projection)):
raise ParameterPortError("Request for {} of a component ({}) that is not a {} or {}".
format(PARAMETER_PORT, component, MECHANISM, PROJECTION))
try:
return component._parameter_ports[param_name]
except KeyError:
# Check that param (named by str) is an attribute of the Mechanism
if not (hasattr(component, param_name) or hasattr(component.function, param_name)):
raise ParameterPortError("{} (in specification of {} {}) is not an attribute "
"of {} or its function"
.format(param_name, sender_type, sender_owner.name, component))
# Check that the Mechanism has a ParameterPort for the param
if param_name not in component._parameter_ports.names:
raise ParameterPortError("There is no ParameterPort for the parameter ({}) of {} "
"specified in {} for {}".
format(param_name, component.name, sender_type, sender_owner.name))
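# Minimal usage sketch (hypothetical names; assumes a configured PsyNeuLink Mechanism
# whose function has a modulable 'slope' parameter with a ParameterPort):
# port = _get_parameter_port(sender_owner=control_mech, sender_type='ControlSignal',
#                            param_name='slope', component=target_mech)
# Raises ParameterPortError if 'slope' is not a parameter of target_mech (or its
# function), or if it has no ParameterPort.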
| 49.606618 | 130 | 0.651731 | 7,743 | 67,465 | 5.533514 | 0.100478 | 0.007002 | 0.010316 | 0.01111 | 0.271834 | 0.20499 | 0.163376 | 0.135392 | 0.116347 | 0.110069 | 0 | 0.002542 | 0.282813 | 67,465 | 1,359 | 131 | 49.64312 | 0.882939 | 0.547336 | 0 | 0.310696 | 0 | 0.003396 | 0.058624 | 0.009899 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042445 | false | 0.022071 | 0.050934 | 0.005093 | 0.185059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
659d7c6487df6be9956c6f26474fcf201cb9d3b3 | 10,333 | py | Python | docusign_esign/models/watermark.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 58 | 2017-10-18T23:06:57.000Z | 2021-04-15T23:14:58.000Z | docusign_esign/models/watermark.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 49 | 2017-10-27T05:54:09.000Z | 2021-04-29T22:06:17.000Z | docusign_esign/models/watermark.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 49 | 2017-09-16T07:23:41.000Z | 2021-05-07T20:21:20.000Z | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class Watermark(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'display_angle': 'str',
'enabled': 'str',
'font': 'str',
'font_color': 'str',
'font_size': 'str',
'id': 'str',
'image_base64': 'str',
'transparency': 'str',
'watermark_text': 'str'
}
attribute_map = {
'display_angle': 'displayAngle',
'enabled': 'enabled',
'font': 'font',
'font_color': 'fontColor',
'font_size': 'fontSize',
'id': 'id',
'image_base64': 'imageBase64',
'transparency': 'transparency',
'watermark_text': 'watermarkText'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""Watermark - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._display_angle = None
self._enabled = None
self._font = None
self._font_color = None
self._font_size = None
self._id = None
self._image_base64 = None
self._transparency = None
self._watermark_text = None
self.discriminator = None
setattr(self, "_{}".format('display_angle'), kwargs.get('display_angle', None))
setattr(self, "_{}".format('enabled'), kwargs.get('enabled', None))
setattr(self, "_{}".format('font'), kwargs.get('font', None))
setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None))
setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None))
setattr(self, "_{}".format('id'), kwargs.get('id', None))
setattr(self, "_{}".format('image_base64'), kwargs.get('image_base64', None))
setattr(self, "_{}".format('transparency'), kwargs.get('transparency', None))
setattr(self, "_{}".format('watermark_text'), kwargs.get('watermark_text', None))
@property
def display_angle(self):
"""Gets the display_angle of this Watermark. # noqa: E501
# noqa: E501
:return: The display_angle of this Watermark. # noqa: E501
:rtype: str
"""
return self._display_angle
@display_angle.setter
def display_angle(self, display_angle):
"""Sets the display_angle of this Watermark.
# noqa: E501
:param display_angle: The display_angle of this Watermark. # noqa: E501
:type: str
"""
self._display_angle = display_angle
@property
def enabled(self):
"""Gets the enabled of this Watermark. # noqa: E501
# noqa: E501
:return: The enabled of this Watermark. # noqa: E501
:rtype: str
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this Watermark.
# noqa: E501
:param enabled: The enabled of this Watermark. # noqa: E501
:type: str
"""
self._enabled = enabled
@property
def font(self):
"""Gets the font of this Watermark. # noqa: E501
The font to be used for the tab value. Supported Fonts: Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default. # noqa: E501
:return: The font of this Watermark. # noqa: E501
:rtype: str
"""
return self._font
@font.setter
def font(self, font):
"""Sets the font of this Watermark.
The font to be used for the tab value. Supported Fonts: Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default. # noqa: E501
:param font: The font of this Watermark. # noqa: E501
:type: str
"""
self._font = font
@property
def font_color(self):
"""Gets the font_color of this Watermark. # noqa: E501
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White. # noqa: E501
:return: The font_color of this Watermark. # noqa: E501
:rtype: str
"""
return self._font_color
@font_color.setter
def font_color(self, font_color):
"""Sets the font_color of this Watermark.
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White. # noqa: E501
:param font_color: The font_color of this Watermark. # noqa: E501
:type: str
"""
self._font_color = font_color
@property
def font_size(self):
"""Gets the font_size of this Watermark. # noqa: E501
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72. # noqa: E501
:return: The font_size of this Watermark. # noqa: E501
:rtype: str
"""
return self._font_size
@font_size.setter
def font_size(self, font_size):
"""Sets the font_size of this Watermark.
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72. # noqa: E501
:param font_size: The font_size of this Watermark. # noqa: E501
:type: str
"""
self._font_size = font_size
@property
def id(self):
"""Gets the id of this Watermark. # noqa: E501
# noqa: E501
:return: The id of this Watermark. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Watermark.
# noqa: E501
:param id: The id of this Watermark. # noqa: E501
:type: str
"""
self._id = id
@property
def image_base64(self):
"""Gets the image_base64 of this Watermark. # noqa: E501
# noqa: E501
:return: The image_base64 of this Watermark. # noqa: E501
:rtype: str
"""
return self._image_base64
@image_base64.setter
def image_base64(self, image_base64):
"""Sets the image_base64 of this Watermark.
# noqa: E501
:param image_base64: The image_base64 of this Watermark. # noqa: E501
:type: str
"""
self._image_base64 = image_base64
@property
def transparency(self):
"""Gets the transparency of this Watermark. # noqa: E501
# noqa: E501
:return: The transparency of this Watermark. # noqa: E501
:rtype: str
"""
return self._transparency
@transparency.setter
def transparency(self, transparency):
"""Sets the transparency of this Watermark.
# noqa: E501
:param transparency: The transparency of this Watermark. # noqa: E501
:type: str
"""
self._transparency = transparency
@property
def watermark_text(self):
"""Gets the watermark_text of this Watermark. # noqa: E501
# noqa: E501
:return: The watermark_text of this Watermark. # noqa: E501
:rtype: str
"""
return self._watermark_text
@watermark_text.setter
def watermark_text(self, watermark_text):
"""Sets the watermark_text of this Watermark.
# noqa: E501
:param watermark_text: The watermark_text of this Watermark. # noqa: E501
:type: str
"""
self._watermark_text = watermark_text
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Watermark, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Watermark):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Watermark):
return True
return self.to_dict() != other.to_dict()
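# Minimal usage sketch (illustrative values only; assumes the docusign_esign
# package is installed and importable):
# wm = Watermark(watermark_text="CONFIDENTIAL", font="Arial", font_size="Size20")
# wm.to_dict()   # -> {'watermark_text': 'CONFIDENTIAL', 'font': 'Arial', ...}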
| 30.302053 | 244 | 0.590826 | 1,203 | 10,333 | 4.939318 | 0.159601 | 0.064625 | 0.090878 | 0.10552 | 0.532817 | 0.480478 | 0.46651 | 0.406261 | 0.300067 | 0.199091 | 0 | 0.034895 | 0.303881 | 10,333 | 340 | 245 | 30.391176 | 0.791186 | 0.413917 | 0 | 0.093525 | 0 | 0 | 0.096383 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172662 | false | 0 | 0.028777 | 0 | 0.33813 | 0.014388 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
659de92f420c1fa5d556098464771f69290ee6b0 | 39190 | py | Python | qmla/shared_functionality/genetic_algorithm.py | flynnbr11/QMD | ac8cfe1603658ee9b916452f29b99460ee5e3d44 | ["MIT"] | 9 | 2021-01-08T12:49:01.000Z | 2021-12-29T06:59:32.000Z | qmla/shared_functionality/genetic_algorithm.py | flynnbr11/QMD | ac8cfe1603658ee9b916452f29b99460ee5e3d44 | ["MIT"] | 2 | 2021-02-22T20:42:25.000Z | 2021-02-22T22:22:59.000Z | qmla/shared_functionality/genetic_algorithm.py | flynnbr11/QMD | ac8cfe1603658ee9b916452f29b99460ee5e3d44 | ["MIT"] | 9 | 2021-02-15T14:18:48.000Z | 2021-12-17T04:02:07.000Z | import numpy as np
import itertools
import sys
import os
import random
import copy
import scipy
import time
import pandas as pd
import sklearn as skl
sys.path.append("/home/bf16951/QMD")
import qmla
import qmla.model_building_utilities
__all__ = ["GeneticAlgorithmQMLA", "GeneticAlgorithmFullyConnectedLikewisePauliTerms"]
class GeneticAlgorithmQMLA:
r"""
Standalone genetic algorithm implementation for integration with :class:`qmla.QuantumModelLearningAgent`.
This class works with the :class:`~qmla.exploration_strategies.ExplorationStrategy`
to construct models according to the genetic strategy.
:param list genes: individual terms which can be combined to form chromosomes
:param int num_sites: maximum dimension permitted in model search
:param str true_model: target model. if None, set at random from space of valid models.
:param list base_terms: deprecated (TODO: remove)
:param str selection_method: mechanism through which to select chromosomes as parents.
Currently only 'roulette' available, but the framework should facilitate
alternatives.
:param str crossover_method: mechanism through which parent chromosomes are combined
to form offspring.
Currently only 'one_point' available, but the framework should facilitate
alternatives.
:param str mutation_method: mechanism through which to perform chromosome mutation
Currently only 'element_wise' available, but the framework should facilitate
alternatives.
:param float mutation_probability: rate with which the mutation mechanism incurs mutation.
:param float selection_truncation_rate: fraction of models to retain as viable parents
to the subsequent generation; the lower-rated other models are discarded.
:param int num_protected_elite_models: number of models to automatically admit to the
subsequent generation.
:param int unchanged_elite_num_generations_cutoff: after this number of generations,
if the top model has not changed, the model search is terminated.
:param str log_file: path of QMLA instance's log file.
"""
def __init__(
self,
genes,
num_sites,
true_model=None,
base_terms=["x", "y", "z"],
selection_method="roulette",
crossover_method="one_point",
mutation_method="element_wise",
mutation_probability=0.1,
selection_truncation_rate=0.5,
num_protected_elite_models=2,
unchanged_elite_num_generations_cutoff=5,
log_file=None,
**kwargs
):
self.num_sites = num_sites
self.base_terms = base_terms
self.genes = list(sorted(genes))
self.get_base_chromosome()
if true_model is None:
r = random.randint(1, 2 ** self.num_terms - 1)
r = format(r, "0{}b".format(self.num_terms))
self.true_model = self.map_chromosome_to_model(r)
else:
self.true_model = true_model
self.true_chromosome = self.map_model_to_chromosome(self.true_model)
self.true_chromosome_string = self.chromosome_string(self.true_chromosome)
self.all_zero_chromosome_string = "0" * self.num_terms
self.addition_str = "+"
self.mutation_probability = mutation_probability
self.mutation_count = 0
self.previously_considered_chromosomes = []
self.chromosomes_at_generation = {}
self.delta_f_by_generation = {}
self.genetic_generation = 1
self.log_file = log_file
self.f_score_change_by_generation = {}
self.fitness_at_generation = {}
self.models_ranked_by_fitness = {}
self.most_elite_models_by_generation = {}
self.num_protected_elite_models = num_protected_elite_models
self.terminate_early_if_top_model_unchanged = True
self.best_model_unchanged = False
self.unchanged_elite_num_generations_cutoff = (
unchanged_elite_num_generations_cutoff
)
self.selection_truncation_rate = selection_truncation_rate
self.gene_pool = pd.DataFrame(
columns=["model", "chromosome", "f_score", "probability", "generation"]
)
self.elite_models = pd.DataFrame(
columns=["model", "chromosome", "f_score", "generation", "elite_position"]
)
# specifying which functionality to use
self.selection_method = self.select_from_pair_df_remove_selected
self.mutation_method = self.element_wise_mutation
self.crossover_method = self.one_point_crossover
available_selection_methods = {
"roulette": self.select_from_pair_df_remove_selected,
}
available_mutation_methods = {"element_wise": self.element_wise_mutation}
available_crossover_methods = {"one_point": self.one_point_crossover}
self.selection_method = available_selection_methods[selection_method]
self.mutation_method = available_mutation_methods[mutation_method]
self.crossover_method = available_crossover_methods[crossover_method]
def get_base_chromosome(self):
r"""
Creates basic chromosome, i.e. with all genes set to 0.
"""
self.num_terms = len(self.genes)
self.basic_chromosome = np.array([0] * self.num_terms)
self.chromosome_description = self.genes
self.chromosome_description_array = np.array(self.genes)
def map_chromosome_to_model(
self,
chromosome,
):
r"""
Given a chromosome, get the corresponding model.
:param np.array chromosome: chromosome representing a candidate model
:returns str model_string: name of the corresponding model
"""
if isinstance(chromosome, str):
chromosome = list(chromosome)
chromosome = np.array([int(i) for i in chromosome])
assert (
len(chromosome) == self.num_terms
), "Chromosome must be of length {}".format(self.num_terms)
nonzero_positions = chromosome.nonzero()
present_terms = list(self.chromosome_description_array[nonzero_positions])
model_string = "+".join(present_terms)
return model_string
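# Worked example (hypothetical genes): with self.genes = ['a', 'b', 'c'], the
# chromosome [1, 0, 1] activates the first and third genes and maps to the
# model string 'a+c'; map_model_to_chromosome (below) inverts this mapping.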
def map_model_to_chromosome(self, model):
r"""
Given a model, get the corresponding chromosome.
:param str model: name of candidate model
:returns np.array chromosome: array of ones and zeros indicating which genes are active in the model
"""
terms = qmla.model_building_utilities.get_constituent_names_from_name(model)
assert np.all(
[t in self.chromosome_description for t in terms]
), "Cannot map some term(s) to any available gene. Terms: {} \n Genes".format(
terms, self.chromosome_description
)
locs = [self.chromosome_description.index(t) for t in terms]
chromosome = copy.copy(self.basic_chromosome)
chromosome[np.array(locs)] = 1
return chromosome
def model_f_score(self, model_name):
r"""
Get the F score of a candidate model.
:param str model_name: name of candidate model
:returns float f_score: F score, between 0 and 1, indicating how many terms overlap
between the candidate and target models.
"""
model_as_chromosome = self.map_model_to_chromosome(model_name)
return self.chromosome_f_score(model_as_chromosome)
def chromosome_string(self, c):
r"""Map a chromosome array to a string."""
b = [str(i) for i in c]
s = "".join(b)
if s == "1000000000":
# TODO: generalise
# 1 followed by num_terms 0's can be generated and is not permitted
self.log_print(["Unallowed chromosome string {} for {}".format(b, c)])
return s
def chromosome_f_score(
self,
chromosome,
):
r"""
Get the F score of a candidate model from its chromosome representation.
:param np.array chromosome: representation of candidate model
:returns float f_score: F score, between 0 and 1, indicating how many terms overlap
between the candidate and target models.
"""
if not isinstance(chromosome, np.ndarray):
chromosome = np.array([int(a) for a in list(chromosome)])
return skl.metrics.f1_score(chromosome, self.true_chromosome)
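# Worked example: if the true chromosome is [1, 1, 0] and the candidate is
# [1, 0, 0], precision = 1.0 and recall = 0.5, so the returned F1 score is
# 2 * (1.0 * 0.5) / (1.0 + 0.5) = 2/3.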
def log_print(self, to_print_list):
r"""Wrapper for :func:`~qmla.print_to_log`"""
qmla.logging.print_to_log(
to_print_list=to_print_list,
log_file=self.log_file,
log_identifier="GA gen {}".format(self.genetic_generation),
)
def random_initial_models(self, num_models=5):
r"""
Generate random models from the space of valid candidates.
:param int num_models: number of candidates to generate
:returns list new_models: the randomly generated model names
"""
if num_models > 2 ** self.num_terms:
self.log_print(
[
"Number of models requested > number of possible models ({})".format(
2 ** self.num_terms
),
"Reducing by half until < half available",
]
)
while num_models > (2 ** self.num_terms) / 2:
num_models = int(num_models / 2)
new_models = []
self.initial_number_models = num_models
self.chromosomes_at_generation[0] = []
self.previously_considered_chromosomes = []
self.birth_register = pd.DataFrame(
columns=[
"child",
"chromosome_child",
"parent_a",
"parent_b",
"chromosome_parent_a",
"chromosome_parent_b",
"generation",
"f_score",
]
) # TODO this is awful - this stuff shouldn't be initialised in this function
while len(new_models) < num_models:
# generate random number and
# format as binary string, i.e. chromosome
r = random.randint(1, 2 ** self.num_terms - 1)
r = format(r, "0{}b".format(self.num_terms))
if self.chromosome_string(r) not in self.previously_considered_chromosomes:
r = list(r)
r = np.array([int(i) for i in r])
mod = self.map_chromosome_to_model(r)
chrom = self.chromosome_string(r)
f = self.chromosome_f_score(chrom)
self.previously_considered_chromosomes.append(chrom)
self.chromosomes_at_generation[0].append(chrom)
new_models.append(mod)
birth = pd.Series(
{
"child": mod,
"chromosome_child": chrom,
"generation": 1,
"f_score": f,
}
)
self.birth_register.loc[len(self.birth_register)] = birth
return new_models
def rand_model_f(self):
r"""
Generate a random model chromosome and evaluate its F score.
"""
r = 0
while r == 0:
r = np.random.randint(2 ** self.num_terms)
b = bin(r)[2:].zfill(self.num_terms)
b_array = np.array([int(i) for i in list(b)])
f = skl.metrics.f1_score(b_array, self.true_chromosome)
return f, b_array
def random_models_sorted_by_f_score(
self,
num_models=14,
):
r"""
Generate a set of random models and sort them by F score.
"""
n_runs = 1e3 # first sample ~1000 random numbers
some_models = [self.rand_model_f() for _ in range(int(n_runs))]
f_scores = np.array(some_models)[:, 0]
chromosomes = np.array(some_models)[:, 1]
# then choose from those randomly generated models
random_chroms = np.random.choice(chromosomes, num_models)
random_models = [self.map_chromosome_to_model(c) for c in random_chroms]
models_w_f = list(
zip(random_models, [self.model_f_score(m) for m in random_models])
)
sorted_by_f = sorted(models_w_f, key=lambda x: x[1])
sorted_models = np.array(sorted_by_f)[:, 0]
sorted_models = list(sorted_models)
just_f = np.array(models_w_f)[:, 1]
just_f = [float(a) for a in just_f]
return sorted_models
######################
# Selection functions
######################
def selection(self, **kwargs):
r"""
Wrapper for user's selected selection method.
Whatever method is called must return
* prescribed_chromosomes
* chromosomes_for_crossover - pairs
"""
return self.selection_method(**kwargs)
def select_from_pair_df_remove_selected(self, **kwargs):
# normalise so pairs' probabilities sum to 1
self.chrom_pair_df.probability = self.chrom_pair_df.probability.astype(float)
self.chrom_pair_df.probability = (
self.chrom_pair_df.probability / self.chrom_pair_df.probability.sum()
)
pair_ids = list(self.chrom_pair_df.index)
pair_probs = [self.chrom_pair_df.loc[i].probability for i in pair_ids]
self.log_print(["Number available pairs:", len(pair_ids)])
# randomly select a pair from list of pairs
selected_id = np.random.choice(a=pair_ids, p=pair_probs)
selected_entry = self.chrom_pair_df.loc[selected_id]
# Drop so it can't be chosen again
self.chrom_pair_df.drop(selected_id, inplace=True)
self.log_print(
["chrom pair df has {} options remaining".format(len(self.chrom_pair_df))]
)
selection = {
"chromosome_1": selected_entry["c1"],
"chromosome_2": selected_entry["c2"],
"other_data": {
"cut": int(selected_entry["cut1"]),
"force_mutation": bool(selected_entry["force_mutation"]),
},
}
return selection
def basic_pair_selection(self, chromosome_selection_probabilities, **kwargs):
r"""
Mechanism for selecting two models from the database of potential parents.
:param dict chromosome_selection_probabilities:
mapping from each chromosome string to the probability of selecting it.
:return np.ndarray selected_chromosomes: two chromosomes sampled without replacement
"""
chromosomes = list(chromosome_selection_probabilities.keys())
probabilities = [chromosome_selection_probabilities[c] for c in chromosomes]
selected_chromosomes = np.random.choice(
chromosomes, size=2, p=probabilities, replace=False
)
return selected_chromosomes
######################
# Crossover functions
######################
def crossover(self, **kwargs):
r"""
Wrapper for crossover mechanism.
This method assumes only 2 chromosomes to crossover
and passes them to the method set as self.crossover_method, which can be easily replaced
to facilitate alternative crossover schemes.
"""
return self.crossover_method(**kwargs)
def one_point_crossover(self, **kwargs):
r"""
Crossover two chromosomes about a single gene.
Input two chromosomes, and selection (a dict) in kwargs.
selection contains ``chromosome_1`` and ``chromosome_2``,
as well as a dict called ``other_data`` containing ``cut``,
which is the position about which to crossover the two chromosomes.
"""
selection = kwargs["selection"]
c1 = np.array(list(selection["chromosome_1"]))
c2 = np.array(list(selection["chromosome_2"]))
x = selection["other_data"]["cut"]
tmp = c2[:x].copy()
c2[:x], c1[:x] = c1[:x], tmp
return c1, c2
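# Worked example: parents '1100' and '0011' with cut = 2 swap their first two
# genes, producing children '0000' and '1111'.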
######################
# Mutation functions
######################
def mutation(self, **kwargs):
r"""
Wrapper for mutation mechanism.
All input arguments to the mutation method are passed directly to
the nominated mutation function, set as self.mutation_method.
"""
return self.mutation_method(**kwargs)
def element_wise_mutation(self, **kwargs):
r"""
With probability ``mutation_probability`` (or when mutation is forced), flip a single
randomly chosen gene in each chromosome.
"""
chromosomes = kwargs["chromosomes"]
force_mutation = kwargs["force_mutation"]
copy_chromosomes = copy.copy(chromosomes)
mutated_chromosomes = []
for c in copy_chromosomes:
try:
if np.all(c == 0):
self.log_print(
[
"Input chomosome {} has no interactions -- forcing mutation".format(
c
)
]
)
mutation_probability = 1.0
else:
mutation_probability = self.mutation_probability
except:
self.log_print(["Can't compare all w/ 0 :", c])
mutation_probability = self.mutation_probability
if np.random.rand() < mutation_probability or force_mutation:
num_mutations_to_perform = max(1, force_mutation)  # NOTE: currently unused; a single gene is flipped below
self.mutation_count += 1
idx = np.random.choice(range(len(c)))
# print("Flipping idx {}".format(idx))
if int(c[idx]) == 0:
c[idx] = "1"
elif int(c[idx]) == 1:
c[idx] = "0"
mutated_chromosomes.append(c)
return mutated_chromosomes
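# Worked example: with mutation_probability = 0.1, each chromosome has a 10%
# chance of having one randomly chosen gene flipped, e.g. '1010' -> '1000' if
# the third position is selected; all-zero chromosomes are always mutated.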
######################
# Elitism functions
######################
def get_elite_models(self, **kwargs):
r"""
Wrapper for elite model selection method,
here set to self.elite_ranking_top_n_models.
"""
return self.elite_ranking_top_n_models(**kwargs)
def elite_ranking_top_n_models(self, model_fitnesses, **kwargs):
r"""
Get the top N models, and store info on the elite models to date.
:param dict model_fitnesses: the fitness of each model in this generation according to the
chosen objective function.
"""
elite_models = self.models_ranked_by_fitness[self.genetic_generation][
: self.num_protected_elite_models
]
self.log_print(
[
"Elite models at generation {}: {}".format(
self.genetic_generation, elite_models
)
]
)
for m in elite_models:
self.elite_models = self.elite_models.append(
pd.Series(
{
"model": m,
"generation": self.genetic_generation,
"elite_position": elite_models.index(m) + 1,
"chromosome": self.map_model_to_chromosome(m),
"f_score": self.model_f_score(m),
}
),
ignore_index=True,
)
self.most_elite_models_by_generation[
self.genetic_generation
] = self.models_ranked_by_fitness[self.genetic_generation][0]
if self.genetic_generation > self.unchanged_elite_num_generations_cutoff + 2:
gen = self.genetic_generation
recent_generations = list(
range(
max(0, gen - self.unchanged_elite_num_generations_cutoff), gen + 1
)
)
recent_elite_models = [
self.most_elite_models_by_generation[g] for g in recent_generations
]
unchanged = np.all(
np.array(recent_elite_models)
== self.most_elite_models_by_generation[gen]
)
if unchanged and self.terminate_early_if_top_model_unchanged:
# TODO this allows for an unusual case: the top model is unchanged for the cutoff
# number of generations but is then improved upon in the subsequent generation;
# since the cutoff has already been reached, termination is triggered and the
# new generation champion is declared the winner.
self.best_model_unchanged = True
self.log_print(
[
"Setting best_model_unchanged to {}".format(
self.best_model_unchanged
)
]
)
self.log_print(
[
"Elite model unchanged in last {} generations: {}. \nCurrently: {} with f-score {}".format(
self.unchanged_elite_num_generations_cutoff,
self.best_model_unchanged,
self.most_elite_models_by_generation[gen],
self.chromosome_f_score(
self.map_model_to_chromosome(
self.most_elite_models_by_generation[gen]
)
),
)
]
)
return elite_models
######################
# Processing given fitness to
# selection probabilities
######################
def get_selection_probabilities(self, **kwargs):
r"""
Wrapper for parent selection function, here set to self.truncate_to_top_half.
"""
return self.truncate_to_top_half(**kwargs)
def truncate_to_top_half(self, model_fitnesses, **kwargs):
r"""
Retain only the top-performing half of models considered at this generation,
for consideration as parents to offspring on the subsequent generation.
:param dict model_fitnesses: the fitness of each model in this generation according to the
chosen objective function.
"""
ranked_models = sorted(model_fitnesses, key=model_fitnesses.get, reverse=True)
num_models = len(ranked_models)
self.log_print(
[
"Considering truncation for {} models. Truncation rate = {}".format(
num_models, self.selection_truncation_rate
),
]
)
for m in ranked_models:
self.log_print(["fitness = {} \t Model={} ".format(model_fitnesses[m], m)])
truncation_cutoff = max(
int(num_models * self.selection_truncation_rate), 4
) # either consider top half, or top 4 if too small
truncation_cutoff = min(truncation_cutoff, num_models)
truncated_model_list = ranked_models[:truncation_cutoff]
truncated_model_fitnesses = {
mod: model_fitnesses[mod] for mod in truncated_model_list
}
# keep the others with zero fitness, so the gene pool reflect them
for m in ranked_models[truncation_cutoff:]:
self.log_print(
[
"Setting fitness to 0 for {} as it is {}th in rankings".format(
m, ranked_models.index(m)
)
]
)
truncated_model_fitnesses[m] = 0
sum_fitnesses = np.sum(list(truncated_model_fitnesses.values()))
self.log_print(
[
"Truncated model list:\n",
truncated_model_list,
"\nTruncated model fitnesses:\n",
truncated_model_fitnesses,
"\nsum fitnesses:",
sum_fitnesses,
]
)
model_probabilities = {
self.chromosome_string(self.map_model_to_chromosome(mod)): (
truncated_model_fitnesses[mod] / sum_fitnesses
)
for mod in truncated_model_fitnesses.keys()
}
self.log_print(["Chromosome Selection probabilities:\n", model_probabilities])
return model_probabilities
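# Worked example: with 6 models and selection_truncation_rate = 0.5, the cutoff is
# max(int(6 * 0.5), 4) = 4, so the top 4 ranked models keep their fitness and the
# remaining 2 are assigned fitness 0; probabilities are then the fitnesses
# normalised over the retained models.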
def prepare_chromosome_pair_dataframe(
self,
chromosome_probabilities,
force_mutation=False,
):
r"""
Given a set of individual chromosome fitnesses, generate database of pairs of
parent chromosomes, with probability proportional to the fitness of both parents.
"""
self.log_print(
[
"Setting up chromosome pair dataframe with initial probabilities",
chromosome_probabilities,
]
)
if len(chromosome_probabilities) == 1:
self.log_print(
["There is only one chromosome; not constructing selection database."]
)
return
# Register gene pool
for c in chromosome_probabilities:
model = self.map_chromosome_to_model(c)
gene_probability = pd.Series(
{
"model": model,
"chromosome": c,
"f_score": self.model_f_score(model),
"probability": chromosome_probabilities[c],
"generation": self.genetic_generation,
}
)
self.gene_pool.loc[len(self.gene_pool)] = gene_probability
# Construct df of pairs of chromosomes from the gene pool, where the probability of that
# pair being selected is the product of their individual fitnesses
t2 = time.time()
chromosome_combinations = list(
itertools.combinations(list(chromosome_probabilities.keys()), 2)
)
eg_combo = chromosome_combinations[0]
min_cut_pt = int(len(eg_combo[0]) * 0.25)
max_cut_pt = int(len(eg_combo[0]) * 0.75) + 1
self.log_print(
[
"example chrom combination : {}. \n min/max cut locations = {}/{}".format(
eg_combo, min_cut_pt, max_cut_pt
)
]
)
pair_data = []
count_good_pairs = 0
for c1, c2 in chromosome_combinations:
pair_prob = (
chromosome_probabilities[c1] * chromosome_probabilities[c2]
) # TODO better way to get pair prob?
# for cut1 in range(1, len(c1)-2):
if pair_prob > 0:
count_good_pairs += 1
self.log_print(
["Nonzero prob pair: {} & {}, prob = {}".format(c1, c2, pair_prob)]
)
for cut1 in range(min_cut_pt, max_cut_pt):
this_pair_df = {
"c1": c1,
"c2": c2,
"probability": pair_prob, # np.round(pair_prob, 2),
"cut1": cut1,
"c1_prob": chromosome_probabilities[c1],
"c2_prob": chromosome_probabilities[c2],
"force_mutation": force_mutation,
}
pair_data.append(this_pair_df)
self.chrom_pair_df = pd.DataFrame.from_dict(pair_data)
# normalise probabilities
try:
self.chrom_pair_df.probability = self.chrom_pair_df.probability.astype(
float
)
self.chrom_pair_df.probability = (
self.chrom_pair_df.probability / self.chrom_pair_df.probability.sum()
)
except:
self.log_print(
["Failing at final generation. chrom pair df:", self.chrom_pair_df]
)
self.log_print(
[
"starting chromosome pair dataframe setup. {} combinations in total from {} non-zero prob pairs. took {} sec and has len {}".format(
len(chromosome_combinations),
count_good_pairs,
np.round(time.time() - t2, 3),
len(self.chrom_pair_df),
)
]
)
self.log_print(
[
"Probs after preparing df:",
self.chrom_pair_df[["c1", "c2", "probability"]],
]
)
def get_pair_selection_order(self):
r"""
Use the probabilities of parental selection to define the order in which to generate offspring.
It is cheaper to perform this once than call the database repeatedly.
:return list pair_selection_order: list of tuples of the order in which to pass
the model pairs to the crossover mechanism to generate offspring
"""
pair_idx = self.chrom_pair_df.index.values
probabilities = self.chrom_pair_df.probability.values
# only keep nonzero probs
pair_idx = pair_idx[probabilities > 0]
probabilities = probabilities[probabilities > 0]
self.log_print(
[
"get_pair_selection_order probabilities: ",
probabilities,
"\n {} distinct".format(len(probabilities)),
"\n sum:",
np.sum(probabilities),
]
)
probabilities /= np.sum(probabilities)
n_samples = len(probabilities)
self.log_print(
["Getting {} samples from chromosome probabilities".format(n_samples)]
)
t1 = time.time()
pair_selection_order = np.random.choice(
a=pair_idx, size=n_samples, p=probabilities, replace=False
)
self.log_print(
[
"after {} s, pair_selection_order has {} elements ({} unique): \n {}".format(
np.round(time.time() - t1, 3),
len(pair_selection_order),
len(set(pair_selection_order)),
repr(pair_selection_order),
)
]
)
return pair_selection_order
######################
# Implement entire genetic algorithm iteration
######################
def consolidate_generation(self, model_fitnesses, **kwargs):
r"""
Following the training of all models on a generation, consolidate that generation.
This involves determining the strongest models from the generation,
and constructing the database of parent-pairs and their associated selection probabilities.
:param dict model_fitnesses: the fitness of each model in this generation according to the
chosen objective function.
"""
self.fitness_at_generation[self.genetic_generation] = model_fitnesses
self.models_ranked_by_fitness[self.genetic_generation] = sorted(
model_fitnesses, key=model_fitnesses.get, reverse=True
)
self.log_print(
[
"GA step. model ranked by fitness:",
self.models_ranked_by_fitness[self.genetic_generation],
]
)
self.get_elite_models(
model_fitnesses=model_fitnesses, num_protected_elite_models=2
)
self.chromosome_selection_probabilities = self.get_selection_probabilities(
model_fitnesses=model_fitnesses,
)
t_init = time.time()
self.prepare_chromosome_pair_dataframe(
chromosome_probabilities=self.chromosome_selection_probabilities
)
def genetic_algorithm_step(self, model_fitnesses, **kwargs):
r"""
Perform a complete step of the genetic algorithm, assuming all of the required steps have been performed.
That is, the database for parent selection must already be available.
:param dict model_fitnesses: the fitness of each model in this generation according to the
chosen objective function.
:returns list new_models: set of models to place on the next generation.
"""
# get the order to iterate through chromosome pairs
self.log_print(["Genetic algorithm step {}".format(self.genetic_generation)])
pair_selection_order = self.get_pair_selection_order()
init_num_chrom_pairs = len(pair_selection_order)
pair_selection_order = iter(pair_selection_order)
elite_models = list(
self.elite_models[
self.elite_models.generation == self.genetic_generation
].model
)
self.log_print(["elite models to start off with:", elite_models])
proposed_chromosomes = [
self.chromosome_string(self.map_model_to_chromosome(mod))
for mod in elite_models
] # list of chromosome strings to return
input_models = list(model_fitnesses.keys())
num_models_for_next_generation = len(input_models)
self.log_print(
["Num models reqd for generation:", num_models_for_next_generation]
)
num_loops_to_find_new_chromosome = 0
force_mutation = False
num_genes_to_force_mutate = 0
t_init = time.time()
while len(proposed_chromosomes) < num_models_for_next_generation:
# selection = self.selection()
try:
selected_id = next(pair_selection_order)
except StopIteration:
self.log_print(["no pairs remaining."]) # TODO now what?
raise
selected_entry = self.chrom_pair_df.loc[selected_id]
selection = {
"chromosome_1": selected_entry["c1"],
"chromosome_2": selected_entry["c2"],
"other_data": {
"cut": int(selected_entry["cut1"]),
"force_mutation": bool(selected_entry["force_mutation"]),
},
}
suggested_chromosomes = self.crossover(selection=selection)
suggested_chromosomes = self.mutation(
chromosomes=suggested_chromosomes,
force_mutation=selection["other_data"]["force_mutation"],
)
c0_str = self.chromosome_string(suggested_chromosomes[0])
c1_str = self.chromosome_string(suggested_chromosomes[1])
for c in [c0_str, c1_str]:
if (
c not in proposed_chromosomes
and c != self.all_zero_chromosome_string
):
proposed_chromosomes.append(c)
self.log_print(
[
"num proposed chromosome now: {} of {}".format(
len(proposed_chromosomes),
num_models_for_next_generation,
),
"new chromosome:",
c,
]
)
birth = pd.Series(
{
"child": self.map_chromosome_to_model(c),
"chromosome_child": c,
"chromosome_parent_a": selection["chromosome_1"],
"chromosome_parent_b": selection["chromosome_2"],
"parent_a": self.map_chromosome_to_model(
selection["chromosome_1"]
),
"parent_b": self.map_chromosome_to_model(
selection["chromosome_2"]
),
"generation": self.genetic_generation,
"f_score": self.chromosome_f_score(c),
}
)
self.birth_register.loc[len(self.birth_register)] = birth
self.log_print(["Registering birth"])
if len(self.chrom_pair_df) == 0:
# already tried every available pair
num_genes_to_force_mutate += 1 # TODO increase number of genes to flip to diversify population when repetitive
self.log_print(
[
"Redrawing chromosome pair selection dataframe, enforcing mutation on {} genes".format(
num_genes_to_force_mutate
)
]
)
self.prepare_chromosome_pair_dataframe(
chromosome_probabilities=self.chromosome_selection_probabilities,
force_mutation=True
# force_mutation=num_genes_to_force_mutate
)
# chop extra chromosomes if generated
proposed_chromosomes = proposed_chromosomes[:num_models_for_next_generation]
self.previously_considered_chromosomes.extend(
[self.chromosome_string(r) for r in proposed_chromosomes]
)
# self.delta_f_by_generation[self.genetic_generation] = delta_f_score
self.chromosomes_at_generation[self.genetic_generation] = [
self.chromosome_string(r) for r in proposed_chromosomes
]
new_models = [self.map_chromosome_to_model(mod) for mod in proposed_chromosomes]
self.log_print(
[
"Genetic alg num new models:{}".format(len(new_models)),
"({} unique)".format(len(set(list(new_models)))),
]
)
self.genetic_generation += 1
return new_models
class GeneticAlgorithmFullyConnectedLikewisePauliTerms(GeneticAlgorithmQMLA):
r"""
Exact structure of :class:`~qmla.GeneticAlgorithmQMLA`, where the available terms
are assumed to follow conventional pauliSet format,
and all sites are connected.
e.g. terms of the form
pauliSet_1J2_xJx_d2, pauliSet_1J2_yJy_d2, pauliSet_1J2_zJz_d2,
:param int num_sites: dimension to permit model search within
:param list base_terms: terms to use with pauliSet-type terms
"""
def __init__(self, num_sites, base_terms=["x", "y", "z"], **kwargs):
terms = []
for i in range(1, 1 + num_sites):
for j in range(i + 1, 1 + num_sites):
for t in base_terms:
new_term = "pauliSet_{i}J{j}_{o}J{o}_d{N}".format(
i=i,
j=j,
o=t,
N=num_sites,
)
terms.append(new_term)
super().__init__(genes=terms, num_sites=num_sites, **kwargs)
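# Worked example: num_sites = 3 with base_terms ['x', 'y', 'z'] yields the 9 genes
# pauliSet_1J2_xJx_d3, ..., pauliSet_2J3_zJz_d3 (3 site pairs x 3 operators).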
def multidimensional_shifting(num_samples, sample_size, elements, probabilities):
# replicate probabilities as many times as `num_samples`
replicated_probabilities = np.tile(probabilities, (num_samples, 1))
# get random shifting numbers & scale them correctly
random_shifts = np.random.random(replicated_probabilities.shape)
random_shifts /= random_shifts.sum(axis=1)[:, np.newaxis]
# shift by numbers & find largest (by finding the smallest of the negative)
shifted_probabilities = random_shifts - replicated_probabilities
return np.argpartition(shifted_probabilities, sample_size, axis=1)[:, :sample_size]
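# Minimal usage sketch (illustrative values): draw 5 samples of 2 distinct indices
# each, biased towards higher-probability elements:
# idx = multidimensional_shifting(num_samples=5, sample_size=2,
#                                 elements=['a', 'b', 'c', 'd'],
#                                 probabilities=[0.1, 0.2, 0.3, 0.4])
# NOTE: `elements` is unused by the body; the returned (5, 2) array holds indices
# into `probabilities`, not the elements themselves.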
| 38.840436 | 148 | 0.583822 | 4,257 | 39,190 | 5.134367 | 0.131783 | 0.011209 | 0.018118 | 0.015784 | 0.282838 | 0.203367 | 0.153955 | 0.122844 | 0.10587 | 0.067621 | 0 | 0.007408 | 0.331768 | 39,190 | 1,008 | 149 | 38.878968 | 0.827211 | 0.209875 | 0 | 0.13229 | 0 | 0.001422 | 0.089338 | 0.00341 | 0 | 0 | 0 | 0.005952 | 0.002845 | 1 | 0.039829 | false | 0 | 0.01707 | 0 | 0.092461 | 0.051209 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
659fdd7538c48062d860fb75e27940d0c22d575b | 4877 | py | Python | src/pvt_model/pvt_system/pvt_collector/absorber.py | BenWinchester/PVTModel | 6bf3976b06f406f632e0a9e525cd8b05359da239 | ["MIT"] | 1 | 2021-05-11T14:15:11.000Z | 2021-05-11T14:15:11.000Z | src/pvt_model/pvt_system/pvt_collector/absorber.py | BenWinchester/PVTModel | 6bf3976b06f406f632e0a9e525cd8b05359da239 | ["MIT"] | 14 | 2021-02-23T11:53:08.000Z | 2021-11-16T10:45:31.000Z | src/pvt_model/pvt_system/pvt_collector/absorber.py | BenWinchester/PVTModel | 6bf3976b06f406f632e0a9e525cd8b05359da239 | ["MIT"] | null | null | null | #!/usr/bin/python3.7
########################################################################################
# pvt_collector/absorber.py - Represents a absorber within a PVT panel.
#
# Author: Ben Winchester
# Copyright: Ben Winchester, 2021
########################################################################################
"""
The absorber module for the PV-T model.
This module represents a thermal absorber within a PV-T panel.
"""
import logging
import math
from ..__utils__ import (
CollectorParameters,
PVT_SYSTEM_MODEL_LOGGER_NAME,
OpticalLayerParameters,
)
from .__utils__ import (
OpticalLayer,
)
__all__ = ("Collector",)
# Get the logger for the run.
logger = logging.getLogger(PVT_SYSTEM_MODEL_LOGGER_NAME)
class Collector(OpticalLayer):
"""
Represents the thermal absorber (lower) layer of the PV-T panel.
.. attribute:: htf_heat_capacity
The heat capacity of the heat-transfer fluid passing through the absorber,
measured in Joules per kilogram Kelvin.
.. attribute:: mass_flow_rate
The mass flow rate of heat-transfer fluid through the absorber, measured in
kilograms per second.
.. attribute:: output_water_temperature
The temperature of the water outputted by the layer, measured in Kelvin.
.. attribute:: pump_power
The power consumed by the water pump, measured in Watts.
"""
    # Private Attributes:
#
# .. attribute:: _mass_flow_rate
    # The mass flow rate of heat-transfer fluid through the absorber, measured in
# Litres per hour.
#
def __init__(self, absorber_params: CollectorParameters) -> None:
"""
        Instantiate an absorber layer.
:param absorber_params:
The parameters needed to instantiate the absorber.
"""
super().__init__(
OpticalLayerParameters(
absorber_params.conductivity,
absorber_params.density,
absorber_params.heat_capacity,
absorber_params.thickness,
absorber_params.transmissivity,
absorber_params.absorptivity,
absorber_params.emissivity,
)
)
self.htf_heat_capacity = absorber_params.htf_heat_capacity
self.inner_pipe_diameter = absorber_params.inner_pipe_diameter
self.length = absorber_params.length
self._mass_flow_rate = absorber_params.mass_flow_rate
self.number_of_pipes = absorber_params.number_of_pipes
self.outer_pipe_diameter = absorber_params.outer_pipe_diameter
self.pipe_density = absorber_params.pipe_density
def __repr__(self) -> str:
"""
Returns a nice representation of the layer.
:return:
A `str` giving a nice representation of the layer.
"""
return (
"Collector("
f"absorptivity: {self.absorptivity}, "
f"conductivity: {self.conductivity}W/m^2*K, "
f"desntiy: {self.density}kg/m^3, "
f"emissivity: {self.emissivity}, "
f"heat_capacity: {self.heat_capacity}J/kg*K, "
f"htf_heat_capacity: {self.htf_heat_capacity}J/kg*K, "
f"inner_pipe_diameter: {self.inner_pipe_diameter}m, "
f"length: {self.length}m, "
f"mass_flow_rate: {self.mass_flow_rate}kg/s, "
f"outer_pipe_diameter: {self.outer_pipe_diameter}m, "
f"thickness: {self.thickness}m, "
f"transmissivity: {self.transmissivity}"
")"
)
@property
def htf_surface_area(self) -> float:
"""
Returns the contact area between the HTF and the absorber, measured in m^2.
:return:
The contact surface area, between the absorber (i.e., the pipes) and the
HTF passing through the pipes.
A single pass is assumed, with multiple pipes increasing the area, rather
than the length, of the absorber.
"""
return (
self.number_of_pipes # [pipes]
* math.pi
* self.inner_pipe_diameter # [m]
* self.length # [m]
)
@property
def htf_volume(self) -> float:
"""
        Returns the volume of HTF that can be held within the absorber, measured in m^3.
:return:
The volume of the HTF within the absorber, measured in meters cubed.
"""
return (
self.number_of_pipes # [pipes]
* math.pi
* (self.inner_pipe_diameter / 2) ** 2 # [m^2]
* self.length
)
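    # Illustrative check (hypothetical numbers, not from the original file):
    # 10 pipes of inner diameter 0.01 m and length 1 m give
    # htf_surface_area = 10 * pi * 0.01 * 1 ~= 0.314 m^2 and
    # htf_volume = 10 * pi * 0.005 ** 2 * 1 ~= 7.85e-4 m^3.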
@property
def mass_flow_rate(self) -> float:
"""
Return the mass-flow rate in kilograms per second.
:return:
d/dt(M) in kg/s
"""
return self._mass_flow_rate / (3600) # [kg/s]
| 29.737805 | 88 | 0.591552 | 551 | 4,877 | 5.023593 | 0.266788 | 0.080925 | 0.047688 | 0.04552 | 0.195087 | 0.131503 | 0.095376 | 0.070087 | 0.070087 | 0.070087 | 0 | 0.004922 | 0.291778 | 4,877 | 163 | 89 | 29.920245 | 0.796468 | 0.369694 | 0 | 0.176471 | 0 | 0 | 0.187886 | 0.080633 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073529 | false | 0 | 0.058824 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a0448fb4145aa05b889365b7c74e7807c901fc | 4,319 | py | Python | base/models.py | EasyKey-PI2-2017-2/EasyKey | 0ccc94508ba815dd99f6265247094cf64e092597 | [
"MIT"
] | null | null | null | base/models.py | EasyKey-PI2-2017-2/EasyKey | 0ccc94508ba815dd99f6265247094cf64e092597 | [
"MIT"
] | 7 | 2020-02-12T00:10:52.000Z | 2022-03-11T23:18:13.000Z | base/models.py | EasyKey-PI2-2017-2/EasyKey | 0ccc94508ba815dd99f6265247094cf64e092597 | [
"MIT"
] | 1 | 2017-12-11T10:24:19.000Z | 2017-12-11T10:24:19.000Z | from django.db import models
from model_utils.models import TimeStampedModel
import numpy as np
import cv2
import glob
import time
#import picamera
WHITE_VALUE = 255
SCALE_VALUE_CM = 2.29
class Payment(TimeStampedModel):
value = models.FloatField(verbose_name="Valor da Compra")
token = models.CharField('Token', max_length=200)
timestamp = models.DateTimeField()
def __str__(self):
return self.value
class Key():
def __init__(self):
self.key = 0
self.templates = []
self.match = 0
self.contour = 0
self.scale = 0
self.serial = 0
def load_key(self):
        # TODO Change this once the physical structure is ready
        # TODO Take the photo with PiCamera and save it to the path below
        # camera = picamera.PiCamera()
        # camera.capture('media/chave.jpg')
        # camera.close()
        # Code above disabled because it will only work on the Raspberry Pi
img = cv2.imread('media/a2.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite('media/loadgray.jpg', gray)
self.key = gray
def load_templates(self):
key_models = glob.glob('media/templates/*.jpg')
for path in key_models:
self.templates.append(path)
def verify_key_model(self):
match = False
for template in self.templates:
img_template = cv2.imread(template, 0)
w, h = img_template.shape[::-1]
res = cv2.matchTemplate(self.key, img_template,
cv2.TM_CCOEFF_NORMED)
threshold = 0.85
loc = np.where(res >= threshold)
if len(loc[0]) > 0:
match = True
break
else:
match = False
return match
def define_contour(self):
        # transformations for the key image
        # TODO check the key size so we crop at the right size
key = self.key[132:270, 350:410]
blured = cv2.GaussianBlur(key, (5, 5), 0)
midpoint = (blured.max() - blured.min())//2 + blured.min()
_, key_limit = cv2.threshold(blured, midpoint, WHITE_VALUE,
cv2.THRESH_BINARY_INV)
self.contour = key_limit
cv2.imwrite('media/contorno.jpg', key_limit)
def define_scale(self):
        # TODO once we have the final scale, uncomment this
        # transformations for the scale image
scale = self.key[85:240, 470:490]
#scale = cv2.imread('media/testescale.jpg')
# scale = cv2.cvtColor(scale, cv2.COLOR_BGR2GRAY)
midpoint = (scale.max() - scale.min())//2 + scale.min()
_, scale_limit = cv2.threshold(scale, midpoint, WHITE_VALUE,
cv2.THRESH_BINARY_INV)
scale_final = scale_limit.transpose()
cv2.imwrite('media/escala_scale.jpg', scale)
cv2.imwrite('media/escala.jpg', scale_limit)
first = 0
last = 0
for y, row in enumerate(scale_final):
for x, pixel in enumerate(row):
if pixel == 255 and first == 0:
first = x
elif pixel == 255 and x < first:
first = x
elif pixel == 255 and x > last:
last = x
size = last-first
        # SCALE_VALUE_CM in the return is the known reference length, in cm
self.scale = SCALE_VALUE_CM/size
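        # e.g. if the reference marker spans 100 pixels, scale = 2.29 / 100
        # = 0.0229 cm per pixel (illustrative numbers)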
def gcode(self):
f = open('media/gcode.nc', 'w')
f.write(self.g0(0, 0.365))
for x, row in enumerate(self.contour):
for y, pixel in enumerate(row):
if pixel == 255:
reference = (x, y)
break
if pixel == 255:
break
for x, row in enumerate(self.contour):
for y, pixel in enumerate(row):
if pixel == 255:
f.write(self.g1(reference, (x,y), self.scale))
break
f.write('M2')
f.close()
def g0(self, x, y):
return 'G0 X{} Y{}\n'.format(y, x)
def g1(self, reference, pixel, scale):
difx = reference[0] - pixel[0]
dify = reference[1] - pixel[1]
        return 'G1 X{} Y{}\n'.format(dify * scale, difx * scale)
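# Illustrative output (hypothetical values, not part of the original file):
# Key().g0(0, 0.365) returns 'G0 X0.365 Y0\n'; note that g0/g1 swap the axes,
# emitting the pixel row as the G-code Y coordinate and the column as X.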
| 32.969466 | 75 | 0.552906 | 532 | 4,319 | 4.398496 | 0.340226 | 0.017949 | 0.025641 | 0.024359 | 0.114957 | 0.114957 | 0.114957 | 0.052991 | 0.052991 | 0.052991 | 0 | 0.038842 | 0.344293 | 4,319 | 130 | 76 | 33.223077 | 0.787429 | 0.139384 | 0 | 0.175258 | 0 | 0 | 0.045369 | 0.011612 | 0 | 0 | 0 | 0.007692 | 0 | 1 | 0.103093 | false | 0 | 0.061856 | 0.020619 | 0.257732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a2c0451d6fcd55b52771b8bccb21b9c2de2a07 | 507 | py | Python | disentanglement/tools/format-conversion/output-from-py-to-graph.py | yy2111/iSPY | 454ed4ec6a18b9e2dd3e13114b1263760054401e | [
"MIT"
] | 3 | 2022-03-05T22:12:41.000Z | 2022-03-09T08:45:53.000Z | disentanglement/tools/format-conversion/output-from-py-to-graph.py | yy2111/iSPY | 454ed4ec6a18b9e2dd3e13114b1263760054401e | [
"MIT"
] | null | null | null | disentanglement/tools/format-conversion/output-from-py-to-graph.py | yy2111/iSPY | 454ed4ec6a18b9e2dd3e13114b1263760054401e | [
"MIT"
] | 2 | 2021-09-22T09:32:15.000Z | 2022-03-04T02:45:09.000Z | #!/usr/bin/env python3
from __future__ import print_function
import argparse
import logging
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert the output from a run of the python system into content that can be evaluated.')
args = parser.parse_args()
done_training = False
for line in sys.stdin:
if line.startswith("#"):
continue
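        # keep only the final '/'-separated component of the line, then tokenise it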
line = line.split('/')[-1].strip()
parts = line.split()
print(line)
| 25.35 | 138 | 0.658777 | 64 | 507 | 4.984375 | 0.734375 | 0.056426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005195 | 0.240631 | 507 | 19 | 139 | 26.684211 | 0.823377 | 0.04142 | 0 | 0 | 0 | 0 | 0.197938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a3efffa00bd81d1b4b14b69ac25739ed690bca | 479 | py | Python | ET_Cust/signals.py | Noisyfox/EaTogether | d5b114881162fc34e71873bcacaacbe6b8da3a79 | [
"MIT"
] | null | null | null | ET_Cust/signals.py | Noisyfox/EaTogether | d5b114881162fc34e71873bcacaacbe6b8da3a79 | [
"MIT"
] | null | null | null | ET_Cust/signals.py | Noisyfox/EaTogether | d5b114881162fc34e71873bcacaacbe6b8da3a79 | [
"MIT"
] | null | null | null | from paypal.standard.models import ST_PP_COMPLETED
from paypal.standard.ipn.signals import valid_ipn_received
from ET.models import Customer
def top_up_notification(sender, **kwargs):
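    # PayPal IPN handler: on a completed payment, credit the gross amount to
    # the customer's balance; ipn_obj.custom is assumed to carry the customer
    # primary key set when the payment was created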
ipn_obj = sender
if ipn_obj.payment_status == ST_PP_COMPLETED:
customer = Customer.objects.get(pk=ipn_obj.custom)
customer.available_balance = customer.available_balance + float(ipn_obj.mc_gross)
customer.save()
valid_ipn_received.connect(top_up_notification)
| 34.214286 | 89 | 0.782881 | 67 | 479 | 5.298507 | 0.522388 | 0.067606 | 0.101408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139875 | 479 | 13 | 90 | 36.846154 | 0.86165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a4f4b7c54ab1a3f76fc774d6be3303df19883e | 2,070 | py | Python | lecture01/peak_finding.py | dom-lee/MIT-6.006 | c8b4d54da3af2810ea27add176644c1b2a983526 | [
"MIT"
] | null | null | null | lecture01/peak_finding.py | dom-lee/MIT-6.006 | c8b4d54da3af2810ea27add176644c1b2a983526 | [
"MIT"
] | null | null | null | lecture01/peak_finding.py | dom-lee/MIT-6.006 | c8b4d54da3af2810ea27add176644c1b2a983526 | [
"MIT"
] | null | null | null | from typing import *
"""
Lecture 1
Peak Finding (1-D and 2-D)
"""
def find_1d_peak(arr: List[int]) -> int:
"""
Complexity: Θ(log(n))
-> T(n) = T(n/2) + Θ(1)
"""
n = len(arr)
if n == 1:
return arr[0]
elif n == 2:
return arr[0] if arr[0] >= arr[1] else arr[1]
elif arr[n // 2] < arr[(n // 2) - 1]:
return find_1d_peak(arr[:n // 2])
elif arr[n // 2] < arr[(n // 2) + 1]:
return find_1d_peak(arr[(n // 2) + 1:])
return arr[n // 2]
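# e.g. find_1d_peak([1, 3, 2]) inspects arr[1] = 3, finds both neighbours
# smaller, and returns 3; each recursive step halves the range, hence Θ(log(n)).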
def find_2d_peak(arr: List[List[int]]) -> int:
"""
Complexity: Θ(m*log(n))
-> T(n, m) = T(n/2, m) + Θ(m)
"""
n = len(arr)
m = len(arr[0])
middle_row = n // 2
max_in_row = float('-inf')
column_index = 0
for j in range(m):
if arr[middle_row][j] > max_in_row:
max_in_row = arr[middle_row][j]
column_index = j
if n == 1:
return max_in_row
elif n == 2:
if arr[middle_row][column_index] >= arr[middle_row - 1][column_index]:
return max_in_row
else:
return find_2d_peak(arr[:1])
elif arr[middle_row][column_index] <= arr[middle_row - 1][column_index]:
return find_2d_peak(arr[:middle_row])
elif arr[middle_row][column_index] <= arr[middle_row + 1][column_index]:
return find_2d_peak(arr[middle_row + 1:])
return max_in_row
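# e.g. on the matrix below, each recursion scans one row (Θ(m)) and halves the
# number of rows, so only Θ(m*log(n)) cells are touched instead of all n * m.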
if __name__ == "__main__":
array = [1, 2, 3, 4, 5, 6, 10, 8, 2]
matrix = [[4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2],
[5, 6, 7, 8, 9, 8, 7, 6, 5, 4, 3],
[6, 7, 8, 9, 10, 9, 8, 7, 6, 5, 4],
[7, 8, 9, 10, 11, 10, 9, 8, 7, 6, 5],
[8, 9, 10, 11, 12, 11, 10, 9, 8, 7, 6],
[7, 8, 9, 10, 11, 10, 9, 8, 7, 6, 5],
[6, 7, 8, 9, 10, 9, 8, 7, 6, 5, 4],
[5, 6, 7, 8, 9, 8, 7, 6, 5, 4, 3],
[4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2],
[3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1],
[2, 3, 4, 5, 6, 5, 4, 3, 2, 1, 0]]
print(find_1d_peak(array))
print(find_2d_peak(matrix))
| 26.538462 | 78 | 0.460386 | 375 | 2,070 | 2.389333 | 0.146667 | 0.026786 | 0.133929 | 0.035714 | 0.571429 | 0.457589 | 0.422991 | 0.422991 | 0.422991 | 0.422991 | 0 | 0.136966 | 0.343961 | 2,070 | 77 | 79 | 26.883117 | 0.522828 | 0.047826 | 0 | 0.306122 | 0 | 0 | 0.006356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.020408 | 0 | 0.285714 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a6ba776b62472879021cf9c70beb5b398a0705 | 26,220 | py | Python | utils.py | mion666459/thermal_signature_drone_detection | 04298d18078e5273881fc9419a097f109b6ebe7a | [
"Apache-2.0"
] | 5 | 2021-05-28T04:18:29.000Z | 2022-02-06T11:26:02.000Z | utils.py | mion666459/thermal_signature_drone_detection | 04298d18078e5273881fc9419a097f109b6ebe7a | [
"Apache-2.0"
] | null | null | null | utils.py | mion666459/thermal_signature_drone_detection | 04298d18078e5273881fc9419a097f109b6ebe7a | [
"Apache-2.0"
] | 4 | 2021-07-18T11:16:24.000Z | 2022-03-25T20:50:44.000Z | # standard imports
import cv2
import tensorflow as tf
import numpy as np
import colorsys
import random
"""
Utility functions to for training, pre and post processing
"""
def read_class_names(class_file_name):
""" function to load class names from a file """
# open class text file
with open(class_file_name, 'r') as f:
        # initialise empty list to store names
names = []
# iterate over class names
for name in f:
# append class name from each line
names.append(name.strip('\n'))
return names
def transform_images(image, size):
""" transform image to specified input shape for training and scale pixel values to be between 0 and 1 """
# convert image to tensor
image = tf.convert_to_tensor(image)
# add batch dimension to image
image = tf.expand_dims(image, axis = 0)
# resize image to specified size
image = tf.image.resize_with_pad(image, size, size)
# standardize image
image = image / 255
# remove batch dimension
image = tf.squeeze(image, axis = 0)
return image.numpy()
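# e.g. transform_images(frame, 416) letterboxes any H x W x C frame to a
# 416 x 416 x C array with pixel values scaled into [0, 1].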
def decode(yolo_output, num_of_anchor_bbox, classes, strides, anchors, index):
""" function to decode the outputs from yolo_v3 """
""" takes in tensor of shape (batch_size, gridsize_x, gridsize_y, number of anchor boxes, number of classes) """
""" returns tesnor of shape (batch_size, gridsize_x, gridsize_y, number of anchor boxes, number of classes) """
# takes in original anchors and process to scaled anchors based on strides for respective scales
anchors_scaled = (np.array(anchors).T/strides).T
# obtain dimensions from yolo_output
conv_shape = tf.shape(yolo_output)
batch_size = conv_shape[0]
grid_size = conv_shape[1:3]
# reshape yolo_output
yolo_output = tf.reshape(yolo_output, (batch_size, grid_size[0], grid_size[1], num_of_anchor_bbox, 5 + classes))
# split yolo_output along last axis to extract features
raw_dx_dy, raw_dw_dh, raw_objectiveness, raw_class_probs = tf.split(yolo_output, (2, 2, 1, classes), axis = -1)
# create grid where grid[x][y] == (y, x)
xy_grid = tf.meshgrid(tf.range(grid_size[1]), tf.range(grid_size[0]))
# reshape to [gx, gy, 1, 2] and cast to float32 data type
xy_grid = tf.expand_dims(tf.stack(xy_grid, axis = -1), axis = 2)
xy_grid = tf.cast(xy_grid, tf.float32)
# calculate the center position of the prediction box (train_input_size):
pred_xy = (tf.sigmoid(raw_dx_dy) + xy_grid) * strides[index]
# calculate the length and width of the prediction box (train_input_size):
pred_wh = (tf.exp(raw_dw_dh) * anchors_scaled[index]) * strides[index]
# concatenate pred_xy and pred_wh
pred_xywh = tf.concat([pred_xy, pred_wh], axis = -1)
# objectiveness score
pred_objectiveness = tf.sigmoid(raw_objectiveness)
# class probabilities
pred_prob = tf.sigmoid(raw_class_probs)
# concatenate decoded results
pred = tf.concat([pred_xywh, pred_objectiveness, pred_prob], axis = -1)
return pred
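# e.g. for a 416 x 416 input at the stride-32 scale, decode yields a tensor of
# shape (batch, 13, 13, num_of_anchor_bbox, 5 + classes), with the xywh entries
# expressed in input-image pixels (illustrative sizes).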
def bbox_iou(boxes1, boxes2):
""" function to determine iou from 2 boxes for tensors """
# obtain area of from the 2 boxes
boxes1_area = boxes1[..., 2] * boxes1[..., 3]
boxes2_area = boxes2[..., 2] * boxes2[..., 3]
# obtain boxes where properties are (x_min, y_min, x_max, y_max)
boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5, boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis = -1)
boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5, boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis = -1)
# obtain maximum coordinates amongst 2 box at top left corner
left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
# obtain minimum coordinates amongst 2 box at bottom right corner
right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
# obtain a positive intersection
inter_section = tf.maximum(right_down - left_up, 0.0)
# obtain intersection area
inter_area = inter_section[..., 0] * inter_section[..., 1]
# obtain union area
union_area = boxes1_area + boxes2_area - inter_area
# return iou
return 1.0 * inter_area / union_area
def bbox_giou(boxes1, boxes2):
""" function to determine giou (generalised iou) from 2 boxes """
# obtain boxes where properties are (x_min, y_min, x_max, y_max)
boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5, boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis = -1)
boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5, boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis = -1)
# obtain boxes where properties are (x_min, y_min, x_max, y_max)
boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),
tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis = -1)
boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),
tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis = -1)
# obtain area of from the 2 boxes
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
# obtain maximum coordinates amongst 2 box at top left corner
left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
# obtain minimum coordinates amongst 2 box at bottom right corner
right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
# obtain a positive intersection
inter_section = tf.maximum(right_down - left_up, 0.0)
# obtain intersection area
inter_area = inter_section[..., 0] * inter_section[..., 1]
# obtain union area
union_area = boxes1_area + boxes2_area - inter_area
# calculate the iou value between the two bounding boxes
iou = inter_area / union_area
# calculate the coordinates of the upper left corner and the lower right corner of the smallest closed convex
# surface
enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])
enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])
enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
# calculate the area of the smallest closed convex surface C
enclose_area = enclose[..., 0] * enclose[..., 1]
# calculate the GIoU value according to the GioU formula
giou = iou - 1.0 * (enclose_area - union_area) / enclose_area
return giou
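# Worked example (illustrative): two identical boxes (50, 50, 20, 20) in
# (x_center, y_center, w, h) format intersect exactly, so bbox_iou = 1.0 and,
# with no enclosing-area surplus, bbox_giou = 1.0 as well; disjoint boxes give
# iou = 0 and a giou that goes negative as they move further apart.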
def loss_func(pred, conv, label, bboxes, num_classes, train_input_size, iou_loss_threshold):
""" loss function to compiute losses comprising of giou, objectiviness and class probs losses for training """
""" giou replaces l2 norm losses of x, y, w, h as an improvement from original yolo_v3 """
# obtain number of classes
num_classes = num_classes
# obtain shape of raw yolo_v3 output (pre-decode)
conv_shape = tf.shape(conv)
# obtain batch size of raw yolo_v3 output (pre-decode)
batch_size = conv_shape[0]
# obtain output size of raw yolo_v3 output (pre-decode)
output_size = conv_shape[1]
# obtain train input size
train_input_size = tf.cast(train_input_size, tf.float32)
# reshape raw conv output
conv = tf.reshape(conv, (batch_size, output_size, output_size, 3, 5 + num_classes))
# obtain objectiveness scores and class probabilites for batch from raw conv output
conv_raw_objectiveness = conv[:, :, :, :, 4:5]
conv_raw_prob = conv[:, :, :, :, 5:]
# obtain predicted x, y, w, h and objectiveness scores for batch based on train_input_size post decode
pred_xywh = pred[:, :, :, :, 0:4]
pred_conf = pred[:, :, :, :, 4:5]
# obtain label x, y, w, h and objectiveness scores for batch based on train_input_size
label_xywh = label[:, :, :, :, 0:4]
respond_bbox = label[:, :, :, :, 4:5]
label_prob = label[:, :, :, :, 5:]
# obtain giou between predictions and labels
giou = tf.expand_dims(bbox_giou(pred_xywh, label_xywh), axis = -1)
# loss factor that gives higher weight to smaller boxes
bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (train_input_size ** 2)
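    # e.g. a box spanning the full input gives a scale of ~1.0 while a tiny
    # box approaches 2.0, so small-object localisation errors weigh more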
# obtain giou loss
giou_loss = respond_bbox * bbox_loss_scale * (1 - giou)
# obtain iou between predictions and labels
iou = bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
# find the value of iou with the largest prediction box
max_iou = tf.reduce_max(iou, axis = -1, keepdims = True)
    # if the largest iou is less than the threshold, the prediction box is
    # considered to contain no object and is treated as a background box
respond_bgd = (1.0 - respond_bbox) * tf.cast(max_iou < iou_loss_threshold, tf.float32)
# focal factor on objectiveness loss
conf_focal = tf.pow(respond_bbox - pred_conf, 2)
# calculate the objectiveness loss
    # the network should output a confidence of 1 for grid cells that contain
    # an object and 0 for cells that do not
conf_loss = conf_focal * (respond_bbox + respond_bgd) * \
tf.nn.sigmoid_cross_entropy_with_logits(labels = respond_bbox, logits = conv_raw_objectiveness)
# class probabilities loss
prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels = label_prob, logits = conv_raw_prob)
    # sum up losses and take mean across batch
giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis = [1,2,3,4]))
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis = [1,2,3,4]))
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis = [1,2,3,4]))
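    # guard against a NaN giou loss (e.g. when a batch carries no usable
    # labels) by replacing it with a zero so training can proceed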
if np.isnan(giou_loss):
giou_loss = tf.Variable(0, trainable = False, dtype = tf.float32)
return giou_loss, conf_loss, prob_loss
def postprocess_boxes(pred_bbox, original_image, train_input_size, score_threshold):
""" function to scale bboxes from train input size to original image size and remove bboxes with low scores """
    # valid scale for a box
valid_scale=[0, np.inf]
# turn bbox to array
pred_bbox = np.array(pred_bbox)
# obtain predicted x, y, w, h, objectiveness score, class probabilities
pred_xywh = pred_bbox[:, 0:4]
pred_objectiveness = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (x_org, y_org, w_org, h_org)
# obtain original image width and height
org_h, org_w = original_image.shape[:2]
# obtain resize ratio for height and width
resize_ratio_h = train_input_size / org_h
resize_ratio_w = train_input_size / org_w
# scale x, y, w, h to original x, y, w, h
pred_coor = np.concatenate([np.expand_dims(pred_xywh[:, 0] / resize_ratio_w, axis = -1),
np.expand_dims(pred_xywh[:, 1] / resize_ratio_h, axis = -1),
np.expand_dims(pred_xywh[:, 2] / resize_ratio_w, axis = -1),
np.expand_dims(pred_xywh[:, 3] / resize_ratio_h, axis = -1),], axis = -1)
# 2. (x_org, y_org, w_org, h_org) --> (xmin_org, ymin_org, xmax_org, ymax_org)
# obtain diagonal image coordinates
pred_coor = np.concatenate([pred_coor[:, :2] - pred_coor[:, 2:] * 0.5,
pred_coor[:, :2] + pred_coor[:, 2:] * 0.5], axis = -1)
    # 3. clip boxes that are out of range
# clip bboxes where xmin_org, ymin_org < 0 and xmax_org, ymax_org out of bounds
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis = -1)
    # mask to invalidate boxes where xmin > xmax or ymin > ymax
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis = -1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
# obtain index of class with max prob for each bbox
classes = np.argmax(pred_prob, axis = -1)
    # multiply max prob with objectiveness score for each bbox
scores = pred_objectiveness * pred_prob[np.arange(len(pred_coor)), classes]
# obtain score mask based on score threshold
score_mask = scores > score_threshold
# obtain combined mask
mask = np.logical_and(scale_mask, score_mask)
# obtain coordinates, scores and classes after mask
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
# return concatenated results
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis = -1)
def nms(bboxes, iou_threshold, sigma = 0.3, method = 'nms'):
""" function to implement non-maximal suppression / softmax non-maximal supression of bboxes """
""" takes bboxes with the shape of (num_of_box, 6), where 6 => (xmin, ymin, xmax, ymax, score, class) """
# remove duplicates in classes
classes_in_img = list(set(bboxes[:, 5]))
# initialise list to store best bboxes
best_bboxes = []
# iterate over each class
for cls in classes_in_img:
# get mask for bboxes with the same class and apply on bboxes to obtain array of bboxes with same class
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# iterate while there are still bboxes in cls_bboxes
while len(cls_bboxes) > 0:
# select index of the bbox with the highest score
max_ind = np.argmax(cls_bboxes[:, 4])
# select bbox with highest score
best_bbox = cls_bboxes[max_ind]
            # append to best_bboxes list
best_bboxes.append(best_bbox)
# obtain cls_bboxes without best bbox
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
# calculate iou of remaining bboxes with best bbox
iou = bbox_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou), ), dtype = np.float32)
# assert method to be either 'nms' or 'soft_nms'
assert method in ['nms', 'soft_nms']
if method == 'nms':
# obtain nms iou mask based on threshold
iou_mask = iou > iou_threshold
# apply mask on weights
weight[iou_mask.numpy()] = 0.0
if method == 'soft_nms':
# obtain soft_nms weights
weight = np.exp(-(1.0 * iou ** 2 / sigma))
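                # e.g. with sigma = 0.3, a box overlapping at iou = 0.5 keeps
                # exp(-0.25 / 0.3) ~= 0.43 of its score instead of being
                # removed outright (illustrative numbers)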
# apply weights on cls_bboxes
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
# obtain score mask of scores greater than zero
score_mask = cls_bboxes[:, 4] > 0.
# apply mask on cls_bboxes
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
def draw_bbox(image, bboxes, classes_file_path, show_label = True, show_confidence = True, Text_colors = (255,255,0),
rectangle_colors = '', tracking = False):
""" function to draw bboxes on image """
# obtain list of classes name
classes = read_class_names(classes_file_path)
# obtain length of classes
num_classes = len(classes)
# obtain shape of image
image_h, image_w, _ = image.shape
# obtain list of unique hsv (hue, saturation, value) for each class
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
# obtain unique rgb tuples from hsv tuples
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
# scale rgb from 0-1 to 0-255
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
# shuffle colors list with same seed
random.seed(0)
random.shuffle(colors)
random.seed(None)
# iterate over bbox in bboxes
for i, bbox in enumerate(bboxes):
# obtain coordinates of bbox
coor = np.array(bbox[:4], dtype = np.int32)
# obtain objectiveness score
score = bbox[4]
# obtain class index
class_ind = int(bbox[5])
        # use the supplied rectangle color if one is given, else pick the class color
bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
# obtain thickness of bboxes
bbox_thick = int(0.6 * (image_h + image_w) / 1000)
if bbox_thick < 1: bbox_thick = 1
# obtain font scale
fontScale = 0.75 * bbox_thick
# obtain tuples of min and max coordinates
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# generate bbox
cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick * 2)
# if show label is true
if show_label:
# get objectiveness score label
score_str = " {:.2f}".format(score) if show_confidence else ""
# if tracking show whole score without rounding
if tracking: score_str = " " + str(score)
# obtain label of class name with objectiveness score
label = "{}".format(classes[class_ind]) + score_str
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness = bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color,
thickness = cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1 - 4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, Text_colors, bbox_thick, lineType = cv2.LINE_AA)
return image
def detect_image(yolo_v3_model, image_paths, batch_frames, output_path, train_input_size, classes_file_path,
score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False,
rectangle_colors = ''):
""" function to take in image and apply bbox on it """
# obtain number of classes
num_of_classes = len(read_class_names(classes_file_path))
# create list to store images
original_images = []
    # iterate over images in chronological order (the last image is the one bboxes are drawn on)
for x in range(batch_frames):
# obtain original image
original_image = cv2.imread(image_paths[x])
# append original image to original_images list
original_images.append(original_image[:])
# convert original image to grayscale
image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# preprocess image
image = transform_images(image[:], train_input_size)
        # obtain concat frame if none exists yet
if x == 0:
concat_image = image[:]
# concatenate subsequent frames to concat_image
else:
concat_image = np.concatenate((concat_image, image), axis = -1)
# add batch dimensions to concatenated image
concat_image = concat_image[np.newaxis, ...].astype(np.float32)
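    # the stacked frames sit along the channel axis, so the network input
    # carries 3 * batch_frames channels of consecutive frames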
# create constant tensor from concatenated image and feed it to yolo_v3_model
batched_input = tf.constant(concat_image)
yolo_output = yolo_v3_model(batched_input)
# list to store bboxes from respective scales
pred_bbox = []
# iterate over 3 scales
for i in range(3):
        # decode respective yolo_output from each scale
pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox,
classes = num_of_classes, strides = strides, anchors = anchors, index = i)
# obtain results of shape (:, 5 + num_classes), i.e all bboxes
pred_result_reshaped = tf.reshape(pred_result, (-1, tf.shape(pred_result)[-1]))
# append to pred_bbox
pred_bbox.append(pred_result_reshaped)
# concatenate all bboxes from all scales
pred_bbox = tf.concat(pred_bbox, axis = 0)
    # post process all bboxes using the latest image in original_images
    bboxes = postprocess_boxes(pred_bbox, original_images[-1], train_input_size, score_threshold)
    # non-maximal suppression for bboxes
    bboxes = nms(bboxes, iou_threshold, method = 'nms')
    # draw bboxes on the latest image in original_images
image = draw_bbox(original_images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)
# save image if path to save is given
if output_path != '': cv2.imwrite(output_path, image)
# display image if show is true
if show:
# show the image
cv2.imshow("predicted image", image)
# load and hold the image
cv2.waitKey(0)
# to close the window after the required kill value was provided
cv2.destroyAllWindows()
return image
def detect_video(yolo_v3_model, video_path, batch_frames, output_path, train_input_size, classes_file_path,
score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False,
rectangle_colors = ''):
""" function to take in video and apply bbox on it """
# obtain number of classes
num_of_classes = len(read_class_names(classes_file_path))
# obtain VideoCapture object
vid = cv2.VideoCapture(video_path)
# obtain width, height and fps of video
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
# obtain video codec
codec = cv2.VideoWriter_fourcc(*'XVID')
# obtain output_path
# output_path must be .mp4
out = cv2.VideoWriter(output_path, codec, fps+1, (width, height))
# create list to store images
images = []
# variable to track frame
frame = 0
while True:
try:
# grabs, decodes and returns the next video frame
_, image = vid.read()
# append original image to original_images list
images.append(image[:])
# increment frame
frame += 1
# if current frame is less than batch_frames
if frame < batch_frames:
# move to next frame
continue
            # iterate over images in chronological order (the last image is the one bboxes are drawn on)
for x in range(batch_frames):
# convert original image to grayscale
image = cv2.cvtColor(images[-batch_frames + x + 1], cv2.COLOR_BGR2RGB)
# preprocess image
image = transform_images(image[:], train_input_size)
                # obtain concat frame if none exists yet
if x == 0:
concat_image = image[:]
# concatenate subsequent frames to concat_image
else:
concat_image = np.concatenate((concat_image, image), axis = -1)
except:
break
# add batch dimensions to concatenated image
concat_image = concat_image[np.newaxis, ...].astype(np.float32)
# create constant tensor from concatenated image and feed it to yolo_v3_model
batched_input = tf.constant(concat_image)
yolo_output = yolo_v3_model(batched_input)
# list to store bboxes from respective scales
pred_bbox = []
# iterate over 3 scales
for i in range(3):
            # decode respective yolo_output from each scale
pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox,
classes = num_of_classes, strides = strides, anchors = anchors, index = i)
# append to pred_bbox
pred_bbox.append(pred_result)
# obtain results of shape (:, 5 + num_classes), i.e all bboxes
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
# concatenate all bboxes from all scales
pred_bbox = tf.concat(pred_bbox, axis = 0)
        # post process all bboxes using the latest image in images
        bboxes = postprocess_boxes(pred_bbox, images[-1], train_input_size, score_threshold)
        # non-maximal suppression for bboxes
        bboxes = nms(bboxes, iou_threshold, method = 'nms')
        # draw bboxes on the latest image in images
image = draw_bbox(images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)
# save image frame to video path if path to save is given
if output_path != '': out.write(image)
# display image frame (i.e play video) if show is true
if show:
# show the image
cv2.imshow('output', image)
            # if q key is pressed
if cv2.waitKey(25) & 0xFF == ord("q"):
# end session
cv2.destroyAllWindows()
# break out of while loop
break
    # When everything is done, release the capture
vid.release()
cv2.destroyAllWindows() | 38.165939 | 118 | 0.609191 | 3,496 | 26,220 | 4.390446 | 0.137014 | 0.008795 | 0.01733 | 0.007818 | 0.389472 | 0.34973 | 0.332204 | 0.312724 | 0.288227 | 0.278976 | 0 | 0.024462 | 0.289054 | 26,220 | 687 | 119 | 38.165939 | 0.798938 | 0.31495 | 0 | 0.233716 | 0 | 0 | 0.004064 | 0 | 0 | 0 | 0.000232 | 0 | 0.003831 | 1 | 0.042146 | false | 0 | 0.019157 | 0 | 0.099617 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a6df31b987878cfe1251f1bd53a3143c9b2fc4 | 1,011 | py | Python | benchbuild/projects/benchbuild/linpack.py | sturmianseq/benchbuild | e3cc1a24e877261e90baf781aa67a9d6f6528dac | [
"MIT"
] | 11 | 2017-10-05T08:59:35.000Z | 2021-05-29T01:43:07.000Z | benchbuild/projects/benchbuild/linpack.py | sturmianseq/benchbuild | e3cc1a24e877261e90baf781aa67a9d6f6528dac | [
"MIT"
] | 326 | 2016-07-12T08:11:43.000Z | 2022-03-28T07:10:11.000Z | benchbuild/projects/benchbuild/linpack.py | sturmianseq/benchbuild | e3cc1a24e877261e90baf781aa67a9d6f6528dac | [
"MIT"
] | 13 | 2016-06-17T12:13:35.000Z | 2022-01-04T16:09:12.000Z | import logging
import benchbuild as bb
from benchbuild.environments.domain.declarative import ContainerImage
from benchbuild.source import HTTP
from benchbuild.utils import path
from benchbuild.utils.cmd import patch
LOG = logging.getLogger(__name__)
class Linpack(bb.Project):
""" Linpack (C-Version) """
NAME = 'linpack'
DOMAIN = 'scientific'
GROUP = 'benchbuild'
SOURCE = [
HTTP(
remote={'5_88': 'http://www.netlib.org/benchmark/linpackc.new'},
local='linpack.c'
)
]
CONTAINER = ContainerImage().from_('benchbuild:alpine')
def compile(self) -> None:
lp_patch = path.template_path("patches/linpack.patch")
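        # feed the patch file to `patch -p0` via plumbum-style '<' stdin
        # redirection, then invoke the bound command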
(patch["-p0"] < lp_patch)()
self.ldflags += ["-lm"]
clang = bb.compiler.cc(self)
_clang = bb.watch(clang)
_clang("-o", 'linpack', "linpack.c")
def run_tests(self) -> None:
linpack = bb.wrap('linpack', self)
_linpack = bb.watch(linpack)
_linpack()
| 25.923077 | 76 | 0.624135 | 114 | 1,011 | 5.412281 | 0.491228 | 0.113452 | 0.090762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005202 | 0.239367 | 1,011 | 38 | 77 | 26.605263 | 0.797139 | 0.018793 | 0 | 0 | 0 | 0 | 0.155488 | 0.021341 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.206897 | 0 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a80ee7e408fdd7c7802b301559381730cc6c1d | 1,674 | py | Python | analysis/study_definition_stp.py | opensafely/antibody-and-antiviral-deployment | 27cd171870fdd161468d1cabd1eaee76f1943593 | [
"MIT"
] | null | null | null | analysis/study_definition_stp.py | opensafely/antibody-and-antiviral-deployment | 27cd171870fdd161468d1cabd1eaee76f1943593 | [
"MIT"
] | 1 | 2022-03-18T16:20:19.000Z | 2022-03-18T16:20:19.000Z | analysis/study_definition_stp.py | opensafely/antibody-and-antiviral-deployment | 27cd171870fdd161468d1cabd1eaee76f1943593 | [
"MIT"
] | null | null | null | ################################################################################
#
# Description: This script provides the formal specification of the data
# that will be extracted from the OpenSAFELY database.
#
# Output: output/data/input_stp.csv.gz
#
# Author(s): M Green
# Date last updated: 17/05/2022
#
################################################################################
# IMPORT STATEMENTS ----
## Import code building blocks from cohort extractor package
from cohortextractor import (
StudyDefinition,
patients,
filter_codes_by_category,
Measure
)
# DEFINE STUDY POPULATION ----
## Define study population and variables
study = StudyDefinition(
# PRELIMINARIES ----
## Configure the expectations framework
default_expectations = {
"date": {"earliest": "2021-11-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.05,
},
## Define index date
index_date = "2020-03-01",
# POPULATION ----
population = patients.satisfying(
"""
registered_treated
""",
registered_treated = patients.registered_as_of("index_date"),
),
  # STP (NHS administration region based on geography, currently closest match to CMDU)
stp = patients.registered_practice_as_of(
"index_date",
returning = "stp_code",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"STP1": 0.1,
"STP2": 0.1,
"STP3": 0.1,
"STP4": 0.1,
"STP5": 0.1,
"STP6": 0.1,
"STP7": 0.1,
"STP8": 0.1,
"STP9": 0.1,
"STP10": 0.1,
}
},
},
),
)
| 22.32 | 86 | 0.530466 | 165 | 1,674 | 5.278788 | 0.612121 | 0.022962 | 0.04822 | 0.029851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046326 | 0.252091 | 1,674 | 74 | 87 | 22.621622 | 0.649361 | 0.32497 | 0 | 0.051282 | 0 | 0 | 0.174533 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025641 | 0 | 0.025641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a82d08e9fdb34f0a46c49817ccc000c9d61a1d | 3,466 | py | Python | GameFiles/gameIntro.py | ConnerGallimore/Project-Run | 1a65f238412eafd675064b888d365c80a4516fa5 | [
"MIT"
] | null | null | null | GameFiles/gameIntro.py | ConnerGallimore/Project-Run | 1a65f238412eafd675064b888d365c80a4516fa5 | [
"MIT"
] | null | null | null | GameFiles/gameIntro.py | ConnerGallimore/Project-Run | 1a65f238412eafd675064b888d365c80a4516fa5 | [
"MIT"
] | null | null | null | import pygame
import sys
from GameFiles.gameFunctions import *
#from GameFiles.ProjectRun import *
white = (255, 255, 255)
yellow = (255, 255, 0)
black = (0, 0, 0)
red = (200, 0, 0)
green = (0, 200, 0)
bright_red = (255, 0, 0)
bright_green = (0, 255, 0)
intro = True
level = False
level_value = 0
def unpause():
global intro
intro = False
def game_quit():
pygame.quit()
sys.exit()
def level_select():
global level
level = True
def level_one():
global level_value
global level
unpause()
level_value = 0
level = False
def level_two():
global level_value
global level
unpause()
level_value = 1
level = False
def level_three():
global level_value
global level
unpause()
level_value = 2
level = False
def exitmenu():
global level
level = False
def game_intro(screen, screen_width, screen_height, FramePerSec, FPS):
screen.fill(white)
largeText = pygame.font.Font('freesansbold.ttf', 80)
TitleSurf, TitleRect = text_objects("Project Run", largeText)
TitleRect.center = ((screen_width/2),(screen_height/2))
screen.blit(TitleSurf, TitleRect)
score_text = pygame.font.Font("freesansbold.ttf", 20)
text = score_text.render("P Key to Enter Store / Space Key to Jump / Esc Key to Pause", True, yellow, black)
textRect = text.get_rect()
textRect.center = (320,300)
screen.blit(text, textRect)
start_x = 150
start_y = 350
start_w = 100
start_h = 50
exit_x = 350
exit_y = 350
exit_w = 100
exit_h = 50
global intro
global level
while intro:
screen.fill(white)
largeText = pygame.font.Font('freesansbold.ttf', 80)
TitleSurf, TitleRect = text_objects("Project Run", largeText)
TitleRect.center = ((screen_width/2),(screen_height/2))
screen.blit(TitleSurf, TitleRect)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
        # a button is hovered when x <= mouse_x <= x + width and y <= mouse_y <= y + height
button(screen, "Start", start_x, start_y, start_w, start_h, green, bright_green, level_select)
button(screen, "Quit", exit_x, exit_y, exit_w, exit_h, red, bright_red, game_quit)
pygame.display.update()
FramePerSec.tick(FPS)
    while level:
screen.fill(white)
largeText = pygame.font.Font('freesansbold.ttf', 80)
TitleSurf, TitleRect = text_objects("Level Select", largeText)
TitleRect.center = ((screen_width/2),(screen_height/4))
screen.blit(TitleSurf, TitleRect)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
button(screen, "level 1", 50 + start_x/2, start_y - 100, start_w, start_h, green, bright_green, level_one)
button(screen, "level 2", 50 + start_x + start_x/2 , start_y - 100, start_w, start_h, green, bright_green, level_two)
button(screen, "level 3", 50 + start_x * 2 + start_x/2, start_y - 100, start_w, start_h, green, bright_green, level_three)
button(screen, "Back", exit_x - 80, exit_y, exit_w, exit_h, red, bright_red, exitmenu)
pygame.display.update()
FramePerSec.tick(FPS)
intro = True
level = False
return level_value
| 32.698113 | 138 | 0.620312 | 465 | 3,466 | 4.462366 | 0.208602 | 0.047711 | 0.02506 | 0.05012 | 0.53012 | 0.510843 | 0.475181 | 0.475181 | 0.37253 | 0.346506 | 0 | 0.043032 | 0.269186 | 3,466 | 105 | 139 | 33.009524 | 0.776155 | 0.036065 | 0 | 0.515464 | 0 | 0 | 0.057203 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082474 | false | 0 | 0.030928 | 0 | 0.123711 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65a9c1de13a66dcd45ac5c259814b896118a63d3 | 13,572 | py | Python | official/projects/longformer/longformer_encoder.py | duncanriach-nvidia/tensorflow-models | f95f014e6192434f405b7d6209c885072a3f6b6d | [
"Apache-2.0"
] | 1 | 2020-05-20T11:40:56.000Z | 2020-05-20T11:40:56.000Z | official/projects/longformer/longformer_encoder.py | duncanriach-nvidia/tensorflow-models | f95f014e6192434f405b7d6209c885072a3f6b6d | [
"Apache-2.0"
] | null | null | null | official/projects/longformer/longformer_encoder.py | duncanriach-nvidia/tensorflow-models | f95f014e6192434f405b7d6209c885072a3f6b6d | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer encoder. Modified From huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, List, Optional, Union
from absl import logging
import tensorflow as tf
from official.modeling.tf_utils import get_shape_list
from official.nlp.modeling import layers
from official.projects.longformer.longformer_encoder_block import LongformerEncoderBlock
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class LongformerEncoder(tf.keras.layers.Layer):
"""LongformerEncoder.
Args:
vocab_size: The size of the token vocabulary.
attention_window: list of ints representing the window size for each layer.
global_attention_size: the size of global attention used for each token.
pad_token_id: the token id for the pad token
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
attention_window: Union[List[int], int] = 512,
global_attention_size: int = 0,
pad_token_id: int = 1,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: Callable[..., Any] = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
**kwargs):
super().__init__(**kwargs)
# Longformer args
self._attention_window = attention_window
self._global_attention_size = global_attention_size
self._pad_token_id = pad_token_id
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings')
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding')
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings')
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout')
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.experimental.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection')
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask')
for i in range(num_layers):
layer = LongformerEncoderBlock(
global_attention_size=global_attention_size,
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
attention_window=attention_window[i],
layer_id=i,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
output_range=output_range if i == num_layers - 1 else None,
kernel_initializer=initializer,
name=f'transformer/layer_{i}')
self._transformer_layers.append(layer)
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
name='pooler_transform')
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'attention_window': attention_window,
'global_attention_size': global_attention_size,
'pad_token_id': pad_token_id,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32))
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
word_ids = inputs.get('input_word_ids') # input_ids
mask = inputs.get('input_mask') # attention_mask
type_ids = inputs.get('input_type_ids') # token_type_ids
word_embeddings = inputs.get('input_word_embeddings',
None) # input_embeds
else:
raise ValueError(f'Unexpected inputs type to {self.__class__}.')
(
padding_len,
word_ids,
mask,
type_ids,
word_embeddings,
) = self._pad_to_window_size(
word_ids=word_ids,
mask=mask,
type_ids=type_ids,
word_embeddings=word_embeddings,
pad_token_id=self._pad_token_id)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = word_embeddings + position_embeddings + type_embeddings
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
batch_size, seq_len = get_shape_list(mask)
# create masks with fixed len global_attention_size
mask = tf.transpose(
tf.concat(
values=[
tf.ones(
(self._global_attention_size, batch_size), tf.int32) * 2,
tf.transpose(mask)[self._global_attention_size:]
],
axis=0))
is_index_masked = tf.math.less(mask, 1)
is_index_global_attn = tf.transpose(
tf.concat(
values=[
tf.ones((self._global_attention_size, batch_size), tf.bool),
tf.zeros((seq_len - self._global_attention_size, batch_size),
tf.bool)
],
axis=0))
# Longformer
attention_mask = mask
extended_attention_mask = tf.reshape(
attention_mask, (tf.shape(mask)[0], tf.shape(mask)[1], 1, 1))
attention_mask = tf.cast(
tf.math.abs(1 - extended_attention_mask), tf.dtypes.float32) * -10000.0
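    # build an additive mask: local tokens (mask value 1) map to 0, while
    # padding (0) and global markers (2) both map to -10000; global positions
    # are handled separately downstream via is_index_global_attn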
encoder_outputs = []
x = embeddings
# TFLongformerEncoder
for layer in self._transformer_layers:
x = layer([x, attention_mask, is_index_masked, is_index_global_attn])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
if padding_len > 0:
last_encoder_output = last_encoder_output[:, :-padding_len]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
return dict(
sequence_output=last_encoder_output,
pooled_output=pooled_output,
encoder_outputs=encoder_outputs)
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _pad_to_window_size(
self,
word_ids,
mask,
type_ids,
word_embeddings,
pad_token_id,
):
# padding
attention_window = max(self._attention_window)
assert (attention_window %
            2 == 0), ('`attention_window` should be an even value. '
f'Given {attention_window}')
input_shape = get_shape_list(
word_ids) if word_ids is not None else get_shape_list(word_embeddings)
batch_size, seq_len = input_shape[:2]
if seq_len is not None:
padding_len = (attention_window -
seq_len % attention_window) % attention_window
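      # e.g. attention_window = 512 and seq_len = 1000 gives
      # (512 - 1000 % 512) % 512 = 24 pad tokens, rounding the sequence up to
      # the next multiple of the window (1024); the outer % keeps the padding
      # at 0 when seq_len is already aligned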
else:
padding_len = 0
paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]])
if word_ids is not None:
word_ids = tf.pad(word_ids, paddings, constant_values=pad_token_id)
if word_embeddings is not None:
def pad_embeddings():
word_ids_padding = tf.fill((batch_size, padding_len), self.pad_token_id)
word_embeddings_padding = self._embedding_layer(word_ids_padding)
return tf.concat([word_embeddings, word_embeddings_padding], axis=-2)
word_embeddings = tf.cond(
tf.math.greater(padding_len, 0), pad_embeddings,
lambda: word_embeddings)
mask = tf.pad(
mask, paddings,
constant_values=False) # no attention on the padding tokens
token_type_ids = tf.pad(
type_ids, paddings, constant_values=0) # pad with token_type_id = 0
return (
padding_len,
word_ids,
mask,
token_type_ids,
word_embeddings,
)
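# Illustrative note (a sketch, not part of the original model code): with a
# maximum attention window of 512 and an input of length 700,
#
#   padding_len = (512 - 700 % 512) % 512  # = (512 - 188) % 512 = 324
#
# so the padded sequence length becomes 700 + 324 = 1024, an exact multiple
# of the window size, which is what the windowed local attention requires.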
| 37.081967 | 88 | 0.691424 | 1,714 | 13,572 | 5.204784 | 0.194866 | 0.032956 | 0.027687 | 0.012891 | 0.201547 | 0.132608 | 0.086425 | 0.072974 | 0.042036 | 0.029145 | 0 | 0.007927 | 0.228485 | 13,572 | 365 | 89 | 37.183562 | 0.844045 | 0.243442 | 0 | 0.144 | 0 | 0 | 0.086151 | 0.00828 | 0 | 0 | 0 | 0 | 0.004 | 1 | 0.04 | false | 0 | 0.024 | 0.012 | 0.104 | 0.004 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65aaf835808677c6bce77e36c52ee20ad761695a | 3,904 | py | Python | brew.py | jsam/renku-python | 37730aa497724860b691ea2a3bd3e3d5993d1a93 | [
"Apache-2.0"
] | 2 | 2019-03-09T17:56:57.000Z | 2019-07-03T15:20:22.000Z | brew.py | jsam/renku-python | 37730aa497724860b691ea2a3bd3e3d5993d1a93 | [
"Apache-2.0"
] | null | null | null | brew.py | jsam/renku-python | 37730aa497724860b691ea2a3bd3e3d5993d1a93 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2018 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate of Homebrew formulas."""
import json
import os
import sys
import requests
from pkg_resources import get_distribution
if len(sys.argv) > 1:
NAME = sys.argv[1]
else:
NAME = 'renku'
BLACKLIST = {
'ruamel.ordereddict',
}
RESOURCE = """ resource "{package}" do
url "{url}"
sha256 "{sha256}"
end
"""
DEPENDENCY = ' depends_on "{package}"'
DEPENDENCIES = (
'git-lfs',
'libxml2',
'node',
'python',
)
FORMULA = """class {formula} < Formula
include Language::Python::Virtualenv
desc "{desc}"
homepage "{homepage}"
url "{url}"
sha256 "{sha256}"
version_scheme 1
head "{homepage}"
{dependencies}
{resources}
def install
venv = virtualenv_create(libexec, "python3")
venv.pip_install resources
venv.pip_install_and_link buildpath
end
test do
system "true"
end
end"""
SUFFIXES = {
# 'py2.py3-none-any.whl': 10,
'.tar.gz': 5,
'.zip': 1,
}
def find_release(package, releases, dependencies=None):
"""Return the best release."""
dependencies = dependencies if dependencies is not None else {}
for release in releases:
url = release['url']
old_priority = dependencies.get(package, {}).get('priority', 0)
for suffix, priority in SUFFIXES.items():
if url.endswith(suffix):
if old_priority < priority:
sha256 = release['digests']['sha256']
dependencies[package] = {
'package': package,
'url': url,
'sha256': sha256,
'priority': priority,
}
return dependencies[package]
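# Illustrative sketch (hypothetical release data, not part of the original
# script): the '.tar.gz' artifact wins over '.zip' because of its higher
# priority in SUFFIXES.
#
#   releases = [
#       {'url': 'pkg-1.0.zip', 'digests': {'sha256': 'aaa'}},
#       {'url': 'pkg-1.0.tar.gz', 'digests': {'sha256': 'bbb'}},
#   ]
#   best = find_release('pkg', releases)
#   # best['url'] == 'pkg-1.0.tar.gz'; best['priority'] == 5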
response = requests.get('https://pypi.org/pypi/{NAME}/json'.format(NAME=NAME))
if response.status_code != 200:
print(FORMULA, response)
sys.exit(1)
description = response.json()
version = os.environ.get('PY_BREW_VERSION')
if version is None:
version = get_distribution(NAME).version
release = find_release(NAME, description['releases'][version])
with open('Pipfile.lock') as f:
lock = json.load(f)
dependencies = {}
for package, settings in lock['default'].items():
if package in BLACKLIST:
continue
pypi_response = requests.get(
'https://pypi.org/pypi/{package}/json'.format(package=package)
)
if pypi_response.status_code != 200:
continue
pypi = pypi_response.json()
if settings.get('editable', False):
continue
releases = pypi['releases'][settings['version'].lstrip('=')]
find_release(package, releases, dependencies=dependencies)
print(
FORMULA.format(
dependencies='\n'.join(
DEPENDENCY.format(package=package) for package in DEPENDENCIES
),
resources='\n'.join(
RESOURCE.format(**package)
for name, package in dependencies.items() if name != NAME
),
desc=description['info']['summary'].rstrip('.'),
formula=description['info']['name'].capitalize(),
homepage=description['info']['home_page'],
url=release['url'],
sha256=release['sha256'],
)
)
| 25.025641 | 78 | 0.633965 | 451 | 3,904 | 5.436807 | 0.425721 | 0.02447 | 0.014682 | 0.022023 | 0.059543 | 0.028548 | 0.028548 | 0 | 0 | 0 | 0 | 0.019522 | 0.238986 | 3,904 | 155 | 79 | 25.187097 | 0.805789 | 0.206711 | 0 | 0.115385 | 0 | 0 | 0.249919 | 0.025741 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009615 | false | 0 | 0.048077 | 0 | 0.067308 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65acf03c20907f5df4a1166fec320269b817df47 | 655 | py | Python | app.py | IT-COMMUNITY-ICT-RUSSIA/itmo-chart-backend | 808d55ea8c45d8b199824df702ba688adbb283b5 | [
"Apache-2.0"
] | null | null | null | app.py | IT-COMMUNITY-ICT-RUSSIA/itmo-chart-backend | 808d55ea8c45d8b199824df702ba688adbb283b5 | [
"Apache-2.0"
] | null | null | null | app.py | IT-COMMUNITY-ICT-RUSSIA/itmo-chart-backend | 808d55ea8c45d8b199824df702ba688adbb283b5 | [
"Apache-2.0"
] | null | null | null | from dotenv import load_dotenv
load_dotenv()
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from modules import routers, database
app = FastAPI(
title="ITMOCHART",
description="ICT Hack #3 2021",
)
DB = database.MongoDbWrapper()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(router=routers.service_router, tags=["Service Endpoints"])
app.include_router(router=routers.user_router, tags=["User Management Endpoints"])
app.include_router(router=routers.chart_router, tags=["Chart Endpoints"])
| 24.259259 | 82 | 0.752672 | 78 | 655 | 6.153846 | 0.461538 | 0.0625 | 0.1 | 0.1375 | 0.21875 | 0.158333 | 0 | 0 | 0 | 0 | 0 | 0.008711 | 0.123664 | 655 | 26 | 83 | 25.192308 | 0.827526 | 0 | 0 | 0 | 0 | 0 | 0.129771 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65af098ef6b07b85474afb15e3ff5859a0a9a82b | 1,712 | py | Python | sa/profiles/Eltex/MA4000/get_capabilities.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/profiles/Eltex/MA4000/get_capabilities.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/profiles/Eltex/MA4000/get_capabilities.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Eltex.MA4000.get_capabilities
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.sa.profiles.Generic.get_capabilities import Script as BaseScript
from noc.sa.profiles.Generic.get_capabilities import false_on_cli_error
class Script(BaseScript):
name = "Eltex.MA4000.get_capabilities"
rx_stack = re.compile(r"^\s*\*?(?P<box_id>\d+)\s+", re.MULTILINE)
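# Illustrative note (hypothetical CLI output, not from vendor docs): rx_stack
# is meant to pull the unit number out of "show stack" lines such as
# "  *1  ..." or "   2  ...", where a leading "*" marks the active unit;
# match.group("box_id") would yield "1" and "2" respectively.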
@false_on_cli_error
def has_lldp_cli(self):
"""
Check box has lldp enabled
"""
cmd = self.cli("show lldp configuration")
return "LLDP state: Enabled" in cmd
@false_on_cli_error
def has_stp_cli(self):
"""
Check box has STP enabled
"""
cmd = self.cli("show spanning-tree active")
return "spanning tree: off" not in cmd
@false_on_cli_error
def has_lacp_cli(self):
"""
Check box has LACP enabled
"""
for ch in self.scripts.get_portchannel():
if ch["type"] == "L":
return True
return False
def execute_platform_cli(self, caps):
try:
cmd = self.cli("show stack")
s = []
for match in self.rx_stack.finditer(cmd):
s += [match.group("box_id")]
if s:
caps["Stack | Members"] = len(s) if len(s) != 1 else 0
caps["Stack | Member Ids"] = " | ".join(s)
except Exception:
pass
| 29.517241 | 73 | 0.505257 | 196 | 1,712 | 4.265306 | 0.438776 | 0.07177 | 0.047847 | 0.07177 | 0.325359 | 0.261962 | 0.236842 | 0.169856 | 0 | 0 | 0 | 0.014504 | 0.275117 | 1,712 | 57 | 74 | 30.035088 | 0.659146 | 0.239486 | 0 | 0.096774 | 0 | 0 | 0.16 | 0.044082 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0.032258 | 0.096774 | 0 | 0.451613 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65b06877c297cfd2b35b0e04e7845fc1510ccf62 | 6,836 | py | Python | astar.py | mukundhanraj/a-star-algorithm | 8d7cb5d3232e53a4279e75dc1ee7044c4561825f | [
"MIT"
] | null | null | null | astar.py | mukundhanraj/a-star-algorithm | 8d7cb5d3232e53a4279e75dc1ee7044c4561825f | [
"MIT"
] | null | null | null | astar.py | mukundhanraj/a-star-algorithm | 8d7cb5d3232e53a4279e75dc1ee7044c4561825f | [
"MIT"
] | null | null | null | # A Star path planning
# Copyright (c) 2022 Mukundhan Rajendiran
# MIT License
#
# Description: Using the A* algorithm to find the optimum path from the
# starting position to the goal position
import heapq as heap
import numpy as np
import cv2
def getAdjNodes(curr_node, validPoints, clearance, step):
"""
Definition
---
Method to generate all adjacent nodes for a given node
Parameters
---
curr_node : node of interest
validPoints : list of all valid points
clearance : minimum distance required from obstacles
step : step size for each movement
Returns
---
adjNodes : list of adjacent nodes with cost from parent node
"""
adjNodes = []
moves = [-60, -30, 0, 30, 60]
for move in moves:
# Checking if the point is valid
angle = curr_node[-1] + move
x = int(curr_node[0] + (step * np.cos(np.radians(angle))))
y = int(curr_node[1] + (step * np.sin(np.radians(angle))))
if (x, y) in validPoints:
# Checking for clearance; the flag is reset per move so one blocked
# heading does not discard the remaining candidate moves
flag = True
for i in range(clearance):
if not (int(x + (i * np.cos(np.radians(angle)))),
int(y + (i * np.sin(np.radians(angle))))) \
in validPoints:
flag = False
break
if flag:
adjNodes.append(((x, y, angle), step))
return adjNodes
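# Illustrative sketch (hypothetical inputs): from a node (10, 10, 0) with
# step 5, the candidate headings are -60..60 degrees in 30-degree steps;
# the 0-degree move lands on (10 + int(5 * cos(0)), 10 + int(5 * sin(0)))
# = (15, 10), and is kept only if that point and the clearance samples
# along the heading are all in validPoints.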
def updateNode(new_node, curr_node, node_cost, queue, parent_map, cost, goal,
thresh):
"""
Definition
---
Method to update nodes based on cost and closed list of nodes
Parameters
---
new_node : node of interest
curr_node : parent node
node_cost : dict of all nodes mapped to costs
queue : priority queue of nodes to check
parent_map : dict of nodes mapped to parent node_cost
cost : cost to get to new node from parent node
goal : goal node
thresh : Threshold from goal point
Returns
---
Reached : returns True if new_node is the goal node, otherwise False
node_cost : dict of all nodes mapped to costs
queue : priority queue of nodes to check
parent_map : dict of nodes mapped to parent node_cost
"""
# A*: keep the path cost (g) separate from the heuristic (h), so the
# heuristic is not accumulated into the stored node cost
dist = abs(np.linalg.norm(np.asarray(
new_node[0:2]) - np.asarray(goal[0:2])))
new_cost = node_cost[curr_node] + cost
temp_cost = node_cost.get(new_node)
if not temp_cost or (temp_cost > new_cost):
node_cost[new_node] = new_cost
parent_map[new_node[0:2]] = curr_node[0:2]
heap.heappush(queue, (new_cost + dist, new_node))
if abs(np.linalg.norm(np.asarray(goal[0:2])
- np.asarray(new_node[0:2]))) < thresh:
return True, node_cost, queue, parent_map
return False, node_cost, queue, parent_map
def astar(start, goal, validPoints, clearance, step, thresh):
"""
Definition
---
Method to get the least-cost path from the starting to the goal node using A*
Parameters
---
start : starting node
goal : goal node
validPoints : list of all valid points
clearance : minimum distance required from obstacles
step : step size for each movement
thresh : Threshold from goal point
Returns
---
Reached : True if a path is found, otherwise False
parent_map : dict of nodes mapped to parent node_cost
closed : list of all the explored nodes
"""
closed = []
queue = []
node_cost = {}
parent_map = {}
reached = False
node_cost[start] = 0
heap.heappush(queue, (0, start))
print(abs(np.linalg.norm(np.asarray(goal[0:2])
- np.asarray(start[0:2]))) < thresh)
if abs(np.linalg.norm(np.asarray(goal[0:2])
- np.asarray(start[0:2]))) < thresh:
reached = True
parent_map[goal[0:2]] = start[0:2]
while not reached and queue:
curr_cost, curr_node = heap.heappop(queue)
closed.append(curr_node[0:2])
adjNodes = getAdjNodes(curr_node, validPoints, clearance, step)
for new_node, cost in adjNodes:
if new_node[0:2] in closed:
continue
print('checking for node: ', new_node[0:2])
flag, node_cost, queue, parent_map = updateNode(
new_node, curr_node, node_cost, queue, parent_map, cost,
goal, thresh)
if flag:
closed.append(new_node[0:2])
reached = True
break
return reached, parent_map, closed
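# Illustrative usage (a sketch with hypothetical arguments; the real driver
# builds validPoints from the obstacle map):
#
#   start, goal = (10, 10, 0), (390, 215, 0)
#   reached, parents, closed = astar(start, goal, validPoints,
#                                    clearance=5, step=10, thresh=1.5)
#   if reached:
#       path = getPath(parents, start, goal, closed)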
def getPath(parent_map, start, goal, closed):
"""
Definition
---
Method to generate path using backtracking
Parameters
---
parent_map : dict of nodes mapped to parent node_cost
start : starting node
goal : goal node
closed : list of all the explored nodes
Returns
---
path: list of all the points from starting to goal position
"""
curr_node = closed[-1]
parent_node = parent_map[curr_node]
path = [curr_node]
while not parent_node == start[0:2]:
curr_node = parent_node
parent_node = parent_map[curr_node]
path.append(curr_node)
path.append(start[0:2])
return path[::-1]
def animate(map_len, map_bre, validPoints, closed, path, parent_map):
"""
Definition
---
Method to animate the nodes explored by the A* algorithm and plot the
best path
Parameters
---
map_len : length of map
map_bre : breadth of map
validPoints : list of all valid points
closed : list of all the explored nodes
path: list of all the points from starting to goal position
parent_map : dict of nodes mapped to parent node_cost
"""
map_frame = np.zeros((map_bre + 1, map_len + 1, 3))
resize = (800, 500)
for point in validPoints:
map_frame[map_bre - point[1], point[0]] = [255, 255, 255]
cv2.circle(map_frame, (path[-1][0], map_bre
- path[-1][1]), 2, [0, 0, 255], -1)
cv2.circle(map_frame, (path[0][0], map_bre
- path[0][1]), 2, [0, 255, 0], -1)
for point in closed:
if(point == path[0]):
continue
parent = parent_map[point]
cv2.line(map_frame, (point[0], map_bre - point[1]),
(parent[0], map_bre - parent[1]), [255, 0, 0], 2)
cv2.imshow('map_frame', cv2.resize(map_frame, resize))
cv2.waitKey(1)
for point in path:
if(point == path[0]):
continue
parent = parent_map[point]
cv2.line(map_frame, (point[0], map_bre - point[1]),
(parent[0], map_bre - parent[1]), [0, 255, 0], 2)
cv2.imshow('map_frame', cv2.resize(map_frame, resize))
cv2.waitKey(1)
print('done, press any key to exit..')
cv2.waitKey(0)
| 31.502304 | 77 | 0.597572 | 946 | 6,836 | 4.207188 | 0.165962 | 0.045226 | 0.01809 | 0.013568 | 0.468844 | 0.418844 | 0.364322 | 0.325377 | 0.303266 | 0.303266 | 0 | 0.028255 | 0.301053 | 6,836 | 216 | 78 | 31.648148 | 0.80473 | 0.327823 | 0 | 0.230769 | 0 | 0 | 0.015537 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048077 | false | 0 | 0.028846 | 0 | 0.125 | 0.028846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65b0a95f423d0aab407698c3ce15d54d5e48ee66 | 3,256 | py | Python | 12-2021-07-09/src/app_clean.py | eotp/python-FU-class | f0a7518b3e3204a77e8855bef91afeaabb0d52ac | [
"MIT"
] | null | null | null | 12-2021-07-09/src/app_clean.py | eotp/python-FU-class | f0a7518b3e3204a77e8855bef91afeaabb0d52ac | [
"MIT"
] | null | null | null | 12-2021-07-09/src/app_clean.py | eotp/python-FU-class | f0a7518b3e3204a77e8855bef91afeaabb0d52ac | [
"MIT"
] | 1 | 2020-12-04T15:37:28.000Z | 2020-12-04T15:37:28.000Z | """ Run the file with: streamlit run [filename] """
import geopandas as gpd
import pandas as pd
from matplotlib import pyplot as plt
from datetime import date
import streamlit as st
import folium
from streamlit_folium import folium_static
import utils
""" 0. Load Datasets """
# utils.load_files() is marked with the @st.cache keyword
# Caching allows to store return variables in memory
# Saving execution time for time costly operations
_europe, _bl, _kreise, _kreise_full, _gdf_europe = utils.load_files()
europe = _europe.copy()
bl = _bl.copy()
kreise = _kreise.copy()
kreise_full = _kreise_full.copy()
gdf_europe = _gdf_europe.copy()
""" 1. Design the app """
""" 1.1 Main screen """
st.title("WW2 Airstrikes")
""" 1.2. Build sidebar """
st.sidebar.title("Data selection")
st.sidebar.header("Years of interest")
""" 1.2.1. SelectBox allowing to choose a year between 1939 and 1945 """
year = st.sidebar.selectbox("Select a year", list(range(1939, 1946)))
""" 1.2.2. SelectBox allowing to choose a Feature to inspect """
column_of_choice = st.sidebar.selectbox(
"Select data of interest", ["Number of Attacks", "High Explosives Weight"]
)
# translate selection into actual column name used in dataset
trans_col = {
"Number of Attacks": "attacks",
"High Explosives Weight": "explosives_weight",
}
column = trans_col[column_of_choice]
""" 1.2.3. SelectBox allowing to choose a Landkreis to inspect """
kreis = st.sidebar.selectbox("Select a Landkreis", sorted(kreise["name"].unique()))
""" 1.2.4. Slidebar to choose date interval for plot """
date_min = kreise_full["Mission Date"].min()
date_min = date(date_min.year, date_min.month, date_min.day)
date_max = kreise_full["Mission Date"].max()
date_max = date(date_max.year, date_max.month, date_max.day)
y_min, y_max = st.sidebar.slider(
"Select Date", date_min, date_max, (date_min, date_max)
)
""" 2. First Map --- Whole Europe """
st.title("Europe")
utils.create_and_display_map(
europe, f"{column}_{year}", ["name", f"{column}_{year}"], "feature.properties.name"
)
""" 3. Second Map --- German Bundesländer """
st.title("German Bundesländer")
utils.create_and_display_map(
bl, f"{column}_{year}", ["name", f"{column}_{year}"], "feature.properties.name"
)
""" 4. Third Map --- German Landkreise """
st.title("German Landkreise")
utils.create_and_display_map(
kreise,
f"{column}_{year}",
["name", f"{column}_{year}"],
"feature.properties.name",
center=kreis,
)
""" 5. Time Plot for chosen Landkreis (see 1.2.3.) """
utils.timeplot(kreise_full, kreis, None, y_min, y_max)
# hidden behind checkbox
if st.checkbox("Show Targets"):
temp_df = kreise_full.copy()
temp_df = temp_df.loc[temp_df["name"] == kreis]
temp_df["lat"] = temp_df["Target Latitude"]
temp_df["lon"] = temp_df["Target Longitude"]
st.map(temp_df)
""" 6. Marker-Cluster Map (instead of Choropleth Map)"""
# hidden behind checkbox
st.warning("Map hidden behind the checkbox below takes a long time to load!")
if st.checkbox("Show Marker Cluster Map"):
st.warning(
"Don't click on the Markers, just zoom in. Or your poor PC won't have a fun time."
)
folium_static(utils.markerCluster(gdf_europe.loc[gdf_europe["year"] == year]))
| 29.071429 | 90 | 0.70301 | 487 | 3,256 | 4.544148 | 0.347023 | 0.024401 | 0.029824 | 0.033891 | 0.154089 | 0.063714 | 0.063714 | 0.063714 | 0.063714 | 0.063714 | 0 | 0.015563 | 0.151413 | 3,256 | 111 | 91 | 29.333333 | 0.785378 | 0.094287 | 0 | 0.047619 | 0 | 0.015873 | 0.288027 | 0.029399 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.126984 | 0 | 0.126984 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65b0faefb4448e00001534f030863623970928db | 1,483 | py | Python | inner.py | andreasbock/metamorphosis_H3 | 578b02000a4bc87011284a95b2ba6628a68e7821 | [
"MIT"
] | null | null | null | inner.py | andreasbock/metamorphosis_H3 | 578b02000a4bc87011284a95b2ba6628a68e7821 | [
"MIT"
] | null | null | null | inner.py | andreasbock/metamorphosis_H3 | 578b02000a4bc87011284a95b2ba6628a68e7821 | [
"MIT"
] | null | null | null | from firedrake import *
import numpy as np
op2.init(log_level="WARNING")
parameters['assembly_cache']['enabled'] = False
def solve_inner(V, u, bcs):
"""
V: function space to solve the problem on
u: velocity of advection
bcs: boundary conditions as `Expression`s
"""
# BCs
top = DirichletBC(V, bcs[0], "top")
bottom = DirichletBC(V, bcs[1], "bottom")
bcs = [top, bottom]
# Functions
I = Function(V)
dI = TestFunction(V)
# Construct and solve problem
# (an earlier 'preonly'/'lu' parameter set was dead code here: it was
# immediately overwritten, so only the effective CG/LU set is kept)
solver_parameters = {'ksp_type': 'cg',
'pc_type': 'lu'}
F = ((dI.dx(1) + u * dI.dx(0)) * (I.dx(1) + u * I.dx(0))) * dx
solve(F == 0, I, bcs=bcs, solver_parameters=solver_parameters)
return I
def solve_inner_test(res, degree, u, bcs, analytical, suff, path):
mesh = PeriodicUnitIntervalMesh(res)
mesh = ExtrudedMesh(mesh, res)
e = FiniteElement("CG", interval, degree)
e = TensorProductElement(e, e)
V = FunctionSpace(mesh, e)
u = interpolate(u, V)
I = solve_inner(V, u, bcs)
File('imgs/I_' + str(res) + '_' + suff + '.pvd').write(I)
analytical = interpolate(analytical, V)
File('analytical.pvd').write(analytical)
l2 = errornorm(I, analytical)
diff = interpolate(I - analytical, V)
engy = sqrt(assemble(((diff.dx(1) + u * diff.dx(0))**2) * dx))
return (l2, engy)
| 27.462963 | 66 | 0.588672 | 195 | 1,483 | 4.394872 | 0.425641 | 0.074679 | 0.014002 | 0.028005 | 0.035006 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011807 | 0.257586 | 1,483 | 53 | 67 | 27.981132 | 0.766576 | 0.101821 | 0 | 0.0625 | 0 | 0 | 0.083527 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65b1fbec87e89e517c563106fb2b06c5520be7ae | 18,442 | py | Python | admin/copyright.py | hejamu/gromacs | 4f4b9e4b197ae78456faada74c9f4cab7d128de6 | [
"BSD-2-Clause"
] | 384 | 2015-01-02T19:44:15.000Z | 2022-03-27T15:13:15.000Z | admin/copyright.py | hejamu/gromacs | 4f4b9e4b197ae78456faada74c9f4cab7d128de6 | [
"BSD-2-Clause"
] | 9 | 2015-04-07T20:48:00.000Z | 2022-01-24T21:29:26.000Z | admin/copyright.py | hejamu/gromacs | 4f4b9e4b197ae78456faada74c9f4cab7d128de6 | [
"BSD-2-Clause"
] | 258 | 2015-01-19T11:19:57.000Z | 2022-03-18T08:59:52.000Z | #!/usr/bin/env python3
#
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2013,2014,2015,2016,2018 by the GROMACS development team.
# Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
"""Checks and/or updates copyright headers in GROMACS source files.
It is used internally by several bash scripts to do copyright-relates tasks,
but can also be invoked directly for some rare use cases.
See docs/dev-manual/code-formatting.rst for more details.
"""
import datetime
import os.path
import re
import sys
from optparse import OptionParser
class CopyrightState(object):
"""Information about an existing (or non-existing) copyright header."""
def __init__(self, has_copyright, is_correct, is_newstyle, years, other_copyrights):
self.has_copyright = has_copyright
self.is_correct = is_correct
self.is_newstyle = is_newstyle
self.years = years
self.other_copyrights = other_copyrights
class CopyrightChecker(object):
"""Logic for analyzing existing copyright headers and generating new ones."""
_header = ["", "This file is part of the GROMACS molecular simulation package.", ""]
_copyright = "Copyright (c) {0}, by the GROMACS development team, led by"
_footer = """
Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
and including many others, as listed in the AUTHORS file in the
top-level source directory and at http://www.gromacs.org.
GROMACS is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation; either version 2.1
of the License, or (at your option) any later version.
GROMACS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with GROMACS; if not, see
http://www.gnu.org/licenses, or write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
If you want to redistribute modifications to GROMACS, please
consider that scientific software is very special. Version
control is crucial - bugs must be traceable. We will be happy to
consider code for inclusion in the official distribution, but
derived work must not be called official GROMACS. Details are found
in the README & COPYING files - if they are missing, get the
official version at http://www.gromacs.org.
To help us fund GROMACS development, we humbly ask that you cite
the research papers on the package. Check out http://www.gromacs.org.
""".strip().splitlines()
def check_copyright(self, comment_block):
"""Analyze existing copyright header for correctness and extract information."""
copyright_re = r'Copyright \(c\) (([0-9]{4}[,-])*[0-9]{4}),? by the GROMACS development team,'
has_copyright = False
is_newstyle = True
is_correct = True
next_header_line = 0
next_footer_line = 0
append_next_line_to_other_copyrights = False
existing_years = ''
other_copyrights = []
for line in comment_block:
if append_next_line_to_other_copyrights:
other_copyrights[-1] += ' ' + line
append_next_line_to_other_copyrights = False
continue
if 'Copyright' in line:
has_copyright = True
match = re.match(copyright_re, line)
if match:
existing_years = match.group(1)
new_line = self._copyright.format(existing_years)
if line != new_line:
is_correct = False
else:
other_copyrights.append(line[line.find('Copyright'):])
if not line.startswith('Copyright'):
append_next_line_to_other_copyrights = True
if next_header_line != -1 or next_footer_line != 0:
is_correct = False
continue
if line.startswith('Written by the Gromacs development team'):
has_copyright = True
if next_header_line >= 0:
if line == self._header[next_header_line]:
next_header_line += 1
if next_header_line >= len(self._header):
next_header_line = -1
else:
is_correct = False
is_newstyle = False
elif next_footer_line >= 0:
if line == self._footer[next_footer_line]:
next_footer_line += 1
if next_footer_line >= len(self._footer):
next_footer_line = -1
else:
is_correct = False
else:
is_correct = False
if next_header_line != -1 or next_footer_line != -1:
is_correct = False
return CopyrightState(has_copyright, is_correct, is_newstyle, existing_years, other_copyrights)
def process_copyright(self, state, options, current_years, reporter):
"""Determine whether a copyrigth header needs to be updated and report issues."""
need_update = False
if state.years:
if options.replace_years:
if state.years != current_years:
need_update = True
reporter.report('copyright years replaced')
new_years = current_years
else:
new_years = state.years
if not new_years.endswith(current_years):
if options.update_year:
need_update = True
new_years += ',' + current_years
if options.check or not need_update:
reporter.report('copyright year outdated')
else:
reporter.report('copyright year added')
else:
new_years = current_years
if not state.has_copyright:
if options.add_missing:
need_update = True
if options.check or not need_update:
reporter.report('copyright header missing')
elif options.add_missing:
reporter.report('copyright header added')
else:
if not state.is_newstyle:
if options.replace_header:
need_update = True
if options.check or not need_update:
reporter.report('copyright header incorrect')
else:
reporter.report('copyright header replaced')
elif not state.is_correct:
if options.update_header:
need_update = True
if options.check or not need_update:
reporter.report('copyright header outdated')
else:
reporter.report('copyright header updated')
return need_update, new_years
def get_copyright_text(self, years, other_copyrights):
"""Construct a new copyright header."""
output = []
output.extend(self._header)
if other_copyrights:
for line in other_copyrights:
outline = line.rstrip()
if outline.endswith(','):
outline = outline[:-1]
if not outline.endswith('.'):
outline += '.'
output.append(outline)
output.append(self._copyright.format(years))
output.extend(self._footer)
return output
class Reporter(object):
"""Wrapper for reporting issues in a file."""
def __init__(self, reportfile, filename):
self._reportfile = reportfile
self._filename = filename
def report(self, text):
self._reportfile.write(self._filename + ': ' + text + '\n')
class CommentHandlerC(object):
"""Handler for extracting and creating C-style comments."""
def extract_first_comment_block(self, content_lines):
if not content_lines or not content_lines[0].startswith('/*'):
return ([], 0)
comment_block = [content_lines[0][2:].strip()]
line_index = 1
while line_index < len(content_lines):
line = content_lines[line_index]
if '*/' in content_lines[line_index]:
break
comment_block.append(line.lstrip('* ').rstrip())
line_index += 1
return (comment_block, line_index + 1)
def create_comment_block(self, lines):
output = []
output.append(('/* ' + lines[0]).rstrip())
output.extend([(' * ' + x).rstrip() for x in lines[1:]])
output.append(' */')
return output
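# Illustrative sketch (not part of the original script):
#
#   CommentHandlerC().create_comment_block(['first', 'second'])
#
# yields ['/* first', ' * second', ' */'] -- the block-comment framing that
# extract_first_comment_block can later strip back off.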
class CommentHandlerSimple(object):
"""Handler for extracting and creating sh-style comments.
Also other comments of the same type, but with a different comment
character are supported."""
def __init__(self, comment_char):
self._comment_char = comment_char
def extract_first_comment_block(self, content_lines):
if not content_lines or not content_lines[0].startswith(self._comment_char):
return ([], 0)
comment_block = []
line_index = 0
while line_index < len(content_lines):
line = content_lines[line_index]
if not line.startswith(self._comment_char):
break
comment_block.append(line.lstrip(self._comment_char + ' ').rstrip())
line_index += 1
if line == self._comment_char + ' the research papers on the package. Check out http://www.gromacs.org.':
break
while line_index < len(content_lines):
line = content_lines[line_index].rstrip()
if len(line) > 0 and line != self._comment_char:
break
line_index += 1
return (comment_block, line_index)
def create_comment_block(self, lines):
output = []
output.extend([(self._comment_char + ' ' + x).rstrip() for x in lines])
output.append('')
return output
comment_handlers = {
'c': CommentHandlerC(),
'tex': CommentHandlerSimple('%'),
'sh': CommentHandlerSimple('#')
}
def select_comment_handler(override, filename):
"""Select comment handler for a file based on file name and input options."""
filetype = override
if not filetype and filename != '-':
basename = os.path.basename(filename)
root, ext = os.path.splitext(basename)
if ext == '.cmakein':
dummy, ext2 = os.path.splitext(root)
if ext2:
ext = ext2
if ext in ('.c', '.cu', '.cpp', '.cl', '.h', '.cuh', '.clh', '.y', '.l', '.pre', '.bm'):
filetype = 'c'
elif ext in ('.tex',):
filetype = 'tex'
elif basename in ('CMakeLists.txt', 'GMXRC', 'git-pre-commit') or \
ext in ('.cmake', '.cmakein', '.py', '.sh', '.bash', '.csh', '.zsh'):
filetype = 'sh'
if filetype in comment_handlers:
return comment_handlers[filetype]
if filetype:
sys.stderr.write("Unsupported input format: {0}\n".format(filetype))
elif filename != '-':
sys.stderr.write("Unsupported input format: {0}\n".format(filename))
else:
sys.stderr.write("No file name or file type provided.\n")
sys.exit(1)
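# Illustrative sketch (hypothetical paths; only the basename and extension
# matter):
#
#   select_comment_handler(None, 'src/gromacs/math/vec.h')  # -> 'c' handler
#   select_comment_handler(None, 'CMakeLists.txt')          # -> 'sh' handler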
def create_copyright_header(years, other_copyrights=None, language='c'):
if language not in comment_handlers:
sys.stderr.write("Unsupported language: {0}\n".format(language))
sys.exit(1)
copyright_checker = CopyrightChecker()
comment_handler = comment_handlers[language]
copyright_lines = copyright_checker.get_copyright_text(years, other_copyrights)
comment_lines = comment_handler.create_comment_block(copyright_lines)
return '\n'.join(comment_lines) + '\n'
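# Illustrative usage (a sketch, not invoked by the script itself):
#
#   header = create_copyright_header('2021', language='sh')
#
# returns the full header as one string of '#'-prefixed lines containing
# "Copyright (c) 2021, by the GROMACS development team, led by".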
def process_options():
"""Process input options."""
parser = OptionParser()
parser.add_option('-l', '--lang',
help='Comment type to use (c or sh)')
parser.add_option('-y', '--years',
help='Comma-separated list of years')
parser.add_option('-F', '--files',
help='File to read list of files from')
parser.add_option('--check', action='store_true',
help='Do not modify the files, only check the copyright (default action). ' +
'If specified together with --update, do the modifications ' +
'but produce output as if only --check was provided.')
parser.add_option('--update-year', action='store_true',
help='Update the copyright year if outdated')
parser.add_option('--replace-years', action='store_true',
help='Replace the copyright years with those given with --years')
parser.add_option('--update-header', action='store_true',
help='Update the copyright header if outdated')
parser.add_option('--replace-header', action='store_true',
help='Replace any copyright header with the current one')
parser.add_option('--remove-old-copyrights', action='store_true',
help='Remove copyright statements not in the new format')
parser.add_option('--add-missing', action='store_true',
help='Add missing copyright headers')
options, args = parser.parse_args()
filenames = args
if options.files:
with open(options.files, 'r') as filelist:
filenames = [x.strip() for x in filelist.read().splitlines()]
elif not filenames:
filenames = ['-']
# Default is --check if nothing provided.
if not options.check and not options.update_year and \
not options.update_header and not options.replace_header and \
not options.add_missing:
options.check = True
return options, filenames
def main():
"""Do processing as a stand-alone script."""
options, filenames = process_options()
years = options.years
if not years:
years = str(datetime.date.today().year)
if years.endswith(','):
years = years[:-1]
checker = CopyrightChecker()
# Process each input file in turn.
for filename in filenames:
comment_handler = select_comment_handler(options.lang, filename)
# Read the input file. We are doing an in-place operation, so can't
# operate in pass-through mode.
if filename == '-':
contents = sys.stdin.read().splitlines()
reporter = Reporter(sys.stderr, '<stdin>')
else:
with open(filename, 'r', encoding='utf-8') as inputfile:
contents = inputfile.read().splitlines()
reporter = Reporter(sys.stdout, filename)
output = []
# Keep lines that must be at the beginning of the file and skip them in
# the check.
if contents and (contents[0].startswith('#!/') or \
contents[0].startswith('%code requires') or \
contents[0].startswith('/* #if')):
output.append(contents[0])
contents = contents[1:]
# Remove and skip empty lines at the beginning.
while contents and len(contents[0]) == 0:
contents = contents[1:]
# Analyze the first comment block in the file.
comment_block, line_count = comment_handler.extract_first_comment_block(contents)
state = checker.check_copyright(comment_block)
need_update, file_years = checker.process_copyright(state, options, years, reporter)
if state.other_copyrights and options.remove_old_copyrights:
need_update = True
state.other_copyrights = []
reporter.report('old copyrights removed')
if need_update:
# Remove the original comment if it was a copyright comment.
if state.has_copyright:
contents = contents[line_count:]
new_block = checker.get_copyright_text(file_years, state.other_copyrights)
output.extend(comment_handler.create_comment_block(new_block))
# Write the output file if required.
if need_update or filename == '-':
# Append the rest of the input file as it was.
output.extend(contents)
output = '\n'.join(output) + '\n'
if filename == '-':
sys.stdout.write(output)
else:
with open(filename, 'w', encoding='utf-8') as outputfile:
outputfile.write(output)
if __name__ == "__main__":
main()
| 41.723982 | 117 | 0.620106 | 2,238 | 18,442 | 4.97319 | 0.183646 | 0.025606 | 0.013477 | 0.010692 | 0.421204 | 0.371788 | 0.334951 | 0.309075 | 0.294879 | 0.281042 | 0 | 0.008749 | 0.287279 | 18,442 | 441 | 118 | 41.818594 | 0.838025 | 0.169776 | 0 | 0.209877 | 0 | 0.003086 | 0.204396 | 0.00329 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046296 | false | 0 | 0.015432 | 0 | 0.123457 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65b3eaa5d2f4e42ffbe5b1fde7fa124d0d24b795 | 998 | py | Python | main.py | Jidnyesh/udemy-course-cracker | 1420b2af4b20706c35eab9f9a6e792163a4a0b38 | [
"Unlicense"
] | 1 | 2019-07-18T14:09:06.000Z | 2019-07-18T14:09:06.000Z | main.py | Jidnyesh/udemy-course-cracker | 1420b2af4b20706c35eab9f9a6e792163a4a0b38 | [
"Unlicense"
] | null | null | null | main.py | Jidnyesh/udemy-course-cracker | 1420b2af4b20706c35eab9f9a6e792163a4a0b38 | [
"Unlicense"
] | null | null | null | from requests import get
from pattern.web import plaintext
import sys
import webbrowser
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
}
with open('wordlist.txt','r') as wordl:
wordlist = wordl.readlines()
wordl.close()
wordlist = [x.strip() for x in wordlist]
course = input('Paste the URL of the course you want to crack\n')
price = input('What is the current price of the course\n')
compare = 'Current price: FreeOriginal price: ₹'+price+'Discount:100'
def attack(course):
for word in wordlist:
url = course+'?couponCode='+word
print("Trying : "+word)
htmlString = get(url,headers=headers).text
webText = plaintext(htmlString)
if compare in webText:
print('\n' + word + ' is the coupon code for the course and it is free now')
webbrowser.open_new_tab(url)
sys.exit()
attack(course=course)
| 32.193548 | 145 | 0.660321 | 144 | 998 | 4.555556 | 0.590278 | 0.015244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037371 | 0.222445 | 998 | 30 | 146 | 33.266667 | 0.806701 | 0 | 0 | 0 | 0 | 0.04 | 0.351703 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.2 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65b48680c43684076e37622a4c384cce303d3ca6 | 2,304 | py | Python | csrv/model/cards/runner/card01004_test.py | mrroach/CentralServer | e377c65d8f3adf5a2d3273acd4f459be697aea56 | [
"Apache-2.0"
] | null | null | null | csrv/model/cards/runner/card01004_test.py | mrroach/CentralServer | e377c65d8f3adf5a2d3273acd4f459be697aea56 | [
"Apache-2.0"
] | null | null | null | csrv/model/cards/runner/card01004_test.py | mrroach/CentralServer | e377c65d8f3adf5a2d3273acd4f459be697aea56 | [
"Apache-2.0"
] | 1 | 2020-09-20T11:26:20.000Z | 2020-09-20T11:26:20.000Z | import unittest
from csrv.model import cards
from csrv.model.cards import corp
from csrv.model.cards import runner
from csrv.model import deck
from csrv.model import errors
from csrv.model import game
from csrv.model import premade_decks
from csrv.model import test_base
from csrv.model import timing_phases
from csrv.model.cards.runner import card01004
class Card01004Test(test_base.TestBase):
def setUp(self):
test_base.TestBase.setUp(self)
self.card = card01004.Card01004(self.game, self.game.runner)
self.game.runner.clicks.set(4)
self.game.runner.credits.set(5)
self.game.runner.grip.add(self.card)
self.game.insert_next_phase(
timing_phases.RunnerTurnActions(self.game, self.game.runner))
def test_playable(self):
self.assertIn(self.card._play_event_action,
self.game.current_phase().choices())
def test_card(self):
card = cards.Registry.get('Card01108')(self.game, self.game.corp)
server = self.game.corp.new_remote_server()
server.install(card)
response = self.card._play_event_action.request().new_response()
response.server = server
self.game.resolve_current_phase(self.card._play_event_action, response)
self.assertEqual(1, len(self.game.runner.credit_pools))
self.assertEqual(9, list(self.game.runner.credit_pools)[0].value)
choice = [c for c in self.game.current_phase().choices()
if c.server == server][0]
self.game.resolve_current_phase(choice, None)
self.assertEqual(2, len(self.game.runner.find_pools()))
self.assertEqual(
14, sum([p.value for p in self.game.runner.find_pools()]))
# Skip past phase 4_1
self.game.current_phase().end_phase()
# indicate that we want to continue the run
self.game.resolve_current_phase(
self.game.run._jack_out_action, None)
self.assertIsInstance(
self.game.current_phase(), timing_phases.TakeBrainDamage)
choices = self.game.current_phase().choices()
self.assertEqual(0, len(choices))
self.game.resolve_current_phase(None, None)
self.assertTrue(self.game.runner_flatlined)
self.assertTrue(self.game.corp_wins)
self.assertEqual(4, self.game.runner.max_hand_size)
self.assertEqual(1, self.game.runner.brain_damage)
if __name__ == '__main__':
unittest.main()
| 34.38806 | 75 | 0.736111 | 333 | 2,304 | 4.918919 | 0.294294 | 0.141636 | 0.102564 | 0.081197 | 0.274725 | 0.037851 | 0 | 0 | 0 | 0 | 0 | 0.019878 | 0.148438 | 2,304 | 66 | 76 | 34.909091 | 0.814985 | 0.026476 | 0 | 0 | 0 | 0 | 0.007589 | 0 | 0 | 0 | 0 | 0 | 0.211538 | 1 | 0.057692 | false | 0 | 0.211538 | 0 | 0.288462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65b5378414785d89834b281f738b418936b732a5 | 478 | py | Python | Web-programming/counting_sort.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | [
"Apache-2.0"
] | 1 | 2021-04-01T06:19:02.000Z | 2021-04-01T06:19:02.000Z | Web-programming/counting_sort.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | [
"Apache-2.0"
] | null | null | null | Web-programming/counting_sort.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | [
"Apache-2.0"
] | null | null | null | def couting_sort(nums, max_num):
sort_list = [0] * len(nums)
cnt_list = [0] * (max_num + 1)
for i in range(len(nums)):
cnt_list[nums[i]] += 1
for i in range(1, len(cnt_list)):
cnt_list[i] += cnt_list[i - 1]
for i in range(len(nums)):
sort_list[cnt_list[nums[i]] - 1] = nums[i]
cnt_list[nums[i]] -= 1
return sort_list
numbers = [1, 0, 3, 1, 0, 2, 5, 2, 1, 4]
result = counting_sort(numbers, max(numbers))
print(result)
| 23.9 | 50 | 0.569038 | 85 | 478 | 3.035294 | 0.270588 | 0.189922 | 0.05814 | 0.081395 | 0.344961 | 0.20155 | 0.147287 | 0 | 0 | 0 | 0 | 0.050847 | 0.259414 | 478 | 20 | 51 | 23.9 | 0.677966 | 0 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.142857 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
65b68ceef1970cd974ee16287d025599649ab6e1 | 3,713 | py | Python | src/py/analysis_lib/behaviour/arena_setup_adapter_test.py | LandonFuhr/aseen | 9a7d6b0a66930872cf1da7d3a5493326285f3bd1 | [
"MIT"
] | null | null | null | src/py/analysis_lib/behaviour/arena_setup_adapter_test.py | LandonFuhr/aseen | 9a7d6b0a66930872cf1da7d3a5493326285f3bd1 | [
"MIT"
] | null | null | null | src/py/analysis_lib/behaviour/arena_setup_adapter_test.py | LandonFuhr/aseen | 9a7d6b0a66930872cf1da7d3a5493326285f3bd1 | [
"MIT"
] | null | null | null | from analysis_lib.behaviour.arena_setup_adapter import get_arena_setup_from_dict, CircleGeometry, RectangleGeometry
def test_it_reads_id():
arena_setup = get_arena_setup_from_dict(fake_arena_setup)
area0 = arena_setup.areas[0]
assert area0._id == "Chamber 1"
def test_it_reads_circle_geometry():
arena_setup = get_arena_setup_from_dict(fake_arena_setup)
area0 = arena_setup.areas[0]
assert isinstance(area0.geometry, CircleGeometry)
assert area0.geometry.center.x == 100
assert area0.geometry.center.y == 200
assert area0.geometry.radius_x == 50
assert area0.geometry.radius_y == 75
assert area0.geometry.rotation == 0
def test_it_reads_rectangle_geometry():
arena_setup = get_arena_setup_from_dict(fake_arena_setup)
area0 = arena_setup.areas[1]
assert isinstance(area0.geometry, RectangleGeometry)
assert area0.geometry.top_left.x == 100
assert area0.geometry.top_left.y == 200
assert area0.geometry.width == 50
assert area0.geometry.height == 75
assert area0.geometry.rotation == 12.4
def test_it_reads_color_palette():
arena_setup = get_arena_setup_from_dict(fake_arena_setup)
area0 = arena_setup.areas[0]
assert area0.color_palette.active.fill == "rgba(0,0,0,0.2)"
assert area0.color_palette.active.border == "rgba(0,0,0,1)"
assert area0.color_palette.inactive.fill == "rgba(0,0,0,0.05)"
assert area0.color_palette.inactive.border == "rgba(0,0,0,0.2)"
def test_it_reads_both_areas_and_interaction_zones():
arena_setup = get_arena_setup_from_dict(fake_arena_setup)
assert len(arena_setup.areas) == 2
assert len(arena_setup.interaction_zones) == 1
fake_arena_setup = {
"areas": [
{
"id": "Chamber 1",
"geometry": {
"type": "circle",
"center": {
"x": 100,
"y": 200
},
"radiusX": 50,
"radiusY": 75,
"rotation": 0
},
"colorPalette": {
"active": {
"fill": "rgba(0,0,0,0.2)",
"border": "rgba(0,0,0,1)"
},
"inactive": {
"fill": "rgba(0,0,0,0.05)",
"border": "rgba(0,0,0,0.2)"
}
}
},
{
"id": "Chamber 2",
"geometry": {
"type": "rectangle",
"topLeft": {
"x": 100,
"y": 200
},
"width": 50,
"height": 75,
"rotation": 12.4
},
"colorPalette": {
"active": {
"fill": "rgba(0,0,0,0.2)",
"border": "rgba(0,0,0,1)"
},
"inactive": {
"fill": "rgba(0,0,0,0.05)",
"border": "rgba(0,0,0,0.2)"
}
}
}
],
"interactionZones": [
{
"id": "Chamber 2",
"geometry": {
"type": "rectangle",
"topLeft": {
"x": 100,
"y": 200
},
"width": 50,
"height": 75,
"rotation": 0
},
"colorPalette": {
"active": {
"fill": "rgba(0,0,0,0.2)",
"border": "rgba(0,0,0,1)"
},
"inactive": {
"fill": "rgba(0,0,0,0.05)",
"border": "rgba(0,0,0,0.2)"
}
}
}
]
}
| 30.434426 | 115 | 0.473202 | 398 | 3,713 | 4.208543 | 0.158291 | 0.052537 | 0.050149 | 0.066866 | 0.666866 | 0.488358 | 0.48 | 0.471045 | 0.446567 | 0.446567 | 0 | 0.075321 | 0.392136 | 3,713 | 121 | 116 | 30.68595 | 0.666814 | 0 | 0 | 0.458716 | 0 | 0 | 0.157016 | 0 | 0 | 0 | 0 | 0 | 0.174312 | 1 | 0.045872 | false | 0 | 0.009174 | 0 | 0.055046 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65b7b8321a60af4c0dbcaf5a0f3a72af00e3c7a3 | 3,527 | py | Python | token_registry/priv/bigchaindb.py | canuckasaurus/archeio-solanaszn | 55dc8712cf0409d110ae342cbbc620745668a4df | [
"MIT"
] | 1 | 2021-12-18T16:17:19.000Z | 2021-12-18T16:17:19.000Z | token_registry/priv/bigchaindb.py | canuckasaurus/archeio-solanaszn | 55dc8712cf0409d110ae342cbbc620745668a4df | [
"MIT"
] | null | null | null | token_registry/priv/bigchaindb.py | canuckasaurus/archeio-solanaszn | 55dc8712cf0409d110ae342cbbc620745668a4df | [
"MIT"
] | null | null | null | from protocol_mkh import Protocol
import json, sys, requests
try:
from itertools import izip as zip
except ImportError:
pass
import logging, os
from bigchaindb_driver import BigchainDB
bdb_root_url = 'http://bchaindb:9984'
bdb = BigchainDB(bdb_root_url)
#if "PORT_DEBUG" in os.environ:
# logging.basicConfig(filename='port.log', level=logging.DEBUG)
logging.basicConfig(filename='port.log', level=logging.DEBUG)
class Beam(Protocol):
def handler_transactioncreate(self, json_str_parameters):
try:
params = json.loads(json_str_parameters)
logging.debug('create args ----> %s' % params)
pk = params['public_key']
logging.debug('0001 %s' % pk)
asset = params['asset']
logging.debug('0002 %s' % asset)
prk = params['private_key']
logging.debug('0003 %s' % prk)
metadata = params['metadata']
logging.debug('0004 %s' % metadata)
prepared_token_tx = bdb.transactions.prepare(
operation='CREATE',
signers=pk,
metadata=metadata,
asset={ "data": asset} )
logging.debug('1111')
fulfilled_token_tx = bdb.transactions.fulfill(
prepared_token_tx,
private_keys=prk)
logging.debug('2222')
data = bdb.transactions.send_commit(fulfilled_token_tx)
logging.debug('3333')
ret = {'handler':'transactioncreate', 'status':'ok', 'data':data}
return json.dumps(ret)
except Exception as e:
logging.debug('error %s' % str(e))
return json.dumps({
'handler':'transactioncreate',
'status': 'failed',
'error': 'port exception %s' % str(e)})
def handler_transactionupdate(self, json_str_parameters):
try:
params = json.loads(json_str_parameters)
creation_tx = bdb.transactions.retrieve(params['txid'])
asset_id = creation_tx['id']
transfer_asset = { 'id': asset_id }
output_index = 0
output = creation_tx['outputs'][output_index]
transfer_input = {
'fulfillment': output['condition']['details'],
'fulfills': {
'output_index': output_index,
'transaction_id': creation_tx['id'],
},
'owners_before': output['public_keys'],
}
prepared_transfer_tx = bdb.transactions.prepare(
operation='TRANSFER',
asset=transfer_asset,
inputs=transfer_input,
metadata=params['metadata'],
recipients=params['recipient_public_key'],
)
fulfilled_transfer_tx = bdb.transactions.fulfill(
prepared_transfer_tx,
private_keys=params['owner_private_key'],
)
data = bdb.transactions.send_commit(fulfilled_transfer_tx)
ret = {'handler':'transactioncreate', 'status':'ok', 'data':data}
return json.dumps(ret)
except Exception as e:
logging.debug('error %s' % str(e))
return json.dumps({
'handler':'transactioncreate',
'status': 'failed',
'error': 'port exception %s' % str(e)})
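# Illustrative sketch of the expected request format (hypothetical asset and
# metadata; the real entry point is the Protocol port driven by Beam().run()):
#
#   params = json.dumps({
#       'public_key': pk, 'private_key': prk,
#       'asset': {'serial': '123'}, 'metadata': {'note': 'minted'},
#   })
#   reply = Beam().handler_transactioncreate(params)
#   # reply is a JSON string with 'handler', 'status' and 'data' keys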
if __name__ == '__main__':
logging.debug('main run')
Beam().run()
| 35.27 | 77 | 0.552594 | 344 | 3,527 | 5.468023 | 0.313953 | 0.082935 | 0.045189 | 0.021265 | 0.408293 | 0.339181 | 0.298777 | 0.298777 | 0.245614 | 0.245614 | 0 | 0.014001 | 0.331727 | 3,527 | 99 | 78 | 35.626263 | 0.784048 | 0.026935 | 0 | 0.253012 | 0 | 0 | 0.1479 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024096 | false | 0.012048 | 0.072289 | 0 | 0.156627 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65bcb322636197f378e40c4264277ac4b05f44c4 | 2,180 | py | Python | turbine/code/py/helpers.py | gounley/swift-t | 77d92f21ae73762d3d7f247c4fd23c58d3441210 | [
"Apache-2.0"
] | null | null | null | turbine/code/py/helpers.py | gounley/swift-t | 77d92f21ae73762d3d7f247c4fd23c58d3441210 | [
"Apache-2.0"
] | null | null | null | turbine/code/py/helpers.py | gounley/swift-t | 77d92f21ae73762d3d7f247c4fd23c58d3441210 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 University of Chicago and Argonne National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
# HELPERS.PY
# Python helpers for JSON module
import json
# Type classes for comparison:
type_str = "x".__class__
type_list = [].__class__
type_dict = {}.__class__
def set_key_type(k):
""" Convert to integer if possible """
try:
result = int(k)
except ValueError:
result = k
return result
# def json_path(filename, path):
# """ Reusable function to search a JSON tree """
# fp = open(filename, "r")
# J = json.load(fp)
def json_path(s, path):
""" Reusable function to search a JSON tree """
J = json.loads(s)
P = path.split(",")
for p in P:
k = set_key_type(p)
J = J[k]
return J
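# Illustrative usage (a sketch): keys in the comma-separated path are tried
# as integers first, so list indices work alongside dict keys:
#
#   json_path('{"a": {"b": [10, 20]}}', 'a,b,1')  # -> 20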
def json_type(filename, path):
""" Obtain the type of the entry at given path in the JSON tree """
global type_str, type_list, type_dict
J = json_path(filename, path)
c = J.__class__
if c == type_str:
return "string"
elif c == type_list:
return "list"
elif c == type_dict:
return "dict"
else:
raise "ERROR"
def json_dict_entries(filename, path):
""" Assume dict and return all keys at given path """
J = json_path(filename, path)
L = []
for i in J.keys():
L.append(i)
result = ",".join(L)
return result
def json_list_length(filename, path):
""" Assume list and return length of it """
J = json_path(filename, path)
return str(len(J))
def json_get(filename, path):
""" Return whatever is at the given path (usually scalar) """
J = json_path(filename, path)
return str(J)
| 26.91358 | 74 | 0.652294 | 323 | 2,180 | 4.275542 | 0.408669 | 0.078204 | 0.057929 | 0.072411 | 0.127444 | 0.097031 | 0.097031 | 0.053584 | 0 | 0 | 0 | 0.004863 | 0.245413 | 2,180 | 80 | 75 | 27.25 | 0.83465 | 0.489908 | 0 | 0.142857 | 0 | 0 | 0.020853 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.02381 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65bd80440f874daf2ce14edcf08673d982969ab9 | 2,639 | py | Python | 2020/Python/day23.py | kamoshi/Advent-of-Code | 5b78fa467409e8b8c5a16efe31684b8ce493bcee | [
"MIT"
] | 1 | 2020-12-21T13:27:52.000Z | 2020-12-21T13:27:52.000Z | 2020/Python/day23.py | kamoshi/advent-of-code | 5b78fa467409e8b8c5a16efe31684b8ce493bcee | [
"MIT"
] | null | null | null | 2020/Python/day23.py | kamoshi/advent-of-code | 5b78fa467409e8b8c5a16efe31684b8ce493bcee | [
"MIT"
] | null | null | null | from itertools import chain
from typing import Tuple
# Implementation uses generic python list -> pretty inefficient; too slow for part 2
# (it's a pure function though ;) )
def move_cups(cups: list[int]) -> list[int]:
cups_round = cups[4:]
current = cups[0]
taken = [cups[1], cups[2], cups[3]]
minimum, maximum = min(cups_round), max(cups_round)
destination = current
while (destination := destination - 1) in taken or destination < minimum:
if destination < minimum:
destination = maximum + 1
for i in range(len(cups_round)):
if cups_round[i] == destination:
cups_round = cups_round[:i+1] + taken + cups_round[i+1:]
break
cups_round.append(current)
return cups_round
def to_hash_list(order: str, min_cups: int = 0) -> Tuple[dict[int, int], int, int]:
hash_list = {}
order = list(chain(map(int, order), [x for x in range(len(order)+1, min_cups+1)]))
for i in range(len(order)):
hash_list[int(order[i])] = int(order[(i+1) % len(order)])
return hash_list, min(order), max(order)
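# Illustrative sketch: to_hash_list("32415") yields the successor map
# {3: 2, 2: 4, 4: 1, 1: 5, 5: 3} plus (min, max) = (1, 5); each cup points
# at the cup clockwise of it, which is the structure move_cups_hash mutates.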
def get_list_from_1(hash_list: dict[int, int]) -> str:
out, next_cup = [], 1
while (next_cup := hash_list[next_cup]) != 1:
out.append(next_cup)
return "".join(map(str, out))
# Implementation uses dictionary as a form of a singly-linked list, more efficient
def move_cups_hash(cups: dict[int, int], minimum: int, maximum: int, ptr: int) -> int:
forward = current = ptr
taken = [(forward := cups[forward]) for _ in range(3)]
cups[current] = cups[taken[-1]] # change pointers away from taken cups
destination = current
while (destination := destination - 1) in taken or destination < minimum:
if destination < minimum:
destination = maximum + 1
forward = cups[destination]
cups[destination], cups[taken[-1]] = taken[0], forward # change pointers to include the taken cups
return cups[current]

def solve_p1(order: str, moves: int) -> str:
    hash_list, minimum, maximum = to_hash_list(order, min_cups=0)
    ptr = int(order[0])
    for _ in range(moves):
        ptr = move_cups_hash(hash_list, minimum, maximum, ptr)
    return get_list_from_1(hash_list)

def solve_p2(order: str, moves: int, min_cups: int) -> int:
    hash_list, minimum, maximum = to_hash_list(order, min_cups)
    ptr = int(order[0])
    for _ in range(moves):
        ptr = move_cups_hash(hash_list, minimum, maximum, ptr)
    return hash_list[1] * hash_list[hash_list[1]]

print(solve_p1(order="389547612", moves=100))
print(solve_p2(order="389547612", moves=10000000, min_cups=1000000))
| 34.723684 | 103 | 0.658962 | 390 | 2,639 | 4.307692 | 0.220513 | 0.07619 | 0.030952 | 0.052381 | 0.31369 | 0.31369 | 0.272619 | 0.272619 | 0.272619 | 0.272619 | 0 | 0.033221 | 0.212959 | 2,639 | 75 | 104 | 35.186667 | 0.775638 | 0.104585 | 0 | 0.264151 | 0 | 0 | 0.007637 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.037736 | 0 | 0.264151 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65be606b8c93275755b3b2bac1c11ceb6ae54156 | 3,069 | py | Python | runner/Runner.py | dhy2000/CO_Judger | fcc1d97b738268ca15cfbe8aa628032ce636a14e | [
"MIT"
] | null | null | null | runner/Runner.py | dhy2000/CO_Judger | fcc1d97b738268ca15cfbe8aa628032ce636a14e | [
"MIT"
] | null | null | null | runner/Runner.py | dhy2000/CO_Judger | fcc1d97b738268ca15cfbe8aa628032ce636a14e | [
"MIT"
] | null | null | null | '''
Required elements:
1. Path to the source code
2. Path to the simulation software, plus any scripts it needs
3. Output path of the simulation software
Tasks to perform:
1. Extract the source code into the designated temporary directory
2. Read all source files under the temporary directory
3. Compile, invoke the simulation, and save its output
'''
import zipfile
import os, shutil
from utils.IO import IO
from configs.config import Config
from utils.Testcase import Testcase
class Runner:
    globconf = Config.getConfig("configs/global.json")

    def __init__(self, src, path):
        self.src = src
        self.path = path
        self.loadcode()
    def _includeProtect(self, vfile):
        macro = vfile.upper()
        macro = ''.join(map(lambda x: x if (x.isupper() or x.islower() or x.isdigit()) else '_', macro.strip()))
        macro = '_INCLUDED_%s_V' % macro
        # print("# Include Protect %s" % vfile)
        # print("macro = ", macro)
        with open(vfile, "r") as fp:
            content = fp.read()
        content = '`ifndef {macro}\n`define {macro}\n\n'.format(macro=macro) + content + '\n\n`endif\n'
        with open(vfile, "w") as fp:
            fp.write(content)
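
    # For illustration (hypothetical file name, not from the original repo):
    # a source file "cpu/alu.v" would be wrapped as
    #   `ifndef _INCLUDED_CPU_ALU_V_V
    #   `define _INCLUDED_CPU_ALU_V_V
    #   ... original contents ...
    #   `endif
    # since every non-alphanumeric character of the upper-cased name becomes
    # '_' (turning ".v" into "_V"), and the template then appends another "_V".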
    def _addIncProtect(self, fpath, curdir):
        for fn in os.listdir(fpath + '/' + curdir):
            if fn[-2:] == '.v':
                self.v_list.append(curdir + '/' + fn)
                self._includeProtect(fpath + '/' + curdir + '/' + fn)
            cur_f = fpath + '/' + curdir + '/' + fn
            if os.path.isdir(cur_f):
                self._addIncProtect(fpath, curdir + '/' + fn)
    def loadcode(self):
        if not os.path.exists(self.src):
            IO.writestr("! Runner.loadcode: Source Does Not Exist!")
            return False
        src_unzip = self.path + '/' + 'src_unzip'
        self.src_unzip = src_unzip
        if os.path.exists(src_unzip):
            shutil.rmtree(src_unzip)
        os.mkdir(src_unzip)
        try:
            zip = zipfile.ZipFile(self.src)
            zip.extractall(src_unzip)
            zip.close()
        except Exception:
            IO.writestr("! Runner.loadcode: Error occurred on extracting zip")
            return False
        self.v_list = []
        self._addIncProtect(src_unzip, '')
        # copy testbench
        tb = self.globconf['testbench']
        shutil.copyfile(src=tb, dst=src_unzip + '/tb.v')
        return True
    def loadtest(self, testcase: Testcase):
        in_name = self.path + '/' + self.globconf['inputfilename']
        hexname = testcase.path + '/' + testcase.hex
        if not os.path.exists(hexname):
            IO.writestr('! Runner.loadtest: Hex Machine Code Does Not Exist')
            return False
        shutil.copyfile(src=hexname, dst=in_name)
        return True

    def compile(self):
        pass

    def run(self, testcase, out):
        r = self.loadtest(testcase)
        if not r:
            IO.writestr('! Runner.run: load testcase error.')
            return False
        outpath = "{path}/out".format(path=self.path)
        if not os.path.exists(outpath):
            os.mkdir(outpath)
        ###### Code Here ######
        return True
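
# A minimal driver sketch (hypothetical paths and objects, added for
# illustration; assumes configs/global.json supplies the 'testbench' and
# 'inputfilename' keys the class reads above):
#
#   runner = Runner(src="submissions/student1.zip", path="work/student1")
#   case = ...                    # a Testcase built elsewhere in the judge
#   runner.run(case, out=None)    # the simulation call itself is still a stub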
| 33 | 113 | 0.540893 | 357 | 3,069 | 4.568627 | 0.341737 | 0.04905 | 0.02943 | 0.020233 | 0.031269 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003388 | 0.326817 | 3,069 | 92 | 114 | 33.358696 | 0.78606 | 0.063864 | 0 | 0.142857 | 0 | 0 | 0.11498 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.042857 | 0.071429 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |