Dataset schema:

    max_stars_repo_path    string   lengths 4 to 286
    max_stars_repo_name    string   lengths 5 to 119
    max_stars_count        int64    0 to 191k
    id                     string   lengths 1 to 7
    content                string   lengths 6 to 1.03M
    content_cleaned        string   lengths 6 to 1.03M
    language               string   111 classes
    language_score         float64  0.03 to 1
    comments               string   lengths 0 to 556k
    edu_score              float64  0.32 to 5.03
    edu_int_score          int64    0 to 5
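The rows below preview records in this schema. As orientation, here is a minimal sketch of loading and filtering a dataset with these columns via the Hugging Face datasets library; the dataset identifier is a placeholder, not the real name, and the thresholds are illustrative assumptions.

    from datasets import load_dataset

    # Placeholder identifier: substitute the actual dataset name.
    ds = load_dataset("your-org/code-with-edu-scores", split="train")

    # Keep confidently detected English records with a high educational score
    # (thresholds are illustrative, not prescribed by the dataset).
    subset = ds.filter(
        lambda row: row["language"] == "en"
        and row["language_score"] >= 0.8
        and row["edu_int_score"] >= 3
    )
    print(subset[0]["max_stars_repo_path"], subset[0]["edu_score"])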
tests/bytecode/mp-tests/try4.py
LabAixBidouille/micropython
303
6621651
<reponame>LabAixBidouille/micropython
try:
    f()
except A:
    g()
except:
    h()

try:
    f()
except A:
    g()
except B as c:
    h()

try:
    f()
except A:
    g()
except B as c:
    h()
except:
    i()
none
1
1.939423
2
fom/__init__.py
MichalKononenko/FoundationsOfMechanics
0
6621652
""" Library for working with symplectic geometries as defined in Abraham and Marsden's Foundations of Mechanics. This works attempts to mirror the definitions in the textbook as close as possible. The :mod:`fom.interfaces` contains the API documentation, defined in the form of Python abstract classes that have no implemented methods. When wiring custom objects into this library, it is recommended that these interfaces are implemented in order to provide robust code. They should have no implementation conflicts. Type consistency is checked by ``mypy``. """
""" Library for working with symplectic geometries as defined in Abraham and Marsden's Foundations of Mechanics. This works attempts to mirror the definitions in the textbook as close as possible. The :mod:`fom.interfaces` contains the API documentation, defined in the form of Python abstract classes that have no implemented methods. When wiring custom objects into this library, it is recommended that these interfaces are implemented in order to provide robust code. They should have no implementation conflicts. Type consistency is checked by ``mypy``. """
en
0.938639
Library for working with symplectic geometries as defined in Abraham and Marsden's Foundations of Mechanics. This work attempts to mirror the definitions in the textbook as closely as possible. The :mod:`fom.interfaces` module contains the API documentation, defined in the form of Python abstract classes that have no implemented methods. When wiring custom objects into this library, it is recommended that these interfaces be implemented in order to provide robust code. They should have no implementation conflicts. Type consistency is checked by ``mypy``.
1.98686
2
register_lib.py
jacob975/TATIRP
0
6621653
#!/usr/bin/python
'''
Program:
    This is a library program of register.
Usage:
    1. from register_lib import [func name] or import curvefit
    2. use it in your lovely code.
Editor:
    Jacob975

#################################
update log
20180628 version alpha 1
    1. Remove some
'''
import numpy as np

# calculate the inner product and error of two sides, from star_1 to star_2 and from star_1 to star_3.
def inner_product(star_1, star_2, star_3, sigma):
    try:
        inner_prod = (star_2[0] - star_1[0])*(star_3[0] - star_1[0]) + (star_2[1] - star_1[1])*(star_3[1] - star_1[1])
        x_part_1 = np.power(star_1[0] - star_2[0], 2)
        x_error_1 = (2 * np.power(sigma, 2))/x_part_1
        x_part_2 = np.power(star_1[0] - star_3[0], 2)
        x_error_2 = (2 * np.power(sigma, 2))/x_part_2
        y_part_1 = np.power(star_1[1] - star_2[1], 2)
        y_error_1 = (2 * np.power(sigma, 2))/y_part_1
        y_part_2 = np.power(star_1[1] - star_3[1], 2)
        y_error_2 = (2 * np.power(sigma, 2))/y_part_2
        var = x_part_1*x_part_2*(x_error_1 + x_error_2) + y_part_1*y_part_2*(y_error_1 + y_error_2)
        error = np.power(var, 0.5)
    except Exception:
        return 0, 0
    else:
        return inner_prod, error

# check the number of matching inner products of two stars, then return the number.
def num_relation_lister(ref_star, star, error):
    valid_inner_prod = 0
    for ref_inner_prod in ref_star:
        for i in range(len(star)):
            if ref_inner_prod <= star[i] + error[i] and ref_inner_prod >= star[i] - error[i]:
                valid_inner_prod = valid_inner_prod + 1
                continue
    return valid_inner_prod

# choose a star as a target, then choose two others and calculate the inner product.
def get_inner_product(iraf_table, infos = None):
    inner_prod_star_list = []
    inner_prod_error_list = []
    sigma = 2.0
    # choose a star, named A
    for i in range(len(iraf_table)):
        inner_prod_star = np.array([])
        inner_prod_error = np.array([])
        # choose two other stars, named B and C, to get the inner product of the two sides AB and AC.
        for j in range(len(iraf_table)):
            if i == j:
                continue
            for k in range(len(iraf_table)):
                if k == i:
                    continue
                if k <= j:
                    continue
                inner_prod, error = inner_product(iraf_table[i,1:3], iraf_table[j,1:3], iraf_table[k,1:3], sigma)
                inner_prod_star = np.append(inner_prod_star, inner_prod)
                inner_prod_error = np.append(inner_prod_error, error)
        # set all inner products as a list, like the DNA of this star
        inner_prod_star_list.append(inner_prod_star)
        inner_prod_error_list.append(inner_prod_error)
    inner_prod_star_list = np.array(inner_prod_star_list)
    inner_prod_error_list = np.array(inner_prod_error_list)
    return inner_prod_star_list, inner_prod_error_list

# choose a star as a target, then choose two others and calculate the inner product.
def get_inner_product_SE(SE_table):
    inner_prod_star_list = []
    inner_prod_error_list = []
    sigma = 2.0
    # choose a star, named A
    for i in range(len(SE_table)):
        inner_prod_star = np.array([])
        inner_prod_error = np.array([])
        # choose two other stars, named B and C, to get the inner product of the two sides AB and AC.
        for j in range(len(SE_table)):
            if i == j:
                continue
            for k in range(len(SE_table)):
                if k == i:
                    continue
                if k <= j:
                    continue
                inner_prod, error = inner_product(SE_table[i,2:4], SE_table[j,2:4], SE_table[k,2:4], sigma)
                inner_prod_star = np.append(inner_prod_star, inner_prod)
                inner_prod_error = np.append(inner_prod_error, error)
        # set all inner products as a list, like the DNA of this star
        inner_prod_star_list.append(inner_prod_star)
        inner_prod_error_list.append(inner_prod_error)
    inner_prod_star_list = np.array(inner_prod_star_list)
    inner_prod_error_list = np.array(inner_prod_error_list)
    return inner_prod_star_list, inner_prod_error_list

#--------------------------------------------------------------------
# This is a func to wipe out exotic numbers in a list
# This one is made for matching images
def get_rid_of_exotic_severe(value_list, VERBOSE = 0):
    answer_value_list = value_list[:]
    std = np.std(answer_value_list)
    # recursive condition
    while std > 1:
        mean = np.mean(answer_value_list)
        # get the error of each value to the mean, then delete the one with the largest error.
        sub_value_list = np.subtract(answer_value_list, mean)
        abs_value_list = np.absolute(sub_value_list)
        index_max = np.argmax(abs_value_list)
        answer_value_list = np.delete(answer_value_list, index_max)
        std = np.std(answer_value_list)
    return answer_value_list

# This one is made for scif calculation
def get_rid_of_exotic(value_list):
    std = np.std(value_list)
    mean = np.mean(value_list)
    # get the error of each value to the mean, then delete the one with the largest error.
    sub_value_list = np.subtract(value_list, mean)
    abs_value_list = np.absolute(sub_value_list)
    for i in range(len(abs_value_list)):
        if abs_value_list[i] >= 3 * std:
            value_list = np.delete(value_list, i)
            value_list = get_rid_of_exotic(value_list)
            return value_list
    return value_list
en
0.886475
#!/usr/bin/python Program: This is a library program of register. Usage: 1. from register_lib import [func name] or import curvefit 2. use it in your lovely code. Editor: Jacob975 ################################# update log 20180628 version alpha 1 1. Remove some # calculate the inner product and error of two sides, from star_1 to star_2 and from star_1 to star_3. # check the number of matching inner products of two stars, then return the number. # choose a star as a target, then choose two others and calculate the inner product. # choose a star, named A # choose two other stars, named B and C, to get the inner product of the two sides AB and AC. # set all inner products as a list, like the DNA of this star # choose a star as a target, then choose two others and calculate the inner product. # choose a star, named A # choose two other stars, named B and C, to get the inner product of the two sides AB and AC. # set all inner products as a list, like the DNA of this star #-------------------------------------------------------------------- # This is a func to wipe out exotic numbers in a list # This one is made for matching images # recursive condition # get the error of each value to the mean, then delete the one with the largest error. # This one is made for scif calculation # get the error of each value to the mean, then delete the one with the largest error.
2.688974
3
src/awscli_login/__main__.py
kstateome/awscli-login
0
6621654
<reponame>kstateome/awscli-login
# from signal import signal, SIGINT, SIGTERM
import logging
import signal
import traceback
import sys
import os
import subprocess
import pickle

from argparse import Namespace
from datetime import datetime
from functools import wraps

import boto3

from botocore.session import Session
from daemoniker import Daemonizer, SignalHandler1
from daemoniker import send, SIGINT, SIGTERM, SIGABRT

from awscli_login.namespace_passer import _LocalNamespacePasser

from .config import (
    Profile,
    ERROR_NONE,
    ERROR_UNKNOWN,
)
from .exceptions import AlreadyLoggedOut, AWSCLILogin
from .logger import (
    configConsoleLogger,
    configFileLogger,
)
from .saml import (
    authenticate,
    refresh,
)
from .util import (
    get_selection,
    nap,
    remove_credentials,
    save_credentials,
)
from .awscli_typing import Role

logger = logging.getLogger(__package__)


def save_sts_token(session: Session, client, saml: str, role: Role,
                   duration) -> datetime:
    params = dict(
        RoleArn=role[1],
        PrincipalArn=role[0],
        SAMLAssertion=saml,
    )
    if duration:
        # duration is optional and can be set by the role;
        # avoid passing if not set.
        params['DurationSeconds'] = duration

    token = client.assume_role_with_saml(**params)
    logger.info("Retrieved temporary Amazon credentials for role: " + role[1])

    return save_credentials(session, token)


def daemonize(profile: Profile, session: Session, client: boto3.client,
              role: Role, expires: datetime) -> bool:
    with Daemonizer() as (is_setup, daemonizer):
        is_parent, profile, session, client, role, expires = daemonizer(
            profile.pidfile,
            profile,
            session,
            client,
            role,
            expires,
        )

        if not is_parent:
            sighandler = SignalHandler1(profile.pidfile)
            sighandler.start()

            logger = configFileLogger(profile.logfile, logging.INFO)
            logger.info('Starting refresh process for role %s' % role[1])

            # TODO add retries!
            while (True):
                retries = 0
                nap(expires, 0.9)

                while (True):
                    try:
                        saml, _ = refresh(
                            profile.ecp_endpoint_url,
                            profile.cookies,
                        )
                    except Exception as e:
                        retries += 1
                        if (retries < 4):
                            logger.info('Refresh failed: %s' % str(e))
                            nap(expires, 0.2)
                        else:
                            raise
                    else:
                        break

                expires = save_sts_token(session, client, saml, role)

    return is_parent


def error_handler(skip_args=True, validate=False):
    """ A decorator for exception handling and logging. """
    def decorator(f):
        @wraps(f)
        def wrapper(args: Namespace, session: Session):
            exp = None  # type: Exception
            code = ERROR_NONE
            sig = None

            try:
                configConsoleLogger(args.verbose)

                if not skip_args:
                    profile = Profile(session, args, validate)
                else:
                    profile = Profile(session, None, validate)

                f(profile, session)
            except AWSCLILogin as e:
                code = e.code
                exp = e
            except SIGINT as e:
                sig = 'SIGINT'
            except SIGABRT as e:
                sig = 'SIGABRT'
            except SIGTERM as e:
                sig = 'SIGTERM'
            except Exception as e:
                code = ERROR_UNKNOWN
                exp = e
            finally:
                if code:
                    logger.error(str(exp), exc_info=False)
                    logger.debug(traceback.format_exc())

                if sig:
                    logger.info('Received signal: %s. Shutting down...' % sig)

                exit(code)

        return wrapper
    return decorator


def windowsdaemonize(profile, role, expires):
    python_path = sys.executable
    python_path = os.path.abspath(python_path)
    python_dir = os.path.dirname(python_path)
    pythonw_path = python_dir + '/pythonw.exe'
    success_timeout = 30

    with _LocalNamespacePasser() as worker_argpath:
        # Write an argvector for the worker to the namespace passer
        worker_argv = [
            profile,  # namespace_path
            role,
            expires
        ]
        with open(worker_argpath, 'wb') as f:
            # Use the highest available protocol
            pickle.dump(worker_argv, f, protocol=-1)

        # Create an env for the worker to let it know what to do
        worker_env = {}
        worker_env.update(dict(os.environ))

        # Figure out the path to the current file
        # worker_target = os.path.abspath(__file__)
        worker_cmd = f'"{python_path}" -m awscli_login.windaemon "{worker_argpath}"'

        try:
            # This will wait for the worker to finish, or cancel it at
            # the timeout.
            subprocess.run(
                worker_cmd,
                env=worker_env,
                timeout=success_timeout
            )
        except subprocess.TimeoutExpired as exc:
            raise ChildProcessError(
                'Timeout while waiting for daemon init.'
            ) from exc


@error_handler(skip_args=False, validate=True)
def main(profile: Profile, session: Session):
    is_parent = True

    try:
        client = boto3.client('sts')

        # TODO force-refresh should kill refresh!
        if not profile.force_refresh:
            profile.raise_if_logged_in()

        # Must know username to lookup cookies
        profile.get_username()

        try:
            saml, roles = refresh(
                profile.ecp_endpoint_url,
                profile.cookies,
            )
        except Exception:
            creds = profile.get_credentials()
            saml, roles = authenticate(profile.ecp_endpoint_url,
                                       profile.cookies, *creds)

        duration = profile.duration
        role = get_selection(roles, profile.role_arn)
        expires = save_sts_token(session, client, saml, role, duration)

        if not profile.force_refresh and not profile.disable_refresh:
            if sys.platform != 'win32':
                is_parent = daemonize(profile, session, client, role, expires)
            else:
                windowsdaemonize(profile, role, expires)
    except Exception as e:
        raise
    finally:
        if not is_parent:
            logger.info('Exiting refresh process')


@error_handler()
def logout(profile: Profile, session: Session):
    try:
        send(profile.pidfile, SIGINT)

        if os.path.exists(profile.pidfile):
            os.remove(profile.pidfile)

        remove_credentials(session)
    except IOError:
        raise AlreadyLoggedOut
en
0.773662
# from signal import signal, SIGINT, SIGTERM # duration is optional and can be set by the role; # avoid passing if not set. # TODO add retries! A decorator for exception handling and logging. # type: Exception # Write an argvector for the worker to the namespace passer # namespace_path # Use the highest available protocol # Create an env for the worker to let it know what to do # Figure out the path to the current file # worker_target = os.path.abspath(__file__) # This will wait for the worker to finish, or cancel it at # the timeout. # TODO force-refresh should kill refresh! # Must know username to lookup cookies
1.640871
2
site/thicc/urls/production.py
aldenjenkins/ThiccGaming
0
6621655
<gh_stars>0
#from django.conf.urls import url

from .common import urlpatterns as common_urlpatterns

#from rest_framework_swagger.views import get_swagger_view

urlpatterns = [
] + common_urlpatterns
en
0.235939
#from django.conf.urls import url #from rest_framework_swagger.views import get_swagger_view
1.277766
1
tests/test_internals_filter.py
MinKPark/ptvsd
1
6621656
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.

import os

import pytest

import ptvsd
from ptvsd.wrapper import InternalsFilter

INTERNAL_DIR = os.path.dirname(os.path.abspath(ptvsd.__file__))


@pytest.mark.parametrize('path', [
    os.path.abspath(ptvsd.__file__),
    # File used by VS/VSC to launch ptvsd
    os.path.join('somepath', 'ptvsd_launcher.py'),
    # Any file under ptvsd
    os.path.join(INTERNAL_DIR, 'somefile.py'),
])
def test_internal_paths(path):
    int_filter = InternalsFilter()
    assert int_filter.is_internal_path(path)


@pytest.mark.parametrize('path', [
    __file__,
    os.path.join('somepath', 'somefile.py'),
])
def test_user_file_paths(path):
    int_filter = InternalsFilter()
    assert not int_filter.is_internal_path(path)
en
0.830169
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE in the project root # for license information. # File used by VS/VSC to launch ptvsd # Any file under ptvsd
2.25665
2
main.py
sippyca/voice_interface
0
6621657
import channels, interfaces

# In production, the credentials will be obtained using load_dotenv
creds = interfaces.Credentials("192.168.1.1:1111", "username", "password")
rest_interface = (interfaces.RestInterface(creds))


def get_channel():
    """Get channel information for channel 123456 from the Rest interface."""
    channel_id: str = "123456"
    channel = channels.Channel(rest_interface, channel_id)
    r = channel.get()
    print(r.text)


def main():
    get_channel()


if __name__ == "__main__":
    main()
en
0.763908
# In production, the credentials will be obtained using load_dotenv Get channel information for channel 123456 from the Rest interface.
2.579529
3
piestats/web/player_names.py
jrgp/soldat-pystats
8
6621658
import re

trailing_name_count_matcher = re.compile(r'(.+)\((\d+)\)$')


def remove_redundant_player_names(names):
    '''
    Given a list of names, remove ones with trailing (1) through (n)
    depending on correctness

    Assumes names will be a list of strings with no duplicates.

    Order is preserved on return
    '''
    if len(names) == 1:
        return names

    original_names = set(names)

    for name in list(names):
        m = trailing_name_count_matcher.match(name)
        if m:
            prefix, count = m.groups()
            count = int(count)
            if (count == 1 and prefix in original_names) or (count > 1 and '%s(%d)' % (prefix, count - 1) in original_names):
                names.remove(name)

    return names
en
0.833286
Given a list of names, remove ones with trailing (1) through (n) depending on correctness Assumes names will be a list of strings with no duplicates. Order is preserved on return
3.605433
4
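A brief usage sketch of remove_redundant_player_names from the row above; the sample names are invented for illustration.

    from piestats.web.player_names import remove_redundant_player_names

    # "Major(1)" duplicates "Major", and "Major(2)" chains off "Major(1)",
    # so both are dropped; "Minor(3)" has no "Minor(2)" ancestor and stays.
    names = ["Major", "Major(1)", "Major(2)", "Minor(3)"]
    print(remove_redundant_player_names(names))  # ['Major', 'Minor(3)']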
datapack/data/scripts/quests/154_SacrificeToSea/__init__.py
DigitalCoin1/L2SPERO
0
6621659
<reponame>DigitalCoin1/L2SPERO
# Maked by Mr. Have fun! Version 0.2
import sys
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest

qn = "154_SacrificeToSea"

FOX_FUR_ID = 1032
FOX_FUR_YARN_ID = 1033
MAIDEN_DOLL_ID = 1034
EARING_ID = 113


class Quest (JQuest):

    def __init__(self, id, name, descr):
        JQuest.__init__(self, id, name, descr)

    def onEvent(self, event, st):
        htmltext = event
        if event == "1":
            st.set("id", "0")
            htmltext = "30312-04.htm"
            st.set("cond", "1")
            st.setState(STARTED)
            st.playSound("ItemSound.quest_accept")
        return htmltext

    def onTalk(self, npc, player):
        htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
        st = player.getQuestState(qn)
        if not st:
            return htmltext

        npcId = npc.getNpcId()
        id = st.getState()
        if id == CREATED:
            st.setState(STARTING)
            st.set("cond", "0")
            st.set("onlyone", "0")
            st.set("id", "0")
        if npcId == 30312 and st.getInt("cond") == 0 and st.getInt("onlyone") == 0:
            if st.getInt("cond") < 15:
                if player.getLevel() >= 2:
                    htmltext = "30312-03.htm"
                    return htmltext
                else:
                    htmltext = "30312-02.htm"
                    st.exitQuest(1)
            else:
                htmltext = "30312-02.htm"
                st.exitQuest(1)
        elif npcId == 30312 and st.getInt("cond") == 0 and st.getInt("onlyone") == 1:
            htmltext = "<html><body>This quest has already been completed.</body></html>"
        if id == STARTED:
            if npcId == 30312 and st.getInt("cond") == 1 and (st.getQuestItemsCount(FOX_FUR_YARN_ID) == 0 and st.getQuestItemsCount(MAIDEN_DOLL_ID) == 0) and st.getQuestItemsCount(FOX_FUR_ID) < 10:
                htmltext = "30312-05.htm"
            elif npcId == 30312 and st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_ID) >= 10:
                htmltext = "30312-08.htm"
            elif npcId == 30051 and st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_ID) < 10 and st.getQuestItemsCount(FOX_FUR_ID) > 0:
                htmltext = "30051-01.htm"
            elif npcId == 30051 and st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_ID) >= 10 and st.getQuestItemsCount(FOX_FUR_YARN_ID) == 0 and st.getQuestItemsCount(MAIDEN_DOLL_ID) == 0 and st.getQuestItemsCount(MAIDEN_DOLL_ID) < 10:
                htmltext = "30051-02.htm"
                st.giveItems(FOX_FUR_YARN_ID, 1)
                st.takeItems(FOX_FUR_ID, st.getQuestItemsCount(FOX_FUR_ID))
            elif npcId == 30051 and st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_YARN_ID) >= 1:
                htmltext = "30051-03.htm"
            elif npcId == 30051 and st.getInt("cond") == 1 and st.getQuestItemsCount(MAIDEN_DOLL_ID) == 1:
                htmltext = "30051-04.htm"
            elif npcId == 30312 and st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_YARN_ID) >= 1:
                htmltext = "30312-06.htm"
            elif npcId == 30055 and st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_YARN_ID) >= 1:
                htmltext = "30055-01.htm"
                st.giveItems(MAIDEN_DOLL_ID, 1)
                st.takeItems(FOX_FUR_YARN_ID, st.getQuestItemsCount(FOX_FUR_YARN_ID))
            elif npcId == 30055 and st.getInt("cond") == 1 and st.getQuestItemsCount(MAIDEN_DOLL_ID) >= 1:
                htmltext = "30055-02.htm"
            elif npcId == 30055 and st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_YARN_ID) == 0 and st.getQuestItemsCount(MAIDEN_DOLL_ID) == 0:
                htmltext = "30055-03.htm"
            elif npcId == 30312 and st.getInt("cond") == 1 and st.getQuestItemsCount(MAIDEN_DOLL_ID) >= 1 and st.getInt("onlyone") == 0:
                if st.getInt("id") != 154:
                    st.set("id", "154")
                    htmltext = "30312-07.htm"
                    st.giveItems(EARING_ID, 1)
                    st.takeItems(MAIDEN_DOLL_ID, -1)
                    st.addExpAndSp(100, 0)
                    st.set("cond", "0")
                    st.setState(COMPLETED)
                    st.playSound("ItemSound.quest_finish")
                    st.set("onlyone", "1")
        return htmltext

    def onKill(self, npc, player, isPet):
        st = player.getQuestState(qn)
        if not st:
            return
        if st.getState() != STARTED:
            return

        npcId = npc.getNpcId()
        if npcId == 20481:
            st.set("id", "0")
            if st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_ID) < 10 and st.getQuestItemsCount(FOX_FUR_YARN_ID) == 0:
                if st.getRandom(10) < 4:
                    st.giveItems(FOX_FUR_ID, 1)
                    if st.getQuestItemsCount(FOX_FUR_ID) == 10:
                        st.playSound("ItemSound.quest_middle")
                    else:
                        st.playSound("ItemSound.quest_itemget")
        elif npcId == 20545:
            st.set("id", "0")
            if st.getInt("cond") == 1 and st.getQuestItemsCount(FOX_FUR_ID) < 10 and st.getQuestItemsCount(FOX_FUR_YARN_ID) == 0:
                if st.getRandom(10) < 4:
                    st.giveItems(FOX_FUR_ID, 1)
                    if st.getQuestItemsCount(FOX_FUR_ID) == 10:
                        st.playSound("ItemSound.quest_middle")
                    else:
                        st.playSound("ItemSound.quest_itemget")
        return


QUEST = Quest(154, qn, "Sacrifice To Sea")
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)

QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30312)
QUEST.addTalkId(30312)
QUEST.addTalkId(30051)
QUEST.addTalkId(30055)
QUEST.addKillId(20481)
QUEST.addKillId(20545)

STARTED.addQuestDrop(20481, FOX_FUR_ID, 1)
STARTED.addQuestDrop(20545, FOX_FUR_ID, 1)
STARTED.addQuestDrop(30051, FOX_FUR_YARN_ID, 1)
STARTED.addQuestDrop(30055, MAIDEN_DOLL_ID, 1)
en
0.843975
# Maked by Mr. Have fun! Version 0.2
2.413329
2
ezntfs/__init__.py
lezgomatt/ezntfs
13
6621660
"""An easy-to-use wrapper for NTFS-3G on macOS.""" __version__ = "1.1.1"
"""An easy-to-use wrapper for NTFS-3G on macOS.""" __version__ = "1.1.1"
en
0.481813
An easy-to-use wrapper for NTFS-3G on macOS.
1.038569
1
utils.py
karanvivekbhargava/vanilla-neural-network
1
6621661
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.utils import shuffle


class Net:
    def __init__(self):
        self.data = None
        self.W = None

    def feedData(self, x_train, y_train, x_valid, y_valid, x_test, y_test):
        self.data = [x_train, y_train, x_valid, y_valid, x_test, y_test]

    def addLayer(self, dim):
        assert(self.data is not None), "No data has been added"
        if (self.W is None):
            self.W = []
            self.W.append(np.random.normal(
                0, 1 / np.sqrt(self.data[0].shape[1]), (self.data[0].shape[1], dim)))
        else:
            self.W.append(np.random.normal(
                0, 1 / np.sqrt(self.W[-1].shape[1]), (self.W[-1].shape[1], dim)))

    def solver(self, graphFlag=False):
        assert(self.W is not None), "No layers have been added"
        assert(self.W[0].shape[0] == self.data[0].shape[1]
               ), "The network input dimension doesn't match the data"
        assert(self.W[-1].shape[1] == self.data[1].shape[1]
               ), "The network output dimension doesn't match the data"
        x_train, y_train, x_valid, y_valid, x_test, y_test = self.data[
            0], self.data[1], self.data[2], self.data[3], self.data[4], self.data[5]
        # Call bb solver
        bb_solver(self.W, x_train, y_train, x_valid, y_valid, x_test, y_test,
                  graphFlag=False)


def readData(split_ratio=0.7):
    # import some data to play with
    iris = datasets.load_iris()
    X = iris.data  # we take all the features.
    y = iris.target
    X, y = shuffle(X, y)
    meanX = np.average(X, axis=0)
    # Subtract the mean from the data to make the training faster
    X -= meanX
    y = y.reshape((-1, 1))
    separating_index = int(split_ratio * y.shape[0])
    x_train, y_train, x_test, y_test = X[:separating_index], y[
        :separating_index], X[separating_index:], y[separating_index:]
    separating_index1 = int(0.33 * y_test.shape[0])
    x_valid = x_test[:separating_index1]
    y_valid = y_test[:separating_index1]
    x_test = x_test[separating_index1:]
    y_test = y_test[separating_index1:]

    def make_hot(a):
        numdata = a.shape[0]
        rval = np.zeros((numdata, 3))
        for i in range(numdata):
            rval[i, a[i]] = 1
        return rval

    # convert labels to ones hot using this function
    y_test = make_hot(y_test)
    y_valid = make_hot(y_valid)
    y_train = make_hot(y_train)
    return x_train, y_train, x_valid, y_valid, x_test, y_test


def list_to_vec(list_of_matrices):
    """Convert a list of matrices into a vector"""
    return np.concatenate([l.ravel() for l in list_of_matrices])


def vec_to_list(vec, list_of_matrices):
    """Convert a vector into a list of matrices.

    The returned value will have the same shape as list_of_matrices,
    and will overwrite the values in that list"""
    loc = 0
    for m in list_of_matrices:
        shape = m.shape
        m[:] = vec[loc:loc + np.size(m)].reshape(shape)
        loc = loc + np.size(m)
    return list_of_matrices


def logreg_objective(x, D, c):
    z = np.multiply(c, np.dot(D, x))
    l_z = np.zeros_like(z)
    # We take exp() of only the negative values in z
    l_z[z <= 0] = -z[z <= 0] + np.log(1 + np.exp(z[z <= 0]))
    # We take exp(-z) for positive values in z
    l_z[z > 0] = np.log(1 + np.exp(-z[z > 0]))
    return np.sum(l_z)


def logreg_grad(x, D, c):
    z = np.multiply(c, np.dot(D, x))
    cez = np.zeros_like(z)
    cez[z >= 0] = np.multiply(
        c[z >= 0], np.divide(-np.exp(-z[z >= 0]), 1 + np.exp(-z[z >= 0])))
    cez[z < 0] = np.multiply(c[z < 0], -1 / (1 + np.exp(z[z < 0])))
    return np.dot(D.transpose(), cez)


def log_entropy_softmax(z, ones_hot):
    """The log entropy of the softmax layer"""
    # shift everything so we don't have to exponentiate positive numbers
    z = z - np.max(z, axis=1)[:, None]
    # compute the negative log likelihood
    s = np.sum(np.exp(z), axis=1)[:, None]
    nll = -z + np.log(s)
    # sum over the entries corresponding to the correct class
    return np.sum(nll * ones_hot) / z.shape[0]


def net_objective(weights, data, labs):
    """The objective function of a neural net"""
    num_lays = len(weights)
    # Each layer performs data*weights. This way we have 1 feature vector per row of data
    z = data.dot(weights[0])
    for j in range(1, num_lays):
        z = smrelu(z).dot(weights[j])
    return log_entropy_softmax(z, labs)


def smrelu(x):
    y = np.zeros_like(x)
    # take exp() of only the negative values in x
    y[x <= 0] = np.log(1 + np.exp(x[x <= 0]))
    # take exp(-x) for positive values in x, so exp doesn't blow up
    y[x > 0] = x[x > 0] + np.log(1 + np.exp(-x[x > 0]))
    return y


def smrelu_grad(x):
    """The smoothed relu gradient"""
    rval = np.zeros(x.shape)
    ind = x < 0
    rval[ind] = np.exp(x[ind]) / (1 + np.exp(x[ind]))
    ind = np.logical_not(ind)
    rval[ind] = 1 / (1 + np.exp(-x[ind]))
    return rval


def log_entropy_grad(z, ones_hot):
    """The gradient of the log entropy of the softmax"""
    # shift everything so we don't have to exponentiate positive numbers
    z = z - np.max(z, axis=1)[:, None]
    # compute the negative log likelihood
    s = np.sum(np.exp(z), axis=1)[:, None]
    grad = -ones_hot + np.exp(z) / s
    return grad / z.shape[0]


def net_grad(weights, data, labs):
    """The gradient of the neural net objective"""
    num_lays = len(weights)
    # Forward pass: Each layer performs y_next = sigma(y*weights).
    # This way we have 1 feature vector per column
    z = [data.dot(weights[0])]
    # The y's are activations
    y = [data]
    for j in range(1, num_lays):
        y.append(smrelu(z[j - 1]))
        z.append(y[j].dot(weights[j]))
    # Backward pass: loop over the layers and produce gradients
    # This is how much the loss depends on its input
    dzt = log_entropy_grad(z[-1], labs)
    # this is how much the loss depends on the deepest weight matrix
    dw = [y[-1].T.dot(dzt)]
    # loop over remaining layers
    for j in reversed(range(num_lays - 1)):
        # gradient with respect to z
        dzt = dzt.dot(weights[j + 1].T) * smrelu_grad(z[j])
        # gradient with respect to W
        dw.append(y[j].T.dot(dzt))
    dw.reverse()
    return dw


# We define a separate function to calculate the initial step size for this problem
def getInitialStep(grad, W, x0):
    y = x0 + np.random.normal(0, 0.01, x0.shape)
    return 2 * np.linalg.norm(y - x0) / np.linalg.norm(grad(vec_to_list(y, W)) - grad(vec_to_list(x0, W)))


def bb_solver(W, x_train, y_train, x_valid, y_valid, x_test, y_test, graphFlag=False):
    D = x_train
    L = y_train

    # Define the function handles
    def f(W):
        # run the training data through the network
        return net_objective(W, D, L)

    def grad(W):
        return list_to_vec(net_grad(W, D, L))

    # Run the BB solver
    x0 = list_to_vec(W)
    alpha = 0.1
    res = []
    norm_grad0 = np.linalg.norm(grad(vec_to_list(x0, W)))
    res.append(norm_grad0)
    norm_grad_curr = 1e18  # Initialize it to infinity
    x_curr = x0
    x_old = x0
    step_curr = getInitialStep(grad, W, x0)
    for iter in range(200):
        d = -grad(vec_to_list(x_curr, W))
        delta_x = x_curr - x_old
        delta_g = grad(vec_to_list(x_curr, W)) - grad(vec_to_list(x_old, W))
        norm_delta_x = np.linalg.norm(delta_x)
        if norm_delta_x == 0:
            step_curr = getInitialStep(grad, W, x0)
        else:
            step_curr = norm_delta_x / \
                np.inner(delta_x.flatten(), delta_g.flatten())
        while f(vec_to_list(x_curr + step_curr * d, W)) > ((f(vec_to_list(x_curr, W)) + alpha * np.inner(step_curr * d.flatten(), grad(vec_to_list(x_curr, W)).flatten()))):
            step_curr = step_curr * 0.5
        x_old = x_curr
        x_curr = x_curr + step_curr * d
        norm_grad_curr = np.linalg.norm(grad(vec_to_list(x_curr, W)))
        res.append(norm_grad_curr)
        # print norm_grad_curr

    W = vec_to_list(x_curr, W)  # the final learned weights

    # Calculate the train and test accuracies
    num_layers = len(W)
    output = x_train  # initialize output to input
    for i in range(num_layers - 1):  # last layer has softmax non-linearity
        # new output after passing through a layer
        output = smrelu(np.dot(output, W[i]))
    output = np.dot(output, W[-1])  # multiply with final layer weights
    # the index of the max value in the output of the neural net is the predicted label
    train_predicted = np.argmax(output, axis=1)
    accuracy_train = np.sum(train_predicted == np.argmax(
        y_train, axis=1)) * 100.0 / x_train.shape[0]
    print("Training Accuracy = ", accuracy_train)

    output = x_test  # initialize output to input
    for i in range(num_layers - 1):  # last layer has softmax non-linearity
        # new output after passing through a layer
        output = smrelu(np.dot(output, W[i]))
    output = np.dot(output, W[-1])  # multiply with final layer weights
    # the index of the max value in the output of the neural net is the predicted label
    test_predicted = np.argmax(output, axis=1)
    accuracy_test = np.sum(np.argmax(output, axis=1) == np.argmax(
        y_test, axis=1)) * 100.0 / x_test.shape[0]
    print("Test Accuracy = ", accuracy_test)

    if (graphFlag):
        # Plot the convergence curve
        plt.figure(1)
        plt.plot(range(len(res)), res)
        plt.yscale('log')
        plt.title('Residuals of Neural Network with Barzilai-Borwein solver')
        plt.xlabel('Number of iterations')
        plt.ylabel('Residual')
        plt.show()
en
0.81412
# Call bb solver # import some data to play with # we take all the features. # Subtract the mean from the data to make the training faster # convert labels to ones hot using this function Convert a list of matrices into a vector Convert a vector into a list of matrices. The returned value will have the same shape as list_of_matrices, and will overwrite the values in that list # We take exp() of only the negative values in z # We take exp(-z) for positive values in z The log entropy of the softmax layer # shift everything so we don't have to exponentiate positive numbers # compute the negative log likelihood # sum over the entries corresponding to the correct class The objective function of a neural net # Each layer performs data*weights. This way we have 1 feature vector per row of data # take exp() of only the negative values in x # take exp(-x) for positive values in x, so exp doesn't blow up The smoothed relu gradient The gradient of the log entropy of the softmax # shift everything so we don't have to exponentiate positive numbers # compute the negative log likelihood The gradient of the neural net objective # Forward pass: Each layer performs y_next = sigma(y*weights). This way we have 1 feature vector per column # The y's are activations # Backward pass: loop over the layers and produce gradients # This is how much the loss depends on its input # this is how much the loss depends on the deepest weight matrix # loop over remaining layers # gradient with respect to z # gradient with respect to W # We define a separate function to calculate the initial step size for this problem # Define the function handles # run the training data through the network # Run the BB solver # Initialize it to infinity # print norm_grad_curr # the final learned weights # Calculate the train and test accuracies # initialize output to input # last layer has softmax non-linearity # new output after passing through a layer # multiply with final layer weights # the index of the max value in the output of the neural net is the predicted label # initialize output to input # last layer has softmax non-linearity # new output after passing through a layer # multiply with final layer weights # the index of the max value in the output of the neural net is the predicted label # Plot the convergence curve
3.080384
3
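Note on the record above: bb_solver uses a Barzilai-Borwein step. The textbook long (BB1) step is alpha_k = (dx . dx) / (dx . dg); the record's code puts the unsquared norm ||dx|| in the numerator, which is worth double-checking against the BB1 formula. A minimal self-contained sketch of BB1 on a toy problem (plain NumPy; the quadratic objective is an illustrative stand-in, not the record's network):

import numpy as np

A = np.diag([1.0, 10.0])            # toy quadratic f(x) = 0.5 * x.T A x - b.x
b = np.array([1.0, 1.0])
grad = lambda x: A.dot(x) - b
x_old, x = np.zeros(2), np.ones(2)
for _ in range(50):
    dx, dg = x - x_old, grad(x) - grad(x_old)
    denom = dx.dot(dg)
    step = dx.dot(dx) / denom if denom != 0 else 1e-2  # BB1 (long) step
    x_old, x = x, x - step * grad(x)
print(np.linalg.norm(grad(x)))      # gradient norm should be near zero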
glr/train_lr.py
dlkt-review-and-empirical-evaluation/dlkt-review-and-empirical-evaluation
0
6621662
import argparse import numpy as np import pandas as pd from scipy.sparse import load_npz, csr_matrix from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score as acc, roc_auc_score as auc, f1_score as f1, matthews_corrcoef as mc, \ mean_squared_error as mse, precision_score as prec, recall_score as recall from sklearn.model_selection import KFold from pathlib import Path def compute_metrics(y_pred, y): bin_pred = np.round(y_pred) results = {} results['acc'] = acc(y, bin_pred) results['auc'] = auc(y, y_pred) results['prec'] = prec(y, bin_pred) results['recall'] = recall(y, bin_pred) results['f1'] = f1(y, bin_pred) results['mcc'] = mc(y, bin_pred) results['rmse'] = np.sqrt(mse(y, y_pred)) # nll = log_loss(y, y_pred) return results if __name__ == "__main__": parser = argparse.ArgumentParser(description='Train logistic regression on sparse feature matrix.') parser.add_argument('--X_file', type=str) parser.add_argument('--dataset', type=str) parser.add_argument('--iter', type=int, default=10000) args = parser.parse_args() features_suffix = (args.X_file.split("-")[-1]).split(".")[0] # Load sparse dataset X = csr_matrix(load_npz(args.X_file)) data = pd.read_csv(f'../data/preprocessed/{args.dataset}/preprocessed_data.csv', sep="\t") kfold = KFold(n_splits=5) results = [] users = data["user_id"].unique() for i, (train_i, test_i) in enumerate(kfold.split(users)): print(f"fold {i + 1}") # Train-test split train_df = data[data["user_id"].isin(users[train_i])] test_df = data[data["user_id"].isin(users[test_i])] # Student-wise train-test split user_ids = X[:, 0].toarray().flatten() users_train = train_df["user_id"].unique() users_test = test_df["user_id"].unique() train = X[np.where(np.isin(user_ids, users_train))] test = X[np.where(np.isin(user_ids, users_test))] # First 5 columns are the original dataset, including label in column 3 X_train, y_train = train[:, 5:], train[:, 3].toarray().flatten() X_test, y_test = test[:, 5:], test[:, 3].toarray().flatten() # Train model = LogisticRegression(solver="lbfgs", max_iter=args.iter) model.fit(X_train, y_train) y_pred_train = model.predict_proba(X_train)[:, 1] y_pred_test = model.predict_proba(X_test)[:, 1] # Write predictions to csv # test_df[f"LR_{features_suffix}"] = y_pred_test # print('write') # test_df.to_csv(f'data/{args.dataset}/preprocessed_data_test.csv', sep="\t", index=False) train_results = compute_metrics(y_pred_train, y_train) test_results = compute_metrics(y_pred_test, y_test) results.append(test_results) print(f"kfold iteration {i + 1}: {args.dataset}, features = {features_suffix}, test results = {test_results}") Path('results').mkdir(exist_ok=True) results_df = pd.DataFrame(results) print(results_df) results_df.to_csv(f'results/5-fold-lrbest-{args.dataset}.csv', index=False)
import argparse import numpy as np import pandas as pd from scipy.sparse import load_npz, csr_matrix from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score as acc, roc_auc_score as auc, f1_score as f1, matthews_corrcoef as mc, \ mean_squared_error as mse, precision_score as prec, recall_score as recall from sklearn.model_selection import KFold from pathlib import Path def compute_metrics(y_pred, y): bin_pred = np.round(y_pred) results = {} results['acc'] = acc(y, bin_pred) results['auc'] = auc(y, y_pred) results['prec'] = prec(y, bin_pred) results['recall'] = recall(y, bin_pred) results['f1'] = f1(y, bin_pred) results['mcc'] = mc(y, bin_pred) results['rmse'] = np.sqrt(mse(y, y_pred)) # nll = log_loss(y, y_pred) return results if __name__ == "__main__": parser = argparse.ArgumentParser(description='Train logistic regression on sparse feature matrix.') parser.add_argument('--X_file', type=str) parser.add_argument('--dataset', type=str) parser.add_argument('--iter', type=int, default=10000) args = parser.parse_args() features_suffix = (args.X_file.split("-")[-1]).split(".")[0] # Load sparse dataset X = csr_matrix(load_npz(args.X_file)) data = pd.read_csv(f'../data/preprocessed/{args.dataset}/preprocessed_data.csv', sep="\t") kfold = KFold(n_splits=5) results = [] users = data["user_id"].unique() for i, (train_i, test_i) in enumerate(kfold.split(users)): print(f"fold {i + 1}") # Train-test split train_df = data[data["user_id"].isin(users[train_i])] test_df = data[data["user_id"].isin(users[test_i])] # Student-wise train-test split user_ids = X[:, 0].toarray().flatten() users_train = train_df["user_id"].unique() users_test = test_df["user_id"].unique() train = X[np.where(np.isin(user_ids, users_train))] test = X[np.where(np.isin(user_ids, users_test))] # First 5 columns are the original dataset, including label in column 3 X_train, y_train = train[:, 5:], train[:, 3].toarray().flatten() X_test, y_test = test[:, 5:], test[:, 3].toarray().flatten() # Train model = LogisticRegression(solver="lbfgs", max_iter=args.iter) model.fit(X_train, y_train) y_pred_train = model.predict_proba(X_train)[:, 1] y_pred_test = model.predict_proba(X_test)[:, 1] # Write predictions to csv # test_df[f"LR_{features_suffix}"] = y_pred_test # print('write') # test_df.to_csv(f'data/{args.dataset}/preprocessed_data_test.csv', sep="\t", index=False) train_results = compute_metrics(y_pred_train, y_train) test_results = compute_metrics(y_pred_test, y_test) results.append(test_results) print(f"kfold iteration {i + 1}: {args.dataset}, features = {features_suffix}, test results = {test_results}") Path('results').mkdir(exist_ok=True) results_df = pd.DataFrame(results) print(results_df) results_df.to_csv(f'results/5-fold-lrbest-{args.dataset}.csv', index=False)
en
0.494346
# nll = log_loss(y, y_pred) # Load sparse dataset # Train-test split # Student-wise train-test split # First 5 columns are the original dataset, including label in column 3 # Train # Write predictions to csv # test_df[f"LR_{features_suffix}"] = y_pred_test # print('write') # test_df.to_csv(f'data/{args.dataset}/preprocessed_data_test.csv', sep="\t", index=False)
2.37911
2
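Note on the record above: the k-fold split is taken over unique user_id values rather than over rows, so each student's interactions land entirely in train or entirely in test. A minimal sketch of that student-wise pattern on toy data (pandas + scikit-learn, mirroring the record's code):

import pandas as pd
from sklearn.model_selection import KFold

df = pd.DataFrame({"user_id": [1, 1, 2, 2, 3, 3], "y": [0, 1, 1, 0, 1, 1]})
users = df["user_id"].unique()
for train_i, test_i in KFold(n_splits=3).split(users):
    train_df = df[df["user_id"].isin(users[train_i])]
    test_df = df[df["user_id"].isin(users[test_i])]
    # no student appears on both sides of the split
    assert not set(train_df["user_id"]) & set(test_df["user_id"])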
swing/parsers.py
docker-swing/swing
1
6621663
import configparser import os import yaml from .errors import InvalidConfigError, InvalidRequirementsError, InvalidChartDefinitionError from .helpers import is_readable_dir, is_readable_file class Config: def __init__(self, server_url, email, password): self.server_url = server_url self.email = email self.password = password class Requirement: def __init__(self, chart_name, version=None, file=None): self.chart_name = chart_name self.version = version self.file = file class ChartDefinition: def __init__(self, name, version): self.name = name self.version = version def parse_config(config_path=None): config = configparser.ConfigParser() if not config_path: config_path = os.path.join(os.path.expanduser('~'), '.swing') with open(config_path, 'r') as f: config.read_file(f) if 'swing' not in config: raise InvalidConfigError('Config missing swing section') server_url = config['swing'].get('server') email = config['swing'].get('email') password = config['swing'].get('password') if not server_url: raise InvalidConfigError('Missing server url option') if not email: raise InvalidConfigError('Missing user email option') if not password: raise InvalidConfigError('Missing user password option') return Config(server_url, email, password) def parse_requirements(requirements_path): if not is_readable_file(requirements_path): raise InvalidRequirementsError(f'Invalid requirements file path ({requirements_path})') with open(requirements_path, 'r') as f: try: yaml_file = yaml.safe_load(f) except yaml.YAMLError: raise InvalidRequirementsError('Requirements are not valid yaml file') dependencies = yaml_file.get('dependencies') if not dependencies: raise InvalidRequirementsError('Requirements file missing dependencies attribute') requirements = [] for d in dependencies: if not d.get('name'): raise InvalidRequirementsError('Requirement\'s name has to be specified') if not d.get('file') and not d.get('version'): raise InvalidRequirementsError('Requirement\'s version has to be specified') if d.get('file') and not is_readable_dir(d.get('file')): raise InvalidRequirementsError('Requirement\'s directory is not valid') requirements.append(Requirement(d.get('name'), d.get('version'), d.get('file'))) return requirements def parse_chart_definition(definition_path): if not is_readable_file(definition_path): raise InvalidChartDefinitionError('No definition file') with open(definition_path, 'r') as f: try: definition_yaml = yaml.safe_load(f) except yaml.YAMLError: raise InvalidChartDefinitionError('Invalid definition file') chart_name = definition_yaml.get('name') version = definition_yaml.get('version') if not chart_name or not version: raise InvalidChartDefinitionError('Definition name or version empty') return ChartDefinition(chart_name, version)
import configparser import os import yaml from .errors import InvalidConfigError, InvalidRequirementsError, InvalidChartDefinitionError from .helpers import is_readable_dir, is_readable_file class Config: def __init__(self, server_url, email, password): self.server_url = server_url self.email = email self.password = password class Requirement: def __init__(self, chart_name, version=None, file=None): self.chart_name = chart_name self.version = version self.file = file class ChartDefinition: def __init__(self, name, version): self.name = name self.version = version def parse_config(config_path=None): config = configparser.ConfigParser() if not config_path: config_path = os.path.join(os.path.expanduser('~'), '.swing') with open(config_path, 'r') as f: config.read_file(f) if 'swing' not in config: raise InvalidConfigError('Config missing swing section') server_url = config['swing'].get('server') email = config['swing'].get('email') password = config['swing'].get('password') if not server_url: raise InvalidConfigError('Missing server url option') if not email: raise InvalidConfigError('Missing user email option') if not password: raise InvalidConfigError('Missing user password option') return Config(server_url, email, password) def parse_requirements(requirements_path): if not is_readable_file(requirements_path): raise InvalidRequirementsError(f'Invalid requirements file path ({requirements_path})') with open(requirements_path, 'r') as f: try: yaml_file = yaml.safe_load(f) except yaml.YAMLError: raise InvalidRequirementsError('Requirements are not valid yaml file') dependencies = yaml_file.get('dependencies') if not dependencies: raise InvalidRequirementsError('Requirements file missing dependencies attribute') requirements = [] for d in dependencies: if not d.get('name'): raise InvalidRequirementsError('Requirement\'s name has to be specified') if not d.get('file') and not d.get('version'): raise InvalidRequirementsError('Requirement\'s version has to be specified') if d.get('file') and not is_readable_dir(d.get('file')): raise InvalidRequirementsError('Requirement\'s directory is not valid') requirements.append(Requirement(d.get('name'), d.get('version'), d.get('file'))) return requirements def parse_chart_definition(definition_path): if not is_readable_file(definition_path): raise InvalidChartDefinitionError('No definition file') with open(definition_path, 'r') as f: try: definition_yaml = yaml.safe_load(f) except yaml.YAMLError: raise InvalidChartDefinitionError('Invalid definition file') chart_name = definition_yaml.get('name') version = definition_yaml.get('version') if not chart_name or not version: raise InvalidChartDefinitionError('Definition name or version empty') return ChartDefinition(chart_name, version)
none
1
2.767723
3
aydin/cli/test/test_cli.py
AhmetCanSolak/aydin
78
6621664
from click.testing import CliRunner from aydin.cli.cli import cli, handle_files from aydin.io.datasets import examples_single from aydin.util.log.log import Log def test_info(): Log.override_test_exclusion = True Log.force_click_echo = True image_path = examples_single.generic_lizard.get_path() runner = CliRunner() result = runner.invoke(cli, ['info', image_path]) assert result.exit_code == 0 assert "Reading" in result.output assert "Metadata" in result.output assert "batch" in result.output def test_cite(): runner = CliRunner() result = runner.invoke(cli, ['cite']) assert result.exit_code == 0 assert "10.5281/zenodo.5654826" in result.output def test_handle_files(): file_list = [ examples_single.generic_lizard.get_path(), examples_single.noisy_fountain.get_path(), ] filepaths, image_arrays, metadatas = handle_files(file_list, slicing="") assert filepaths == file_list assert image_arrays[0].shape == examples_single.generic_lizard.get_array().shape assert image_arrays[0].dtype == examples_single.generic_lizard.get_array().dtype assert image_arrays[1].shape == examples_single.noisy_fountain.get_array().shape assert image_arrays[1].dtype == examples_single.noisy_fountain.get_array().dtype assert metadatas[0].shape == examples_single.generic_lizard.get_array().shape assert metadatas[0].dtype == examples_single.generic_lizard.get_array().dtype assert metadatas[1].shape == examples_single.noisy_fountain.get_array().shape assert metadatas[1].dtype == examples_single.noisy_fountain.get_array().dtype def test_denoise(): image_path = examples_single.noisy_fountain.get_path() # Denoise runner = CliRunner() result = runner.invoke(cli, ['denoise', image_path]) assert result.exit_code == 0 # TODO: turn this into a saveload testcase # Denoise with the pre-trained model # result = runner.invoke(cli, ['denoise', '--model-path=', '--use-model', image_path]) # assert result.exit_code == 0 # denoised = denoised.clip(0, 1) # # psnr_noisy = psnr(noisy, image) # ssim_noisy = ssim(noisy, image) # print("noisy", psnr_noisy, ssim_noisy) # # psnr_denoised = psnr(denoised, image) # ssim_denoised = ssim(denoised, image) # print("denoised", psnr_denoised, ssim_denoised) # # assert psnr_denoised > psnr_noisy and ssim_denoised > ssim_noisy # assert psnr_denoised > psnr_noisy and ssim_denoised > ssim_noisy # # # if the line below fails, then the parameters of the image or the lgbm regressor have been broken. # # do not change the number below, but instead, fix the problem -- most likely a parameter. # # assert psnr_denoised > min_psnr and ssim_denoised > min_ssim
from click.testing import CliRunner from aydin.cli.cli import cli, handle_files from aydin.io.datasets import examples_single from aydin.util.log.log import Log def test_info(): Log.override_test_exclusion = True Log.force_click_echo = True image_path = examples_single.generic_lizard.get_path() runner = CliRunner() result = runner.invoke(cli, ['info', image_path]) assert result.exit_code == 0 assert "Reading" in result.output assert "Metadata" in result.output assert "batch" in result.output def test_cite(): runner = CliRunner() result = runner.invoke(cli, ['cite']) assert result.exit_code == 0 assert "10.5281/zenodo.5654826" in result.output def test_handle_files(): file_list = [ examples_single.generic_lizard.get_path(), examples_single.noisy_fountain.get_path(), ] filepaths, image_arrays, metadatas = handle_files(file_list, slicing="") assert filepaths == file_list assert image_arrays[0].shape == examples_single.generic_lizard.get_array().shape assert image_arrays[0].dtype == examples_single.generic_lizard.get_array().dtype assert image_arrays[1].shape == examples_single.noisy_fountain.get_array().shape assert image_arrays[1].dtype == examples_single.noisy_fountain.get_array().dtype assert metadatas[0].shape == examples_single.generic_lizard.get_array().shape assert metadatas[0].dtype == examples_single.generic_lizard.get_array().dtype assert metadatas[1].shape == examples_single.noisy_fountain.get_array().shape assert metadatas[1].dtype == examples_single.noisy_fountain.get_array().dtype def test_denoise(): image_path = examples_single.noisy_fountain.get_path() # Denoise runner = CliRunner() result = runner.invoke(cli, ['denoise', image_path]) assert result.exit_code == 0 # TODO: turn this into a saveload testcase # Denoise with the pre-trained model # result = runner.invoke(cli, ['denoise', '--model-path=', '--use-model', image_path]) # assert result.exit_code == 0 # denoised = denoised.clip(0, 1) # # psnr_noisy = psnr(noisy, image) # ssim_noisy = ssim(noisy, image) # print("noisy", psnr_noisy, ssim_noisy) # # psnr_denoised = psnr(denoised, image) # ssim_denoised = ssim(denoised, image) # print("denoised", psnr_denoised, ssim_denoised) # # assert psnr_denoised > psnr_noisy and ssim_denoised > ssim_noisy # assert psnr_denoised > psnr_noisy and ssim_denoised > ssim_noisy # # # if the line below fails, then the parameters of the image or the lgbm regressor have been broken. # # do not change the number below, but instead, fix the problem -- most likely a parameter. # # assert psnr_denoised > min_psnr and ssim_denoised > min_ssim
en
0.622785
# Denoise # TODO: turn this into a saveload testcase # Denoise with the pre-trained model # result = runner.invoke(cli, ['denoise', '--model-path=', '--use-model', image_path]) # assert result.exit_code == 0 # denoised = denoised.clip(0, 1) # # psnr_noisy = psnr(noisy, image) # ssim_noisy = ssim(noisy, image) # print("noisy", psnr_noisy, ssim_noisy) # # psnr_denoised = psnr(denoised, image) # ssim_denoised = ssim(denoised, image) # print("denoised", psnr_denoised, ssim_denoised) # # assert psnr_denoised > psnr_noisy and ssim_denoised > ssim_noisy # assert psnr_denoised > psnr_noisy and ssim_denoised > ssim_noisy # # # if the line below fails, then the parameters of the image or the lgbm regressor have been broken. # # do not change the number below, but instead, fix the problem -- most likely a parameter. # # assert psnr_denoised > min_psnr and ssim_denoised > min_ssim
2.049311
2
recsim/agents/layers/cluster_click_statistics.py
MontrealAI/recsim
625
6621665
<filename>recsim/agents/layers/cluster_click_statistics.py # coding=utf-8 # Copyright 2019 The RecSim Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper class to collect cluster click and impression counts.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from gym import spaces import numpy as np from recsim.agents.layers import sufficient_statistics class ClusterClickStatsLayer(sufficient_statistics.SufficientStatisticsLayer): """Track impressions and clicks on a per-cluster basis and pass down to agent. This module assumes each document belongs to a single cluster and we know the number of possible clusters. We increase the impression count for a cluster every time the agent recommends a document from that cluster, and we increase the click count for a cluster when the user responds with a click. """ def __init__(self, base_agent_ctor, observation_space, action_space, **kwargs): """Initializes a ClusterClickStatsLayer object. Args: base_agent_ctor: a constructor for the base agent. observation_space: a gym.spaces object specifying the format of observations. action_space: A gym.spaces object that specifies the format of actions. **kwargs: arguments to pass to the downstream agent at construction time.
""" single_response_space = observation_space.spaces['response'].spaces[0] if 'cluster_id' not in single_response_space.spaces: raise ValueError('observation_space.spaces[\'response\']' ' must contain \'cluster_id\' key.') cluster_id_space = single_response_space.spaces['cluster_id'] if isinstance(cluster_id_space, spaces.Box): if len(cluster_id_space.high) > 1: raise ValueError('cluster_id response field must be 0 dimensional.') num_clusters = cluster_id_space.high elif isinstance(cluster_id_space, spaces.Discrete): num_clusters = cluster_id_space.n else: raise ValueError('cluster_id response field must be either gym.spaces.Box' ' or gym spaces.Discrete') self._num_clusters = num_clusters if 'click' not in single_response_space.spaces: raise ValueError( 'observation_space.spaces[\'response\'] must contain \'click\' key.') suf_stat_space = spaces.Dict({ 'impression_count': spaces.Box( shape=(num_clusters,), dtype=np.float32, low=0.0, high=np.inf), 'click_count': spaces.Box( shape=(num_clusters,), dtype=np.float32, low=0.0, high=np.inf) }) super(ClusterClickStatsLayer, self).__init__(base_agent_ctor, observation_space, action_space, suf_stat_space, **kwargs) def _create_observation(self): return { 'impression_count': np.array(self._sufficient_statistics['impression_count']), 'click_count': np.array(self._sufficient_statistics['click_count']), } def _update(self, observation): """Updates user impression/click count given user response on each item.""" if self._sufficient_statistics is None: self._sufficient_statistics = { 'impression_count': [ 0, ] * self._num_clusters, 'click_count': [ 0, ] * self._num_clusters } if observation['response'] is not None: for response in observation['response']: cluster_id = int(response['cluster_id']) self._sufficient_statistics['impression_count'][cluster_id] += 1 if response['click']: self._sufficient_statistics['click_count'][cluster_id] += 1
<filename>recsim/agents/layers/cluster_click_statistics.py # coding=utf-8 # Copyright 2019 The RecSim Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper class to collect cluster click and impression counts.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from gym import spaces import numpy as np from recsim.agents.layers import sufficient_statistics class ClusterClickStatsLayer(sufficient_statistics.SufficientStatisticsLayer): """Track impressions and clicks on a per-cluster basis and pass down to agent. This module assumes each document belongs to a single cluster and we know the number of possible clusters. We increase the impression count for a cluster every time the agent recommends a document from that cluster, and we increase the click count for a cluster when the user responds with a click. """ def __init__(self, base_agent_ctor, observation_space, action_space, **kwargs): """Initializes a ClusterClickStatsLayer object. Args: base_agent_ctor: a constructor for the base agent. observation_space: a gym.spaces object specifying the format of observations. action_space: A gym.spaces object that specifies the format of actions. **kwargs: arguments to pass to the downstream agent at construction time.
""" single_response_space = observation_space.spaces['response'].spaces[0] if 'cluster_id' not in single_response_space.spaces: raise ValueError('observation_space.spaces[\'response\']' ' must contain \'cluster_id\' key.') cluster_id_space = single_response_space.spaces['cluster_id'] if isinstance(cluster_id_space, spaces.Box): if len(cluster_id_space.high) > 1: raise ValueError('cluster_id response field must be 0 dimensional.') num_clusters = cluster_id_space.high elif isinstance(cluster_id_space, spaces.Discrete): num_clusters = cluster_id_space.n else: raise ValueError('cluster_id response field must be either gym.spaces.Box' ' or gym spaces.Discrete') self._num_clusters = num_clusters if 'click' not in single_response_space.spaces: raise ValueError( 'observation_space.spaces[\'response\'] must contain \'click\' key.') suf_stat_space = spaces.Dict({ 'impression_count': spaces.Box( shape=(num_clusters,), dtype=np.float32, low=0.0, high=np.inf), 'click_count': spaces.Box( shape=(num_clusters,), dtype=np.float32, low=0.0, high=np.inf) }) super(ClusterClickStatsLayer, self).__init__(base_agent_ctor, observation_space, action_space, suf_stat_space, **kwargs) def _create_observation(self): return { 'impression_count': np.array(self._sufficient_statistics['impression_count']), 'click_count': np.array(self._sufficient_statistics['click_count']), } def _update(self, observation): """Updates user impression/click count given user response on each item.""" if self._sufficient_statistics is None: self._sufficient_statistics = { 'impression_count': [ 0, ] * self._num_clusters, 'click_count': [ 0, ] * self._num_clusters } if observation['response'] is not None: for response in observation['response']: cluster_id = int(response['cluster_id']) self._sufficient_statistics['impression_count'][cluster_id] += 1 if response['click']: self._sufficient_statistics['click_count'][cluster_id] += 1
en
0.809967
# coding=utf-8 # Copyright 2019 The RecSim Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Helper class to collect cluster click and impression counts. Track impressions and clicks on a per-cluster basis and pass down to agent. This module assumes each document belongs to a single cluster and we know the number of possible clusters. We increase the impression count for a cluster every time the agent recommends a document from that cluster, and we increase the click count for a cluster when the user responds with a click. Initializes a ClusterClickStatsLayer object. Args: base_agent_ctor: a constructor for the base agent. observation_space: a gym.spaces object specifying the format of observations. action_space: A gym.spaces object that specifies the format of actions. **kwargs: arguments to pass to the downstream agent at construction time. Updates user impression/click count given user response on each item.
2.300014
2
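Note on the record above: the update rule is plain per-cluster tallying, where every recommended document bumps its cluster's impression count and clicked ones also bump the click count. A standalone sketch of that bookkeeping (plain Python, no recsim dependency):

num_clusters = 3
stats = {"impression_count": [0] * num_clusters, "click_count": [0] * num_clusters}
responses = [{"cluster_id": 0, "click": True}, {"cluster_id": 2, "click": False}]
for response in responses:
    cid = int(response["cluster_id"])
    stats["impression_count"][cid] += 1    # every recommendation counts as an impression
    if response["click"]:
        stats["click_count"][cid] += 1     # clicks are counted on top
print(stats)  # {'impression_count': [1, 0, 1], 'click_count': [1, 0, 0]}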
jacoren/memory.py
kuszaj/jacoren
1
6621666
# -*- coding: utf-8 -*- """Utilities for memory info.""" import psutil from collections import OrderedDict def memory_ram(percent=False): """ Return memory metrics. Function returns an OrderedDict:: { 'total': <total memory>, 'available': <available memory>, 'used': <used memory>, 'free': <free memory>, ... }, Above fields are available for every platform. Additionally, there are other, platform-specific fields: Linux: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``buffers`` - cache for things like file system metadata * ``cached`` - cache for various other things * ``shared`` - shared among multiple processes BSD: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``buffers`` - cache for things like file system metadata * ``cached`` - cache for various other things * ``shared`` - shared among multiple processes * ``wired`` - marked to always stay in RAM, never to disk OSX: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``wired`` - marked to always stay in RAM, never to disk Other POSIX platforms: * ``active`` - currently or very recently used * ``inactive`` - marked as not used :Example: >>> import jacoren >>> jacoren.memory.memory_ram() OrderedDict([('total', 4218454016), ('available', 1113473024), ('used', 2908168192), ('free', 138534912), ('active', 2210504704), ('inactive', 1627693056), ('buffers', 43229184), ('cached', 1128521728), ('shared', 156925952)]) >>> jacoren.memory.memory_ram(percent=True) OrderedDict([('total', 4218454016), ('available', 26.77), ('used', 68.54), ('free', 4.0), ('active', 51.99), ('inactive', 38.25), ('buffers', 1.07), ('cached', 26.39), ('shared', 3.74)]) :param percent: If true, function will return all values (except for ``total``) as percentages. Otherwise, it will return them as bytes. :type percent: bool .. note:: ``used`` and ``free`` can be calculated differently and will not necessarily match ``total - free`` and ``total - used``. It is recommended to use mostly ``total`` and ``available`` fields. :returns: RAM metrics :rtype: OrderedDict """ metrics = psutil.virtual_memory()._asdict() del metrics['percent'] if percent: total = metrics.pop('total') return OrderedDict( [('total', total)] + [(k, round(100. * v / total, 2)) for k, v in metrics.items()] ) else: return metrics def memory_swap(percent=False): """ Return swap metrics. Function returns an OrderedDict:: { 'total': <total memory>, 'used': <used memory>, 'free': <free memory>, ... }, Above fields are available for every platform. Additionally, there are other, platform-specific fields: POSIX platforms: * ``sin`` - bytes swapped in from disk * ``sout`` - bytes swapped out from disk :Example: >>> import jacoren >>> jacoren.memory.memory_swap() OrderedDict([('total', 2097147904), ('used', 615657472), ('free', 1481490432), ('sin', 190640128), ('sout', 704741376)]) >>> jacoren.memory.memory_swap(percent=True) OrderedDict([('total', 2097147904), ('used', 29.4), ('free', 70.64), ('sin', 190640128), ('sout', 704741376)]) :param percent: If true, function will return ``used`` and ``free`` as percentages. Otherwise, it will return them as bytes. Other fields are always returned as bytes. :type percent: bool :returns: Swap metrics :rtype: OrderedDict """ metrics = psutil.swap_memory() if percent: metrics = OrderedDict([ ('total', metrics.total), ('used', metrics.percent), ('free', round(100.
* metrics.free / metrics.total, 2)), ('sin', metrics.sin), ('sout', metrics.sout), ]) else: metrics = metrics._asdict() del metrics['percent'] if psutil.WINDOWS: # sin and sout always 0 for Windows del metrics['sin'] del metrics['sout'] return metrics def memory(percent=False): """ Return memory metrics. Function amalgamates all other functions available in this module. It returns an OrderedDict instance:: { 'ram': <memory_ram(percent)>, 'swap': <memory_swap(percent)>, } For a more specific description, please refer to the documentation of the functions above. :param percent: If true, function will return some values as percentages. Otherwise, it will return them as bytes. :type percent: bool :returns: Memory metrics :rtype: OrderedDict .. seealso:: :func:`jacoren.memory.memory_ram`, :func:`jacoren.memory.memory_swap` """ return OrderedDict(( ('ram', memory_ram(percent)), ('swap', memory_swap(percent)), ))
# -*- coding: utf-8 -*- """Utilities for memory info.""" import psutil from collections import OrderedDict def memory_ram(percent=False): """ Return memory metrics. Function returns an OrderedDict:: { 'total': <total memory>, 'available': <available memory>, 'used': <used memory>, 'free': <free memory>, ... }, Above fields are available for every platform. Additionally, there are other, platform-specific fields: Linux: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``buffers`` - cache for things like file system metadata * ``cached`` - cache for various other things * ``shared`` - shared among multiple processes BSD: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``buffers`` - cache for things like file system metadata * ``cached`` - cache for various other things * ``shared`` - shared among multiple processes * ``wired`` - marked to always stay in RAM, never to disk OSX: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``wired`` - marked to always stay in RAM, never to disk Other POSIX platforms: * ``active`` - currently or very recently used * ``inactive`` - marked as not used :Example: >>> import jacoren >>> jacoren.memory.memory_ram() OrderedDict([('total', 4218454016), ('available', 1113473024), ('used', 2908168192), ('free', 138534912), ('active', 2210504704), ('inactive', 1627693056), ('buffers', 43229184), ('cached', 1128521728), ('shared', 156925952)]) >>> jacoren.memory.memory_ram(percent=True) OrderedDict([('total', 4218454016), ('available', 26.77), ('used', 68.54), ('free', 4.0), ('active', 51.99), ('inactive', 38.25), ('buffers', 1.07), ('cached', 26.39), ('shared', 3.74)]) :param percent: If true, function will return all values (except for ``total``) as percentages. Otherwise, it will return them as bytes. :type percent: bool .. note:: ``used`` and ``free`` can be calculated differently and will not necessarily match ``total - free`` and ``total - used``. It is recommended to use mostly ``total`` and ``available`` fields. :returns: RAM metrics :rtype: OrderedDict """ metrics = psutil.virtual_memory()._asdict() del metrics['percent'] if percent: total = metrics.pop('total') return OrderedDict( [('total', total)] + [(k, round(100. * v / total, 2)) for k, v in metrics.items()] ) else: return metrics def memory_swap(percent=False): """ Return swap metrics. Function returns an OrderedDict:: { 'total': <total memory>, 'used': <used memory>, 'free': <free memory>, ... }, Above fields are available for every platform. Additionally, there are other, platform-specific fields: POSIX platforms: * ``sin`` - bytes swapped in from disk * ``sout`` - bytes swapped out from disk :Example: >>> import jacoren >>> jacoren.memory.memory_swap() OrderedDict([('total', 2097147904), ('used', 615657472), ('free', 1481490432), ('sin', 190640128), ('sout', 704741376)]) >>> jacoren.memory.memory_swap(percent=True) OrderedDict([('total', 2097147904), ('used', 29.4), ('free', 70.64), ('sin', 190640128), ('sout', 704741376)]) :param percent: If true, function will return ``used`` and ``free`` as percentages. Otherwise, it will return them as bytes. Other fields are always returned as bytes. :type percent: bool :returns: Swap metrics :rtype: OrderedDict """ metrics = psutil.swap_memory() if percent: metrics = OrderedDict([ ('total', metrics.total), ('used', metrics.percent), ('free', round(100.
* metrics.free / metrics.total, 2)), ('sin', metrics.sin), ('sout', metrics.sout), ]) else: metrics = metrics._asdict() del metrics['percent'] if psutil.WINDOWS: # sin and sout always 0 for Windows del metrics['sin'] del metrics['sout'] return metrics def memory(percent=False): """ Return memory metrics. Function amalgamates all other functions available in this module. It returns an OrderedDict instance:: { 'ram': <memory_ram(percent)>, 'swap': <memory_swap(percent)>, } For a more specific description, please refer to the documentation of the functions above. :param percent: If true, function will return some values as percentages. Otherwise, it will return them as bytes. :type percent: bool :returns: Memory metrics :rtype: OrderedDict .. seealso:: :func:`jacoren.memory.memory_ram`, :func:`jacoren.memory.memory_swap` """ return OrderedDict(( ('ram', memory_ram(percent)), ('swap', memory_swap(percent)), ))
en
0.625031
# -*- coding: utf-8 -*- Utilities for memory info. Return memory metrics. Function returns an OrderedDict:: { 'total': <total memory>, 'available': <available memory>, 'used': <used memory>, 'free': <free memory>, ... }, Above fields are available for every platform. Additionally, there are other, platform-specific fields: Linux: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``buffers`` - cache for things like file system metadata * ``cached`` - cache for various other things * ``shared`` - shared among multiple processes BSD: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``buffers`` - cache for things like file system metadata * ``cached`` - cache for various other things * ``shared`` - shared among multiple processes * ``wired`` - marked to always stay in RAM, never to disk OSX: * ``active`` - currently or very recently used * ``inactive`` - marked as not used * ``wired`` - marked to always stay in RAM, never to disk Other POSIX platforms: * ``active`` - currently or very recently used * ``inactive`` - marked as not used :Example: >>> import jacoren >>> jacoren.memory.memory_ram() OrderedDict([('total', 4218454016), ('available', 1113473024), ('used', 2908168192), ('free', 138534912), ('active', 2210504704), ('inactive', 1627693056), ('buffers', 43229184), ('cached', 1128521728), ('shared', 156925952)]) >>> jacoren.memory.memory_ram(percent=True) OrderedDict([('total', 4218454016), ('available', 26.77), ('used', 68.54), ('free', 4.0), ('active', 51.99), ('inactive', 38.25), ('buffers', 1.07), ('cached', 26.39), ('shared', 3.74)]) :param percent: If true, function will return all values (except for ``total``) as percentages. Otherwise, it will return them as bytes. :type percent: bool .. note:: ``used`` and ``free`` can be calculated differently and will not necessarily match ``total - free`` and ``total - used``. It is recommended to use mostly ``total`` and ``available`` fields. :returns: RAM metrics :rtype: OrderedDict Return swap metrics. Function returns an OrderedDict:: { 'total': <total memory>, 'used': <used memory>, 'free': <free memory>, ... }, Above fields are available for every platform. Additionally, there are other, platform-specific fields: POSIX platforms: * ``sin`` - bytes swapped in from disk * ``sout`` - bytes swapped out from disk :Example: >>> import jacoren >>> jacoren.memory.memory_swap() OrderedDict([('total', 2097147904), ('used', 615657472), ('free', 1481490432), ('sin', 190640128), ('sout', 704741376)]) >>> jacoren.memory.memory_swap(percent=True) OrderedDict([('total', 2097147904), ('used', 29.4), ('free', 70.64), ('sin', 190640128), ('sout', 704741376)]) :param percent: If true, function will return ``used`` and ``free`` as percentages. Otherwise, it will return them as bytes. Other fields are always returned as bytes. :type percent: bool :returns: Swap metrics :rtype: OrderedDict # sin and sout always 0 for Windows Return memory metrics. Function amalgamates all other functions available in this module. It returns an OrderedDict instance:: { 'ram': <memory_ram(percent)>, 'swap': <memory_swap(percent)>, } For a more specific description, please refer to the documentation of the functions above. :param percent: If true, function will return some values as percentages. Otherwise, it will return them as bytes. :type percent: bool :returns: Memory metrics :rtype: OrderedDict .. seealso:: :func:`jacoren.memory.memory_ram`, :func:`jacoren.memory.memory_swap`
3.067973
3
spring_cloud/commons/helpers/cache/cache_manager.py
haribo0915/Spring-Cloud-in-Python
5
6621667
<gh_stars>1-10 # -*- coding: utf-8 -*- """ A cache manager wrapper that supports some syntax sugar. Usage: value = cache_manager.get(cache_key) \ .on_cache_miss(lambda: retrieve_value(key)) """ __author__ = "Waterball (<EMAIL>)" __license__ = "Apache 2.0" # standard library from abc import ABC, abstractmethod class OnCacheMiss: def __init__(self, cache_manager, key, value): self.__cache_manager = cache_manager self.__key = key self.__value = value def on_cache_miss(self, cache_miss_func): """ :param cache_miss_func: (lambda ()->value) """ if not self.__value: value = cache_miss_func() self.__cache_manager.put(self.__key, value) return value return self.__value class CacheManager(ABC): """ Service Provider Interface (SPI) for basic caching. We might want to extend this class with many features in the future. (e.g. timeout, evict-and-replacement) """ def get(self, key) -> OnCacheMiss: value = self.retrieve_value(key) return OnCacheMiss(self, key, value) @abstractmethod def retrieve_value(self, key): pass @abstractmethod def put(self, key, value): pass class NaiveCacheManager(CacheManager): """ A very simple cache implementation without any optimization (i.e. only put them into a dict). """ def __init__(self): self.dict = {} def retrieve_value(self, key): return self.dict.get(key) def put(self, key, value): self.dict[key] = value
# -*- coding: utf-8 -*- """ A cache manager wrapper that supports some syntax sugar. Usage: value = cache_manager.get(cache_key) \ .on_cache_miss(lambda: retrieve_value(key)) """ __author__ = "Waterball (<EMAIL>)" __license__ = "Apache 2.0" # standard library from abc import ABC, abstractmethod class OnCacheMiss: def __init__(self, cache_manager, key, value): self.__cache_manager = cache_manager self.__key = key self.__value = value def on_cache_miss(self, cache_miss_func): """ :param cache_miss_func: (lambda ()->value) """ if not self.__value: value = cache_miss_func() self.__cache_manager.put(self.__key, value) return value return self.__value class CacheManager(ABC): """ Service Provider Interface (SPI) for basic caching. We might want to extend this class with many features in the future. (e.g. timeout, evict-and-replacement) """ def get(self, key) -> OnCacheMiss: value = self.retrieve_value(key) return OnCacheMiss(self, key, value) @abstractmethod def retrieve_value(self, key): pass @abstractmethod def put(self, key, value): pass class NaiveCacheManager(CacheManager): """ A very simple cache implementation without any optimization (i.e. only put them into a dict). """ def __init__(self): self.dict = {} def retrieve_value(self, key): return self.dict.get(key) def put(self, key, value): self.dict[key] = value
en
0.75503
# -*- coding: utf-8 -*- A cache manager wrapper that supports some syntax sugar. Usage: value = cache_manager.get(cache_key) \ .on_cache_miss(lambda: retrieve_value(key)) # standard library :param cache_miss_func: (lambda ()->value) Service Provider Interface (SPI) for basic caching. We might want to extend this class with many features in the future. (e.g. timeout, evict-and-replacement) A very simple cache implementation without any optimization (i.e. only put them into a dict).
3.245261
3
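Note on the record above: get() wraps the lookup result in OnCacheMiss, and on_cache_miss() only calls the supplied thunk (and stores its result) when the lookup came back falsy, so a cached None, 0, or empty string would be recomputed. A usage sketch against the record's own NaiveCacheManager:

cache_manager = NaiveCacheManager()

def retrieve_value(key):
    print("cache miss for", key)   # runs only on the first lookup
    return key.upper()

value = cache_manager.get("spring").on_cache_miss(lambda: retrieve_value("spring"))
assert value == "SPRING"
# the second lookup is served from the dict; retrieve_value is not called again
value = cache_manager.get("spring").on_cache_miss(lambda: retrieve_value("spring"))
assert value == "SPRING"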
mace/python/tools/dsp_ops.py
huuuuusy/MACE-Learn
7
6621668
<reponame>huuuuusy/MACE-Learn<filename>mace/python/tools/dsp_ops.py # Copyright 2018 Xiaomi, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class DspOps(object): def __init__(self): self.dsp_ops = { 'INPUT': 'INPUT', 'OUTPUT': 'OUTPUT', 'NoOp': 'Nop', 'FLATTEN': 'Flatten', 'Identity': 'Nop', 'Placeholder': 'INPUT', 'Const': 'Const', 'QuantizedConv2D': 'QuantizedConv2d_8x8to32', 'QuantizedMatMul': 'QuantizedMatMul_8x8to32', 'QuantizeDownAndShrinkRange': 'QuantizeDownAndShrinkRange_32to8', 'QuantizedRelu': 'QuantizedRelu_8', 'QuantizedReluX': 'QuantizedReluX_8', 'QuantizedMaxPool': 'QuantizedMaxPool_8', 'QuantizedAvgPool': 'QuantizedAvgPool_8', 'QuantizedConcat': 'QuantizedConcat_8', 'QuantizedBiasAdd': 'QuantizedBiasAdd_8p8to32', 'QuantizedResizeBilinear': 'QuantizedResizeBilinear_8', 'QuantizedSpaceToBatchND': 'QuantizedSpaceToBatchND_8', 'QuantizedBatchToSpaceND': 'QuantizedBatchToSpaceND_8', 'QuantizedSoftmax': 'QuantizedSoftmax_8', 'QuantizedTanh': 'QuantizedTanh_8', 'Min': 'Min_f', 'Max': 'Max_f', 'QuantizeV2': 'Quantize', 'Dequantize': 'Dequantize', 'Softmax': 'Softmax_f', 'Reshape': 'Reshape', 'QuantizedReshape': 'QuantizedReshape', 'Sigmoid': 'Sigmoid_f', 'Slice': 'Slice_f', 'Add': 'Add_f', 'Mul': 'Mul_f', 'Requantize': 'Requantize_32to8', 'RequantizationRange': 'RequantizationRange_32', 'Sub': 'Sub_f', 'Pack': 'Pack_int32', 'StridedSlice': 'StridedSlice_f', 'ExpandDims': 'ExpandDims_f', 'QuantizedMul': 'QuantizedMul_8x8to32', 'QuantizedAdd': 'QuantizedAdd_8p8to32', 'Pad': 'Pad_f', 'SpaceToBatchND': 'SpaceToBatchND_f', 'BatchToSpaceND': 'BatchToSpaceND_f', 'ResizeBilinear': 'ResizeBilinear_f', 'ConcatV2': 'ConcatV2_f', 'Conv2DBackpropInput': 'Deconv_f', 'Tanh': 'Tanh_f', 'Split': 'Split_f', 'Transpose': 'Transpose_f', 'Concat': 'Concat_f', 'AddN': 'AddN_f', } def has_op(self, tf_op): return tf_op in self.dsp_ops def map_nn_op(self, tf_op): if tf_op not in self.dsp_ops: raise Exception('Could not map nn op for: ', tf_op) return self.dsp_ops[tf_op]
# Copyright 2018 Xiaomi, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class DspOps(object): def __init__(self): self.dsp_ops = { 'INPUT': 'INPUT', 'OUTPUT': 'OUTPUT', 'NoOp': 'Nop', 'FLATTEN': 'Flatten', 'Identity': 'Nop', 'Placeholder': 'INPUT', 'Const': 'Const', 'QuantizedConv2D': 'QuantizedConv2d_8x8to32', 'QuantizedMatMul': 'QuantizedMatMul_8x8to32', 'QuantizeDownAndShrinkRange': 'QuantizeDownAndShrinkRange_32to8', 'QuantizedRelu': 'QuantizedRelu_8', 'QuantizedReluX': 'QuantizedReluX_8', 'QuantizedMaxPool': 'QuantizedMaxPool_8', 'QuantizedAvgPool': 'QuantizedAvgPool_8', 'QuantizedConcat': 'QuantizedConcat_8', 'QuantizedBiasAdd': 'QuantizedBiasAdd_8p8to32', 'QuantizedResizeBilinear': 'QuantizedResizeBilinear_8', 'QuantizedSpaceToBatchND': 'QuantizedSpaceToBatchND_8', 'QuantizedBatchToSpaceND': 'QuantizedBatchToSpaceND_8', 'QuantizedSoftmax': 'QuantizedSoftmax_8', 'QuantizedTanh': 'QuantizedTanh_8', 'Min': 'Min_f', 'Max': 'Max_f', 'QuantizeV2': 'Quantize', 'Dequantize': 'Dequantize', 'Softmax': 'Softmax_f', 'Reshape': 'Reshape', 'QuantizedReshape': 'QuantizedReshape', 'Sigmoid': 'Sigmoid_f', 'Slice': 'Slice_f', 'Add': 'Add_f', 'Mul': 'Mul_f', 'Requantize': 'Requantize_32to8', 'RequantizationRange': 'RequantizationRange_32', 'Sub': 'Sub_f', 'Pack': 'Pack_int32', 'StridedSlice': 'StridedSlice_f', 'ExpandDims': 'ExpandDims_f', 'QuantizedMul': 'QuantizedMul_8x8to32', 'QuantizedAdd': 'QuantizedAdd_8p8to32', 'Pad': 'Pad_f', 'SpaceToBatchND': 'SpaceToBatchND_f', 'BatchToSpaceND': 'BatchToSpaceND_f', 'ResizeBilinear': 'ResizeBilinear_f', 'ConcatV2': 'ConcatV2_f', 'Conv2DBackpropInput': 'Deconv_f', 'Tanh': 'Tanh_f', 'Split': 'Split_f', 'Transpose': 'Transpose_f', 'Concat': 'Concat_f', 'AddN': 'AddN_f', } def has_op(self, tf_op): return tf_op in self.dsp_ops def map_nn_op(self, tf_op): if tf_op not in self.dsp_ops: raise Exception('Could not map nn op for: ', tf_op) return self.dsp_ops[tf_op]
en
0.858705
# Copyright 2018 Xiaomi, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
1.627438
2
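A quick usage sketch of the mapping class in the record above (illustrative only; values come straight from the record's table):

ops = DspOps()
assert ops.has_op('Softmax')
print(ops.map_nn_op('Softmax'))          # -> Softmax_f
print(ops.map_nn_op('QuantizedConv2D'))  # -> QuantizedConv2d_8x8to32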
profile_generator/schema/schema.py
nethy/profile-generator
0
6621669
from abc import ABCMeta, abstractmethod from collections.abc import Mapping from typing import Any, Optional class SchemaError(Exception, metaclass=ABCMeta): ... class Schema(metaclass=ABCMeta): @abstractmethod def validate(self, data: Any) -> Optional[SchemaError]: ... def process( # pylint: disable=no-self-use self, data: Any # pylint: disable=unused-argument ) -> Mapping[str, str]: return {}
from abc import ABCMeta, abstractmethod from collections.abc import Mapping from typing import Any, Optional class SchemaError(Exception, metaclass=ABCMeta): ... class Schema(metaclass=ABCMeta): @abstractmethod def validate(self, data: Any) -> Optional[SchemaError]: ... def process( # pylint: disable=no-self-use self, data: Any # pylint: disable=unused-argument ) -> Mapping[str, str]: return {}
en
0.50174
# pylint: disable=no-self-use # pylint: disable=unused-argument
2.955651
3
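Note on the record above: Schema is a contract where validate returns None on success or a SchemaError describing the failure, and process may contribute extra mapping entries. A minimal hypothetical subclass, just to show the shape of the contract (the RequiredKeySchema and MissingKeyError names are illustrative, not part of the record):

from typing import Any, Optional

class MissingKeyError(SchemaError):
    def __init__(self, key: str):
        super().__init__(f"missing required key: {key}")

class RequiredKeySchema(Schema):  # hypothetical example subclass
    def __init__(self, key: str):
        self._key = key

    def validate(self, data: Any) -> Optional[SchemaError]:
        # report a SchemaError when the mapping lacks the required key
        if not isinstance(data, dict) or self._key not in data:
            return MissingKeyError(self._key)
        return None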
astropy/utils/tests/test_parsing.py
MatiasRepetto/astropy
1
6621670
<filename>astropy/utils/tests/test_parsing.py<gh_stars>1-10 # Licensed under a 3-clause BSD style license - see LICENSE.rst import importlib import secrets import sys from textwrap import dedent import pytest from astropy.utils.parsing import lex, yacc, _TAB_HEADER def _docstring_canary(): """Docstring that's here just to check for -OO.""" @pytest.mark.skipif(not _docstring_canary.__doc__, reason="Test cannot be run with -OO") def test_generate_parser(tmp_path, monkeypatch): # Write Python code into the temporary directory, so that the # generated tables will also go into the temporary directory. # We use a unique suffix so that the test can be run multiple times # without weirdness due to module caching. suffix = secrets.token_hex(16) lexer_file = tmp_path / f'test_parsing_lexer_{suffix}.py' lexer_file.write_text(dedent(fr""" from astropy.utils.parsing import lex def make_lexer(): tokens = ('NUMBER', 'PLUS') t_PLUS = r'\+' def t_NUMBER(t): r'\d+' t.value = int(t.value) return t return lex('test_parsing_lextab_{suffix}', 'test_parsing_lexer_{suffix}') """)) parser_file = tmp_path / f'test_parsing_parser_{suffix}.py' parser_file.write_text(dedent(fr""" from astropy.utils.parsing import yacc def make_parser(): tokens = ('NUMBER', 'PLUS') def p_expression_number(p): 'expression : NUMBER' p[0] = p[1] def p_expression_plus(p): 'expression : expression PLUS NUMBER' p[0] = p[1] + p[3] return yacc('test_parsing_parsetab_{suffix}', 'test_parsing_parser_{suffix}') """)) monkeypatch.syspath_prepend(tmp_path) lexer_mod = importlib.import_module(f'test_parsing_lexer_{suffix}') lexer = lexer_mod.make_lexer() parser_mod = importlib.import_module(f'test_parsing_parser_{suffix}') parser = parser_mod.make_parser() result = parser.parse('1+2+3', lexer=lexer) assert result == 6 lextab = (tmp_path / f'test_parsing_lextab_{suffix}.py').read_text() assert lextab.startswith(_TAB_HEADER.format(package=f'test_parsing_lexer_{suffix}')) parsetab = (tmp_path / f'test_parsing_parsetab_{suffix}.py').read_text() assert parsetab.startswith(_TAB_HEADER.format(package=f'test_parsing_parser_{suffix}'))
<filename>astropy/utils/tests/test_parsing.py<gh_stars>1-10 # Licensed under a 3-clause BSD style license - see LICENSE.rst import importlib import secrets import sys from textwrap import dedent import pytest from astropy.utils.parsing import lex, yacc, _TAB_HEADER def _docstring_canary(): """Docstring that's here just to check for -OO.""" @pytest.mark.skipif(not _docstring_canary.__doc__, reason="Test cannot be run with -OO") def test_generate_parser(tmp_path, monkeypatch): # Write Python code into the temporary directory, so that the # generated tables will also go into the temporary directory. # We use a unique suffix so that the test can be run multiple times # without weirdness due to module caching. suffix = secrets.token_hex(16) lexer_file = tmp_path / f'test_parsing_lexer_{suffix}.py' lexer_file.write_text(dedent(fr""" from astropy.utils.parsing import lex def make_lexer(): tokens = ('NUMBER', 'PLUS') t_PLUS = r'\+' def t_NUMBER(t): r'\d+' t.value = int(t.value) return t return lex('test_parsing_lextab_{suffix}', 'test_parsing_lexer_{suffix}') """)) parser_file = tmp_path / f'test_parsing_parser_{suffix}.py' parser_file.write_text(dedent(fr""" from astropy.utils.parsing import yacc def make_parser(): tokens = ('NUMBER', 'PLUS') def p_expression_number(p): 'expression : NUMBER' p[0] = p[1] def p_expression_plus(p): 'expression : expression PLUS NUMBER' p[0] = p[1] + p[3] return yacc('test_parsing_parsetab_{suffix}', 'test_parsing_parser_{suffix}') """)) monkeypatch.syspath_prepend(tmp_path) lexer_mod = importlib.import_module(f'test_parsing_lexer_{suffix}') lexer = lexer_mod.make_lexer() parser_mod = importlib.import_module(f'test_parsing_parser_{suffix}') parser = parser_mod.make_parser() result = parser.parse('1+2+3', lexer=lexer) assert result == 6 lextab = (tmp_path / f'test_parsing_lextab_{suffix}.py').read_text() assert lextab.startswith(_TAB_HEADER.format(package=f'test_parsing_lexer_{suffix}')) parsetab = (tmp_path / f'test_parsing_parsetab_{suffix}.py').read_text() assert parsetab.startswith(_TAB_HEADER.format(package=f'test_parsing_parser_{suffix}'))
en
0.564862
# Licensed under a 3-clause BSD style license - see LICENSE.rst Docstring that's here just to check for -OO. # Write Python code into the temporary directory, so that the # generated tables will also go into the temporary directory. # We use a unique suffix so that the test can be run multiple times # without weirdness due to module caching. from astropy.utils.parsing import lex def make_lexer(): tokens = ('NUMBER', 'PLUS') t_PLUS = r'\+' def t_NUMBER(t): r'\d+' t.value = int(t.value) return t return lex('test_parsing_lextab_{suffix}', 'test_parsing_lexer_{suffix}') from astropy.utils.parsing import yacc def make_parser(): tokens = ('NUMBER', 'PLUS') def p_expression_number(p): 'expression : NUMBER' p[0] = p[1] def p_expression_plus(p): 'expression : expression PLUS NUMBER' p[0] = p[1] + p[3] return yacc('test_parsing_parsetab_{suffix}', 'test_parsing_parser_{suffix}')
2.271583
2
work/tests/test_admin.py
allink/allink-apps
1
6621671
<reponame>allink/allink-apps # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from cms.utils.urlutils import admin_reverse from ..admin import WorkAdmin from . import BaseWorkTest class TestWorkAdmin(BaseWorkTest): def test_all_translations(self): # Check that all the available languages appear in `all_translations` model_admin = WorkAdmin(self.work1, admin.site) all_translations = model_admin.all_translations(self.work1) obj_id = self.work1.id change_url = admin_reverse('work_work_change', args=[obj_id]) self.assertTrue(change_url + '?language=en' in all_translations) self.assertTrue(change_url + '?language=de' in all_translations) self.assertTrue(change_url + '?language=fr' in all_translations)
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from cms.utils.urlutils import admin_reverse from ..admin import WorkAdmin from . import BaseWorkTest class TestWorkAdmin(BaseWorkTest): def test_all_translations(self): # Check that all the available languages appear in `all_translations` model_admin = WorkAdmin(self.work1, admin.site) all_translations = model_admin.all_translations(self.work1) obj_id = self.work1.id change_url = admin_reverse('work_work_change', args=[obj_id]) self.assertTrue(change_url + '?language=en' in all_translations) self.assertTrue(change_url + '?language=de' in all_translations) self.assertTrue(change_url + '?language=fr' in all_translations)
en
0.732762
# -*- coding: utf-8 -*- # Check that all the available languages appear in `all_translations`
2.155003
2
crop_images.py
BluePinetree/KaKR_3rd_month-Car_class_classification
0
6621672
import os import sys import numpy as np import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt import cv2 import PIL.Image # https://www.kaggle.com/tmheo74/3rd-ml-month-car-image-cropping-updated-7-10 # Data paths TRAIN_IMG_PATH = os.path.join(os.getcwd(), 'input/train') TEST_IMG_PATH = os.path.join(os.getcwd(), 'input/test') DATA_PATH = os.path.join(os.getcwd(), 'input') def crop_boxing_img(img, pos, margin=16): width, height = img.size x1 = max(0, pos[0] - margin) y1 = max(0, pos[1] - margin) x2 = min(width, pos[2] + margin) y2 = min(height, pos[3] + margin) cropped_img = img.crop((x1,y1,x2,y2)) # plt.imshow(cropped_img) # plt.show() return cropped_img def main(): df_train = pd.read_csv(os.path.join(DATA_PATH, 'train.csv')) df_test = pd.read_csv(os.path.join(DATA_PATH, 'test.csv')) if not os.path.exists('./input/train_crop'): os.mkdir('./input/train_crop') if not os.path.exists('./input/test_crop'): os.mkdir('./input/test_crop') # Crop the training images for i, img_name in tqdm(enumerate(df_train['img_file'])): img = PIL.Image.open(os.path.join(TRAIN_IMG_PATH, img_name)) pos = df_train.iloc[i][['bbox_x1', 'bbox_y1', 'bbox_x2', 'bbox_y2']].values.reshape(-1) cropped_img = crop_boxing_img(img, pos) cropped_img.save(os.path.join(DATA_PATH, 'train_crop/'+img_name)) # Crop the test images for i, img_name in tqdm(enumerate(df_test['img_file'])): img = PIL.Image.open(os.path.join(TEST_IMG_PATH, img_name)) pos = df_test.iloc[i][['bbox_x1', 'bbox_y1', 'bbox_x2', 'bbox_y2']].values.reshape(-1) cropped_img = crop_boxing_img(img, pos) cropped_img.save(os.path.join(DATA_PATH, 'test_crop/' + img_name)) if __name__ == '__main__': main()
import os import sys import numpy as np import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt import cv2 import PIL # https://www.kaggle.com/tmheo74/3rd-ml-month-car-image-cropping-updated-7-10 # 데이터 경로 TRAIN_IMG_PATH = os.path.join(os.getcwd(), 'input/train') TEST_IMG_PATH = os.path.join(os.getcwd(), 'input/test') DATA_PATH = os.path.join(os.getcwd(), 'input') def crop_boxing_img(img, pos, margin=16): width, height = img.size x1 = max(0, pos[0] - margin) y1 = max(0, pos[1] - margin) x2 = min(width, pos[2] + margin) y2 = min(height, pos[3] + margin) cropped_img = img.crop((x1,y1,x2,y2)) # plt.imshow(cropped_img) # plt.show() return cropped_img def main(): df_train = pd.read_csv(os.path.join(DATA_PATH, 'train.csv')) df_test = pd.read_csv(os.path.join(DATA_PATH, 'test.csv')) if not os.path.exists('./input/train_crop'): os.mkdir('./input/train_crop') if not os.path.exists('./input/test_crop'): os.mkdir('./input/test_crop') # 훈련 이미지 자르기 for i, img_name in tqdm(enumerate(df_train['img_file'])): img = PIL.Image.open(os.path.join(TRAIN_IMG_PATH, img_name)) pos = df_train.iloc[i][['bbox_x1', 'bbox_y1', 'bbox_x2', 'bbox_y2']].values.reshape(-1) cropped_img = crop_boxing_img(img, pos) cropped_img.save(os.path.join(DATA_PATH, 'train_crop/'+img_name)) # 시험 이미지 자르기 for i, img_name in tqdm(enumerate(df_test['img_file'])): img = PIL.Image.open(os.path.join(TEST_IMG_PATH, img_name)) pos = df_test.iloc[i][['bbox_x1', 'bbox_y1', 'bbox_x2', 'bbox_y2']].values.reshape(-1) cropped_img = crop_boxing_img(img, pos) cropped_img.save(os.path.join(DATA_PATH, 'test_crop/' + img_name)) if __name__ == '__main__': main()
ko
0.850519
# https://www.kaggle.com/tmheo74/3rd-ml-month-car-image-cropping-updated-7-10 # data paths # plt.imshow(cropped_img) # plt.show() # crop the training images # crop the test images
2.745796
3
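The crop_boxing_img helper in the record above is self-contained, so it can be exercised outside of main(). A minimal usage sketch, assuming the function is in scope and using a made-up image file and bounding box:

import numpy as np
import PIL.Image

img = PIL.Image.open('car.jpg')                 # hypothetical input image
pos = np.array([120, 80, 540, 360])             # hypothetical bbox: x1, y1, x2, y2
cropped = crop_boxing_img(img, pos, margin=16)  # pads the box by 16 px, clamped to the image borders
cropped.save('car_crop.jpg')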
payo_anasayfa.py
cagataysarioglu/pyPasswordManager_Qt
3
6621673
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'payo_anasayfa.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_AnaPencere(object): def setupUi(self, AnaPencere): AnaPencere.setObjectName("AnaPencere") AnaPencere.resize(668, 584) AnaPencere.setMinimumSize(QtCore.QSize(668, 584)) AnaPencere.setMaximumSize(QtCore.QSize(668, 584)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(10) AnaPencere.setFont(font) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("../../Kullanıcılar/H_Cagatay_Sarioglu/Resimler/Uygulama Görselleri/hcs_ikon_16x16.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) AnaPencere.setWindowIcon(icon) AnaPencere.setStyleSheet("QWidget {\n" " background-color:rgb(0, 0, 0);\n" " color: rgb(240, 240, 240);\n" " border-color: rgb(58, 58, 58);\n" "}\n" "\n" "QPlainTextEdit {\n" " background-color:rgb(0, 0, 0);\n" " color: rgb(200, 200, 200);\n" " selection-background-color: rgb(255, 153, 0);\n" " selection-color: rgb(0, 0, 0);\n" "}\n" "\n" "QTabWidget::pane {\n" " border-top: 1px solid #000000;\n" "}\n" "\n" "QTableWidget{\n" "color: #cfcfcf;\n" "background-color:#363636;\n" "}\n" "\n" "QTabBar::tab {\n" " background-color:rgb(0, 0, 0);\n" " border-style: outset;\n" " border-width: 1px;\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-bottom-color: rgb(58, 58, 58);\n" " border-bottom-width: 1px;\n" " border-top-width: 0px;\n" " border-style: solid;\n" " color: rgb(255, 153, 0);\n" " padding: 4px;\n" "}\n" "\n" "QTabBar::tab:selected, QTabBar::tab:hover {\n" " color: rgb(255, 255, 255);\n" " background-color:rgb(0, 0, 0);\n" " border-color:rgb(42, 42, 42);\n" " margin-left: 0px;\n" " margin-right: 0px;\n" " border-bottom-right-radius:4px;\n" " border-bottom-left-radius:4px;\n" "}\n" "\n" "QTabBar::tab:last:selected {\n" " background-color:rgb(0, 0, 0);\n" " border-color:rgb(42, 42, 42);\n" " margin-left: 0px;\n" " margin-right: 0px;\n" " border-bottom-right-radius:4px;\n" " border-bottom-left-radius:4px;\n" "}\n" "\n" "QTabBar::tab:!selected {\n" " margin-bottom: 4px;\n" " border-bottom-right-radius:4px;\n" " border-bottom-left-radius:4px;\n" "}\n" "\n" "QPushButton{\n" " border-style: outset;\n" " border-width: 2px;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-bottom-color: rgb(58, 58, 58);\n" " border-bottom-width: 1px;\n" " border-style: solid;\n" " color: rgb(255, 255, 255);\n" " padding: 6px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(77, 77, 77, 255), stop:1 rgba(97, 97, 97, 255));\n" "}\n" "\n" "QPushButton:hover{\n" " border-style: outset;\n" " border-width: 2px;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, 
x2:0.5, y2:0.4, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\n" " border-bottom-color: rgb(115, 115, 115);\n" " border-bottom-width: 1px;\n" " border-style: solid;\n" " color: rgb(255, 255, 255);\n" " padding: 6px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(107, 107, 107, 255), stop:1 rgba(157, 157, 157, 255));\n" "}\n" "\n" "QPushButton:pressed{\n" " border-style: outset;\n" " border-width: 2px;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(62, 62, 62, 255), stop:1 rgba(22, 22, 22, 255));\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-bottom-color: rgb(58, 58, 58);\n" " border-bottom-width: 1px;\n" " border-style: solid;\n" " color: rgb(255, 255, 255);\n" " padding: 6px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(77, 77, 77, 255), stop:1 rgba(97, 97, 97, 255));\n" "}\n" "\n" "QPushButton:disabled{\n" " border-style: outset;\n" " border-width: 2px;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-bottom-color: rgb(58, 58, 58);\n" " border-bottom-width: 1px;\n" " border-style: solid;\n" " color: rgb(0, 0, 0);\n" " padding: 6px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(57, 57, 57, 255), stop:1 rgba(77, 77, 77, 255));\n" "}\n" "\n" "QLineEdit {\n" " border-width: 1px; border-radius: 4px;\n" " border-color: rgb(58, 58, 58);\n" " border-style: inset;\n" " padding: 0 8px;\n" " color: rgb(255, 255, 255);\n" " background:rgb(101, 101, 101);\n" " selection-background-color: rgb(187, 187, 187);\n" " selection-color: rgb(60, 63, 65);\n" "}\n" "\n" "QProgressBar {\n" " text-align: center;\n" " color: rgb(255, 255, 255);\n" " border-width: 1px; \n" " border-radius: 10px;\n" " border-color: rgb(58, 58, 58);\n" " border-style: inset;\n" "}\n" "\n" "QProgressBar::chunk {\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:0.7, x2:0.5, y2:0.3, stop:0 rgba(0, 200, 0, 255), stop:1 rgba(30, 230, 30, 255));\n" " border-radius: 10px;\n" "}\n" "\n" "QMenuBar {\n" " background:rgb(0, 0, 0);\n" " color: rgb(255, 153, 0);\n" "}\n" "\n" "QMenuBar::item {\n" " spacing: 3px; \n" " padding: 1px 4px;\n" " background: transparent;\n" "}\n" "\n" "QMenuBar::item:selected { \n" " background:rgb(115, 115, 115);\n" "}\n" "\n" "QMenu {\n" " border-width: 2px; \n" " border-radius: 10px;\n" " border-color: rgb(255, 153, 0);\n" " border-style: outset;\n" "}\n" "\n" "QMenu::item {\n" " spacing: 3px; \n" " padding: 3px 15px;\n" "}\n" "\n" 
"QMenu::item:selected {\n" " spacing: 3px; \n" " padding: 3px 15px;\n" " background:rgb(115, 115, 115);\n" " color:rgb(255, 255, 255);\n" " border-width: 1px; \n" " border-radius: 10px;\n" " border-color: rgb(58, 58, 58);\n" " border-style: inset;\n" "}") self.centralwidget = QtWidgets.QWidget(AnaPencere) self.centralwidget.setObjectName("centralwidget") self.grpKullaniciGiris = QtWidgets.QGroupBox(self.centralwidget) self.grpKullaniciGiris.setGeometry(QtCore.QRect(30, 30, 481, 191)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(9) font.setBold(False) font.setWeight(50) self.grpKullaniciGiris.setFont(font) self.grpKullaniciGiris.setObjectName("grpKullaniciGiris") self.layoutWidget = QtWidgets.QWidget(self.grpKullaniciGiris) self.layoutWidget.setGeometry(QtCore.QRect(10, 30, 451, 139)) self.layoutWidget.setObjectName("layoutWidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout_4 = QtWidgets.QHBoxLayout() self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.lblUygulama = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.lblUygulama.setFont(font) self.lblUygulama.setObjectName("lblUygulama") self.horizontalLayout_4.addWidget(self.lblUygulama) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_4.addItem(spacerItem) self.lneUygulama = QtWidgets.QLineEdit(self.layoutWidget) self.lneUygulama.setMinimumSize(QtCore.QSize(300, 0)) self.lneUygulama.setObjectName("lneUygulama") self.horizontalLayout_4.addWidget(self.lneUygulama) self.verticalLayout.addLayout(self.horizontalLayout_4) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.lblKullanici = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.lblKullanici.setFont(font) self.lblKullanici.setObjectName("lblKullanici") self.horizontalLayout_3.addWidget(self.lblKullanici) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem1) self.lneKullanici = QtWidgets.QLineEdit(self.layoutWidget) self.lneKullanici.setMinimumSize(QtCore.QSize(300, 0)) self.lneKullanici.setObjectName("lneKullanici") self.horizontalLayout_3.addWidget(self.lneKullanici) self.verticalLayout.addLayout(self.horizontalLayout_3) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.lblAcarga = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.lblAcarga.setFont(font) self.lblAcarga.setObjectName("lblAcarga") self.horizontalLayout_2.addWidget(self.lblAcarga) spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem2) self.lneAcarga = QtWidgets.QLineEdit(self.layoutWidget) self.lneAcarga.setMinimumSize(QtCore.QSize(300, 0)) self.lneAcarga.setObjectName("lneAcarga") self.horizontalLayout_2.addWidget(self.lneAcarga) self.verticalLayout.addLayout(self.horizontalLayout_2) self.horizontalLayout = QtWidgets.QHBoxLayout() 
self.horizontalLayout.setObjectName("horizontalLayout") self.lblEkBilgi = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.lblEkBilgi.setFont(font) self.lblEkBilgi.setObjectName("lblEkBilgi") self.horizontalLayout.addWidget(self.lblEkBilgi) spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem3) self.lneEkBilgi = QtWidgets.QLineEdit(self.layoutWidget) self.lneEkBilgi.setMinimumSize(QtCore.QSize(300, 0)) self.lneEkBilgi.setObjectName("lneEkBilgi") self.horizontalLayout.addWidget(self.lneEkBilgi) self.verticalLayout.addLayout(self.horizontalLayout) self.dgmTumu = QtWidgets.QPushButton(self.centralwidget) self.dgmTumu.setGeometry(QtCore.QRect(30, 480, 121, 50)) self.dgmTumu.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmTumu.setFont(font) self.dgmTumu.setObjectName("dgmTumu") self.dgmCik = QtWidgets.QPushButton(self.centralwidget) self.dgmCik.setGeometry(QtCore.QRect(570, 480, 61, 50)) self.dgmCik.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmCik.setFont(font) self.dgmCik.setObjectName("dgmCik") self.layoutWidget1 = QtWidgets.QWidget(self.centralwidget) self.layoutWidget1.setGeometry(QtCore.QRect(530, 30, 98, 201)) self.layoutWidget1.setObjectName("layoutWidget1") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1) self.verticalLayout_2.setContentsMargins(0, 0, 0, 0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.dgmSakla = QtWidgets.QPushButton(self.layoutWidget1) self.dgmSakla.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmSakla.setFont(font) self.dgmSakla.setObjectName("dgmSakla") self.verticalLayout_2.addWidget(self.dgmSakla) self.dgmGuncelle = QtWidgets.QPushButton(self.layoutWidget1) self.dgmGuncelle.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmGuncelle.setFont(font) self.dgmGuncelle.setObjectName("dgmGuncelle") self.verticalLayout_2.addWidget(self.dgmGuncelle) self.dgmSil = QtWidgets.QPushButton(self.layoutWidget1) self.dgmSil.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmSil.setFont(font) self.dgmSil.setObjectName("dgmSil") self.verticalLayout_2.addWidget(self.dgmSil) self.layoutWidget2 = QtWidgets.QWidget(self.centralwidget) self.layoutWidget2.setGeometry(QtCore.QRect(112, 240, 401, 42)) self.layoutWidget2.setObjectName("layoutWidget2") self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.layoutWidget2) self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.lneAra = QtWidgets.QLineEdit(self.layoutWidget2) self.lneAra.setMinimumSize(QtCore.QSize(250, 30)) self.lneAra.setMaximumSize(QtCore.QSize(16777215, 30)) self.lneAra.setObjectName("lneAra") self.horizontalLayout_5.addWidget(self.lneAra) spacerItem4 = QtWidgets.QSpacerItem(15, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_5.addItem(spacerItem4) self.dgmAra = 
QtWidgets.QPushButton(self.layoutWidget2) self.dgmAra.setMinimumSize(QtCore.QSize(50, 30)) self.dgmAra.setMaximumSize(QtCore.QSize(16777215, 30)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmAra.setFont(font) self.dgmAra.setObjectName("dgmAra") self.horizontalLayout_5.addWidget(self.dgmAra) self.dgmTemizle = QtWidgets.QPushButton(self.layoutWidget2) self.dgmTemizle.setMinimumSize(QtCore.QSize(50, 30)) self.dgmTemizle.setMaximumSize(QtCore.QSize(16777215, 30)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmTemizle.setFont(font) self.dgmTemizle.setObjectName("dgmTemizle") self.horizontalLayout_5.addWidget(self.dgmTemizle) self.anaSayfaTablosu = QtWidgets.QTableWidget(self.centralwidget) self.anaSayfaTablosu.setGeometry(QtCore.QRect(30, 300, 601, 161)) self.anaSayfaTablosu.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.anaSayfaTablosu.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) self.anaSayfaTablosu.setShowGrid(False) self.anaSayfaTablosu.setWordWrap(False) self.anaSayfaTablosu.setCornerButtonEnabled(True) self.anaSayfaTablosu.setRowCount(100) self.anaSayfaTablosu.setColumnCount(5) self.anaSayfaTablosu.setObjectName("anaSayfaTablosu") self.anaSayfaTablosu.horizontalHeader().setCascadingSectionResizes(False) self.anaSayfaTablosu.horizontalHeader().setHighlightSections(True) self.anaSayfaTablosu.horizontalHeader().setMinimumSectionSize(40) self.anaSayfaTablosu.horizontalHeader().setSortIndicatorShown(False) self.anaSayfaTablosu.horizontalHeader().setStretchLastSection(False) self.anaSayfaTablosu.verticalHeader().setVisible(False) AnaPencere.setCentralWidget(self.centralwidget) self.menuCubugu = QtWidgets.QMenuBar(AnaPencere) self.menuCubugu.setGeometry(QtCore.QRect(0, 0, 668, 23)) self.menuCubugu.setObjectName("menuCubugu") self.menuYardim = QtWidgets.QMenu(self.menuCubugu) self.menuYardim.setObjectName("menuYardim") self.menuHakkinda = QtWidgets.QMenu(self.menuYardim) self.menuHakkinda.setObjectName("menuHakkinda") AnaPencere.setMenuBar(self.menuCubugu) self.durumCubugu = QtWidgets.QStatusBar(AnaPencere) self.durumCubugu.setObjectName("durumCubugu") AnaPencere.setStatusBar(self.durumCubugu) self.actHcsYazilim = QtWidgets.QAction(AnaPencere) self.actHcsYazilim.setObjectName("actHcsYazilim") self.menuHakkinda.addAction(self.actHcsYazilim) self.menuYardim.addAction(self.menuHakkinda.menuAction()) self.menuCubugu.addAction(self.menuYardim.menuAction()) self.retranslateUi(AnaPencere) QtCore.QMetaObject.connectSlotsByName(AnaPencere) def retranslateUi(self, AnaPencere): _translate = QtCore.QCoreApplication.translate AnaPencere.setWindowTitle(_translate("AnaPencere", "Parola Yöneticisi")) self.grpKullaniciGiris.setTitle(_translate("AnaPencere", "Kullanıcı Giriş Bilgileri")) self.lblUygulama.setText(_translate("AnaPencere", "Uygulama:")) self.lneUygulama.setPlaceholderText(_translate("AnaPencere", "Kullandığınız uygulamanın adı")) self.lblKullanici.setText(_translate("AnaPencere", "Kullanıcı:")) self.lneKullanici.setPlaceholderText(_translate("AnaPencere", "Kullanıcı adınız veya e-postanız")) self.lblAcarga.setText(_translate("AnaPencere", "Açarga:")) self.lneAcarga.setPlaceholderText(_translate("AnaPencere", "Belirlediğiniz parolanız")) self.lblEkBilgi.setText(_translate("AnaPencere", "Ek Bilgi:")) self.lneEkBilgi.setPlaceholderText(_translate("AnaPencere", "Varsa ek bilgileriniz")) 
self.dgmTumu.setText(_translate("AnaPencere", "Tümünü Göster")) self.dgmCik.setText(_translate("AnaPencere", "Çık")) self.dgmSakla.setText(_translate("AnaPencere", "Sakla")) self.dgmGuncelle.setText(_translate("AnaPencere", "Güncelle")) self.dgmSil.setText(_translate("AnaPencere", "Sil")) self.lneAra.setPlaceholderText(_translate("AnaPencere", "Aratacağınız anahtar sözcük")) self.dgmAra.setText(_translate("AnaPencere", "Ara")) self.dgmTemizle.setText(_translate("AnaPencere", "Temizle")) self.menuYardim.setTitle(_translate("AnaPencere", "Yardım")) self.menuHakkinda.setTitle(_translate("AnaPencere", "Hakkında")) self.actHcsYazilim.setText(_translate("AnaPencere", "hçsYazılım"))
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'payo_anasayfa.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_AnaPencere(object): def setupUi(self, AnaPencere): AnaPencere.setObjectName("AnaPencere") AnaPencere.resize(668, 584) AnaPencere.setMinimumSize(QtCore.QSize(668, 584)) AnaPencere.setMaximumSize(QtCore.QSize(668, 584)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(10) AnaPencere.setFont(font) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("../../Kullanıcılar/H_Cagatay_Sarioglu/Resimler/Uygulama Görselleri/hcs_ikon_16x16.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) AnaPencere.setWindowIcon(icon) AnaPencere.setStyleSheet("QWidget {\n" " background-color:rgb(0, 0, 0);\n" " color: rgb(240, 240, 240);\n" " border-color: rgb(58, 58, 58);\n" "}\n" "\n" "QPlainTextEdit {\n" " background-color:rgb(0, 0, 0);\n" " color: rgb(200, 200, 200);\n" " selection-background-color: rgb(255, 153, 0);\n" " selection-color: rgb(0, 0, 0);\n" "}\n" "\n" "QTabWidget::pane {\n" " border-top: 1px solid #000000;\n" "}\n" "\n" "QTableWidget{\n" "color: #cfcfcf;\n" "background-color:#363636;\n" "}\n" "\n" "QTabBar::tab {\n" " background-color:rgb(0, 0, 0);\n" " border-style: outset;\n" " border-width: 1px;\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-bottom-color: rgb(58, 58, 58);\n" " border-bottom-width: 1px;\n" " border-top-width: 0px;\n" " border-style: solid;\n" " color: rgb(255, 153, 0);\n" " padding: 4px;\n" "}\n" "\n" "QTabBar::tab:selected, QTabBar::tab:hover {\n" " color: rgb(255, 255, 255);\n" " background-color:rgb(0, 0, 0);\n" " border-color:rgb(42, 42, 42);\n" " margin-left: 0px;\n" " margin-right: 0px;\n" " border-bottom-right-radius:4px;\n" " border-bottom-left-radius:4px;\n" "}\n" "\n" "QTabBar::tab:last:selected {\n" " background-color:rgb(0, 0, 0);\n" " border-color:rgb(42, 42, 42);\n" " margin-left: 0px;\n" " margin-right: 0px;\n" " border-bottom-right-radius:4px;\n" " border-bottom-left-radius:4px;\n" "}\n" "\n" "QTabBar::tab:!selected {\n" " margin-bottom: 4px;\n" " border-bottom-right-radius:4px;\n" " border-bottom-left-radius:4px;\n" "}\n" "\n" "QPushButton{\n" " border-style: outset;\n" " border-width: 2px;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-bottom-color: rgb(58, 58, 58);\n" " border-bottom-width: 1px;\n" " border-style: solid;\n" " color: rgb(255, 255, 255);\n" " padding: 6px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(77, 77, 77, 255), stop:1 rgba(97, 97, 97, 255));\n" "}\n" "\n" "QPushButton:hover{\n" " border-style: outset;\n" " border-width: 2px;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, 
x2:0.5, y2:0.4, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(180, 180, 180, 255), stop:1 rgba(110, 110, 110, 255));\n" " border-bottom-color: rgb(115, 115, 115);\n" " border-bottom-width: 1px;\n" " border-style: solid;\n" " color: rgb(255, 255, 255);\n" " padding: 6px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(107, 107, 107, 255), stop:1 rgba(157, 157, 157, 255));\n" "}\n" "\n" "QPushButton:pressed{\n" " border-style: outset;\n" " border-width: 2px;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(62, 62, 62, 255), stop:1 rgba(22, 22, 22, 255));\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-bottom-color: rgb(58, 58, 58);\n" " border-bottom-width: 1px;\n" " border-style: solid;\n" " color: rgb(255, 255, 255);\n" " padding: 6px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(77, 77, 77, 255), stop:1 rgba(97, 97, 97, 255));\n" "}\n" "\n" "QPushButton:disabled{\n" " border-style: outset;\n" " border-width: 2px;\n" " border-top-color: qlineargradient(spread:pad, x1:0.5, y1:0.6, x2:0.5, y2:0.4, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-right-color: qlineargradient(spread:pad, x1:0.4, y1:0.5, x2:0.6, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-left-color: qlineargradient(spread:pad, x1:0.6, y1:0.5, x2:0.4, y2:0.5, stop:0 rgba(115, 115, 115, 255), stop:1 rgba(62, 62, 62, 255));\n" " border-bottom-color: rgb(58, 58, 58);\n" " border-bottom-width: 1px;\n" " border-style: solid;\n" " color: rgb(0, 0, 0);\n" " padding: 6px;\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(57, 57, 57, 255), stop:1 rgba(77, 77, 77, 255));\n" "}\n" "\n" "QLineEdit {\n" " border-width: 1px; border-radius: 4px;\n" " border-color: rgb(58, 58, 58);\n" " border-style: inset;\n" " padding: 0 8px;\n" " color: rgb(255, 255, 255);\n" " background:rgb(101, 101, 101);\n" " selection-background-color: rgb(187, 187, 187);\n" " selection-color: rgb(60, 63, 65);\n" "}\n" "\n" "QProgressBar {\n" " text-align: center;\n" " color: rgb(255, 255, 255);\n" " border-width: 1px; \n" " border-radius: 10px;\n" " border-color: rgb(58, 58, 58);\n" " border-style: inset;\n" "}\n" "\n" "QProgressBar::chunk {\n" " background-color: qlineargradient(spread:pad, x1:0.5, y1:0.7, x2:0.5, y2:0.3, stop:0 rgba(0, 200, 0, 255), stop:1 rgba(30, 230, 30, 255));\n" " border-radius: 10px;\n" "}\n" "\n" "QMenuBar {\n" " background:rgb(0, 0, 0);\n" " color: rgb(255, 153, 0);\n" "}\n" "\n" "QMenuBar::item {\n" " spacing: 3px; \n" " padding: 1px 4px;\n" " background: transparent;\n" "}\n" "\n" "QMenuBar::item:selected { \n" " background:rgb(115, 115, 115);\n" "}\n" "\n" "QMenu {\n" " border-width: 2px; \n" " border-radius: 10px;\n" " border-color: rgb(255, 153, 0);\n" " border-style: outset;\n" "}\n" "\n" "QMenu::item {\n" " spacing: 3px; \n" " padding: 3px 15px;\n" "}\n" "\n" 
"QMenu::item:selected {\n" " spacing: 3px; \n" " padding: 3px 15px;\n" " background:rgb(115, 115, 115);\n" " color:rgb(255, 255, 255);\n" " border-width: 1px; \n" " border-radius: 10px;\n" " border-color: rgb(58, 58, 58);\n" " border-style: inset;\n" "}") self.centralwidget = QtWidgets.QWidget(AnaPencere) self.centralwidget.setObjectName("centralwidget") self.grpKullaniciGiris = QtWidgets.QGroupBox(self.centralwidget) self.grpKullaniciGiris.setGeometry(QtCore.QRect(30, 30, 481, 191)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(9) font.setBold(False) font.setWeight(50) self.grpKullaniciGiris.setFont(font) self.grpKullaniciGiris.setObjectName("grpKullaniciGiris") self.layoutWidget = QtWidgets.QWidget(self.grpKullaniciGiris) self.layoutWidget.setGeometry(QtCore.QRect(10, 30, 451, 139)) self.layoutWidget.setObjectName("layoutWidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout_4 = QtWidgets.QHBoxLayout() self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.lblUygulama = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.lblUygulama.setFont(font) self.lblUygulama.setObjectName("lblUygulama") self.horizontalLayout_4.addWidget(self.lblUygulama) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_4.addItem(spacerItem) self.lneUygulama = QtWidgets.QLineEdit(self.layoutWidget) self.lneUygulama.setMinimumSize(QtCore.QSize(300, 0)) self.lneUygulama.setObjectName("lneUygulama") self.horizontalLayout_4.addWidget(self.lneUygulama) self.verticalLayout.addLayout(self.horizontalLayout_4) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.lblKullanici = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.lblKullanici.setFont(font) self.lblKullanici.setObjectName("lblKullanici") self.horizontalLayout_3.addWidget(self.lblKullanici) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem1) self.lneKullanici = QtWidgets.QLineEdit(self.layoutWidget) self.lneKullanici.setMinimumSize(QtCore.QSize(300, 0)) self.lneKullanici.setObjectName("lneKullanici") self.horizontalLayout_3.addWidget(self.lneKullanici) self.verticalLayout.addLayout(self.horizontalLayout_3) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.lblAcarga = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.lblAcarga.setFont(font) self.lblAcarga.setObjectName("lblAcarga") self.horizontalLayout_2.addWidget(self.lblAcarga) spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem2) self.lneAcarga = QtWidgets.QLineEdit(self.layoutWidget) self.lneAcarga.setMinimumSize(QtCore.QSize(300, 0)) self.lneAcarga.setObjectName("lneAcarga") self.horizontalLayout_2.addWidget(self.lneAcarga) self.verticalLayout.addLayout(self.horizontalLayout_2) self.horizontalLayout = QtWidgets.QHBoxLayout() 
self.horizontalLayout.setObjectName("horizontalLayout") self.lblEkBilgi = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.lblEkBilgi.setFont(font) self.lblEkBilgi.setObjectName("lblEkBilgi") self.horizontalLayout.addWidget(self.lblEkBilgi) spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem3) self.lneEkBilgi = QtWidgets.QLineEdit(self.layoutWidget) self.lneEkBilgi.setMinimumSize(QtCore.QSize(300, 0)) self.lneEkBilgi.setObjectName("lneEkBilgi") self.horizontalLayout.addWidget(self.lneEkBilgi) self.verticalLayout.addLayout(self.horizontalLayout) self.dgmTumu = QtWidgets.QPushButton(self.centralwidget) self.dgmTumu.setGeometry(QtCore.QRect(30, 480, 121, 50)) self.dgmTumu.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmTumu.setFont(font) self.dgmTumu.setObjectName("dgmTumu") self.dgmCik = QtWidgets.QPushButton(self.centralwidget) self.dgmCik.setGeometry(QtCore.QRect(570, 480, 61, 50)) self.dgmCik.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmCik.setFont(font) self.dgmCik.setObjectName("dgmCik") self.layoutWidget1 = QtWidgets.QWidget(self.centralwidget) self.layoutWidget1.setGeometry(QtCore.QRect(530, 30, 98, 201)) self.layoutWidget1.setObjectName("layoutWidget1") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1) self.verticalLayout_2.setContentsMargins(0, 0, 0, 0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.dgmSakla = QtWidgets.QPushButton(self.layoutWidget1) self.dgmSakla.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmSakla.setFont(font) self.dgmSakla.setObjectName("dgmSakla") self.verticalLayout_2.addWidget(self.dgmSakla) self.dgmGuncelle = QtWidgets.QPushButton(self.layoutWidget1) self.dgmGuncelle.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmGuncelle.setFont(font) self.dgmGuncelle.setObjectName("dgmGuncelle") self.verticalLayout_2.addWidget(self.dgmGuncelle) self.dgmSil = QtWidgets.QPushButton(self.layoutWidget1) self.dgmSil.setMinimumSize(QtCore.QSize(0, 50)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmSil.setFont(font) self.dgmSil.setObjectName("dgmSil") self.verticalLayout_2.addWidget(self.dgmSil) self.layoutWidget2 = QtWidgets.QWidget(self.centralwidget) self.layoutWidget2.setGeometry(QtCore.QRect(112, 240, 401, 42)) self.layoutWidget2.setObjectName("layoutWidget2") self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.layoutWidget2) self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.lneAra = QtWidgets.QLineEdit(self.layoutWidget2) self.lneAra.setMinimumSize(QtCore.QSize(250, 30)) self.lneAra.setMaximumSize(QtCore.QSize(16777215, 30)) self.lneAra.setObjectName("lneAra") self.horizontalLayout_5.addWidget(self.lneAra) spacerItem4 = QtWidgets.QSpacerItem(15, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_5.addItem(spacerItem4) self.dgmAra = 
QtWidgets.QPushButton(self.layoutWidget2) self.dgmAra.setMinimumSize(QtCore.QSize(50, 30)) self.dgmAra.setMaximumSize(QtCore.QSize(16777215, 30)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmAra.setFont(font) self.dgmAra.setObjectName("dgmAra") self.horizontalLayout_5.addWidget(self.dgmAra) self.dgmTemizle = QtWidgets.QPushButton(self.layoutWidget2) self.dgmTemizle.setMinimumSize(QtCore.QSize(50, 30)) self.dgmTemizle.setMaximumSize(QtCore.QSize(16777215, 30)) font = QtGui.QFont() font.setFamily("Bahnschrift") font.setPointSize(10) font.setBold(False) font.setWeight(50) self.dgmTemizle.setFont(font) self.dgmTemizle.setObjectName("dgmTemizle") self.horizontalLayout_5.addWidget(self.dgmTemizle) self.anaSayfaTablosu = QtWidgets.QTableWidget(self.centralwidget) self.anaSayfaTablosu.setGeometry(QtCore.QRect(30, 300, 601, 161)) self.anaSayfaTablosu.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents) self.anaSayfaTablosu.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) self.anaSayfaTablosu.setShowGrid(False) self.anaSayfaTablosu.setWordWrap(False) self.anaSayfaTablosu.setCornerButtonEnabled(True) self.anaSayfaTablosu.setRowCount(100) self.anaSayfaTablosu.setColumnCount(5) self.anaSayfaTablosu.setObjectName("anaSayfaTablosu") self.anaSayfaTablosu.horizontalHeader().setCascadingSectionResizes(False) self.anaSayfaTablosu.horizontalHeader().setHighlightSections(True) self.anaSayfaTablosu.horizontalHeader().setMinimumSectionSize(40) self.anaSayfaTablosu.horizontalHeader().setSortIndicatorShown(False) self.anaSayfaTablosu.horizontalHeader().setStretchLastSection(False) self.anaSayfaTablosu.verticalHeader().setVisible(False) AnaPencere.setCentralWidget(self.centralwidget) self.menuCubugu = QtWidgets.QMenuBar(AnaPencere) self.menuCubugu.setGeometry(QtCore.QRect(0, 0, 668, 23)) self.menuCubugu.setObjectName("menuCubugu") self.menuYardim = QtWidgets.QMenu(self.menuCubugu) self.menuYardim.setObjectName("menuYardim") self.menuHakkinda = QtWidgets.QMenu(self.menuYardim) self.menuHakkinda.setObjectName("menuHakkinda") AnaPencere.setMenuBar(self.menuCubugu) self.durumCubugu = QtWidgets.QStatusBar(AnaPencere) self.durumCubugu.setObjectName("durumCubugu") AnaPencere.setStatusBar(self.durumCubugu) self.actHcsYazilim = QtWidgets.QAction(AnaPencere) self.actHcsYazilim.setObjectName("actHcsYazilim") self.menuHakkinda.addAction(self.actHcsYazilim) self.menuYardim.addAction(self.menuHakkinda.menuAction()) self.menuCubugu.addAction(self.menuYardim.menuAction()) self.retranslateUi(AnaPencere) QtCore.QMetaObject.connectSlotsByName(AnaPencere) def retranslateUi(self, AnaPencere): _translate = QtCore.QCoreApplication.translate AnaPencere.setWindowTitle(_translate("AnaPencere", "Parola Yöneticisi")) self.grpKullaniciGiris.setTitle(_translate("AnaPencere", "Kullanıcı Giriş Bilgileri")) self.lblUygulama.setText(_translate("AnaPencere", "Uygulama:")) self.lneUygulama.setPlaceholderText(_translate("AnaPencere", "Kullandığınız uygulamanın adı")) self.lblKullanici.setText(_translate("AnaPencere", "Kullanıcı:")) self.lneKullanici.setPlaceholderText(_translate("AnaPencere", "Kullanıcı adınız veya e-postanız")) self.lblAcarga.setText(_translate("AnaPencere", "Açarga:")) self.lneAcarga.setPlaceholderText(_translate("AnaPencere", "Belirlediğiniz parolanız")) self.lblEkBilgi.setText(_translate("AnaPencere", "Ek Bilgi:")) self.lneEkBilgi.setPlaceholderText(_translate("AnaPencere", "Varsa ek bilgileriniz")) 
self.dgmTumu.setText(_translate("AnaPencere", "Tümünü Göster")) self.dgmCik.setText(_translate("AnaPencere", "Çık")) self.dgmSakla.setText(_translate("AnaPencere", "Sakla")) self.dgmGuncelle.setText(_translate("AnaPencere", "Güncelle")) self.dgmSil.setText(_translate("AnaPencere", "Sil")) self.lneAra.setPlaceholderText(_translate("AnaPencere", "Aratacağınız anahtar sözcük")) self.dgmAra.setText(_translate("AnaPencere", "Ara")) self.dgmTemizle.setText(_translate("AnaPencere", "Temizle")) self.menuYardim.setTitle(_translate("AnaPencere", "Yardım")) self.menuHakkinda.setTitle(_translate("AnaPencere", "Hakkında")) self.actHcsYazilim.setText(_translate("AnaPencere", "hçsYazılım"))
en
0.845419
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'payo_anasayfa.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. #000000;\n" #cfcfcf;\n" #363636;\n"
1.754706
2
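Files generated by pyuic5, like the Ui_AnaPencere class in the record above, are meant to be imported rather than edited. A minimal launch sketch following the standard PyQt5 pattern (the module name payo_anasayfa is assumed from the record's file path):

import sys
from PyQt5 import QtWidgets
from payo_anasayfa import Ui_AnaPencere  # module name assumed from the file path

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
ui = Ui_AnaPencere()
ui.setupUi(window)   # builds the widgets, stylesheet and menus onto the window
window.show()
sys.exit(app.exec_())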
ims3d.py
R-Laurent/detection
0
6621674
<gh_stars>0 #!/usr/bin/python3 import os import sys import argparse import logging import numpy as np import pymatgen # usage: ims3d.py -r 1 naphtalene.xyz import geometry.geometry import graph_theory.detect_cycle import grids.angular import grids.geode import interface.gaussian # Create logger logger = logging.getLogger('log') logger.setLevel(logging.DEBUG) # create console handler and set level to error ch = logging.StreamHandler() ch.setLevel(logging.ERROR) # create file handler and set level to info fh = logging.FileHandler("log_ims_prep_angular", mode="w") fh.setLevel(logging.INFO) # create formatter #formatter = logging.Formatter( # '%(asctime)s - %(name)s - %(levelname)s - %(message)s') formatter = logging.Formatter( '%(levelname)s - %(message)s') # add formatter to ch ch.setFormatter(formatter) fh.setFormatter(formatter) # add ch to logger logger.addHandler(ch) logger.addHandler(fh) def valtoRGB(values): """ Returns RGB colors for each value of values arg: values[:] """ min_val = np.min(values) max_val = np.max(values) rgb=[] for val in values: ratio = (val-min_val)/(max_val-min_val) if (ratio<0.5): R = 1 B = 1 - 2 * ratio G = B else: B = 1 R = 1 - ratio G = R rgb.append(np.asarray([R, G, B])) return rgb def generate_command_line(args): command_line = "python3 ims3d.py " for arg in vars(args): command_line = command_line + " {} {}".format(arg, getattr(args, arg)) return command_line #def readgeom(f): # """ Store a geometry from a file into the geom list """ # logger.debug("in readgeom") # fgeom = open(f, "r") # geom = [] # for line in fgeom.readlines(): # l = line.strip() # print(l) # geom.append(l) # logger.debug(l) # fgeom.close() # return geom def main(): # parser = argparse.ArgumentParser( description='Generate gaussian inputs for IMS calculations.') parser.add_argument( '-v', '--verbose', action='store_true', help='More info') parser.add_argument( '-d', '--debug', action='store_true', help='Debug info') parser.add_argument( '-r', '--radius', type=float, help="Set the radius to 1 angstrom" ) parser.add_argument( '-n', '--npts', type=int, help="Number of angular points by half circle. default: %(default)s", default=12) parser.add_argument( '--batch', '-b', type=int, help="Change the number of bq per batch. default: infinity", default=float('inf')) parser.add_argument( '--depth', type=int, help="Change the depth for geodesic grid generation: %(default)s", default=3) parser.add_argument( '-o', '--orient', action='store_true', help="Reorient the molecule along its principal symmetry axis", default=False) parser.add_argument( '-i', '--ignoreH', action='store_true', help="Ignore hydrogen atoms for the generation of the surface", default=False) parser.add_argument( '-p', '--preview', action='store_true', help="Preview the grid and the resulting surface", default=False) parser.add_argument( '-a', '--angular', action='store_true', help="Activate the deprecated angular grid", default=False) parser.add_argument( '-c', '--cycle-max-size', type=int, help='Auto detect cycles of max size: %(default)s', default=7) parser.add_argument( 'geomfile', type=str, help="Geometry file in xyz format. default: %(default)s", default="geom.xyz") args = parser.parse_args() for arg in vars(args): print("{:} ...
{:}".format(arg, getattr(args, arg))) if (args.debug): logger.setLevel(logging.DEBUG) fh.setLevel(logging.DEBUG) elif(args.verbose): logger.setLevel(logging.INFO) ignoreH = args.ignoreH preview = args.preview ntheta = args.npts orient = args.orient angular = args.angular depth = args.depth maxbq = args.batch cycle_max_size = args.cycle_max_size # # Read the geometry in the geom file # geomfile = args.geomfile geom = geometry.geometry.Geometry(geomfile, orient=orient) geomfile_atomsonly = geom.getgeomfilename_Atomsonly() cycles = [] molecularGraph = graph_theory.detect_cycle.MolecularGraph(geomfile_atomsonly) for c in molecularGraph.getCycles(): if len(c) <= cycle_max_size: cycles.append(list(c)) os.remove(geomfile_atomsonly) if (len(cycles)>0): for cycle in cycles: atomlist = [int(str(i).replace('a', '')) - 0 for i in cycle] barycenter = geom.getBarycenter(atomlist) print(atomlist) print(barycenter) geom.addPseudoAtom(barycenter) # # Generate the full command_line # command_line = generate_command_line(args) print(command_line) logger.info(command_line) grid=[] if angular: if args.radius: radius_all = args.radius r_grid = grids.angular.angular_grid(ignoreH = ignoreH, ntheta = ntheta, radius_all = radius_all) else: r_grid = grids.angular.angular_grid(ignoreH = ignoreH, ntheta = ntheta, radius_all = None) angular_grid, angular_grid_normals = grids.angular.generate_angular_grid(geom, r_grid, logger) grids.angular.writegrid(angular_grid, angular_grid_normals) grid = angular_grid else: if args.radius: radius_all = args.radius geodesic_grid = grids.geode.geodesic_grid(ignoreH = ignoreH, depth = depth, radius_all = radius_all) else: geodesic_grid = grids.geode.geodesic_grid(ignoreH = ignoreH, depth = depth, radius_all = None) grid = grids.geode.generate_geodesic_grid(geom, geodesic_grid, logger) print(len(grid)) grids.geode.writegrid(grid) interface.gaussian.generate_gaussianFile(geom, grid, logger, maxbq = maxbq) if preview==True: import open3d as o3d point_cloud = np.loadtxt("points_values.csv", delimiter=",", skiprows=1) # points_normals = np.loadtxt("normals.csv", delimiter=",", skiprows=1) pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(point_cloud[:,:3]) # pcd.normals = o3d.utility.Vector3dVector(points_normals[:,:3]) # point_rgb = valtoRGB(point_cloud[:,3]) # pcd.colors = o3d.utility.Vector3dVector(np.asarray(point_rgb)) o3d.visualization.draw_geometries([pcd]) poisson_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9)[0] poisson_mesh.compute_vertex_normals() o3d.visualization.draw_geometries([poisson_mesh]) o3d.io.write_triangle_mesh("./p_mesh_c.ply", poisson_mesh) if __name__ == "__main__": main()
#!/usr/bin/python3 import os import sys import argparse import logging import numpy as np import pymatgen # usage: ims3d.py -r 1 naphtalene.xyz import geometry.geometry import graph_theory.detect_cycle import grids.angular import grids.geode import interface.gaussian # Create logger logger = logging.getLogger('log') logger.setLevel(logging.DEBUG) # create console handler and set level to error ch = logging.StreamHandler() ch.setLevel(logging.ERROR) # create file handler and set level to info fh = logging.FileHandler("log_ims_prep_angular", mode="w") fh.setLevel(logging.INFO) # create formatter #formatter = logging.Formatter( # '%(asctime)s - %(name)s - %(levelname)s - %(message)s') formatter = logging.Formatter( '%(levelname)s - %(message)s') # add formatter to ch ch.setFormatter(formatter) fh.setFormatter(formatter) # add ch to logger logger.addHandler(ch) logger.addHandler(fh) def valtoRGB(values): """ Returns RGB colors for each value of values arg: values[:] """ min_val = np.min(values) max_val = np.max(values) rgb=[] for val in values: ratio = (val-min_val)/(max_val-min_val) if (ratio<0.5): R = 1 B = 1 - 2 * ratio G = B else: B = 1 R = 1 - ratio G = R rgb.append(np.asarray([R, G, B])) return rgb def generate_command_line(args): command_line = "python3 ims3d.py " for arg in vars(args): command_line = command_line + " {} {}".format(arg, getattr(args, arg)) return command_line #def readgeom(f): # """ Store a geometry from a file into the geom list """ # logger.debug("in readgeom") # fgeom = open(f, "r") # geom = [] # for line in fgeom.readlines(): # l = line.strip() # print(l) # geom.append(l) # logger.debug(l) # fgeom.close() # return geom def main(): # parser = argparse.ArgumentParser( description='Generate gaussian inputs for IMS calculations.') parser.add_argument( '-v', '--verbose', action='store_true', help='More info') parser.add_argument( '-d', '--debug', action='store_true', help='Debug info') parser.add_argument( '-r', '--radius', type=float, help="Set the radius to 1 angstrom" ) parser.add_argument( '-n', '--npts', type=int, help="Number of angular points by half circle. default: %(default)s", default=12) parser.add_argument( '--batch', '-b', type=int, help="Change the number of bq per batch. default: infinity", default=float('inf')) parser.add_argument( '--depth', type=int, help="Change the depth for geodesic grid generation: %(default)s", default=3) parser.add_argument( '-o', '--orient', action='store_true', help="Reorient the molecule along its principal symmetry axis", default=False) parser.add_argument( '-i', '--ignoreH', action='store_true', help="Ignore hydrogen atoms for the generation of the surface", default=False) parser.add_argument( '-p', '--preview', action='store_true', help="Preview the grid and the resulting surface", default=False) parser.add_argument( '-a', '--angular', action='store_true', help="Activate the deprecated angular grid", default=False) parser.add_argument( '-c', '--cycle-max-size', type=int, help='Auto detect cycles of max size: %(default)s', default=7) parser.add_argument( 'geomfile', type=str, help="Geometry file in xyz format. default: %(default)s", default="geom.xyz") args = parser.parse_args() for arg in vars(args): print("{:} ...
{:}".format(arg, getattr(args, arg))) if (args.debug): logger.setLevel(logging.DEBUG) fh.setLevel(logging.DEBUG) elif(args.verbose): logger.setLevel(logging.INFO) ignoreH = args.ignoreH preview = args.preview ntheta = args.npts orient = args.orient angular = args.angular depth = args.depth maxbq = args.batch cycle_max_size = args.cycle_max_size # # Read the geometry in the geom file # geomfile = args.geomfile geom = geometry.geometry.Geometry(geomfile, orient=orient) geomfile_atomsonly = geom.getgeomfilename_Atomsonly() cycles = [] molecularGraph = graph_theory.detect_cycle.MolecularGraph(geomfile_atomsonly) for c in molecularGraph.getCycles(): if len(c) <= cycle_max_size: cycles.append(list(c)) os.remove(geomfile_atomsonly) if (len(cycles)>0): for cycle in cycles: atomlist = [int(str(i).replace('a', '')) - 0 for i in cycle] barycenter = geom.getBarycenter(atomlist) print(atomlist) print(barycenter) geom.addPseudoAtom(barycenter) # # Generate the full command_line # command_line = generate_command_line(args) print(command_line) logger.info(command_line) grid=[] if angular: if args.radius: radius_all = args.radius r_grid = grids.angular.angular_grid(ignoreH = ignoreH, ntheta = ntheta, radius_all = radius_all) else: r_grid = grids.angular.angular_grid(ignoreH = ignoreH, ntheta = ntheta, radius_all = None) angular_grid, angular_grid_normals = grids.angular.generate_angular_grid(geom, r_grid, logger) grids.angular.writegrid(angular_grid, angular_grid_normals) grid = angular_grid else: if args.radius: radius_all = args.radius geodesic_grid = grids.geode.geodesic_grid(ignoreH = ignoreH, depth = depth, radius_all = radius_all) else: geodesic_grid = grids.geode.geodesic_grid(ignoreH = ignoreH, depth = depth, radius_all = None) grid = grids.geode.generate_geodesic_grid(geom, geodesic_grid, logger) print(len(grid)) grids.geode.writegrid(grid) interface.gaussian.generate_gaussianFile(geom, grid, logger, maxbq = maxbq) if preview==True: import open3d as o3d point_cloud = np.loadtxt("points_values.csv", delimiter=",", skiprows=1) # points_normals = np.loadtxt("normals.csv", delimiter=",", skiprows=1) pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(point_cloud[:,:3]) # pcd.normals = o3d.utility.Vector3dVector(points_normals[:,:3]) # point_rgb = valtoRGB(point_cloud[:,3]) # pcd.colors = o3d.utility.Vector3dVector(np.asarray(point_rgb)) o3d.visualization.draw_geometries([pcd]) poisson_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9)[0] poisson_mesh.compute_vertex_normals() o3d.visualization.draw_geometries([poisson_mesh]) o3d.io.write_triangle_mesh("./p_mesh_c.ply", poisson_mesh) if __name__ == "__main__": main()
en
0.473627
#!/usr/bin/python3 # usage: ims3d.py -r 1 naphtalene.xyz # Create logger # create console handler and set level to error # create file handler and set level to info # create formatter #formatter = logging.Formatter( # '%(asctime)s - %(name)s - %(levelname)s - %(message)s') # add formatter to ch # add ch to logger Returns RGB colors for each value of values arg: values[:] #def readgeom(f): # """ Store a geometry from a file into the geom list """ # logger.debug("in readgeom") # fgeom = open(f, "r") # geom = [] # for line in fgeom.readlines(): # l = line.strip() # print(l) # geom.append(l) # logger.debug(l) # fgeom.close() # return geom # # # Read the geometry in the geom file # # # Generate the full command_line # # points_normals = np.loadtxt("normals.csv", delimiter=",", skiprows=1) # pcd.normals = o3d.utility.Vector3dVector(points_normals[:,:3]) # point_rgb = valtoRGB(point_cloud[:,3]) # pcd.colors = o3d.utility.Vector3dVector(np.asarray(point_rgb))
2.327233
2
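valtoRGB in the record above min-max normalizes its input before mapping it to colors, so only the relative spread of the values matters. A tiny check of that behavior, assuming the function is in scope and using made-up sample values:

import numpy as np

for rgb in valtoRGB(np.array([0.0, 5.0, 10.0])):   # sample values are made up
    print(rgb)
# the minimum maps to white [1, 1, 1], the midpoint to [0.5, 0.5, 1],
# and the maximum to pure blue [0, 0, 1]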
release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewCellPaintingEventArgs.py
YKato521/ironpython-stubs
0
6621675
<reponame>YKato521/ironpython-stubs class DataGridViewCellPaintingEventArgs(HandledEventArgs): """ Provides data for the System.Windows.Forms.DataGridView.CellPainting event. DataGridViewCellPaintingEventArgs(dataGridView: DataGridView,graphics: Graphics,clipBounds: Rectangle,cellBounds: Rectangle,rowIndex: int,columnIndex: int,cellState: DataGridViewElementStates,value: object,formattedValue: object,errorText: str,cellStyle: DataGridViewCellStyle,advancedBorderStyle: DataGridViewAdvancedBorderStyle,paintParts: DataGridViewPaintParts) """ def Paint(self, clipBounds, paintParts): """ Paint(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle,paintParts: DataGridViewPaintParts) Paints the specified parts of the cell for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. paintParts: A bitwise combination of System.Windows.Forms.DataGridViewPaintParts values specifying the parts to paint. """ pass def PaintBackground(self, clipBounds, cellsPaintSelectionBackground): """ PaintBackground(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle,cellsPaintSelectionBackground: bool) Paints the cell background for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. cellsPaintSelectionBackground: true to paint the background of the specified bounds with the color of the System.Windows.Forms.DataGridViewCellStyle.SelectionBackColor property of the System.Windows.Forms.DataGridViewCell.InheritedStyle; false to paint the background of the specified bounds with the color of the System.Windows.Forms.DataGridViewCellStyle.BackColor property of the System.Windows.Forms.DataGridViewCell.InheritedStyle. """ pass def PaintContent(self, clipBounds): """ PaintContent(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle) Paints the cell content for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. """ pass @staticmethod def __new__( self, dataGridView, graphics, clipBounds, cellBounds, rowIndex, columnIndex, cellState, value, formattedValue, errorText, cellStyle, advancedBorderStyle, paintParts, ): """ __new__(cls: type,dataGridView: DataGridView,graphics: Graphics,clipBounds: Rectangle,cellBounds: Rectangle,rowIndex: int,columnIndex: int,cellState: DataGridViewElementStates,value: object,formattedValue: object,errorText: str,cellStyle: DataGridViewCellStyle,advancedBorderStyle: DataGridViewAdvancedBorderStyle,paintParts: DataGridViewPaintParts) """ pass AdvancedBorderStyle = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Gets the border style of the current System.Windows.Forms.DataGridViewCell. Get: AdvancedBorderStyle(self: DataGridViewCellPaintingEventArgs) -> DataGridViewAdvancedBorderStyle """ CellBounds = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Get the bounds of the current System.Windows.Forms.DataGridViewCell. Get: CellBounds(self: DataGridViewCellPaintingEventArgs) -> Rectangle """ CellStyle = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the cell style of the current System.Windows.Forms.DataGridViewCell. 
Get: CellStyle(self: DataGridViewCellPaintingEventArgs) -> DataGridViewCellStyle """ ClipBounds = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Gets the area of the System.Windows.Forms.DataGridView that needs to be repainted. Get: ClipBounds(self: DataGridViewCellPaintingEventArgs) -> Rectangle """ ColumnIndex = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Gets the column index of the current System.Windows.Forms.DataGridViewCell. Get: ColumnIndex(self: DataGridViewCellPaintingEventArgs) -> int """ ErrorText = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets a string that represents an error message for the current System.Windows.Forms.DataGridViewCell. Get: ErrorText(self: DataGridViewCellPaintingEventArgs) -> str """ FormattedValue = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Gets the formatted value of the current System.Windows.Forms.DataGridViewCell. Get: FormattedValue(self: DataGridViewCellPaintingEventArgs) -> object """ Graphics = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the System.Drawing.Graphics used to paint the current System.Windows.Forms.DataGridViewCell. Get: Graphics(self: DataGridViewCellPaintingEventArgs) -> Graphics """ PaintParts = property( lambda self: object(), lambda self, v: None, lambda self: None ) """The cell parts that are to be painted. Get: PaintParts(self: DataGridViewCellPaintingEventArgs) -> DataGridViewPaintParts """ RowIndex = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the row index of the current System.Windows.Forms.DataGridViewCell. Get: RowIndex(self: DataGridViewCellPaintingEventArgs) -> int """ State = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the state of the current System.Windows.Forms.DataGridViewCell. Get: State(self: DataGridViewCellPaintingEventArgs) -> DataGridViewElementStates """ Value = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the value of the current System.Windows.Forms.DataGridViewCell. Get: Value(self: DataGridViewCellPaintingEventArgs) -> object """
class DataGridViewCellPaintingEventArgs(HandledEventArgs): """ Provides data for the System.Windows.Forms.DataGridView.CellPainting event. DataGridViewCellPaintingEventArgs(dataGridView: DataGridView,graphics: Graphics,clipBounds: Rectangle,cellBounds: Rectangle,rowIndex: int,columnIndex: int,cellState: DataGridViewElementStates,value: object,formattedValue: object,errorText: str,cellStyle: DataGridViewCellStyle,advancedBorderStyle: DataGridViewAdvancedBorderStyle,paintParts: DataGridViewPaintParts) """ def Paint(self, clipBounds, paintParts): """ Paint(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle,paintParts: DataGridViewPaintParts) Paints the specified parts of the cell for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. paintParts: A bitwise combination of System.Windows.Forms.DataGridViewPaintParts values specifying the parts to paint. """ pass def PaintBackground(self, clipBounds, cellsPaintSelectionBackground): """ PaintBackground(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle,cellsPaintSelectionBackground: bool) Paints the cell background for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. cellsPaintSelectionBackground: true to paint the background of the specified bounds with the color of the System.Windows.Forms.DataGridViewCellStyle.SelectionBackColor property of the System.Windows.Forms.DataGridViewCell.InheritedStyle; false to paint the background of the specified bounds with the color of the System.Windows.Forms.DataGridViewCellStyle.BackColor property of the System.Windows.Forms.DataGridViewCell.InheritedStyle. """ pass def PaintContent(self, clipBounds): """ PaintContent(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle) Paints the cell content for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. """ pass @staticmethod def __new__( self, dataGridView, graphics, clipBounds, cellBounds, rowIndex, columnIndex, cellState, value, formattedValue, errorText, cellStyle, advancedBorderStyle, paintParts, ): """ __new__(cls: type,dataGridView: DataGridView,graphics: Graphics,clipBounds: Rectangle,cellBounds: Rectangle,rowIndex: int,columnIndex: int,cellState: DataGridViewElementStates,value: object,formattedValue: object,errorText: str,cellStyle: DataGridViewCellStyle,advancedBorderStyle: DataGridViewAdvancedBorderStyle,paintParts: DataGridViewPaintParts) """ pass AdvancedBorderStyle = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Gets the border style of the current System.Windows.Forms.DataGridViewCell. Get: AdvancedBorderStyle(self: DataGridViewCellPaintingEventArgs) -> DataGridViewAdvancedBorderStyle """ CellBounds = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Get the bounds of the current System.Windows.Forms.DataGridViewCell. Get: CellBounds(self: DataGridViewCellPaintingEventArgs) -> Rectangle """ CellStyle = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the cell style of the current System.Windows.Forms.DataGridViewCell. 
Get: CellStyle(self: DataGridViewCellPaintingEventArgs) -> DataGridViewCellStyle """ ClipBounds = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Gets the area of the System.Windows.Forms.DataGridView that needs to be repainted. Get: ClipBounds(self: DataGridViewCellPaintingEventArgs) -> Rectangle """ ColumnIndex = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Gets the column index of the current System.Windows.Forms.DataGridViewCell. Get: ColumnIndex(self: DataGridViewCellPaintingEventArgs) -> int """ ErrorText = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets a string that represents an error message for the current System.Windows.Forms.DataGridViewCell. Get: ErrorText(self: DataGridViewCellPaintingEventArgs) -> str """ FormattedValue = property( lambda self: object(), lambda self, v: None, lambda self: None ) """Gets the formatted value of the current System.Windows.Forms.DataGridViewCell. Get: FormattedValue(self: DataGridViewCellPaintingEventArgs) -> object """ Graphics = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the System.Drawing.Graphics used to paint the current System.Windows.Forms.DataGridViewCell. Get: Graphics(self: DataGridViewCellPaintingEventArgs) -> Graphics """ PaintParts = property( lambda self: object(), lambda self, v: None, lambda self: None ) """The cell parts that are to be painted. Get: PaintParts(self: DataGridViewCellPaintingEventArgs) -> DataGridViewPaintParts """ RowIndex = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the row index of the current System.Windows.Forms.DataGridViewCell. Get: RowIndex(self: DataGridViewCellPaintingEventArgs) -> int """ State = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the state of the current System.Windows.Forms.DataGridViewCell. Get: State(self: DataGridViewCellPaintingEventArgs) -> DataGridViewElementStates """ Value = property(lambda self: object(), lambda self, v: None, lambda self: None) """Gets the value of the current System.Windows.Forms.DataGridViewCell. Get: Value(self: DataGridViewCellPaintingEventArgs) -> object """
en
0.457443
Provides data for the System.Windows.Forms.DataGridView.CellPainting event. DataGridViewCellPaintingEventArgs(dataGridView: DataGridView,graphics: Graphics,clipBounds: Rectangle,cellBounds: Rectangle,rowIndex: int,columnIndex: int,cellState: DataGridViewElementStates,value: object,formattedValue: object,errorText: str,cellStyle: DataGridViewCellStyle,advancedBorderStyle: DataGridViewAdvancedBorderStyle,paintParts: DataGridViewPaintParts) Paint(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle,paintParts: DataGridViewPaintParts) Paints the specified parts of the cell for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. paintParts: A bitwise combination of System.Windows.Forms.DataGridViewPaintParts values specifying the parts to paint. PaintBackground(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle,cellsPaintSelectionBackground: bool) Paints the cell background for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. cellsPaintSelectionBackground: true to paint the background of the specified bounds with the color of the System.Windows.Forms.DataGridViewCellStyle.SelectionBackColor property of the System.Windows.Forms.DataGridViewCell.InheritedStyle; false to paint the background of the specified bounds with the color of the System.Windows.Forms.DataGridViewCellStyle.BackColor property of the System.Windows.Forms.DataGridViewCell.InheritedStyle. PaintContent(self: DataGridViewCellPaintingEventArgs,clipBounds: Rectangle) Paints the cell content for the area in the specified bounds. clipBounds: A System.Drawing.Rectangle that specifies the area of the System.Windows.Forms.DataGridView to be painted. __new__(cls: type,dataGridView: DataGridView,graphics: Graphics,clipBounds: Rectangle,cellBounds: Rectangle,rowIndex: int,columnIndex: int,cellState: DataGridViewElementStates,value: object,formattedValue: object,errorText: str,cellStyle: DataGridViewCellStyle,advancedBorderStyle: DataGridViewAdvancedBorderStyle,paintParts: DataGridViewPaintParts) Gets the border style of the current System.Windows.Forms.DataGridViewCell. Get: AdvancedBorderStyle(self: DataGridViewCellPaintingEventArgs) -> DataGridViewAdvancedBorderStyle Get the bounds of the current System.Windows.Forms.DataGridViewCell. Get: CellBounds(self: DataGridViewCellPaintingEventArgs) -> Rectangle Gets the cell style of the current System.Windows.Forms.DataGridViewCell. Get: CellStyle(self: DataGridViewCellPaintingEventArgs) -> DataGridViewCellStyle Gets the area of the System.Windows.Forms.DataGridView that needs to be repainted. Get: ClipBounds(self: DataGridViewCellPaintingEventArgs) -> Rectangle Gets the column index of the current System.Windows.Forms.DataGridViewCell. Get: ColumnIndex(self: DataGridViewCellPaintingEventArgs) -> int Gets a string that represents an error message for the current System.Windows.Forms.DataGridViewCell. Get: ErrorText(self: DataGridViewCellPaintingEventArgs) -> str Gets the formatted value of the current System.Windows.Forms.DataGridViewCell. Get: FormattedValue(self: DataGridViewCellPaintingEventArgs) -> object Gets the System.Drawing.Graphics used to paint the current System.Windows.Forms.DataGridViewCell. Get: Graphics(self: DataGridViewCellPaintingEventArgs) -> Graphics The cell parts that are to be painted. 
Get: PaintParts(self: DataGridViewCellPaintingEventArgs) -> DataGridViewPaintParts Gets the row index of the current System.Windows.Forms.DataGridViewCell. Get: RowIndex(self: DataGridViewCellPaintingEventArgs) -> int Gets the state of the current System.Windows.Forms.DataGridViewCell. Get: State(self: DataGridViewCellPaintingEventArgs) -> DataGridViewElementStates Gets the value of the current System.Windows.Forms.DataGridViewCell. Get: Value(self: DataGridViewCellPaintingEventArgs) -> object
2.394863
2
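The stub above only lists the event-args surface; a minimal IronPython sketch of how these members are typically consumed from a CellPainting handler follows. The `grid` object and the choice of painted parts are hypothetical illustrations, assuming a System.Windows.Forms.DataGridView already exists.

# Hypothetical CellPainting handler exercising the members stubbed above.
# `grid` is assumed to be an existing System.Windows.Forms.DataGridView.
import clr
clr.AddReference("System.Windows.Forms")
from System.Windows.Forms import DataGridViewPaintParts

def on_cell_painting(sender, e):
    if e.RowIndex >= 0 and e.ColumnIndex == 0:   # data cells of column 0 only
        e.PaintBackground(e.ClipBounds, True)    # selection-aware background
        e.Paint(e.ClipBounds, DataGridViewPaintParts.ContentForeground)
        e.Handled = True                         # tell the grid the cell is done

grid.CellPainting += on_cell_painting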
Language Proficiency/Python/Collections/Word Order/word_order.py
xuedong/hacker-rank
1
6621676
<reponame>xuedong/hacker-rank #!/usr/bin/env python3 from collections import OrderedDict if __name__ == "__main__": n = int(input()) d = OrderedDict() total = 0 for _ in range(n): word = input() if word in d: d[word] += 1 else: d[word] = 1 total += 1 print(total) print(*(d.values()))
#!/usr/bin/env python3 from collections import OrderedDict if __name__ == "__main__": n = int(input()) d = OrderedDict() total = 0 for _ in range(n): word = input() if word in d: d[word] += 1 else: d[word] = 1 total += 1 print(total) print(*(d.values()))
fr
0.221828
#!/usr/bin/env python3
3.740814
4
Helper-Scripts/RGB_to_YCbCr.py
wolfdale/NSFW-ImageScanner
0
6621677
<gh_stars>0
from PIL import Image

def main():
    img = Image.open('Input_Image.jpg')
    ycbcr_image = Image.new('RGB', img.size, 'black')
    ycbcr = convert_to_ycbcr(img)
    pix = ycbcr_image.load()
    for i in range(0, img.size[0]):
        for j in range(0, img.size[1]):
            pix[i, j] = tuple(map(int, ycbcr[i * img.size[1] + j]))
    ycbcr_image.save('Output_Image.jpg')

# Function to convert an RGB image to YCbCr (ITU-R BT.601 coefficients)
def convert_to_ycbcr(img):
    dummy = []
    x = img.size[0]
    y = img.size[1]
    for i in range(x):
        for j in range(y):
            r, g, b = img.getpixel((i, j))
            dummy.append((
                16 + (65.738 * r + 129.057 * g + 25.064 * b) / 256,
                128 + (-37.945 * r - 74.494 * g + 112.439 * b) / 256,
                128 + (112.439 * r - 94.154 * g - 18.285 * b) / 256
            ))
    return dummy

if __name__ == '__main__':
    print('Processing...')
    main()
    print('Done !')
from PIL import Image

def main():
    img = Image.open('Input_Image.jpg')
    ycbcr_image = Image.new('RGB', img.size, 'black')
    ycbcr = convert_to_ycbcr(img)
    pix = ycbcr_image.load()
    for i in range(0, img.size[0]):
        for j in range(0, img.size[1]):
            pix[i, j] = tuple(map(int, ycbcr[i * img.size[1] + j]))
    ycbcr_image.save('Output_Image.jpg')

# Function to convert an RGB image to YCbCr (ITU-R BT.601 coefficients)
def convert_to_ycbcr(img):
    dummy = []
    x = img.size[0]
    y = img.size[1]
    for i in range(x):
        for j in range(y):
            r, g, b = img.getpixel((i, j))
            dummy.append((
                16 + (65.738 * r + 129.057 * g + 25.064 * b) / 256,
                128 + (-37.945 * r - 74.494 * g + 112.439 * b) / 256,
                128 + (112.439 * r - 94.154 * g - 18.285 * b) / 256
            ))
    return dummy

if __name__ == '__main__':
    print('Processing...')
    main()
    print('Done !')
en
0.616815
# Function to convert an RGB image to YCbCr (ITU-R BT.601 coefficients)
3.294709
3
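For comparison, Pillow can perform this conversion natively; a short sketch follows. Pillow's built-in BT.601 matrix differs slightly from the hand-coded constants above, so outputs will not match bit-for-bit.

# Same effect via Pillow's built-in conversion: convert to YCbCr, then store
# the (Y, Cb, Cr) planes as plain RGB channels, as the script above does.
from PIL import Image

img = Image.open('Input_Image.jpg')
ycbcr = img.convert('YCbCr')
Image.merge('RGB', ycbcr.split()).save('Output_Image.jpg')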
ansible-devel/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py
satishcarya/ansible
0
6621678
<filename>ansible-devel/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.module_utils import basic from ansible.module_utils.basic import _load_params, AnsibleModule def do_echo(): p = _load_params() d = json.loads(basic._ANSIBLE_ARGS) d['ANSIBLE_MODULE_ARGS'] = {} basic._ANSIBLE_ARGS = json.dumps(d).encode('utf-8') module = AnsibleModule(argument_spec={}) module.exit_json(args_in=p)
<filename>ansible-devel/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.module_utils import basic from ansible.module_utils.basic import _load_params, AnsibleModule def do_echo(): p = _load_params() d = json.loads(basic._ANSIBLE_ARGS) d['ANSIBLE_MODULE_ARGS'] = {} basic._ANSIBLE_ARGS = json.dumps(d).encode('utf-8') module = AnsibleModule(argument_spec={}) module.exit_json(args_in=p)
none
1
1.598118
2
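A minimal sketch of a module that consumes this helper; the module filename is hypothetical, but the import path follows the collection layout shown in the record's path, and the pattern (import the shared implementation, call do_echo()) is how the module_utils above is meant to be used.

#!/usr/bin/python
# Hypothetical module delegating to the shared echo implementation above.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible_collections.testns.testcoll.plugins.module_utils.echo_impl import do_echo


def main():
    do_echo()


if __name__ == '__main__':
    main()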
dedo/utils/init_utils.py
console-beaver/dedo
23
6621679
""" Utilities for deform sim in PyBullet. Note: this code is for research i.e. quick experimentation; it has minimal comments for now, but if we see further interest from the community -- we will add further comments, unify the style, improve efficiency and add unittests. @contactrika """ from pathlib import Path # automatically converts forward slashes if needed import numpy as np import pybullet import pybullet_data from .mesh_utils import get_mesh_data def get_preset_properties(object_preset_dict, deform_obj_name, key): if object_preset_dict is None or \ deform_obj_name not in object_preset_dict.keys(): return None if key in object_preset_dict[deform_obj_name].keys(): return object_preset_dict[deform_obj_name][key] def load_rigid_object(sim, obj_file_name, scale, init_pos, init_ori, mass=0.0, texture_file=None, rgba_color=None): """Load a rigid object from file, create visual and collision shapes.""" if obj_file_name.endswith('.obj'): # mesh info xyz_scale = [scale, scale, scale] viz_shape_id = sim.createVisualShape( shapeType=pybullet.GEOM_MESH, rgbaColor=rgba_color, fileName=obj_file_name, meshScale=xyz_scale) col_shape_id = sim.createCollisionShape( shapeType=pybullet.GEOM_MESH, fileName=obj_file_name, meshScale=xyz_scale) rigid_id = sim.createMultiBody( baseMass=mass, # mass==0 => fixed at position where it is loaded basePosition=init_pos, baseCollisionShapeIndex=col_shape_id, baseVisualShapeIndex=viz_shape_id, baseOrientation=pybullet.getQuaternionFromEuler(init_ori)) elif obj_file_name.endswith('.urdf'): # URDF file rigid_id = sim.loadURDF( obj_file_name, init_pos, pybullet.getQuaternionFromEuler(init_ori), useFixedBase=True if mass <= 0 else False, globalScaling=scale) else: print('Unknown file extension', obj_file_name) assert(False), 'load_rigid_object supports only obj and URDF files' sim.changeDynamics(rigid_id, -1, mass, lateralFriction=1.0, spinningFriction=1.0, rollingFriction=1.0, restitution=0.0) n_jt = sim.getNumJoints(rigid_id) if texture_file is not None: texture_id = sim.loadTexture(texture_file) kwargs = {} if hasattr(pybullet, 'VISUAL_SHAPE_DOUBLE_SIDED'): kwargs['flags'] = pybullet.VISUAL_SHAPE_DOUBLE_SIDED if obj_file_name.endswith('figure_headless.urdf'): sim.changeVisualShape( # only changing the body of the figure rigid_id, 0, rgbaColor=[1, 1, 1, 1], textureUniqueId=texture_id, **kwargs) else: for i in range(-1, n_jt): sim.changeVisualShape( rigid_id, i, rgbaColor=[1,1,1,1], textureUniqueId=texture_id, **kwargs) return rigid_id def load_deform_object(sim, obj_file_name, texture_file_name, scale, init_pos, init_ori, bending_stiffness, damping_stiffness, elastic_stiffness, friction_coeff, self_collision, debug): """Load object from obj file with pybullet's loadSoftBody().""" if debug: print('Loading filename', obj_file_name) # Note: do not set very small mass (e.g. 0.01 causes instabilities). 
deform_id = sim.loadSoftBody( mass=1, # 1kg is default; bad sim with lower mass fileName=str(Path(obj_file_name)), scale=scale, basePosition=init_pos, baseOrientation=pybullet.getQuaternionFromEuler(init_ori), springElasticStiffness=elastic_stiffness, springDampingStiffness=damping_stiffness, springBendingStiffness=bending_stiffness, frictionCoeff=friction_coeff, # collisionMargin=0.003, # how far apart do two objects begin interacting useSelfCollision=self_collision, springDampingAllDirections=1, useFaceContact=True, useNeoHookean=0, useMassSpring=True, useBendingSprings=True, # repulsionStiffness=10000000, ) # PyBullet examples for loading and anchoring deformables: # github.com/bulletphysics/bullet3/examples/pybullet/examples/deformable_anchor.py sim.setPhysicsEngineParameter(sparseSdfVoxelSize=0.25) texture_id = sim.loadTexture(str(Path(texture_file_name))) kwargs = {} if hasattr(pybullet, 'VISUAL_SHAPE_DOUBLE_SIDED'): kwargs['flags'] = pybullet.VISUAL_SHAPE_DOUBLE_SIDED sim.changeVisualShape( deform_id, -1, rgbaColor=[1,1,1,1], textureUniqueId=texture_id, **kwargs) num_mesh_vertices = get_mesh_data(sim, deform_id)[0] if debug: print('Loaded deform_id', deform_id, 'with', num_mesh_vertices, 'mesh vertices', 'init_pos', init_pos) # Pybullet will struggle with very large meshes, so we should keep mesh # sizes to a limited number of vertices and faces. # Large meshes will load on Linux/Ubuntu, but sim will run too slowly. # Meshes with >2^13=8196 vertices will fail to load on OS X due to shared # memory limits, as noted here: # github.com/bulletphysics/bullet3/issues/1965 assert(num_mesh_vertices < 2**13) # make sure mesh has less than ~8K verts return deform_id def reset_bullet(args, sim, plane_texture=None, debug=False): """Reset/initialize pybullet simulation.""" dist, pitch, yaw, pos_x, pos_y, pos_z = args.cam_viewmat cam_args = { 'cameraDistance': dist, 'cameraPitch': pitch, 'cameraYaw': yaw, 'cameraTargetPosition': np.array([pos_x, pos_y, pos_z]) } if args.viz: pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_GUI, False) sim.resetDebugVisualizerCamera(**cam_args) if debug: res = sim.getDebugVisualizerCamera() print('Camera info for', cam_args) print('viewMatrix', res[2]) print('projectionMatrix', res[3]) sim.resetSimulation(pybullet.RESET_USE_DEFORMABLE_WORLD) # FEM deform sim sim.setGravity(0, 0, args.sim_gravity) sim.setTimeStep(1.0/args.sim_freq) # Could experiment with physic engine parameters, but so far we have not # noticed a stark improvement from changing these. # sim.setPhysicsEngineParameter(numSubSteps=10, allowedCcdPenetration=0.01) # # Load floor plane and rigid objects # sim.setAdditionalSearchPath(pybullet_data.getDataPath()) floor_id = sim.loadURDF('plane.urdf') if plane_texture is not None: if debug: print('texture file', plane_texture) texture_id = sim.loadTexture(plane_texture) sim.changeVisualShape( floor_id, -1, rgbaColor=[1,1,1,1], textureUniqueId=texture_id, ) assert(floor_id == 0) # camera assumes floor/ground is loaded first return sim
""" Utilities for deform sim in PyBullet. Note: this code is for research i.e. quick experimentation; it has minimal comments for now, but if we see further interest from the community -- we will add further comments, unify the style, improve efficiency and add unittests. @contactrika """ from pathlib import Path # automatically converts forward slashes if needed import numpy as np import pybullet import pybullet_data from .mesh_utils import get_mesh_data def get_preset_properties(object_preset_dict, deform_obj_name, key): if object_preset_dict is None or \ deform_obj_name not in object_preset_dict.keys(): return None if key in object_preset_dict[deform_obj_name].keys(): return object_preset_dict[deform_obj_name][key] def load_rigid_object(sim, obj_file_name, scale, init_pos, init_ori, mass=0.0, texture_file=None, rgba_color=None): """Load a rigid object from file, create visual and collision shapes.""" if obj_file_name.endswith('.obj'): # mesh info xyz_scale = [scale, scale, scale] viz_shape_id = sim.createVisualShape( shapeType=pybullet.GEOM_MESH, rgbaColor=rgba_color, fileName=obj_file_name, meshScale=xyz_scale) col_shape_id = sim.createCollisionShape( shapeType=pybullet.GEOM_MESH, fileName=obj_file_name, meshScale=xyz_scale) rigid_id = sim.createMultiBody( baseMass=mass, # mass==0 => fixed at position where it is loaded basePosition=init_pos, baseCollisionShapeIndex=col_shape_id, baseVisualShapeIndex=viz_shape_id, baseOrientation=pybullet.getQuaternionFromEuler(init_ori)) elif obj_file_name.endswith('.urdf'): # URDF file rigid_id = sim.loadURDF( obj_file_name, init_pos, pybullet.getQuaternionFromEuler(init_ori), useFixedBase=True if mass <= 0 else False, globalScaling=scale) else: print('Unknown file extension', obj_file_name) assert(False), 'load_rigid_object supports only obj and URDF files' sim.changeDynamics(rigid_id, -1, mass, lateralFriction=1.0, spinningFriction=1.0, rollingFriction=1.0, restitution=0.0) n_jt = sim.getNumJoints(rigid_id) if texture_file is not None: texture_id = sim.loadTexture(texture_file) kwargs = {} if hasattr(pybullet, 'VISUAL_SHAPE_DOUBLE_SIDED'): kwargs['flags'] = pybullet.VISUAL_SHAPE_DOUBLE_SIDED if obj_file_name.endswith('figure_headless.urdf'): sim.changeVisualShape( # only changing the body of the figure rigid_id, 0, rgbaColor=[1, 1, 1, 1], textureUniqueId=texture_id, **kwargs) else: for i in range(-1, n_jt): sim.changeVisualShape( rigid_id, i, rgbaColor=[1,1,1,1], textureUniqueId=texture_id, **kwargs) return rigid_id def load_deform_object(sim, obj_file_name, texture_file_name, scale, init_pos, init_ori, bending_stiffness, damping_stiffness, elastic_stiffness, friction_coeff, self_collision, debug): """Load object from obj file with pybullet's loadSoftBody().""" if debug: print('Loading filename', obj_file_name) # Note: do not set very small mass (e.g. 0.01 causes instabilities). 
deform_id = sim.loadSoftBody( mass=1, # 1kg is default; bad sim with lower mass fileName=str(Path(obj_file_name)), scale=scale, basePosition=init_pos, baseOrientation=pybullet.getQuaternionFromEuler(init_ori), springElasticStiffness=elastic_stiffness, springDampingStiffness=damping_stiffness, springBendingStiffness=bending_stiffness, frictionCoeff=friction_coeff, # collisionMargin=0.003, # how far apart do two objects begin interacting useSelfCollision=self_collision, springDampingAllDirections=1, useFaceContact=True, useNeoHookean=0, useMassSpring=True, useBendingSprings=True, # repulsionStiffness=10000000, ) # PyBullet examples for loading and anchoring deformables: # github.com/bulletphysics/bullet3/examples/pybullet/examples/deformable_anchor.py sim.setPhysicsEngineParameter(sparseSdfVoxelSize=0.25) texture_id = sim.loadTexture(str(Path(texture_file_name))) kwargs = {} if hasattr(pybullet, 'VISUAL_SHAPE_DOUBLE_SIDED'): kwargs['flags'] = pybullet.VISUAL_SHAPE_DOUBLE_SIDED sim.changeVisualShape( deform_id, -1, rgbaColor=[1,1,1,1], textureUniqueId=texture_id, **kwargs) num_mesh_vertices = get_mesh_data(sim, deform_id)[0] if debug: print('Loaded deform_id', deform_id, 'with', num_mesh_vertices, 'mesh vertices', 'init_pos', init_pos) # Pybullet will struggle with very large meshes, so we should keep mesh # sizes to a limited number of vertices and faces. # Large meshes will load on Linux/Ubuntu, but sim will run too slowly. # Meshes with >2^13=8196 vertices will fail to load on OS X due to shared # memory limits, as noted here: # github.com/bulletphysics/bullet3/issues/1965 assert(num_mesh_vertices < 2**13) # make sure mesh has less than ~8K verts return deform_id def reset_bullet(args, sim, plane_texture=None, debug=False): """Reset/initialize pybullet simulation.""" dist, pitch, yaw, pos_x, pos_y, pos_z = args.cam_viewmat cam_args = { 'cameraDistance': dist, 'cameraPitch': pitch, 'cameraYaw': yaw, 'cameraTargetPosition': np.array([pos_x, pos_y, pos_z]) } if args.viz: pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_GUI, False) sim.resetDebugVisualizerCamera(**cam_args) if debug: res = sim.getDebugVisualizerCamera() print('Camera info for', cam_args) print('viewMatrix', res[2]) print('projectionMatrix', res[3]) sim.resetSimulation(pybullet.RESET_USE_DEFORMABLE_WORLD) # FEM deform sim sim.setGravity(0, 0, args.sim_gravity) sim.setTimeStep(1.0/args.sim_freq) # Could experiment with physic engine parameters, but so far we have not # noticed a stark improvement from changing these. # sim.setPhysicsEngineParameter(numSubSteps=10, allowedCcdPenetration=0.01) # # Load floor plane and rigid objects # sim.setAdditionalSearchPath(pybullet_data.getDataPath()) floor_id = sim.loadURDF('plane.urdf') if plane_texture is not None: if debug: print('texture file', plane_texture) texture_id = sim.loadTexture(plane_texture) sim.changeVisualShape( floor_id, -1, rgbaColor=[1,1,1,1], textureUniqueId=texture_id, ) assert(floor_id == 0) # camera assumes floor/ground is loaded first return sim
en
0.822753
Utilities for deform sim in PyBullet. Note: this code is for research i.e. quick experimentation; it has minimal comments for now, but if we see further interest from the community -- we will add further comments, unify the style, improve efficiency and add unittests. @contactrika # automatically converts forward slashes if needed Load a rigid object from file, create visual and collision shapes. # mesh info # mass==0 => fixed at position where it is loaded # URDF file # only changing the body of the figure Load object from obj file with pybullet's loadSoftBody(). # Note: do not set very small mass (e.g. 0.01 causes instabilities). # 1kg is default; bad sim with lower mass # collisionMargin=0.003, # how far apart do two objects begin interacting # repulsionStiffness=10000000, # PyBullet examples for loading and anchoring deformables: # github.com/bulletphysics/bullet3/examples/pybullet/examples/deformable_anchor.py # Pybullet will struggle with very large meshes, so we should keep mesh # sizes to a limited number of vertices and faces. # Large meshes will load on Linux/Ubuntu, but sim will run too slowly. # Meshes with >2^13=8196 vertices will fail to load on OS X due to shared # memory limits, as noted here: # github.com/bulletphysics/bullet3/issues/1965 # make sure mesh has less than ~8K verts Reset/initialize pybullet simulation. # FEM deform sim # Could experiment with physic engine parameters, but so far we have not # noticed a stark improvement from changing these. # sim.setPhysicsEngineParameter(numSubSteps=10, allowedCcdPenetration=0.01) # # Load floor plane and rigid objects # # camera assumes floor/ground is loaded first
2.444787
2
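A minimal headless driver for the loaders above; the mesh and texture paths and the stiffness values are placeholders rather than DEDO benchmark settings, and the import assumes the package layout in the record's path.

# Headless smoke test for reset_bullet() and load_deform_object() above.
# File paths and physics parameters are illustrative placeholders.
import argparse
import pybullet
import pybullet_utils.bullet_client as bclient
from dedo.utils.init_utils import reset_bullet, load_deform_object

args = argparse.Namespace(cam_viewmat=[3.0, -30, 90, 0, 0, 0],
                          viz=False, sim_gravity=-9.8, sim_freq=240)
sim = bclient.BulletClient(connection_mode=pybullet.DIRECT)
reset_bullet(args, sim)
deform_id = load_deform_object(
    sim, 'cloth.obj', 'cloth_texture.png', scale=1.0,
    init_pos=[0, 0, 1], init_ori=[0, 0, 0],
    bending_stiffness=1.0, damping_stiffness=0.1, elastic_stiffness=50.0,
    friction_coeff=0.5, self_collision=False, debug=True)
for _ in range(100):
    sim.stepSimulation()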
sample/tests/start_tests.py
yaras/pythontemplate
0
6621680
<gh_stars>0
import unittest

from sample import start


class StartTests(unittest.TestCase):

    def test_sayHello(self):
        self.assertEqual('hello', start.sayHello())

if __name__ == '__main__':
    unittest.main()
import unittest

from sample import start


class StartTests(unittest.TestCase):

    def test_sayHello(self):
        self.assertEqual('hello', start.sayHello())

if __name__ == '__main__':
    unittest.main()
none
1
2.735509
3
update-gh-pages.py
nicholasbishop/dumbmath-rs
2
6621681
#!/usr/bin/env python3 import os import shutil import subprocess import tempfile REPO = '<EMAIL>:nicholasbishop/dumbmath-rs.git' BRANCH = 'gh-pages' COMMIT_MSG = 'Automatic-ish rustdoc update' def run_cmd(cmd, cwd=None): print(' '.join(cmd)) subprocess.check_call(cmd, cwd=cwd) def main(): run_cmd(['cargo', 'doc']) with tempfile.TemporaryDirectory(prefix='update-gh-pages-') as tmp_dir: run_cmd(['git', 'clone', REPO, tmp_dir, '--branch', BRANCH]) dst_doc_dir = os.path.join(tmp_dir, 'doc') print('rm -r', dst_doc_dir) shutil.rmtree(dst_doc_dir) print('cp -r', 'target/doc', dst_doc_dir) shutil.copytree('target/doc', dst_doc_dir) run_cmd(['git', 'add', 'doc'], cwd=tmp_dir) run_cmd(['git', 'commit', 'doc', '-m', COMMIT_MSG], cwd=tmp_dir) run_cmd(['git', 'push'], cwd=tmp_dir) if __name__ == '__main__': main()
#!/usr/bin/env python3 import os import shutil import subprocess import tempfile REPO = '<EMAIL>:nicholasbishop/dumbmath-rs.git' BRANCH = 'gh-pages' COMMIT_MSG = 'Automatic-ish rustdoc update' def run_cmd(cmd, cwd=None): print(' '.join(cmd)) subprocess.check_call(cmd, cwd=cwd) def main(): run_cmd(['cargo', 'doc']) with tempfile.TemporaryDirectory(prefix='update-gh-pages-') as tmp_dir: run_cmd(['git', 'clone', REPO, tmp_dir, '--branch', BRANCH]) dst_doc_dir = os.path.join(tmp_dir, 'doc') print('rm -r', dst_doc_dir) shutil.rmtree(dst_doc_dir) print('cp -r', 'target/doc', dst_doc_dir) shutil.copytree('target/doc', dst_doc_dir) run_cmd(['git', 'add', 'doc'], cwd=tmp_dir) run_cmd(['git', 'commit', 'doc', '-m', COMMIT_MSG], cwd=tmp_dir) run_cmd(['git', 'push'], cwd=tmp_dir) if __name__ == '__main__': main()
fr
0.221828
#!/usr/bin/env python3
2.135167
2
segment/ml/models/three_dimensional/small_unet.py
yngtodd/segment
4
6621682
import torch from torch import nn import torch.nn.functional as F class DoubleBlock(nn.Module): '''(conv => BN => ReLU) * 2''' def __init__(self, in_ch, out_ch): super(DoubleBlock, self).__init__() self.conv = nn.Sequential( nn.Conv3d(in_ch, out_ch, 3, padding=1), nn.BatchNorm3d(out_ch), nn.ReLU(inplace=True), nn.Conv3d(out_ch, out_ch, 3, padding=1), nn.BatchNorm3d(out_ch), nn.ReLU(inplace=True) ) def forward(self, x): return self.conv(x) class InConv(nn.Module): def __init__(self, in_ch, out_ch): super(InConv, self).__init__() self.conv = DoubleBlock(in_ch, out_ch) def forward(self, x): return self.conv(x) class Down(nn.Module): def __init__(self, in_ch, out_ch): super(Down, self).__init__() self.pool = nn.MaxPool3d(2, stride=1, return_indices=True) self.block = DoubleBlock(in_ch, out_ch) def forward(self, x): x, indices = self.pool(x) x = self.block(x) return x, indices class Up(nn.Module): def __init__(self, in_ch, out_ch): super(Up, self).__init__() self.unpool = nn.MaxUnpool3d(2, stride=1) self.block = DoubleBlock(in_ch, out_ch) def forward(self, x, indices): x = self.unpool(x, indices) x = self.block(x) return x class OutConv(nn.Module): def __init__(self, in_ch, out_ch): super(OutConv, self).__init__() self.conv = nn.Conv3d(in_ch, out_ch, 1) def forward(self, x): return self.conv(x) class UNet3D(nn.Module): def __init__(self, n_channels, n_classes): super(UNet3D, self).__init__() self.inconv = InConv(n_channels, 2) self.down1 = Down(2, 4) self.down2 = Down(4, 8) self.down3 = Down(8, 16) self.down4 = Down(16, 32) self.up1 = Up(32, 16) self.up2 = Up(16, 8) self.up3 = Up(8, 4) self.up4 = Up(4, 2) self.outconv = OutConv(2, n_classes) def forward(self, x): x1 = self.inconv(x) x2, indices2 = self.down1(x1) x3, indices3 = self.down2(x2) x4, indices4 = self.down3(x3) x5, indices5 = self.down4(x4) x = self.up1(x5, indices5) x = self.up2(x, indices4) x = self.up3(x, indices3) x = self.up4(x, indices2) x = self.outconv(x) x = torch.sigmoid(x) return x
import torch from torch import nn import torch.nn.functional as F class DoubleBlock(nn.Module): '''(conv => BN => ReLU) * 2''' def __init__(self, in_ch, out_ch): super(DoubleBlock, self).__init__() self.conv = nn.Sequential( nn.Conv3d(in_ch, out_ch, 3, padding=1), nn.BatchNorm3d(out_ch), nn.ReLU(inplace=True), nn.Conv3d(out_ch, out_ch, 3, padding=1), nn.BatchNorm3d(out_ch), nn.ReLU(inplace=True) ) def forward(self, x): return self.conv(x) class InConv(nn.Module): def __init__(self, in_ch, out_ch): super(InConv, self).__init__() self.conv = DoubleBlock(in_ch, out_ch) def forward(self, x): return self.conv(x) class Down(nn.Module): def __init__(self, in_ch, out_ch): super(Down, self).__init__() self.pool = nn.MaxPool3d(2, stride=1, return_indices=True) self.block = DoubleBlock(in_ch, out_ch) def forward(self, x): x, indices = self.pool(x) x = self.block(x) return x, indices class Up(nn.Module): def __init__(self, in_ch, out_ch): super(Up, self).__init__() self.unpool = nn.MaxUnpool3d(2, stride=1) self.block = DoubleBlock(in_ch, out_ch) def forward(self, x, indices): x = self.unpool(x, indices) x = self.block(x) return x class OutConv(nn.Module): def __init__(self, in_ch, out_ch): super(OutConv, self).__init__() self.conv = nn.Conv3d(in_ch, out_ch, 1) def forward(self, x): return self.conv(x) class UNet3D(nn.Module): def __init__(self, n_channels, n_classes): super(UNet3D, self).__init__() self.inconv = InConv(n_channels, 2) self.down1 = Down(2, 4) self.down2 = Down(4, 8) self.down3 = Down(8, 16) self.down4 = Down(16, 32) self.up1 = Up(32, 16) self.up2 = Up(16, 8) self.up3 = Up(8, 4) self.up4 = Up(4, 2) self.outconv = OutConv(2, n_classes) def forward(self, x): x1 = self.inconv(x) x2, indices2 = self.down1(x1) x3, indices3 = self.down2(x2) x4, indices4 = self.down3(x3) x5, indices5 = self.down4(x4) x = self.up1(x5, indices5) x = self.up2(x, indices4) x = self.up3(x, indices3) x = self.up4(x, indices2) x = self.outconv(x) x = torch.sigmoid(x) return x
ko
0.250293
(conv => BN => ReLU) * 2
3.0752
3
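A quick shape sanity check for the model above: because every Down uses MaxPool3d(2, stride=1), shrinking each spatial dimension by one, and every Up mirrors it with MaxUnpool3d, a forward pass returns the input's spatial size. The input size below is arbitrary.

# Shape round-trip check for UNet3D above (any input size works as long as
# each spatial dim survives four size-1 reductions).
import torch

model = UNet3D(n_channels=1, n_classes=1)
x = torch.randn(2, 1, 16, 16, 16)   # (batch, channels, D, H, W)
with torch.no_grad():
    y = model(x)
print(y.shape)                      # torch.Size([2, 1, 16, 16, 16])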
control/src/path_planner.py
mnemonia/transprotobot
0
6621683
<filename>control/src/path_planner.py import logging import numpy as np from sensor_interface_layer import GpsMath from stanley_controller import State, StandleyController class PathPlanner(): LOG = logging.getLogger('PathPlanner') def __init__(self, gsl, sil, vil, tc, sc): self._gsl = gsl self._sil = sil self._vil = vil self._tc = tc self._sc = sc self._lastTimestamp = 0.0 self._toggle = False self.gpsMath = GpsMath() self._state = None self._controller = None def on(self): self.LOG.info('on') self._sc.angle(0) self._tc.stop() self._tc.velocity(0) self._tc.fwd() self._tc.velocity(0.0) self._sc.angle(0.8) # Initial state pt = self.gpsMath.locationToPoint(self._sil.gps()._latitude, self._sil.gps()._longitude, self._sil.gps()._altitude) self._state = State(x=pt["x"], y=pt["y"], yaw=np.radians(self._sil.gps()._heading), v=0.0) self._controller = StandleyController(self._state) def tick(self): self.LOG.info('tick {}'.format(self._controller.isFinished())) if self._controller.isFinished(): self._tc.stop() else: pt = self.gpsMath.locationToPoint(self._sil.gps()._latitude, self._sil.gps()._longitude, self._sil.gps()._altitude) self._state.x = pt["x"] self._state.y = pt["y"] self._state.yaw = np.deg2rad(self._sil.gps()._heading) self._controller.tick() self._tc.velocity(self._state.v) self._sc.angle(self._state.yaw)
<filename>control/src/path_planner.py import logging import numpy as np from sensor_interface_layer import GpsMath from stanley_controller import State, StandleyController class PathPlanner(): LOG = logging.getLogger('PathPlanner') def __init__(self, gsl, sil, vil, tc, sc): self._gsl = gsl self._sil = sil self._vil = vil self._tc = tc self._sc = sc self._lastTimestamp = 0.0 self._toggle = False self.gpsMath = GpsMath() self._state = None self._controller = None def on(self): self.LOG.info('on') self._sc.angle(0) self._tc.stop() self._tc.velocity(0) self._tc.fwd() self._tc.velocity(0.0) self._sc.angle(0.8) # Initial state pt = self.gpsMath.locationToPoint(self._sil.gps()._latitude, self._sil.gps()._longitude, self._sil.gps()._altitude) self._state = State(x=pt["x"], y=pt["y"], yaw=np.radians(self._sil.gps()._heading), v=0.0) self._controller = StandleyController(self._state) def tick(self): self.LOG.info('tick {}'.format(self._controller.isFinished())) if self._controller.isFinished(): self._tc.stop() else: pt = self.gpsMath.locationToPoint(self._sil.gps()._latitude, self._sil.gps()._longitude, self._sil.gps()._altitude) self._state.x = pt["x"] self._state.y = pt["y"] self._state.yaw = np.deg2rad(self._sil.gps()._heading) self._controller.tick() self._tc.velocity(self._state.v) self._sc.angle(self._state.yaw)
en
0.81879
# Initial state
2.811194
3
mc/db/schema.py
aspuru-guzik-group/mission_control
3
6621684
from . import models # noqa from .base import Base metadata = Base.metadata
from . import models # noqa from .base import Base metadata = Base.metadata
none
1
1.046662
1
create_level2_dataset.py
danilobenozzo/supervised_causality_detection
2
6621685
import numpy as np import pickle from scipy.io import loadmat from statsmodels.stats.stattools import durbin_watson, omni_normtest, jarque_bera from sklearn.linear_model import LinearRegression, BayesianRidge from sklearn.svm import SVR from sklearn.cross_validation import cross_val_score, KFold, cross_val_predict from sklearn.metrics import r2_score, mean_squared_error from sys import stdout from joblib import Parallel, delayed import nitime.analysis as nta from nitime.timeseries import TimeSeries #from load_challenge_data import load_challenge_data causality_structures = [((0,),0), ((0,),1), ((0,),2), ((1,),0), ((1,),1), ((1,),2), ((2,),0), ((2,),1), ((2,),2), ((0,1),0), ((0,1),1), ((0,1),2), ((0,2),0), ((0,2),1), ((0,2),2), ((1,2),0), ((1,2),1), ((1,2),2), ((0,1,2),0), ((0,1,2),1), ((0,1,2),2)] def regression_scores(timeseries, time_window_size, time_lag, reg, cv, scoring, timeseriesZ=None): """Compute regression scores for a given set of 3 timeseries according to the causality structures. """ global causality_structures if scoring == 'residual_tests': features_regression = np.zeros([len(causality_structures),7]) else: features_regression = np.zeros([len(causality_structures),2]) #added 2 dimensions to compute r2 and mse for j, (cs_train, cs_test) in enumerate(causality_structures): ts_train = timeseries[:,cs_train] if not(timeseriesZ is None): ts_train = np.hstack([ts_train, timeseriesZ]) if time_lag is None: time_lag=time_window_size ts_test = timeseries[:,cs_test] tmp_score = np.zeros([time_window_size,2]) #added 2 dimensions to compute r2 and mse residuals = np.zeros(timeseries.shape[0]-time_window_size) for i_reg in range(time_window_size): idx_example = np.arange(i_reg, timeseries.shape[0]-time_lag, time_window_size)[:-1] X = np.zeros((idx_example.size, time_window_size, ts_train.shape[1]))#len(cs_train))) for k in range(time_window_size): X[:,k] = ts_train[idx_example+k] X = X.reshape(X.shape[0], X.shape[1] * X.shape[2]) y = ts_test[idx_example + time_lag] if scoring == 'residual_tests': y_pred_i_reg = np.zeros(y.size) kfold = KFold(n=y.size, n_folds=cv) for train, test in kfold: reg.fit(X[train], y[train]) y_pred_i_reg[test] = reg.predict(X[test]) residuals[idx_example] = y - y_pred_i_reg #residuals else: tmp_predict = cross_val_predict(reg, X, y, cv=cv) tmp_score[i_reg,0] = r2_score(y,tmp_predict).mean() tmp_score[i_reg,1] = mean_squared_error(y,tmp_predict).mean() #tmp_score[i_reg] = cross_val_score(reg, X, y, cv=cv, scoring=scoring).mean() if scoring == 'residual_tests': features_regression[j,0] = durbin_watson(residuals) features_regression[j,[1,2]] = omni_normtest(residuals) features_regression[j,3:] = jarque_bera(residuals) else: features_regression[j] = tmp_score.mean(0) return features_regression def regression_scores_different_domain(timeseries_causes, timeseries_effect, time_window_size, reg, cv, scoring, timeseriesZ=None): """Compute regression scores for a given set of 3 timeseries as causes and 3 as effects according to the causality structures. 
""" global causality_structures if scoring == 'residual_tests': features_regression = np.zeros([len(causality_structures),7]) else: features_regression = np.zeros([len(causality_structures),2]) #added 2 dimensions to compute r2 and mse for j, (cs_train, cs_test) in enumerate(causality_structures): ts_train = timeseries_causes[:,cs_train] if not(timeseriesZ is None): ts_train = np.hstack([ts_train, timeseriesZ]) ts_test = timeseries_effect[:,cs_test] tmp_score = np.zeros([time_window_size,2]) #added 2 dimensions to compute r2 and mse residuals = np.zeros(timeseries_causes.shape[0]-time_window_size) for i_reg in range(time_window_size): idx_example = np.arange(i_reg, timeseries_causes.shape[0], time_window_size)[:-1] X = np.zeros((idx_example.size, time_window_size, ts_train.shape[1]))#len(cs_train))) for k in range(time_window_size): X[:,k] = ts_train[idx_example+k] X = X.reshape(X.shape[0], X.shape[1] * X.shape[2]) y = ts_test[idx_example + time_window_size] if scoring == 'residual_tests': y_pred_i_reg = np.zeros(y.size) kfold = KFold(n=y.size, n_folds=cv) for train, test in kfold: reg.fit(X[train], y[train]) y_pred_i_reg[test] = reg.predict(X[test]) residuals[idx_example] = y - y_pred_i_reg #residuals else: tmp_predict = cross_val_predict(reg, X, y, cv=cv) tmp_score[i_reg,0] = r2_score(y,tmp_predict).mean() tmp_score[i_reg,1] = mean_squared_error(y,tmp_predict).mean() #tmp_score[i_reg] = cross_val_score(reg, X, y, cv=cv, scoring=scoring).mean() if scoring == 'residual_tests': features_regression[j,0] = durbin_watson(residuals) features_regression[j,[1,2]] = omni_normtest(residuals) features_regression[j,3:] = jarque_bera(residuals) else: features_regression[j] = tmp_score.mean(0) return features_regression def granger_scores(timeseries, order): timeseries = TimeSeries(timeseries, sampling_interval=1) g = nta.GrangerAnalyzer(timeseries, order=order) g_xy_mat = np.mean(g.causality_xy, axis=-1) g_yx_mat = np.mean(g.causality_yx, axis=-1) return np.concatenate([g_xy_mat[np.tril_indices(3,-1)], g_yx_mat.T[np.triu_indices(3,1)]]) def feature_engineering(Xs, block_normalisation=False): print "Feature Engineering." feature_space = [] for X in Xs: if block_normalisation : print "Block-normalization r2, mse, granger" X = row_normalise(X)#grand_normalise(X)#feature_scaling(X) feature_space += [X, np.power(X, 2), np.power(X, 3), np.sign(X) * np.sqrt(np.abs(X))] # Feature engineering: all possible products between the original feature values: feature_space.append(np.array([np.multiply.outer(X[i], X[i])[np.triu_indices(X.shape[1], 1)] for i in range(X.shape[0])])) return feature_space def feature_engineering2(X, X_granger): # Feature engineering: all possible products between the original feature values: X_pairwise = np.array([np.multiply.outer(X[i], X[i])[np.triu_indices(X.shape[1], 1)] for i in range(X.shape[0])]) X_granger_pairwise = np.array([np.multiply.outer(X_granger[i], X_granger[i])[np.triu_indices(X_granger.shape[1], 1)] for i in range(X_granger.shape[0])]) # Add new features to the original ones: feature_space = [X, np.power(X, 2), np.power(X, 3), np.sign(X) * np.sqrt(np.abs(X)), X_pairwise, X_granger, np.power(X_granger, 2), np.power(X_granger, 3), np.sign(X_granger) * np.sqrt(np.abs(X_granger)), X_granger_pairwise] return feature_space def feature_scaling(A): """Feature scaling according to wikipedia x-x_min / x_max-x_min """ A = (A - A.min()) / (A.max() - A.min()) return A def grand_normalise(A): """Normalise (z-scoring) array A. 
""" A = A - A.mean() A = np.nan_to_num(A / A.std()) return A def row_normalise(A): """Normalize along row array A """ A = column_normalise(A.T) return A.T def column_normalise(A): """NOrmalise along column array A """ A = A - A.mean(0) A = np.nan_to_num(A / A.std(0)) return A def feature_normalisation(feature_space_train, feature_space_test=None, block_normalisation=False): print "Normalisation." if feature_space_test is None: feature_space = feature_space_train else: size_train = feature_space_train[0].shape[0] feature_space = [np.vstack([A_train, A_test]) for A_train, A_test in zip(feature_space_train, feature_space_test)] if block_normalisation: print "Block-normalisation." X = np.hstack([grand_normalise(A) for A in feature_space]) else: print "Per-feature Normalisation." X = np.hstack(feature_space) if feature_space_test is None: return X else: X_train = X[:size_train,:] X_test = X[size_train:,:] return X_train, X_test
import numpy as np import pickle from scipy.io import loadmat from statsmodels.stats.stattools import durbin_watson, omni_normtest, jarque_bera from sklearn.linear_model import LinearRegression, BayesianRidge from sklearn.svm import SVR from sklearn.cross_validation import cross_val_score, KFold, cross_val_predict from sklearn.metrics import r2_score, mean_squared_error from sys import stdout from joblib import Parallel, delayed import nitime.analysis as nta from nitime.timeseries import TimeSeries #from load_challenge_data import load_challenge_data causality_structures = [((0,),0), ((0,),1), ((0,),2), ((1,),0), ((1,),1), ((1,),2), ((2,),0), ((2,),1), ((2,),2), ((0,1),0), ((0,1),1), ((0,1),2), ((0,2),0), ((0,2),1), ((0,2),2), ((1,2),0), ((1,2),1), ((1,2),2), ((0,1,2),0), ((0,1,2),1), ((0,1,2),2)] def regression_scores(timeseries, time_window_size, time_lag, reg, cv, scoring, timeseriesZ=None): """Compute regression scores for a given set of 3 timeseries according to the causality structures. """ global causality_structures if scoring == 'residual_tests': features_regression = np.zeros([len(causality_structures),7]) else: features_regression = np.zeros([len(causality_structures),2]) #added 2 dimensions to compute r2 and mse for j, (cs_train, cs_test) in enumerate(causality_structures): ts_train = timeseries[:,cs_train] if not(timeseriesZ is None): ts_train = np.hstack([ts_train, timeseriesZ]) if time_lag is None: time_lag=time_window_size ts_test = timeseries[:,cs_test] tmp_score = np.zeros([time_window_size,2]) #added 2 dimensions to compute r2 and mse residuals = np.zeros(timeseries.shape[0]-time_window_size) for i_reg in range(time_window_size): idx_example = np.arange(i_reg, timeseries.shape[0]-time_lag, time_window_size)[:-1] X = np.zeros((idx_example.size, time_window_size, ts_train.shape[1]))#len(cs_train))) for k in range(time_window_size): X[:,k] = ts_train[idx_example+k] X = X.reshape(X.shape[0], X.shape[1] * X.shape[2]) y = ts_test[idx_example + time_lag] if scoring == 'residual_tests': y_pred_i_reg = np.zeros(y.size) kfold = KFold(n=y.size, n_folds=cv) for train, test in kfold: reg.fit(X[train], y[train]) y_pred_i_reg[test] = reg.predict(X[test]) residuals[idx_example] = y - y_pred_i_reg #residuals else: tmp_predict = cross_val_predict(reg, X, y, cv=cv) tmp_score[i_reg,0] = r2_score(y,tmp_predict).mean() tmp_score[i_reg,1] = mean_squared_error(y,tmp_predict).mean() #tmp_score[i_reg] = cross_val_score(reg, X, y, cv=cv, scoring=scoring).mean() if scoring == 'residual_tests': features_regression[j,0] = durbin_watson(residuals) features_regression[j,[1,2]] = omni_normtest(residuals) features_regression[j,3:] = jarque_bera(residuals) else: features_regression[j] = tmp_score.mean(0) return features_regression def regression_scores_different_domain(timeseries_causes, timeseries_effect, time_window_size, reg, cv, scoring, timeseriesZ=None): """Compute regression scores for a given set of 3 timeseries as causes and 3 as effects according to the causality structures. 
""" global causality_structures if scoring == 'residual_tests': features_regression = np.zeros([len(causality_structures),7]) else: features_regression = np.zeros([len(causality_structures),2]) #added 2 dimensions to compute r2 and mse for j, (cs_train, cs_test) in enumerate(causality_structures): ts_train = timeseries_causes[:,cs_train] if not(timeseriesZ is None): ts_train = np.hstack([ts_train, timeseriesZ]) ts_test = timeseries_effect[:,cs_test] tmp_score = np.zeros([time_window_size,2]) #added 2 dimensions to compute r2 and mse residuals = np.zeros(timeseries_causes.shape[0]-time_window_size) for i_reg in range(time_window_size): idx_example = np.arange(i_reg, timeseries_causes.shape[0], time_window_size)[:-1] X = np.zeros((idx_example.size, time_window_size, ts_train.shape[1]))#len(cs_train))) for k in range(time_window_size): X[:,k] = ts_train[idx_example+k] X = X.reshape(X.shape[0], X.shape[1] * X.shape[2]) y = ts_test[idx_example + time_window_size] if scoring == 'residual_tests': y_pred_i_reg = np.zeros(y.size) kfold = KFold(n=y.size, n_folds=cv) for train, test in kfold: reg.fit(X[train], y[train]) y_pred_i_reg[test] = reg.predict(X[test]) residuals[idx_example] = y - y_pred_i_reg #residuals else: tmp_predict = cross_val_predict(reg, X, y, cv=cv) tmp_score[i_reg,0] = r2_score(y,tmp_predict).mean() tmp_score[i_reg,1] = mean_squared_error(y,tmp_predict).mean() #tmp_score[i_reg] = cross_val_score(reg, X, y, cv=cv, scoring=scoring).mean() if scoring == 'residual_tests': features_regression[j,0] = durbin_watson(residuals) features_regression[j,[1,2]] = omni_normtest(residuals) features_regression[j,3:] = jarque_bera(residuals) else: features_regression[j] = tmp_score.mean(0) return features_regression def granger_scores(timeseries, order): timeseries = TimeSeries(timeseries, sampling_interval=1) g = nta.GrangerAnalyzer(timeseries, order=order) g_xy_mat = np.mean(g.causality_xy, axis=-1) g_yx_mat = np.mean(g.causality_yx, axis=-1) return np.concatenate([g_xy_mat[np.tril_indices(3,-1)], g_yx_mat.T[np.triu_indices(3,1)]]) def feature_engineering(Xs, block_normalisation=False): print "Feature Engineering." feature_space = [] for X in Xs: if block_normalisation : print "Block-normalization r2, mse, granger" X = row_normalise(X)#grand_normalise(X)#feature_scaling(X) feature_space += [X, np.power(X, 2), np.power(X, 3), np.sign(X) * np.sqrt(np.abs(X))] # Feature engineering: all possible products between the original feature values: feature_space.append(np.array([np.multiply.outer(X[i], X[i])[np.triu_indices(X.shape[1], 1)] for i in range(X.shape[0])])) return feature_space def feature_engineering2(X, X_granger): # Feature engineering: all possible products between the original feature values: X_pairwise = np.array([np.multiply.outer(X[i], X[i])[np.triu_indices(X.shape[1], 1)] for i in range(X.shape[0])]) X_granger_pairwise = np.array([np.multiply.outer(X_granger[i], X_granger[i])[np.triu_indices(X_granger.shape[1], 1)] for i in range(X_granger.shape[0])]) # Add new features to the original ones: feature_space = [X, np.power(X, 2), np.power(X, 3), np.sign(X) * np.sqrt(np.abs(X)), X_pairwise, X_granger, np.power(X_granger, 2), np.power(X_granger, 3), np.sign(X_granger) * np.sqrt(np.abs(X_granger)), X_granger_pairwise] return feature_space def feature_scaling(A): """Feature scaling according to wikipedia x-x_min / x_max-x_min """ A = (A - A.min()) / (A.max() - A.min()) return A def grand_normalise(A): """Normalise (z-scoring) array A. 
""" A = A - A.mean() A = np.nan_to_num(A / A.std()) return A def row_normalise(A): """Normalize along row array A """ A = column_normalise(A.T) return A.T def column_normalise(A): """NOrmalise along column array A """ A = A - A.mean(0) A = np.nan_to_num(A / A.std(0)) return A def feature_normalisation(feature_space_train, feature_space_test=None, block_normalisation=False): print "Normalisation." if feature_space_test is None: feature_space = feature_space_train else: size_train = feature_space_train[0].shape[0] feature_space = [np.vstack([A_train, A_test]) for A_train, A_test in zip(feature_space_train, feature_space_test)] if block_normalisation: print "Block-normalisation." X = np.hstack([grand_normalise(A) for A in feature_space]) else: print "Per-feature Normalisation." X = np.hstack(feature_space) if feature_space_test is None: return X else: X_train = X[:size_train,:] X_test = X[size_train:,:] return X_train, X_test
en
0.739808
#from load_challenge_data import load_challenge_data
Compute regression scores for a given set of 3 timeseries according to
    the causality structures.
    
#added 2 dimensions to compute r2 and mse
#added 2 dimensions to compute r2 and mse
#len(cs_train)))
#residuals
#tmp_score[i_reg] = cross_val_score(reg, X, y, cv=cv, scoring=scoring).mean()
Compute regression scores for a given set of 3 timeseries as causes and 3
    as effects according to the causality structures.
    
#added 2 dimensions to compute r2 and mse
#added 2 dimensions to compute r2 and mse
#len(cs_train)))
#residuals
#tmp_score[i_reg] = cross_val_score(reg, X, y, cv=cv, scoring=scoring).mean()
#grand_normalise(X) #feature_scaling(X)
# Feature engineering: all possible products between the original feature values:
# Feature engineering: all possible products between the original feature values:
# Add new features to the original ones:
Feature scaling according to Wikipedia:
    x-x_min / x_max-x_min
    
Normalise (z-scoring) array A.
    
Normalise along rows of array A.
    
Normalise along columns of array A.
2.097483
2
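A synthetic smoke test for regression_scores above; it has to run under the legacy scikit-learn API the file imports (sklearn.cross_validation), and the window size, lag, and fold count here are arbitrary illustration values.

# Smoke test for regression_scores() above on toy data where series 0
# drives series 1 with one step of lag. Parameter values are illustrative.
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
ts = rng.randn(600, 3)
ts[1:, 1] += 0.8 * ts[:-1, 0]             # inject a 0 -> 1 causal link
feats = regression_scores(ts, time_window_size=5, time_lag=None,
                          reg=LinearRegression(), cv=5, scoring='r2')
print(feats.shape)                        # (21, 2): (r2, mse) per causality structure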
mediagenerator/multidomain_media_url.py
ajith-004/django-mediagenerator
0
6621686
import os, os.path import urlparse from django.conf import settings from django.core.files.storage import FileSystemStorage count = 0 media_count=100 def rewrite_url(): media_change = getattr(settings, 'CHANGE_MEDIA_URL', False) if (media_change): global count global media_count count += 1 if count == 5: media_count += 1 count = 0 url_pattern = settings.MEDIA_URL_PATTERN return url_pattern.replace('%d', str(media_count)) else: return '' class CustomFileSystemStorage(FileSystemStorage): def __init__(self, location=None, base_url=None): if location is None: location = settings.MEDIA_ROOT if base_url is None: base_url = settings.MEDIA_URL self.location = os.path.abspath(location) self.base_url = urlparse.urljoin(rewrite_url(), base_url)
import os, os.path import urlparse from django.conf import settings from django.core.files.storage import FileSystemStorage count = 0 media_count=100 def rewrite_url(): media_change = getattr(settings, 'CHANGE_MEDIA_URL', False) if (media_change): global count global media_count count += 1 if count == 5: media_count += 1 count = 0 url_pattern = settings.MEDIA_URL_PATTERN return url_pattern.replace('%d', str(media_count)) else: return '' class CustomFileSystemStorage(FileSystemStorage): def __init__(self, location=None, base_url=None): if location is None: location = settings.MEDIA_ROOT if base_url is None: base_url = settings.MEDIA_URL self.location = os.path.abspath(location) self.base_url = urlparse.urljoin(rewrite_url(), base_url)
none
1
2.221236
2
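The storage above reads two non-standard settings; a hypothetical configuration sketch follows. The domain pattern, paths, and the dotted storage path are invented for illustration.

# Hypothetical Django settings for CustomFileSystemStorage above. With
# CHANGE_MEDIA_URL on, every 5th storage construction bumps the host number.
CHANGE_MEDIA_URL = True
MEDIA_URL_PATTERN = 'http://media%d.example.com/'   # '%d' -> rotating counter
MEDIA_URL = '/media/'
MEDIA_ROOT = '/var/www/media/'
DEFAULT_FILE_STORAGE = 'mediagenerator.multidomain_media_url.CustomFileSystemStorage'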
__init__.py
zengljnwpu/yaspc
0
6621687
<gh_stars>0 """Yet Another Pascal Compiler""" # Bring in all of the public yaspc interface into this # module. #from __future__ import absolute_import #from __future__ import division #from __future__ import print_function # import all packages from optimization # pylint: disable=wildcard-import #from yaspc.optimization import * # pylint: enable=wildcard-import
"""Yet Another Pascal Compiler""" # Bring in all of the public yaspc interface into this # module. #from __future__ import absolute_import #from __future__ import division #from __future__ import print_function # import all packages from optimization # pylint: disable=wildcard-import #from yaspc.optimization import * # pylint: enable=wildcard-import
en
0.454746
Yet Another Pascal Compiler # Bring in all of the public yaspc interface into this # module. #from __future__ import absolute_import #from __future__ import division #from __future__ import print_function # import all packages from optimization # pylint: disable=wildcard-import #from yaspc.optimization import * # pylint: enable=wildcard-import
1.124957
1
examples/shaders.py
brianbruggeman/oogli
3
6621688
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division

import ctypes
from textwrap import dedent as dd

import numpy as np

import OpenGL
OpenGL.ERROR_CHECKING = True

import glfw
from glfw import gl

# ######################################################################
# Data
# ######################################################################
title = 'OpenGL 4.1 Rendering'
width, height = 100, 75
major, minor = (3, 2)
draw_array = False
use_data = True

modes = sorted([
    gl.POINTS,
    gl.LINES, gl.LINE_LOOP, gl.LINE_STRIP,
    gl.LINES_ADJACENCY, gl.LINE_STRIP_ADJACENCY,
    # gl.QUADS,
    gl.TRIANGLES, gl.TRIANGLE_STRIP, gl.TRIANGLE_FAN,
    gl.TRIANGLE_STRIP_ADJACENCY, gl.TRIANGLES_ADJACENCY,
    # gl.PATCHES,
])
mode_index = modes.index(gl.TRIANGLES)
fills = [
    gl.FILL,
    gl.POINT,
    gl.LINE
]
fill_index = fills.index(gl.LINE)

pt = 0.5
vertices = np.array([
    (x, y)
    for x in [-pt, 0, pt]
    for y in [-pt, 0, pt]
], dtype=np.float32)

indices = np.array([
    # index for index in range(vertices.shape[0])
    5, 6, 0,
    # 5, 2, 0,
    # 5, 8, 6,
], dtype=np.uint32)

# Generate some colors for the points
rgb = 3
colors = np.array([
    (1.0, 0.0, 0.0),
    (0.0, 1.0, 0.0),
    (0.0, 0.0, 1.0),
    (1.0, 0.0, 1.0),
    (0.0, 1.0, 1.0),
    (1.0, 1.0, 0.0),
    (0.5, 0.5, 0.5),
    (0.5, 0.5, 0.0),
    (0.0, 0.5, 0.5),
], dtype=np.float32)

data = np.zeros(
    len(vertices),
    dtype=[
        ('position', np.float32, vertices.shape[-1]),
        ('color', np.float32, colors.shape[-1]),
    ]
)

# Interleave vertex data for position and color
data['position'] = vertices
data['color'] = colors

vshader = '''
    #version 150

    in vec2 position;
    in vec3 color;

    out vec3 v_color;

    void main () {
        gl_Position = vec4(position, 0.0, 1.0);
        v_color = color;
    }
'''

fshader = '''
    #version 150

    in vec3 v_color;

    out vec4 frag_colour;

    void main () {
        frag_colour = vec4(v_color, 1.0);
        frag_colour = vec4(0.2, 1.0, 0.2, 1.0);
    }
'''


# ######################################################################
# Helper functions
def screenshot(pixels):
    assert isinstance(pixels, np.ndarray), 'data must be a numpy array'
    width, height = pixels.shape[0:2]
    return gl.read_pixels(0, 0, width, height, gl.RGB, gl.UNSIGNED_BYTE, pixels)


@glfw.decorators.key_callback
def on_key(win, key, code, action, mods):
    '''Handles keyboard event'''
    global mode_index
    global fill_index
    global draw_array
    global indices_buffer_id
    global vertices
    global colors
    global data
    global use_data
    if action in [glfw.PRESS, glfw.REPEAT]:
        if key in [glfw.KEY_ESCAPE, glfw.KEY_Q]:
            # Quit
            glfw.core.set_window_should_close(win, gl.TRUE)
        elif key == glfw.KEY_M:
            # Update draw mode (points, lines, triangles, quads, etc.)
            if mods & glfw.MOD_SHIFT:
                mode_index = mode_index - 1 if mode_index - 1 >= 0 else len(modes) - 1
            else:
                mode_index = mode_index + 1 if mode_index + 1 < len(modes) else 0
            print('New mode: {}'.format(modes[mode_index]))
        elif key == glfw.KEY_W:
            # Update fill mode (wireframe, solid, points)
            if mods & glfw.MOD_SHIFT:
                fill_index = fill_index - 1 if fill_index - 1 >= 0 else len(fills) - 1
            else:
                fill_index = fill_index + 1 if fill_index + 1 < len(fills) else 0
            print('New fill: {}'.format(fills[fill_index]))
        elif key == glfw.KEY_SPACE:
            if mods & glfw.MOD_SHIFT:
                colors = np.array([
                    (1.0, 0.0, 0.0),
                    (0.0, 1.0, 0.0),
                    (0.0, 0.0, 1.0),
                    (1.0, 0.0, 1.0),
                    (0.0, 1.0, 1.0),
                    (1.0, 1.0, 0.0),
                    (0.5, 0.5, 0.5),
                    (0.5, 0.5, 0.0),
                    (0.0, 0.5, 0.5),
                ], dtype=np.float32)
            else:
                # Randomize colors
                colors = np.random.rand(len(vertices), 3)
            data['color'] = colors


def compile_shader(shader_source, shader_type):
    '''Compiles and checks output'''
    shader_id = gl.glCreateShader(shader_type)
    gl.glShaderSource(shader_id, dd(shader_source))
    gl.glCompileShader(shader_id)
    shader_result = gl.glGetShaderiv(shader_id, gl.COMPILE_STATUS)
    shader_log = gl.glGetShaderiv(shader_id, gl.INFO_LOG_LENGTH)
    assert shader_result == gl.TRUE
    if shader_log > 0:
        error_message = gl.glGetShaderInfoLog(shader_id)
        print('ERROR: Vertex Shader Compilation | {}'.format(error_message))
    return shader_id


def compile_program(*shader_sources):
    '''Compiles shaders, links to a program and checks output'''
    assert len(shader_sources) >= 2
    shader_types = [
        gl.VERTEX_SHADER,
        gl.FRAGMENT_SHADER,
        gl.TESS_CONTROL_SHADER,
        gl.TESS_EVALUATION_SHADER,
        gl.GEOMETRY_SHADER,
    ]
    shaders = [
        compile_shader(shader_source, shader_type)
        for shader_source, shader_type in zip(shader_sources, shader_types)
    ]
    program = gl.glCreateProgram()
    for shader in shaders:
        gl.glAttachShader(program, shader)
    gl.glLinkProgram(program)
    assert gl.glGetProgramiv(program, gl.LINK_STATUS) == gl.TRUE
    assert gl.glGetProgramiv(program, gl.INFO_LOG_LENGTH) == 0, gl.glGetProgramInfoLog(program)
    # Cleanup shaders
    for shader in shaders:
        gl.glDetachShader(program, shader)
        gl.glDeleteShader(shader)
    return program


def setup_context(major, minor):
    glfw.core.init()
    glfw.core.window_hint(glfw.SAMPLES, 4)
    glfw.core.window_hint(glfw.CONTEXT_VERSION_MAJOR, major)
    glfw.core.window_hint(glfw.CONTEXT_VERSION_MINOR, minor)
    glfw.core.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    glfw.core.window_hint(glfw.OPENGL_FORWARD_COMPAT, True)
    glfw.core.window_hint(glfw.RED_BITS, 24)
    glfw.core.window_hint(glfw.GREEN_BITS, 24)
    glfw.core.window_hint(glfw.BLUE_BITS, 24)
    glfw.core.window_hint(glfw.ALPHA_BITS, 24)
    glfw.core.window_hint(glfw.DEPTH_BITS, 24)


# ######################################################################
# Setup OpenGL Context
setup_context(major, minor)
num_byte_size = 3
pixels = np.zeros((width, height, num_byte_size), dtype=np.uint8)
win = glfw.create_window(title=title, width=width, height=height)
glfw.core.set_key_callback(win, on_key)
glfw.core.make_context_current(win)

# Build pipeline
program = compile_program(vshader, fshader)

# Bind attributes
gl.glBindAttribLocation(program, 0, 'position')
gl.glBindAttribLocation(program, 1, 'color')

# ######################################################################
# Setup VBO and VAO
vao = gl.glGenVertexArrays(1)
buffer_id = gl.glGenBuffers(1)
indices_buffer_id = gl.glGenBuffers(1)
gl.glBindBuffer(gl.ARRAY_BUFFER, buffer_id)
gl.glBindBuffer(gl.ELEMENT_ARRAY_BUFFER, indices_buffer_id)
gl.glBufferData(gl.ELEMENT_ARRAY_BUFFER, indices.flatten(), gl.STATIC_DRAW)

# ######################################################################
# Render
while not glfw.window_should_close(win):
    gl.glClear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
    gl.glPolygonMode(gl.FRONT_AND_BACK, fills[fill_index])
    gl.glBufferData(gl.ARRAY_BUFFER, data.nbytes, data, gl.DYNAMIC_DRAW)
    gl.glEnable(gl.DEPTH_TEST)
    gl.glDepthFunc(gl.LESS)
    gl.glUseProgram(program)
    gl.glBindVertexArray(vao)

    stride = data.strides[0]
    offset = ctypes.c_void_p(0)
    pos = gl.glGetAttribLocation(program, 'position')
    gl.glEnableVertexAttribArray(pos)
    gl.glBindBuffer(gl.ELEMENT_ARRAY_BUFFER, indices_buffer_id)
    gl.glBindBuffer(gl.ARRAY_BUFFER, buffer_id)
    gl.glVertexAttribPointer(pos, data['position'].shape[-1], gl.FLOAT, False, stride, offset)

    offset = ctypes.c_void_p(data.dtype['position'].itemsize)
    col = gl.glGetAttribLocation(program, 'color')
    gl.glEnableVertexAttribArray(col)
    gl.glBindBuffer(gl.ELEMENT_ARRAY_BUFFER, indices_buffer_id)
    gl.glBindBuffer(gl.ARRAY_BUFFER, buffer_id)
    gl.glVertexAttribPointer(col, data['color'].shape[-1], gl.FLOAT, False, stride, offset)

    gl.glDrawElements(modes[mode_index], len(indices), gl.UNSIGNED_INT, None)
    gl.glDisableVertexAttribArray(vao)

    pixels = screenshot(pixels)

    # Standard Loop Event handling
    glfw.core.swap_buffers(win)
    glfw.core.poll_events()

checksum = np.sum(pixels)
assert checksum == 35587, checksum

# ######################################################################
# Cleanup
gl.glUseProgram(0)
glfw.core.terminate()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division

import ctypes
from textwrap import dedent as dd

import numpy as np

import OpenGL
OpenGL.ERROR_CHECKING = True

import glfw
from glfw import gl

# ######################################################################
# Data
# ######################################################################
title = 'OpenGL 4.1 Rendering'
width, height = 100, 75
major, minor = (3, 2)
draw_array = False
use_data = True

modes = sorted([
    gl.POINTS,
    gl.LINES, gl.LINE_LOOP, gl.LINE_STRIP,
    gl.LINES_ADJACENCY, gl.LINE_STRIP_ADJACENCY,
    # gl.QUADS,
    gl.TRIANGLES, gl.TRIANGLE_STRIP, gl.TRIANGLE_FAN,
    gl.TRIANGLE_STRIP_ADJACENCY, gl.TRIANGLES_ADJACENCY,
    # gl.PATCHES,
])
mode_index = modes.index(gl.TRIANGLES)
fills = [
    gl.FILL,
    gl.POINT,
    gl.LINE
]
fill_index = fills.index(gl.LINE)

pt = 0.5
vertices = np.array([
    (x, y)
    for x in [-pt, 0, pt]
    for y in [-pt, 0, pt]
], dtype=np.float32)

indices = np.array([
    # index for index in range(vertices.shape[0])
    5, 6, 0,
    # 5, 2, 0,
    # 5, 8, 6,
], dtype=np.uint32)

# Generate some colors for the points
rgb = 3
colors = np.array([
    (1.0, 0.0, 0.0),
    (0.0, 1.0, 0.0),
    (0.0, 0.0, 1.0),
    (1.0, 0.0, 1.0),
    (0.0, 1.0, 1.0),
    (1.0, 1.0, 0.0),
    (0.5, 0.5, 0.5),
    (0.5, 0.5, 0.0),
    (0.0, 0.5, 0.5),
], dtype=np.float32)

data = np.zeros(
    len(vertices),
    dtype=[
        ('position', np.float32, vertices.shape[-1]),
        ('color', np.float32, colors.shape[-1]),
    ]
)

# Interleave vertex data for position and color
data['position'] = vertices
data['color'] = colors

vshader = '''
    #version 150

    in vec2 position;
    in vec3 color;

    out vec3 v_color;

    void main () {
        gl_Position = vec4(position, 0.0, 1.0);
        v_color = color;
    }
'''

fshader = '''
    #version 150

    in vec3 v_color;

    out vec4 frag_colour;

    void main () {
        frag_colour = vec4(v_color, 1.0);
        frag_colour = vec4(0.2, 1.0, 0.2, 1.0);
    }
'''


# ######################################################################
# Helper functions
def screenshot(pixels):
    assert isinstance(pixels, np.ndarray), 'data must be a numpy array'
    width, height = pixels.shape[0:2]
    return gl.read_pixels(0, 0, width, height, gl.RGB, gl.UNSIGNED_BYTE, pixels)


@glfw.decorators.key_callback
def on_key(win, key, code, action, mods):
    '''Handles keyboard event'''
    global mode_index
    global fill_index
    global draw_array
    global indices_buffer_id
    global vertices
    global colors
    global data
    global use_data
    if action in [glfw.PRESS, glfw.REPEAT]:
        if key in [glfw.KEY_ESCAPE, glfw.KEY_Q]:
            # Quit
            glfw.core.set_window_should_close(win, gl.TRUE)
        elif key == glfw.KEY_M:
            # Update draw mode (points, lines, triangles, quads, etc.)
            if mods & glfw.MOD_SHIFT:
                mode_index = mode_index - 1 if mode_index - 1 >= 0 else len(modes) - 1
            else:
                mode_index = mode_index + 1 if mode_index + 1 < len(modes) else 0
            print('New mode: {}'.format(modes[mode_index]))
        elif key == glfw.KEY_W:
            # Update fill mode (wireframe, solid, points)
            if mods & glfw.MOD_SHIFT:
                fill_index = fill_index - 1 if fill_index - 1 >= 0 else len(fills) - 1
            else:
                fill_index = fill_index + 1 if fill_index + 1 < len(fills) else 0
            print('New fill: {}'.format(fills[fill_index]))
        elif key == glfw.KEY_SPACE:
            if mods & glfw.MOD_SHIFT:
                colors = np.array([
                    (1.0, 0.0, 0.0),
                    (0.0, 1.0, 0.0),
                    (0.0, 0.0, 1.0),
                    (1.0, 0.0, 1.0),
                    (0.0, 1.0, 1.0),
                    (1.0, 1.0, 0.0),
                    (0.5, 0.5, 0.5),
                    (0.5, 0.5, 0.0),
                    (0.0, 0.5, 0.5),
                ], dtype=np.float32)
            else:
                # Randomize colors
                colors = np.random.rand(len(vertices), 3)
            data['color'] = colors


def compile_shader(shader_source, shader_type):
    '''Compiles and checks output'''
    shader_id = gl.glCreateShader(shader_type)
    gl.glShaderSource(shader_id, dd(shader_source))
    gl.glCompileShader(shader_id)
    shader_result = gl.glGetShaderiv(shader_id, gl.COMPILE_STATUS)
    shader_log = gl.glGetShaderiv(shader_id, gl.INFO_LOG_LENGTH)
    assert shader_result == gl.TRUE
    if shader_log > 0:
        error_message = gl.glGetShaderInfoLog(shader_id)
        print('ERROR: Vertex Shader Compilation | {}'.format(error_message))
    return shader_id


def compile_program(*shader_sources):
    '''Compiles shaders, links to a program and checks output'''
    assert len(shader_sources) >= 2
    shader_types = [
        gl.VERTEX_SHADER,
        gl.FRAGMENT_SHADER,
        gl.TESS_CONTROL_SHADER,
        gl.TESS_EVALUATION_SHADER,
        gl.GEOMETRY_SHADER,
    ]
    shaders = [
        compile_shader(shader_source, shader_type)
        for shader_source, shader_type in zip(shader_sources, shader_types)
    ]
    program = gl.glCreateProgram()
    for shader in shaders:
        gl.glAttachShader(program, shader)
    gl.glLinkProgram(program)
    assert gl.glGetProgramiv(program, gl.LINK_STATUS) == gl.TRUE
    assert gl.glGetProgramiv(program, gl.INFO_LOG_LENGTH) == 0, gl.glGetProgramInfoLog(program)
    # Cleanup shaders
    for shader in shaders:
        gl.glDetachShader(program, shader)
        gl.glDeleteShader(shader)
    return program


def setup_context(major, minor):
    glfw.core.init()
    glfw.core.window_hint(glfw.SAMPLES, 4)
    glfw.core.window_hint(glfw.CONTEXT_VERSION_MAJOR, major)
    glfw.core.window_hint(glfw.CONTEXT_VERSION_MINOR, minor)
    glfw.core.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    glfw.core.window_hint(glfw.OPENGL_FORWARD_COMPAT, True)
    glfw.core.window_hint(glfw.RED_BITS, 24)
    glfw.core.window_hint(glfw.GREEN_BITS, 24)
    glfw.core.window_hint(glfw.BLUE_BITS, 24)
    glfw.core.window_hint(glfw.ALPHA_BITS, 24)
    glfw.core.window_hint(glfw.DEPTH_BITS, 24)


# ######################################################################
# Setup OpenGL Context
setup_context(major, minor)
num_byte_size = 3
pixels = np.zeros((width, height, num_byte_size), dtype=np.uint8)
win = glfw.create_window(title=title, width=width, height=height)
glfw.core.set_key_callback(win, on_key)
glfw.core.make_context_current(win)

# Build pipeline
program = compile_program(vshader, fshader)

# Bind attributes
gl.glBindAttribLocation(program, 0, 'position')
gl.glBindAttribLocation(program, 1, 'color')

# ######################################################################
# Setup VBO and VAO
vao = gl.glGenVertexArrays(1)
buffer_id = gl.glGenBuffers(1)
indices_buffer_id = gl.glGenBuffers(1)
gl.glBindBuffer(gl.ARRAY_BUFFER, buffer_id)
gl.glBindBuffer(gl.ELEMENT_ARRAY_BUFFER, indices_buffer_id)
gl.glBufferData(gl.ELEMENT_ARRAY_BUFFER, indices.flatten(), gl.STATIC_DRAW)

# ######################################################################
# Render
while not glfw.window_should_close(win):
    gl.glClear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
    gl.glPolygonMode(gl.FRONT_AND_BACK, fills[fill_index])
    gl.glBufferData(gl.ARRAY_BUFFER, data.nbytes, data, gl.DYNAMIC_DRAW)
    gl.glEnable(gl.DEPTH_TEST)
    gl.glDepthFunc(gl.LESS)
    gl.glUseProgram(program)
    gl.glBindVertexArray(vao)

    stride = data.strides[0]
    offset = ctypes.c_void_p(0)
    pos = gl.glGetAttribLocation(program, 'position')
    gl.glEnableVertexAttribArray(pos)
    gl.glBindBuffer(gl.ELEMENT_ARRAY_BUFFER, indices_buffer_id)
    gl.glBindBuffer(gl.ARRAY_BUFFER, buffer_id)
    gl.glVertexAttribPointer(pos, data['position'].shape[-1], gl.FLOAT, False, stride, offset)

    offset = ctypes.c_void_p(data.dtype['position'].itemsize)
    col = gl.glGetAttribLocation(program, 'color')
    gl.glEnableVertexAttribArray(col)
    gl.glBindBuffer(gl.ELEMENT_ARRAY_BUFFER, indices_buffer_id)
    gl.glBindBuffer(gl.ARRAY_BUFFER, buffer_id)
    gl.glVertexAttribPointer(col, data['color'].shape[-1], gl.FLOAT, False, stride, offset)

    gl.glDrawElements(modes[mode_index], len(indices), gl.UNSIGNED_INT, None)
    gl.glDisableVertexAttribArray(vao)

    pixels = screenshot(pixels)

    # Standard Loop Event handling
    glfw.core.swap_buffers(win)
    glfw.core.poll_events()

checksum = np.sum(pixels)
assert checksum == 35587, checksum

# ######################################################################
# Cleanup
gl.glUseProgram(0)
glfw.core.terminate()
de
0.367588
#!/usr/bin/env python # -*- coding: utf-8 -*- # ###################################################################### # Data # ###################################################################### # gl.QUADS, # gl.PATCHES, # index for index in range(vertices.shape[0]) # 5, 2, 0, # 5, 8, 6, # Generate some colors for the points # Interleave vertex data for position and color #version 150 in vec2 position; in vec3 color; out vec3 v_color; void main () { gl_Position = vec4(position, 0.0, 1.0); v_color = color; } #version 150 in vec3 v_color; out vec4 frag_colour; void main () { frag_colour = vec4(v_color, 1.0); frag_colour = vec4(0.2, 1.0, 0.2, 1.0); } # ###################################################################### # Helper functions Handles keyboard event # Quit # Update draw mode (points, lines, triangles, quads, etc.) # Update fill mode (wireframe, solid, points) # Randomize colors Compiles and checks output Compiles shaders, links to a program and checks output # Cleanup shaders # ###################################################################### # Setup OpenGL Context # Build pipeline # Bind attributes # ###################################################################### # Setup VBO and VAO # ###################################################################### # Render # Standard Loop Event handling # ###################################################################### # Cleanup
2.458093
2
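The render loop above derives its glVertexAttribPointer stride and offsets from a numpy structured dtype; that interleaving trick stands on its own. A minimal sketch of just that calculation, using only numpy and ctypes (the array shapes here are illustrative, not taken from the record):

import ctypes
import numpy as np

# Two float32 position components and three float32 color channels per
# vertex, stored interleaved: [x, y, r, g, b, x, y, r, g, b, ...]
data = np.zeros(4, dtype=[('position', np.float32, 2),
                          ('color', np.float32, 3)])

stride = data.strides[0]          # bytes from one vertex to the next: 2*4 + 3*4 = 20
pos_offset = ctypes.c_void_p(0)   # 'position' starts at byte 0 of each vertex
# 'color' starts right after the position field (2 floats = 8 bytes)
col_offset = ctypes.c_void_p(data.dtype['position'].itemsize)

assert stride == 20
assert data.dtype['position'].itemsize == 8

These are exactly the stride and offset expressions the record passes to glVertexAttribPointer, which is why a single buffer upload of data.nbytes covers both attributes.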
mysql2/mysql_settings.py
arfu2016/DuReader
0
6621689
""" @Project : DuReader @Module : mysql_settings.py @Author : Deco [<EMAIL>] @Created : 5/14/18 1:42 PM @Desc : """ import logging import os import sys import pprint import pickle from .mysql_connect import Mysql from . import settings host = settings.MYSQL.get("host") port = settings.MYSQL.get("port") user = settings.MYSQL.get("user") passwd = settings.MYSQL.get("passwd") robot_user_interact_log = {"host": host, "port": port, "user": user, "passwd": <PASSWORD>, "db": "db_robot" } def set_logger(): # for logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) program = os.path.basename(sys.argv[0]) logger = logging.getLogger(program) return logger def test(): logger = set_logger() interact_log = Mysql(**robot_user_interact_log) log_number = interact_log.fetchall( "select count(*) from robot_user_interact_log") logger.info('There are {} records'.format(log_number[0][0])) log_columns = interact_log.fetchall( "describe robot_user_interact_log") column_names = [log_column[0] for log_column in log_columns] log_example = interact_log.fetchone( "select * from robot_user_interact_log") # print(log_example) pprint.pprint(list(zip(column_names, log_example))) log_templates = interact_log.fetchall( "select template from robot_user_interact_log") log_tpl_yes = [tpl[0] for tpl in log_templates if tpl[0]] log_tpl_unique = list(set(log_tpl_yes)) logger.info('There are {} valid templates'.format(len(log_tpl_yes))) logger.info('There are {} unique templates'.format(len(log_tpl_unique))) pprint.pprint(log_tpl_unique[0:10]) def save_data(): logger = set_logger() interact_log = Mysql(**robot_user_interact_log) log_data = interact_log.fetchall( "select template, intent_name from robot_user_interact_log") # print(log_data[0]) log_data_yes = [(tpl, intent) for tpl, intent in log_data if tpl] log_data_unique = list(set(log_data_yes)) logger.info( 'There are {} unique templates and intents'.format( len(log_data_unique))) file_dir = os.path.dirname(os.path.abspath(__file__)) file_name = os.path.join(file_dir, 'data/templates_intents.pkl') with open(file_name, 'wb') as f: pickle.dump(log_data_unique, f)
""" @Project : DuReader @Module : mysql_settings.py @Author : Deco [<EMAIL>] @Created : 5/14/18 1:42 PM @Desc : """ import logging import os import sys import pprint import pickle from .mysql_connect import Mysql from . import settings host = settings.MYSQL.get("host") port = settings.MYSQL.get("port") user = settings.MYSQL.get("user") passwd = settings.MYSQL.get("passwd") robot_user_interact_log = {"host": host, "port": port, "user": user, "passwd": <PASSWORD>, "db": "db_robot" } def set_logger(): # for logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) program = os.path.basename(sys.argv[0]) logger = logging.getLogger(program) return logger def test(): logger = set_logger() interact_log = Mysql(**robot_user_interact_log) log_number = interact_log.fetchall( "select count(*) from robot_user_interact_log") logger.info('There are {} records'.format(log_number[0][0])) log_columns = interact_log.fetchall( "describe robot_user_interact_log") column_names = [log_column[0] for log_column in log_columns] log_example = interact_log.fetchone( "select * from robot_user_interact_log") # print(log_example) pprint.pprint(list(zip(column_names, log_example))) log_templates = interact_log.fetchall( "select template from robot_user_interact_log") log_tpl_yes = [tpl[0] for tpl in log_templates if tpl[0]] log_tpl_unique = list(set(log_tpl_yes)) logger.info('There are {} valid templates'.format(len(log_tpl_yes))) logger.info('There are {} unique templates'.format(len(log_tpl_unique))) pprint.pprint(log_tpl_unique[0:10]) def save_data(): logger = set_logger() interact_log = Mysql(**robot_user_interact_log) log_data = interact_log.fetchall( "select template, intent_name from robot_user_interact_log") # print(log_data[0]) log_data_yes = [(tpl, intent) for tpl, intent in log_data if tpl] log_data_unique = list(set(log_data_yes)) logger.info( 'There are {} unique templates and intents'.format( len(log_data_unique))) file_dir = os.path.dirname(os.path.abspath(__file__)) file_name = os.path.join(file_dir, 'data/templates_intents.pkl') with open(file_name, 'wb') as f: pickle.dump(log_data_unique, f)
en
0.288759
@Project : DuReader @Module : mysql_settings.py @Author : Deco [<EMAIL>] @Created : 5/14/18 1:42 PM @Desc : # for logging # print(log_example) # print(log_data[0])
2.635601
3
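save_data() above serializes the deduplicated (template, intent_name) pairs with pickle. A minimal sketch of reading that file back, assuming only the layout save_data() produces (a pickled list of 2-tuples at the path the record constructs):

import os
import pickle

file_dir = os.path.dirname(os.path.abspath(__file__))
file_name = os.path.join(file_dir, 'data/templates_intents.pkl')
with open(file_name, 'rb') as f:
    # list of unique (template, intent_name) tuples, in arbitrary order
    templates_intents = pickle.load(f)
print(len(templates_intents))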
hw/_qa/alexander_sidorov/vadim_maletski/hw06/level_04_test.py
alexander-sidorov/qap-05
9
6621690
from datetime import date
from typing import Any

import pytest

from hw.vadim_maletski.func6 import level_04

from .common import azaza
from .common import validate_data
from .common import validate_errors

happy_data = [
    pytest.param(*params, id=name)
    for name, params in {
        "date": [{object: date(1900, 1, 1), type: date(1800, 1, 1)}, type],
        "xdate": [
            {
                object: azaza(1900, 1, 1, bs=[date]),
                type: azaza(1800, 1, 1, bs=[date]),
            },
            type,
        ],
    }.items()
]


@pytest.mark.parametrize("arg,expected", happy_data)
def test_task_04_happy(arg: Any, expected: Any) -> None:
    outcome = level_04(arg)
    validate_data(outcome)
    data = outcome["data"]
    assert data == expected


unhappy_data = [
    pytest.param(arg, id=name)
    for name, arg in {
        "invalid-type": azaza(),
        "empty": {},
    }.items()
]


@pytest.mark.parametrize("arg", unhappy_data)
def test_task_04_unhappy(arg: Any) -> None:
    outcome = level_04(arg)
    validate_errors(outcome)
from datetime import date
from typing import Any

import pytest

from hw.vadim_maletski.func6 import level_04

from .common import azaza
from .common import validate_data
from .common import validate_errors

happy_data = [
    pytest.param(*params, id=name)
    for name, params in {
        "date": [{object: date(1900, 1, 1), type: date(1800, 1, 1)}, type],
        "xdate": [
            {
                object: azaza(1900, 1, 1, bs=[date]),
                type: azaza(1800, 1, 1, bs=[date]),
            },
            type,
        ],
    }.items()
]


@pytest.mark.parametrize("arg,expected", happy_data)
def test_task_04_happy(arg: Any, expected: Any) -> None:
    outcome = level_04(arg)
    validate_data(outcome)
    data = outcome["data"]
    assert data == expected


unhappy_data = [
    pytest.param(arg, id=name)
    for name, arg in {
        "invalid-type": azaza(),
        "empty": {},
    }.items()
]


@pytest.mark.parametrize("arg", unhappy_data)
def test_task_04_unhappy(arg: Any) -> None:
    outcome = level_04(arg)
    validate_errors(outcome)
none
1
2.391653
2
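The dict-comprehension-to-pytest.param pattern above gives each parametrized case a readable id instead of an auto-generated one. A self-contained sketch of the same pattern with plain values (no project imports assumed):

import pytest

cases = {
    "small": (1, 2),
    "zero": (0, 1),
}

params = [pytest.param(*args, id=name) for name, args in cases.items()]


@pytest.mark.parametrize("arg,expected", params)
def test_increment(arg, expected):
    # collected as test_increment[small] and test_increment[zero]
    assert arg + 1 == expected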
setup.py
UniBO-PRISMLab/micro-wot-servient
1
6621691
<reponame>UniBO-PRISMLab/micro-wot-servient
from setuptools import setup, find_packages

setup(
    name='embeddedWoTServient',
    url='https://github.com/UniBO-PRISMLab/micro-wot-servient',
    author='<NAME>',
    version='0.1',
    license='MIT',
    description='WoT module for building TDs and executable scripts for embedded systems',
    long_description=open('README.md').read(),
    py_modules=['embeddedWoTServient'],
    # 'click' duplicated 'Click' (pip normalizes names case-insensitively), and
    # 'serial' is a different PyPI project that clashes with the 'serial'
    # module already provided by 'pyserial', so both are dropped here
    install_requires=['Click', 'jinja2', 'pyyaml', 'jsonschema', 'pyserial', 'esptool'],
    entry_points='''
        [console_scripts]
        embeddedWoTServient=embeddedWoTServient:cli
    '''
)
from setuptools import setup, find_packages

setup(
    name='embeddedWoTServient',
    url='https://github.com/UniBO-PRISMLab/micro-wot-servient',
    author='<NAME>',
    version='0.1',
    license='MIT',
    description='WoT module for building TDs and executable scripts for embedded systems',
    long_description=open('README.md').read(),
    py_modules=['embeddedWoTServient'],
    # 'click' duplicated 'Click' (pip normalizes names case-insensitively), and
    # 'serial' is a different PyPI project that clashes with the 'serial'
    # module already provided by 'pyserial', so both are dropped here
    install_requires=['Click', 'jinja2', 'pyyaml', 'jsonschema', 'pyserial', 'esptool'],
    entry_points='''
        [console_scripts]
        embeddedWoTServient=embeddedWoTServient:cli
    '''
)
el
0.160844
[console_scripts] embeddedWoTServient=embeddedWoTServient:cli
1.267502
1
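The entry_points block above expects a callable named cli in an embeddedWoTServient module. A minimal sketch of what such a module could expose, using Click (already a declared dependency); the subcommand and option names are illustrative, not taken from the project:

import click


@click.group()
def cli():
    """Entry point wired up by the [console_scripts] section."""


@cli.command()
@click.option('--port', default='/dev/ttyUSB0', help='Serial port of the board.')
def flash(port):
    """Illustrative subcommand."""
    click.echo(f'Flashing via {port}')


if __name__ == '__main__':
    cli()

After installation, setuptools generates an embeddedWoTServient executable that simply invokes cli().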
leetcode/28_strStr.py
thiakx/leetcode
0
6621692
<filename>leetcode/28_strStr.py<gh_stars>0
import unittest


class Solution:
    def strStr(self, haystack: str, needle: str) -> int:
        haystack_set = set(haystack)
        needle_len = len(needle)
        haystack_len = len(haystack)
        if needle == '':
            return 0
        elif haystack == '' or haystack_len < needle_len:
            return -1
        else:
            if needle[0] not in haystack_set:
                return -1
            else:
                # use a moving window
                for i in range(haystack_len - needle_len + 1):
                    if haystack[i:i + needle_len] == needle:
                        return i
                    else:
                        pass
                return -1


class Test(unittest.TestCase):
    def test_short(self):
        solution = Solution()
        result = solution.strStr('hello', 'll')
        expected = 2
        self.assertEqual(result, expected)


if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
<filename>leetcode/28_strStr.py<gh_stars>0
import unittest


class Solution:
    def strStr(self, haystack: str, needle: str) -> int:
        haystack_set = set(haystack)
        needle_len = len(needle)
        haystack_len = len(haystack)
        if needle == '':
            return 0
        elif haystack == '' or haystack_len < needle_len:
            return -1
        else:
            if needle[0] not in haystack_set:
                return -1
            else:
                # use a moving window
                for i in range(haystack_len - needle_len + 1):
                    if haystack[i:i + needle_len] == needle:
                        return i
                    else:
                        pass
                return -1


class Test(unittest.TestCase):
    def test_short(self):
        solution = Solution()
        result = solution.strStr('hello', 'll')
        expected = 2
        self.assertEqual(result, expected)


if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
en
0.688134
# use a moving window
3.538848
4
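The sliding-window scan above is O((n - m + 1) * m) in the worst case and matches the semantics of Python's built-in str.find, which can serve as a cross-check for the record's test class:

def str_str_builtin(haystack: str, needle: str) -> int:
    # str.find already returns 0 for an empty needle and -1 when the
    # needle is absent, mirroring the branches in Solution.strStr
    return haystack.find(needle)

assert str_str_builtin('hello', 'll') == 2
assert str_str_builtin('aaaaa', 'bba') == -1
assert str_str_builtin('abc', '') == 0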
pinpayments/constants.py
MattHealy/pinpayments-python
0
6621693
<reponame>MattHealy/pinpayments-python
API_VERSION = '1'

BASE_URL = 'https://api.pin.net.au/' + API_VERSION
BASE_URL_TEST = 'https://test-api.pin.net.au/' + API_VERSION
API_VERSION = '1'

BASE_URL = 'https://api.pin.net.au/' + API_VERSION
BASE_URL_TEST = 'https://test-api.pin.net.au/' + API_VERSION
none
1
1.125178
1
deploy/gunicorn.py
udbhav/eurorack-planner
1
6621694
import os
import sys

sys.path.append("/home/udbhav/www/django/eurorack-planner/app/source/")

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")

# This application object is used by the development server
# as well as any WSGI server configured to use this file.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
import os
import sys

sys.path.append("/home/udbhav/www/django/eurorack-planner/app/source/")

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")

# This application object is used by the development server
# as well as any WSGI server configured to use this file.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
en
0.970782
# This application object is used by the development server # as well as any WSGI server configured to use this file.
1.402886
1
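The module above only constructs the WSGI application object; a server still has to consume it. A sketch of serving it locally with the standard library's wsgiref, purely as a smoke-test stand-in for Gunicorn, assuming deploy is importable as a package and the Django settings module resolves:

from wsgiref.simple_server import make_server

from deploy.gunicorn import application  # the object built above

with make_server('127.0.0.1', 8000, application) as httpd:
    # serves until interrupted; fine for a local check, not for production
    httpd.serve_forever()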
disbot.py
Smirf123/Disbot-Python-Bot
0
6621695
import asyncio

import discord
import requests
from requests.structures import CaseInsensitiveDict
from discord.ext import commands

client = commands.AutoShardedBot(command_prefix="!")
TOKEN = "INSERTTOKENHERE"
clientid = insertclientidherenoquotes
disbottoken = "<PASSWORD>"


async def disbot():
    while True:
        url = f"https://disbot.top/api/v2/bot/{clientid}/update"
        headers = CaseInsensitiveDict()
        # Header values must be plain strings, not set literals
        headers["Authorization"] = disbottoken
        headers["Content-Type"] = "application/x-www-form-urlencoded"
        data = f"serverCount={len(client.guilds)}"
        resp = requests.post(url, headers=headers, data=data)
        print(f"{resp} We did it")
        # Sleep between updates instead of hammering the stats endpoint
        await asyncio.sleep(1800)


@client.event
async def on_ready():
    print(f"{client.user} has connected to Discord and is sending to Disbot.top")
    client.loop.create_task(disbot())


client.run(TOKEN)
import asyncio

import discord
import requests
from requests.structures import CaseInsensitiveDict
from discord.ext import commands

client = commands.AutoShardedBot(command_prefix="!")
TOKEN = "INSERTTOKENHERE"
clientid = insertclientidherenoquotes
disbottoken = "<PASSWORD>"


async def disbot():
    while True:
        url = f"https://disbot.top/api/v2/bot/{clientid}/update"
        headers = CaseInsensitiveDict()
        # Header values must be plain strings, not set literals
        headers["Authorization"] = disbottoken
        headers["Content-Type"] = "application/x-www-form-urlencoded"
        data = f"serverCount={len(client.guilds)}"
        resp = requests.post(url, headers=headers, data=data)
        print(f"{resp} We did it")
        # Sleep between updates instead of hammering the stats endpoint
        await asyncio.sleep(1800)


@client.event
async def on_ready():
    print(f"{client.user} has connected to Discord and is sending to Disbot.top")
    client.loop.create_task(disbot())


client.run(TOKEN)
none
1
2.74931
3
apps/collection/tests/test_collection_relation.py
magocod/django_repository
1
6621696
<reponame>magocod/django_repository<gh_stars>1-10
# standard library
# import json

# local Django
from apps.category.models import Category
from apps.collection.models import Collection
from apps.collection.serializers import CollectionHeavySerializer
from apps.tag.models import Tag
from apps.tests.fixtures import RepositoryTestCase


class CollectionRelationTest(RepositoryTestCase):
    """
    ...
    """

    serializer = CollectionHeavySerializer

    def test_update_the_collection_that_does_not_exist(self):
        relationdata = {
            "collection_id": 1,
            "categories": [1],
            "tags": [1],
        }
        response_add = self.admin_client.put(
            f"/api/collection/relations/{10000}/", relationdata
        )
        response_remove = self.admin_client.delete(
            f"/api/collection/relations/{10000}/", relationdata
        )
        self.assertEqual(response_add.status_code, 404)
        self.assertEqual(response_remove.status_code, 404)

    def test_collection_add_relations(self):
        # oldvalues = self.serializer(
        #     Collection.objects.get(id=1)
        # )
        relationdata = {
            "collection_id": 1,
            "categories": [1],
            "tags": [1],
        }
        response = self.admin_client.put(
            f"/api/collection/relations/{1}/", relationdata
        )
        # print(response.data)
        newvalues = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 200)
        # self.assertNotEqual(oldvalues.data, response.data)
        self.assertEqual(newvalues.data, response.data)

    def test_add_elements_that_do_not_exist_to_the_relationship(self):
        oldvalues = self.serializer(Collection.objects.get(id=1))
        relationdata = {
            "collection_id": 1,
            "categories": [1, 10000],
            "tags": [1, 20000],
        }
        response = self.admin_client.put(
            f"/api/collection/relations/{1}/", relationdata
        )
        # print(response.data)
        values = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(values.data, oldvalues.data)

    def test_error_parameters_collection_add_relations(self):
        oldvalues = self.serializer(Collection.objects.get(id=1))
        relationdata = {
            "col_id": 1,
            "cts": [1],
            "tgs": [1],
        }
        response = self.admin_client.put(
            f"/api/collection/relations/{1}/", relationdata
        )
        newvalues = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(newvalues.data, oldvalues.data)

    def test_collection_remove_relations(self):
        """
        ...
        """
        collection = Collection.objects.get(id=1)
        for category_id in [1, 2]:
            category = Category.objects.get(id=category_id)
            collection.categories.add(category)
        for tag_id in [1, 2]:
            tag = Tag.objects.get(id=tag_id)
            collection.tags.add(tag)
        # oldvalues = self.serializer(
        #     Collection.objects.get(id=1)
        # )
        relationdata = {
            "collection_id": 1,
            "categories": [1],
            "tags": [1],
        }
        response = self.admin_client.delete(
            f"/api/collection/relations/{1}/", relationdata
        )
        # print(response.data)
        newvalues = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 200)
        # self.assertNotEqual(oldvalues.data, response.data)
        self.assertEqual(newvalues.data, response.data)

    def test_error_parameters_collection_remove_relations(self):
        oldvalues = self.serializer(Collection.objects.get(id=1))
        relationdata = {
            "col_id": 1,
            "cts": [1],
            "tgs": [1],
        }
        response = self.admin_client.delete(
            f"/api/collection/relations/{1}/", relationdata
        )
        newvalues = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(newvalues.data, oldvalues.data)

    def test_remove_elements_that_do_not_exist_to_the_relationship(self):
        oldvalues = self.serializer(Collection.objects.get(id=1))
        relationdata = {
            "collection_id": 1,
            "categories": [1, 10000],
            "tags": [1, 20000],
        }
        response = self.admin_client.delete(
            f"/api/collection/relations/{1}/", relationdata
        )
        # print(response.data)
        values = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(values.data, oldvalues.data)
# standard library
# import json

# local Django
from apps.category.models import Category
from apps.collection.models import Collection
from apps.collection.serializers import CollectionHeavySerializer
from apps.tag.models import Tag
from apps.tests.fixtures import RepositoryTestCase


class CollectionRelationTest(RepositoryTestCase):
    """
    ...
    """

    serializer = CollectionHeavySerializer

    def test_update_the_collection_that_does_not_exist(self):
        relationdata = {
            "collection_id": 1,
            "categories": [1],
            "tags": [1],
        }
        response_add = self.admin_client.put(
            f"/api/collection/relations/{10000}/", relationdata
        )
        response_remove = self.admin_client.delete(
            f"/api/collection/relations/{10000}/", relationdata
        )
        self.assertEqual(response_add.status_code, 404)
        self.assertEqual(response_remove.status_code, 404)

    def test_collection_add_relations(self):
        # oldvalues = self.serializer(
        #     Collection.objects.get(id=1)
        # )
        relationdata = {
            "collection_id": 1,
            "categories": [1],
            "tags": [1],
        }
        response = self.admin_client.put(
            f"/api/collection/relations/{1}/", relationdata
        )
        # print(response.data)
        newvalues = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 200)
        # self.assertNotEqual(oldvalues.data, response.data)
        self.assertEqual(newvalues.data, response.data)

    def test_add_elements_that_do_not_exist_to_the_relationship(self):
        oldvalues = self.serializer(Collection.objects.get(id=1))
        relationdata = {
            "collection_id": 1,
            "categories": [1, 10000],
            "tags": [1, 20000],
        }
        response = self.admin_client.put(
            f"/api/collection/relations/{1}/", relationdata
        )
        # print(response.data)
        values = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(values.data, oldvalues.data)

    def test_error_parameters_collection_add_relations(self):
        oldvalues = self.serializer(Collection.objects.get(id=1))
        relationdata = {
            "col_id": 1,
            "cts": [1],
            "tgs": [1],
        }
        response = self.admin_client.put(
            f"/api/collection/relations/{1}/", relationdata
        )
        newvalues = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(newvalues.data, oldvalues.data)

    def test_collection_remove_relations(self):
        """
        ...
        """
        collection = Collection.objects.get(id=1)
        for category_id in [1, 2]:
            category = Category.objects.get(id=category_id)
            collection.categories.add(category)
        for tag_id in [1, 2]:
            tag = Tag.objects.get(id=tag_id)
            collection.tags.add(tag)
        # oldvalues = self.serializer(
        #     Collection.objects.get(id=1)
        # )
        relationdata = {
            "collection_id": 1,
            "categories": [1],
            "tags": [1],
        }
        response = self.admin_client.delete(
            f"/api/collection/relations/{1}/", relationdata
        )
        # print(response.data)
        newvalues = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 200)
        # self.assertNotEqual(oldvalues.data, response.data)
        self.assertEqual(newvalues.data, response.data)

    def test_error_parameters_collection_remove_relations(self):
        oldvalues = self.serializer(Collection.objects.get(id=1))
        relationdata = {
            "col_id": 1,
            "cts": [1],
            "tgs": [1],
        }
        response = self.admin_client.delete(
            f"/api/collection/relations/{1}/", relationdata
        )
        newvalues = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(newvalues.data, oldvalues.data)

    def test_remove_elements_that_do_not_exist_to_the_relationship(self):
        oldvalues = self.serializer(Collection.objects.get(id=1))
        relationdata = {
            "collection_id": 1,
            "categories": [1, 10000],
            "tags": [1, 20000],
        }
        response = self.admin_client.delete(
            f"/api/collection/relations/{1}/", relationdata
        )
        # print(response.data)
        values = self.serializer(Collection.objects.get(id=1))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(values.data, oldvalues.data)
en
0.179312
# standard library # import json # local Django ... # oldvalues = self.serializer( # Collection.objects.get(id=1) # ) # print(response.data) # self.assertNotEqual(oldvalues.data, response.data) # print(response.data) ... # oldvalues = self.serializer( # Collection.objects.get(id=1) # ) # print(response.data) # self.assertNotEqual(oldvalues.data, response.data) # print(response.data)
2.261625
2
python/src/main/python/pyalink/alink/tests/pipeline/test_special_models.py
wenwei8268/Alink
0
6621697
<gh_stars>0
import unittest

import numpy as np
import pandas as pd

from pyalink.alink import *


class TestSpecialModels(unittest.TestCase):
    def test_wrap_model(self):
        source = CsvSourceBatchOp() \
            .setSchemaStr(
                "sepal_length double, sepal_width double, petal_length double, petal_width double, category string") \
            .setFilePath("https://alink-test-data.oss-cn-hangzhou.aliyuncs.com/iris.csv")

        stage1 = QuantileDiscretizer().setNumBuckets(2).setSelectedCols(["sepal_length"])
        model1 = stage1.fit(source)
        output1 = model1.transform(source)

        print(model1)
        print(dir(model1))
        pass

    def test_glr(self):
        data = np.array([
            [1, 5, 118, 69, 1.0, 2.0],
            [2, 10, 58, 35, 1.0, 2.0],
            [3, 15, 42, 26, 1.0, 2.0],
            [4, 20, 35, 21, 1.0, 2.0],
            [5, 30, 27, 18, 1.0, 2.0],
            [6, 40, 25, 16, 1.0, 2.0],
            [7, 60, 21, 13, 1.0, 2.0],
            [8, 80, 19, 12, 1.0, 2.0],
            [9, 100, 18, 12, 1.0, 2.0]
        ])
        df = pd.DataFrame({
            "id": data[:, 0],
            "u": data[:, 1],
            "lot1": data[:, 2],
            "lot2": data[:, 3],
            "offset": data[:, 4],
            "weights": data[:, 5],
        }).astype({
            "id": np.int64,
            "u": np.int64,
            "lot1": np.int64,
            "lot2": np.int64,
            "offset": np.float64,
            "weights": np.float64,
        })
        print(df)
        print(df.dtypes)
        source = dataframeToOperator(
            df,
            schemaStr="id int, u double, lot1 double, lot2 double, offset double, weights double",
            op_type="batch")

        featureColNames = ["lot1", "lot2"]
        labelColName = "u"

        glm = GeneralizedLinearRegression() \
            .setFamily("gamma") \
            .setLink("Log") \
            .setRegParam(0.3) \
            .setFitIntercept(False) \
            .setMaxIter(10) \
            .setOffsetCol("offset") \
            .setWeightCol("weights") \
            .setFitIntercept(False) \
            .setFeatureCols(featureColNames) \
            .setLabelCol(labelColName) \
            .setPredictionCol("pred")

        model = glm.fit(source)
        print(type(model))
        print(dir(model))
        print(model.transform(source).collectToDataframe())
import unittest

import numpy as np
import pandas as pd

from pyalink.alink import *


class TestSpecialModels(unittest.TestCase):
    def test_wrap_model(self):
        source = CsvSourceBatchOp() \
            .setSchemaStr(
                "sepal_length double, sepal_width double, petal_length double, petal_width double, category string") \
            .setFilePath("https://alink-test-data.oss-cn-hangzhou.aliyuncs.com/iris.csv")

        stage1 = QuantileDiscretizer().setNumBuckets(2).setSelectedCols(["sepal_length"])
        model1 = stage1.fit(source)
        output1 = model1.transform(source)

        print(model1)
        print(dir(model1))
        pass

    def test_glr(self):
        data = np.array([
            [1, 5, 118, 69, 1.0, 2.0],
            [2, 10, 58, 35, 1.0, 2.0],
            [3, 15, 42, 26, 1.0, 2.0],
            [4, 20, 35, 21, 1.0, 2.0],
            [5, 30, 27, 18, 1.0, 2.0],
            [6, 40, 25, 16, 1.0, 2.0],
            [7, 60, 21, 13, 1.0, 2.0],
            [8, 80, 19, 12, 1.0, 2.0],
            [9, 100, 18, 12, 1.0, 2.0]
        ])
        df = pd.DataFrame({
            "id": data[:, 0],
            "u": data[:, 1],
            "lot1": data[:, 2],
            "lot2": data[:, 3],
            "offset": data[:, 4],
            "weights": data[:, 5],
        }).astype({
            "id": np.int64,
            "u": np.int64,
            "lot1": np.int64,
            "lot2": np.int64,
            "offset": np.float64,
            "weights": np.float64,
        })
        print(df)
        print(df.dtypes)
        source = dataframeToOperator(
            df,
            schemaStr="id int, u double, lot1 double, lot2 double, offset double, weights double",
            op_type="batch")

        featureColNames = ["lot1", "lot2"]
        labelColName = "u"

        glm = GeneralizedLinearRegression() \
            .setFamily("gamma") \
            .setLink("Log") \
            .setRegParam(0.3) \
            .setFitIntercept(False) \
            .setMaxIter(10) \
            .setOffsetCol("offset") \
            .setWeightCol("weights") \
            .setFitIntercept(False) \
            .setFeatureCols(featureColNames) \
            .setLabelCol(labelColName) \
            .setPredictionCol("pred")

        model = glm.fit(source)
        print(type(model))
        print(dir(model))
        print(model.transform(source).collectToDataframe())
none
1
2.464846
2
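The gamma/log GLM fitted above can be sanity-checked outside Alink. A sketch with statsmodels (an assumption; the record uses Alink only) fitting the same two-feature gamma regression with a log link, offset, and weights. Note Alink's setRegParam(0.3) adds regularization that this plain GLM fit omits, so the coefficients will differ:

import numpy as np
import statsmodels.api as sm

# the same nine observations as the record: u is the response, lot1/lot2 the features
u = np.array([5, 10, 15, 20, 30, 40, 60, 80, 100], dtype=float)
X = np.array([[118, 69], [58, 35], [42, 26], [35, 21], [27, 18],
              [25, 16], [21, 13], [19, 12], [18, 12]], dtype=float)

model = sm.GLM(
    u, X,  # no constant column, matching setFitIntercept(False)
    family=sm.families.Gamma(link=sm.families.links.Log()),
    offset=np.ones(9),             # the record's constant offset column
    freq_weights=np.full(9, 2.0),  # the record's constant weight column
)
print(model.fit().params)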
apps/scrapers/po10.py
tractiming/trac-gae
3
6621698
<gh_stars>1-10
import requests
from bs4 import BeautifulSoup
from datetime import datetime

from .base import Scraper
from .exceptions import NoSuchAthlete, TooManyAthletes

PO10_BASE = 'http://www.thepowerof10.info/'
PO10_SEARCH_URL = PO10_BASE + 'athletes/athleteslookup.aspx'

YEAR_ROW = 1
TITLES_ROW = 2
PERF_ROW = 3


def perf_row_type(tr_elem):
    ''' tests what row type a tr elem from perf table is '''
    link = tr_elem.find_all('td')[0].a
    if link is not None:
        return YEAR_ROW
    if tr_elem.find_all('td')[0].text == 'Event':
        return TITLES_ROW
    return PERF_ROW


def get_name_from_profile(content):
    ''' given an html page returns the athlete name '''
    profile_soup = BeautifulSoup(content, 'html.parser')
    name_elem = profile_soup.find_all(class_="athleteprofilesubheader")[0].h2
    return name_elem.string.strip().encode('utf-8')


def get_club_from_profile(content):
    profile_soup = BeautifulSoup(content, 'html.parser')
    details = profile_soup.find_all(id='cphBody_pnlAthleteDetails')[0]
    return details.find_all('table')[1].find_all('table')[0].find_all('td')[1].text


def get_perf_table_from_profile(content):
    ''' given an html profile page returns the bs4 performance table '''
    soup = BeautifulSoup(content, 'html.parser')
    main_perfs_table = soup.find(id='cphBody_pnlPerformances')
    perfs_table = main_perfs_table.find_all('table')[1]
    return perfs_table


def get_results_from_row(row):
    cells = row.find_all('td')
    return {'event': cells[0].text,
            'position': cells[5].text,
            'meet': cells[10].text,
            'meet_id': cells[9].a.get('href').split('meetingid=')[1].split('&')[0],
            'date': datetime.strptime(cells[11].text, '%d %b %y'),
            'perf': cells[1].text}


class Po10Scraper(Scraper):
    ''' Scrapes the Power of 10 websites (UK athletes) '''

    def get_athlete_details_from_url(self, url):
        ''' gets the athlete name from power of 10 '''
        r = requests.get(url)
        if r.status_code != 200:
            raise NoSuchAthlete("Unable to find athlete.")
        try:
            name = get_name_from_profile(r.content)
        except IndexError:
            # No name = no athlete
            raise NoSuchAthlete("Unable to find athlete.")
        return {'name': name, 'url': url}

    def get_athlete_results_from_url(self, url, limit=None):
        ''' returns performances from the athlete's power of 10 page. '''
        # Use viewby=date parameter to order results by date
        r = requests.get(url, params={'viewby': 'date'})
        # Here there is no validation that id is valid. We assume id is legit
        perfs_table = get_perf_table_from_profile(r.content)
        perfs = []
        for row in perfs_table.find_all('tr'):
            if perf_row_type(row) != PERF_ROW:
                continue
            cells = row.find_all('td')
            perfs.append(get_results_from_row(row))
            if limit == len(perfs):
                break
        return perfs

    def search(self, firstname='', surname='', team=''):
        r = requests.get(PO10_SEARCH_URL, params={'firstname': firstname,
                                                  'surname': surname,
                                                  'club': team})
        if r.history:
            # We have been redirected to an athlete profile page
            name = get_name_from_profile(r.content)
            url = r.url
            club = get_club_from_profile(r.content)
            return [{'name': name, 'team': club, 'url': url}]

        results = []
        soup = BeautifulSoup(r.content, 'html.parser')

        # Check errors - we may have either too many athletes found or
        # not a detailed enough search (fewer than 3 letters)
        request_error = soup.find_all(id='cphBody_lblRequestErrorMessage')
        results_error = soup.find_all(id='cphBody_lblResultsErrorMessage')
        if request_error and 'Please enter at least 3 characters' in request_error[0].text:
            raise ValueError('Search terms not detailed enough.')
        elif results_error and 'Too many athletes found.' in results_error[0].text:
            raise TooManyAthletes('Found too many athletes for search.')

        search_results = soup.find(id='cphBody_pnlResults')
        rows = search_results.find_all('tr')
        for row in rows[1:-1]:  # First row is header, last is ignored
            cells = row.find_all('td')
            name = cells[0].text + ' ' + cells[1].text
            club = cells[7].text
            url = PO10_BASE + 'athletes/' + cells[8].a.get('href')
            results.append({'name': name, 'team': club, 'url': url})
        return results
import requests
from bs4 import BeautifulSoup
from datetime import datetime

from .base import Scraper
from .exceptions import NoSuchAthlete, TooManyAthletes

PO10_BASE = 'http://www.thepowerof10.info/'
PO10_SEARCH_URL = PO10_BASE + 'athletes/athleteslookup.aspx'

YEAR_ROW = 1
TITLES_ROW = 2
PERF_ROW = 3


def perf_row_type(tr_elem):
    ''' tests what row type a tr elem from perf table is '''
    link = tr_elem.find_all('td')[0].a
    if link is not None:
        return YEAR_ROW
    if tr_elem.find_all('td')[0].text == 'Event':
        return TITLES_ROW
    return PERF_ROW


def get_name_from_profile(content):
    ''' given an html page returns the athlete name '''
    profile_soup = BeautifulSoup(content, 'html.parser')
    name_elem = profile_soup.find_all(class_="athleteprofilesubheader")[0].h2
    return name_elem.string.strip().encode('utf-8')


def get_club_from_profile(content):
    profile_soup = BeautifulSoup(content, 'html.parser')
    details = profile_soup.find_all(id='cphBody_pnlAthleteDetails')[0]
    return details.find_all('table')[1].find_all('table')[0].find_all('td')[1].text


def get_perf_table_from_profile(content):
    ''' given an html profile page returns the bs4 performance table '''
    soup = BeautifulSoup(content, 'html.parser')
    main_perfs_table = soup.find(id='cphBody_pnlPerformances')
    perfs_table = main_perfs_table.find_all('table')[1]
    return perfs_table


def get_results_from_row(row):
    cells = row.find_all('td')
    return {'event': cells[0].text,
            'position': cells[5].text,
            'meet': cells[10].text,
            'meet_id': cells[9].a.get('href').split('meetingid=')[1].split('&')[0],
            'date': datetime.strptime(cells[11].text, '%d %b %y'),
            'perf': cells[1].text}


class Po10Scraper(Scraper):
    ''' Scrapes the Power of 10 websites (UK athletes) '''

    def get_athlete_details_from_url(self, url):
        ''' gets the athlete name from power of 10 '''
        r = requests.get(url)
        if r.status_code != 200:
            raise NoSuchAthlete("Unable to find athlete.")
        try:
            name = get_name_from_profile(r.content)
        except IndexError:
            # No name = no athlete
            raise NoSuchAthlete("Unable to find athlete.")
        return {'name': name, 'url': url}

    def get_athlete_results_from_url(self, url, limit=None):
        ''' returns performances from the athlete's power of 10 page. '''
        # Use viewby=date parameter to order results by date
        r = requests.get(url, params={'viewby': 'date'})
        # Here there is no validation that id is valid. We assume id is legit
        perfs_table = get_perf_table_from_profile(r.content)
        perfs = []
        for row in perfs_table.find_all('tr'):
            if perf_row_type(row) != PERF_ROW:
                continue
            cells = row.find_all('td')
            perfs.append(get_results_from_row(row))
            if limit == len(perfs):
                break
        return perfs

    def search(self, firstname='', surname='', team=''):
        r = requests.get(PO10_SEARCH_URL, params={'firstname': firstname,
                                                  'surname': surname,
                                                  'club': team})
        if r.history:
            # We have been redirected to an athlete profile page
            name = get_name_from_profile(r.content)
            url = r.url
            club = get_club_from_profile(r.content)
            return [{'name': name, 'team': club, 'url': url}]

        results = []
        soup = BeautifulSoup(r.content, 'html.parser')

        # Check errors - we may have either too many athletes found or
        # not a detailed enough search (fewer than 3 letters)
        request_error = soup.find_all(id='cphBody_lblRequestErrorMessage')
        results_error = soup.find_all(id='cphBody_lblResultsErrorMessage')
        if request_error and 'Please enter at least 3 characters' in request_error[0].text:
            raise ValueError('Search terms not detailed enough.')
        elif results_error and 'Too many athletes found.' in results_error[0].text:
            raise TooManyAthletes('Found too many athletes for search.')

        search_results = soup.find(id='cphBody_pnlResults')
        rows = search_results.find_all('tr')
        for row in rows[1:-1]:  # First row is header, last is ignored
            cells = row.find_all('td')
            name = cells[0].text + ' ' + cells[1].text
            club = cells[7].text
            url = PO10_BASE + 'athletes/' + cells[8].a.get('href')
            results.append({'name': name, 'team': club, 'url': url})
        return results
en
0.872321
tests what row type a tr elem from perf table is given an html page returns the athlete name given an html profile page returns the bs4 performance table Scrapes the Power of 10 websites (UK athletes) gets the athlete name from power of 10 # No name = no athlete returns performances from the athlete's power of 10 page. # Use viewby=date parameter to order results by date # Here there is no validation that id is valid. We assume id is legit # We have been redirected to an athlete profile page # Check errors - we may have either too many athletes found or # not a detailed enough search (fewer than 3 letters) # First row is header, last is ignored
3.022284
3
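search() above distinguishes a unique match (Power of 10 redirects straight to the profile page) from a results page via requests' Response.history, which records any intermediate 3xx responses. A tiny self-contained illustration of that API against any redirecting URL:

import requests

r = requests.get('http://github.com')  # redirects to https
if r.history:
    # r.history holds the intermediate 3xx responses; r.url is the final URL
    print([resp.status_code for resp in r.history], '->', r.url)
else:
    print('no redirect; served directly')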
6 kyu/IP Validation.py
mwk0408/codewars_solutions
6
6621699
<reponame>mwk0408/codewars_solutions
def is_valid_IP(strng):
    strng = strng.split(".")
    if len(strng) != 4:
        return False
    for i in strng:
        try:
            temp = int(i)
            if temp < 0:
                return False
            elif temp > 255:
                return False
            elif temp >= 0 and temp <= 9 and len(i) > 1:
                return False
            elif temp >= 10 and temp <= 99 and len(i) > 2:
                return False
            elif temp >= 100 and len(i) > 3:
                # e.g. "0100": the value fits 0-255 but carries a leading zero,
                # a case the original length checks missed
                return False
        except ValueError:
            return False
    return True
def is_valid_IP(strng):
    strng = strng.split(".")
    if len(strng) != 4:
        return False
    for i in strng:
        try:
            temp = int(i)
            if temp < 0:
                return False
            elif temp > 255:
                return False
            elif temp >= 0 and temp <= 9 and len(i) > 1:
                return False
            elif temp >= 10 and temp <= 99 and len(i) > 2:
                return False
            elif temp >= 100 and len(i) > 3:
                # e.g. "0100": the value fits 0-255 but carries a leading zero,
                # a case the original length checks missed
                return False
        except ValueError:
            return False
    return True
none
1
3.195564
3
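The hand-rolled checks above (four dot-separated fields, values 0-255, no leading zeros) mirror what the standard library enforces for IPv4 addresses, so ipaddress makes a handy cross-check. One caveat: ipaddress only rejects leading zeros on Python 3.9.5 and later, so the second assertion below assumes a recent interpreter:

from ipaddress import IPv4Address

def is_valid_ip_stdlib(strng: str) -> bool:
    try:
        IPv4Address(strng)
        return True
    except ValueError:
        return False

assert is_valid_ip_stdlib('192.168.0.1')
assert not is_valid_ip_stdlib('192.168.00.1')  # leading zero rejected on 3.9.5+
assert not is_valid_ip_stdlib('256.1.1.1')
assert not is_valid_ip_stdlib('1.2.3')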
src/mock/position.py
celiakwan/kafka-taxi-booker
1
6621700
<reponame>celiakwan/kafka-taxi-booker
from random import randint

THRESHOLD = 10000000


def _mock_coordinates(min, max):
    return randint(min * THRESHOLD, max * THRESHOLD) / THRESHOLD


def mock_latitude():
    return _mock_coordinates(20, 30)


def mock_longitude():
    return _mock_coordinates(50, 60)
from random import randint

THRESHOLD = 10000000


def _mock_coordinates(min, max):
    return randint(min * THRESHOLD, max * THRESHOLD) / THRESHOLD


def mock_latitude():
    return _mock_coordinates(20, 30)


def mock_longitude():
    return _mock_coordinates(50, 60)
none
1
2.516394
3
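_mock_coordinates above draws an integer in [min*1e7, max*1e7] and scales back down, giving a uniform value with seven decimal places of resolution (roughly centimetre-level for geographic coordinates). A quick property check of the bounds, with the helper inlined:

from random import randint, seed

THRESHOLD = 10000000

def mock_coordinates(lo, hi):
    # uniform over (hi - lo) * THRESHOLD + 1 evenly spaced values
    return randint(lo * THRESHOLD, hi * THRESHOLD) / THRESHOLD

seed(0)
samples = [mock_coordinates(20, 30) for _ in range(1000)]
assert all(20 <= s <= 30 for s in samples)
print(min(samples), max(samples))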
tensor2struct/models/spider/spider_linking.py
chenyangh/tensor2struct-public
69
6621701
import attr
import einops
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F

from tensor2struct.utils import batched_sequence
from tensor2struct.contexts import knowledge_graph
from tensor2struct.utils import registry, gumbel
from tensor2struct.modules import rat, lstm, embedders, energys

import logging

logger = logging.getLogger("tensor2struct")


def get_graph_from_relations(desc, relations2id):
    """
    Protocol: the graph is constructed based on four keys of desc:
    question, columns, tables
    **MIND THE ORDER OF SECTIONS**
    """
    sections = [("q", len(desc["question"]))]
    if "columns" in desc:
        sections.append(("col", len(desc["columns"])))
    if "tables" in desc:
        sections.append(("tab", len(desc["tables"])))

    relations = [desc["schema_relations"], desc["sc_relations"], desc["cv_relations"]]
    relation_graph = knowledge_graph.KnowledgeGraph(sections, relations2id)
    for relation in relations:
        relation_graph.add_relations_to_graph(relation)
    return relation_graph.get_relation_graph()


def get_schema_graph_from_relations(desc, relations2id):
    sections = []
    if "columns" in desc:
        sections.append(("col", len(desc["columns"])))
    if "tables" in desc:
        sections.append(("tab", len(desc["tables"])))

    relations = [desc["schema_relations"]]
    relation_graph = knowledge_graph.KnowledgeGraph(sections, relations2id)
    for relation in relations:
        relation_graph.add_relations_to_graph(relation)
    return relation_graph.get_relation_graph()


@attr.s
class RelationMap:
    q_len = attr.ib(default=None)
    c_len = attr.ib(default=None)
    t_len = attr.ib(default=None)
    predefined_relation = attr.ib(default=None)
    ct_relation = attr.ib(default=None)
    qq_relation = attr.ib(default=None)
    qc_relation = attr.ib(default=None)
    qt_relation = attr.ib(default=None)
    cq_relation = attr.ib(default=None)
    tq_relation = attr.ib(default=None)


@registry.register("schema_linking", "spider_string_matching")
class StringLinking:
    def __init__(self, device, preproc):
        self._device = device
        self.relations2id = preproc.relations2id

    def __call__(self, desc):
        return self.link_one_example(desc)

    def link_one_example(self, desc):
        relation_np = get_graph_from_relations(desc, self.relations2id)
        relations_t = torch.LongTensor(relation_np).to(self._device)
        relation_obj = RelationMap(
            q_len=len(desc["question"]),
            c_len=len(desc["columns"]),
            t_len=len(desc["tables"]),
            predefined_relation=relations_t,
        )
        return relation_obj


def argmax(logits, device, dim):
    max_id = torch.argmax(logits, dim=dim, keepdim=True)
    one_hot = torch.zeros_like(logits).to(device).scatter_(dim, max_id, 1)
    return one_hot


@registry.register("schema_linking", "bilinear_matching")
class BilinearLinking(nn.Module):
    def __init__(
        self,
        device,
        preproc,
        word_emb_size,
        num_latent_relations,
        hidden_size=300,
        recurrent_size=256,
        discrete_relation=True,
        norm_relation=True,
        symmetric_relation=False,
        combine_latent_relations=False,
        score_type="bilinear",
        learnable_embeddings=False,
        question_encoder=("shared-en-emb",),
        column_encoder=("shared-en-emb",),
        table_encoder=("shared-en-emb",),
    ):
        super().__init__()
        self.preproc = preproc
        self.vocab = preproc.vocab
        self.word_emb_size = word_emb_size
        self._device = device
        self.hidden_size = hidden_size
        self.discrete_relation = discrete_relation
        self.norm_relation = norm_relation
        self.num_latent_relations = num_latent_relations
        self.relations2id = preproc.relations2id
        self.recurrent_size = recurrent_size
        self.dropout = 0.0

        score_funcs = {
            "bilinear": lambda: energys.Bilinear(
                hidden_size, num_latent_relations, include_id=True
            ),
            "mlp": lambda: energys.MLP(hidden_size, num_latent_relations),
        }

        # build modules
        if learnable_embeddings:
            self.en_learnable_words = self.vocab
        else:
            self.en_learnable_words = None
        shared_modules = {
            "shared-en-emb": embedders.LookupEmbeddings(
                self._device,
                self.vocab,
                self.preproc.word_emb,
                self.word_emb_size,
                learnable_words=self.en_learnable_words,
            ),
        }
        if self.preproc.use_ch_vocab:
            self.ch_vocab = preproc.ch_vocab
            if learnable_embeddings:
                self.ch_learnable_words = self.ch_vocab
            else:
                self.ch_learnable_words = None
            shared_modules["shared-ch-emb"] = embedders.LookupEmbeddings(
                self._device,
                self.ch_vocab,
                self.preproc.ch_word_emb,
                self.preproc.ch_word_emb.dim,
                learnable_words=self.ch_learnable_words,
            )
            shared_modules["ch-bilstm"] = lstm.BiLSTM(
                input_size=self.preproc.ch_word_emb.dim,
                output_size=self.recurrent_size,
                dropout=self.dropout,
                use_native=False,
                summarize=False,
            )
            shared_modules["ch-bilstm-native"] = lstm.BiLSTM(
                input_size=self.preproc.ch_word_emb.dim,
                output_size=self.recurrent_size,
                dropout=self.dropout,
                use_native=True,
                summarize=False,
            )

        self.question_encoder = self._build_modules(
            question_encoder, shared_modules=shared_modules
        )
        self.column_encoder = self._build_modules(
            column_encoder, shared_modules=shared_modules
        )
        self.table_encoder = self._build_modules(
            table_encoder, shared_modules=shared_modules
        )

        self.combine_latent_relations = combine_latent_relations
        if combine_latent_relations:
            self.string_link = StringLinking(device, preproc)

        self.symmetric_relation = symmetric_relation
        assert self.symmetric_relation
        if self.symmetric_relation:
            relations = ("qc", "qt")
        else:
            relations = ("qc", "cq", "tq", "qt")
        self.relation_score_dic = nn.ModuleDict(
            {k: score_funcs[score_type]() for k in relations}
        )

        if discrete_relation:
            self.temperature = 1  # for gumbel

        if not norm_relation:  # then norm q/col/tab
            self.null_q_token = nn.Parameter(torch.zeros([1, hidden_size]))
            self.null_c_token = nn.Parameter(torch.zeros([1, hidden_size]))
            self.null_t_token = nn.Parameter(torch.zeros([1, hidden_size]))

    def _build_modules(self, module_types, shared_modules=None):
        module_builder = {
            "en-emb": lambda: embedders.LookupEmbeddings(
                self._device,
                self.vocab,
                self.preproc.word_emb,
                self.word_emb_size,
                learnable_words=self.en_learnable_words,
            ),
            "bilstm": lambda: lstm.BiLSTM(
                input_size=self.word_emb_size,
                output_size=self.recurrent_size,
                dropout=self.dropout,
                summarize=False,
                use_native=False,
            ),
            "bilstm-native": lambda: lstm.BiLSTM(
                input_size=self.word_emb_size,
                output_size=self.recurrent_size,
                dropout=self.dropout,
                summarize=False,
                use_native=True,
            ),
        }

        modules = []
        for module_type in module_types:
            if module_type in shared_modules:
                modules.append(shared_modules[module_type])
            else:
                modules.append(module_builder[module_type]())
        return torch.nn.Sequential(*modules)

    def compute_relation_score(self, x1, x2, boudaries, score_type):
        """
        x1, x2: len * relation_emb_size
        """
        x1, x2 = x1.unsqueeze(1), x2.unsqueeze(1)  # len * 1 * emb_size
        len_1, _, rs = x1.size()
        len_2, _, rs = x2.size()
        _x1 = x1.expand(len_1, len_2, rs)
        _x2 = x2.expand(len_2, len_1, rs).transpose(0, 1)
        relation_scores = self.relation_score_dic[score_type](_x1, _x2)

        # TODO: optimize this code
        res = []
        for s, e in zip(boudaries, boudaries[1:]):
            max_val, max_id = torch.max(relation_scores[:, s:e, :], dim=1, keepdim=True)
            res.append(max_val)
        res_v = torch.cat(res, dim=1)
        return res_v

    def normalize_relation_score(self, relation_scores):
        """
        relation_scores: either dim_1 or dim_2 will be normalized
        """
        if not self.norm_relation:
            norm_dim = 1
        else:
            norm_dim = 2

        if self.discrete_relation:
            device = relation_scores.device
            if self.training:
                r = gumbel.gumbel_softmax_sample(
                    relation_scores, self.temperature, device, norm_dim
                )
            else:
                r = argmax(relation_scores, device, norm_dim)
        else:
            r = torch.softmax(relation_scores, dim=norm_dim)
        return r

    def get_symmetric_relation(self, x1, x2, boudaries, score_type, ignore_null=True):
        x1_type, x2_type = score_type
        assert x1_type == "q"  # qc, qt

        # pack the null token
        if not self.norm_relation:
            null_token_1 = getattr(self, f"null_{x1_type}_token")
            x1 = torch.cat([x1, null_token_1], 0)
            null_token_2 = getattr(self, f"null_{x2_type}_token")
            x2 = torch.cat([x2, null_token_2], 0)
            boudaries.append(boudaries[-1] + 1)

        relation_scores = self.compute_relation_score(x1, x2, boudaries, score_type)
        r1 = self.normalize_relation_score(relation_scores)
        r2 = self.normalize_relation_score(relation_scores.transpose(0, 1))

        # unpack the null tokens
        if not self.norm_relation and ignore_null:
            r1 = r1[:-1, :-1, :]
            r2 = r2[:-1, :-1, :]
        return r1, r2

    def get_q_ct_relations(self, desc, ignore_null, column_type):
        q_enc, _ = self.question_encoder([[desc["question"]]])
        if column_type:
            c_enc, c_boudaries = self.column_encoder(
                [[col[1:] for col in desc["columns"]]]
            )
        else:
            c_enc, c_boudaries = self.column_encoder([desc["columns"]])
        t_enc, t_boudaries = self.table_encoder([desc["tables"]])

        q_enc, c_enc, t_enc = q_enc.select(0), c_enc.select(0), t_enc.select(0)
        c_boudaries, t_boudaries = c_boudaries[0].tolist(), t_boudaries[0].tolist()

        qc_relation, cq_relation = self.get_symmetric_relation(
            q_enc, c_enc, c_boudaries, "qc", ignore_null=ignore_null
        )
        qt_relation, tq_relation = self.get_symmetric_relation(
            q_enc, t_enc, t_boudaries, "qt", ignore_null=ignore_null
        )
        return qc_relation, cq_relation, qt_relation, tq_relation

    def forward_unbatched(self, desc, ignore_null=True, column_type=True):
        qc_relation, cq_relation, qt_relation, tq_relation = self.get_q_ct_relations(
            desc, ignore_null, column_type
        )

        ct_relation_np = get_schema_graph_from_relations(desc, self.relations2id)
        ct_relation = torch.LongTensor(ct_relation_np).to(self._device)

        if self.combine_latent_relations:
            r = self.string_link(desc)
            predefined_relation = r.predefined_relation
        else:
            predefined_relation = None

        relations = RelationMap(
            q_len=len(desc["question"]),
            c_len=len(desc["columns"]),
            t_len=len(desc["tables"]),
            predefined_relation=predefined_relation,
            qc_relation=qc_relation,
            cq_relation=cq_relation,
            qt_relation=qt_relation,
            tq_relation=tq_relation,
            ct_relation=ct_relation,
        )
        return relations

    def forward(self, desc, ignore_null=True, column_type=True):
        return self.forward_unbatched(
            desc, ignore_null=ignore_null, column_type=column_type
        )


@registry.register("schema_linking", "sinkhorn_matching")
class SinkhornLinking(BilinearLinking):
    def __init__(
        self,
        device,
        preproc,
        num_latent_relations,
        word_emb_size,
        recurrent_size=256,
        discrete_relation=True,
        norm_relation=True,
        symmetric_relation=False,
        combine_latent_relations=False,
        question_encoder=("shared-en-emb", "bilstm-native"),
        column_encoder=("shared-en-emb", "bilstm-native"),
        table_encoder=("shared-en-emb", "bilstm-native"),
    ):
        super().__init__(
            device=device,
            preproc=preproc,
            word_emb_size=word_emb_size,
            num_latent_relations=num_latent_relations,
            discrete_relation=False,
            norm_relation=False,
            symmetric_relation=True,
            hidden_size=recurrent_size,
            recurrent_size=recurrent_size,
            combine_latent_relations=combine_latent_relations,
            question_encoder=question_encoder,
            column_encoder=column_encoder,
            table_encoder=table_encoder,
            score_type="bilinear",
            learnable_embeddings=False,
        )
        self.sh_temperature = 0.8
        self.num_sh_it = 16
        self.null_q_token = nn.Parameter(torch.zeros([1, recurrent_size]))
        self.null_c_token = nn.Parameter(torch.zeros([1, recurrent_size]))
        self.null_t_token = nn.Parameter(torch.zeros([1, recurrent_size]))

    def normalize_relation_score(self, relation_scores):
        x_len, _x_len, num_r = relation_scores.size()
        assert x_len == _x_len
        it_scores = relation_scores
        for _ in range(self.num_sh_it):
            it_scores = it_scores - torch.logsumexp(it_scores, dim=1, keepdim=True)
            it_scores = it_scores - torch.logsumexp(it_scores, dim=0, keepdim=True)
        prob_m = torch.exp(it_scores)
        return prob_m

    def compute_relation_score(self, x1, x2, score_type):
        """
        x1, x2: len * relation_emb_size
        """
        x1, x2 = x1.unsqueeze(1), x2.unsqueeze(1)  # len * 1 * emb_size
        len_1, _, rs = x1.size()
        len_2, _, rs = x2.size()
        _x1 = x1.expand(len_1, len_2, rs)
        _x2 = x2.expand(len_2, len_1, rs).transpose(0, 1)
        relation_scores = self.relation_score_dic[score_type](_x1, _x2)
        return relation_scores

    def get_symmetric_relation(self, x1, x2, boudaries, score_type, ignore_null=True):
        q_type, ct_type = score_type
        assert q_type == "q"  # qc, qt

        q_len, feat_dim = x1.size()
        ct_len, feat_dim = x2.size()
        pad_len = max(q_len, ct_len) + 1

        null_token_q = getattr(self, f"null_{q_type}_token").expand(pad_len - q_len, -1)
        q_input = torch.cat([x1, null_token_q], 0)
        null_token_ct = getattr(self, f"null_{ct_type}_token").expand(
            pad_len - ct_len, -1
        )
        ct_input = torch.cat([x2, null_token_ct], 0)

        relation_scores = self.compute_relation_score(q_input, ct_input, score_type)
        q_ct_r = self.normalize_relation_score(relation_scores)

        # merge prob mass of ct
        m_q_ct_r_sum = []
        m_q_ct_r_avg = []
        for s, e in zip(boudaries, boudaries[1:]):
            sum_val = torch.sum(q_ct_r[:q_len, s:e, :], dim=1, keepdim=True)
            m_q_ct_r_sum.append(sum_val)
            avg_val = torch.mean(q_ct_r[:q_len, s:e, :], dim=1, keepdim=True)
            m_q_ct_r_avg.append(avg_val)
        m_q_ct_r = torch.cat(m_q_ct_r_sum, dim=1)
        m_q_ct_r_avg = torch.cat(m_q_ct_r_avg, dim=1)
        m_ct_q_r = m_q_ct_r_avg.transpose(0, 1)
        return m_q_ct_r, m_ct_q_r

    def merge_duplicates(self, items):
        # input: list of list of words
        new_item_list = []
        new_item2id = {}
        id_map = []
        for i, item in enumerate(items):
            t_item = tuple(item)
            if t_item not in new_item_list:
                new_item2id[t_item] = len(new_item_list)
                new_item_list.append(t_item)
            id_map.append(new_item2id[t_item])
        return new_item_list, id_map

    def get_q_ct_relations(self, desc, ignore_null, column_type):
        # create mapping
        q_enc, _ = self.question_encoder([[desc["question"]]])
        if column_type:
            raw_columns = [col[1:] for col in desc["columns"]]
        else:
            raw_columns = desc["columns"]
        new_columns, column_id_map = self.merge_duplicates(raw_columns)
        c_enc, c_boudaries = self.column_encoder([new_columns])
        new_tables, table_id_map = self.merge_duplicates(desc["tables"])
        t_enc, t_boudaries = self.table_encoder([new_tables])

        # compute relations
        q_enc, c_enc, t_enc = q_enc.select(0), c_enc.select(0), t_enc.select(0)
        c_boudaries, t_boudaries = c_boudaries[0].tolist(), t_boudaries[0].tolist()
        m_qc_relation, m_cq_relation = self.get_symmetric_relation(
            q_enc, c_enc, c_boudaries, "qc", ignore_null=ignore_null
        )
        m_qt_relation, m_tq_relation = self.get_symmetric_relation(
            q_enc, t_enc, t_boudaries, "qt", ignore_null=ignore_null
        )

        # map it back
        column_id_map = torch.LongTensor(column_id_map).to(self._device)
        table_id_map = torch.LongTensor(table_id_map).to(self._device)
        qc_relation = m_qc_relation.index_select(1, column_id_map)
        cq_relation = m_cq_relation.index_select(0, column_id_map)
        qt_relation = m_qt_relation.index_select(1, table_id_map)
        tq_relation = m_tq_relation.index_select(0, table_id_map)
        return qc_relation, cq_relation, qt_relation, tq_relation
import attr import einops import numpy as np import torch from torch import nn import torch.nn.functional as F from tensor2struct.utils import batched_sequence from tensor2struct.contexts import knowledge_graph from tensor2struct.utils import registry, gumbel from tensor2struct.modules import rat, lstm, embedders, energys import logging logger = logging.getLogger("tensor2struct") def get_graph_from_relations(desc, relations2id): """ Protocol: the graph is contructed based on four keys of desc: question, columns, tables **MIND THE ORDER OF SECTIONS** """ sections = [("q", len(desc["question"]))] if "columns" in desc: sections.append(("col", len(desc["columns"]))) if "tables" in desc: sections.append(("tab", len(desc["tables"]))) relations = [desc["schema_relations"], desc["sc_relations"], desc["cv_relations"]] relation_graph = knowledge_graph.KnowledgeGraph(sections, relations2id) for relation in relations: relation_graph.add_relations_to_graph(relation) return relation_graph.get_relation_graph() def get_schema_graph_from_relations(desc, relations2id): sections = [] if "columns" in desc: sections.append(("col", len(desc["columns"]))) if "tables" in desc: sections.append(("tab", len(desc["tables"]))) relations = [desc["schema_relations"]] relation_graph = knowledge_graph.KnowledgeGraph(sections, relations2id) for relation in relations: relation_graph.add_relations_to_graph(relation) return relation_graph.get_relation_graph() @attr.s class RelationMap: q_len = attr.ib(default=None) c_len = attr.ib(default=None) t_len = attr.ib(default=None) predefined_relation = attr.ib(default=None) ct_relation = attr.ib(default=None) qq_relation = attr.ib(default=None) qc_relation = attr.ib(default=None) qt_relation = attr.ib(default=None) cq_relation = attr.ib(default=None) tq_relation = attr.ib(default=None) @registry.register("schema_linking", "spider_string_matching") class StringLinking: def __init__(self, device, preproc): self._device = device self.relations2id = preproc.relations2id def __call__(self, desc): return self.link_one_example(desc) def link_one_example(self, desc): relation_np = get_graph_from_relations(desc, self.relations2id) relations_t = torch.LongTensor(relation_np).to(self._device) relation_obj = RelationMap( q_len=len(desc["question"]), c_len=len(desc["columns"]), t_len=len(desc["tables"]), predefined_relation=relations_t, ) return relation_obj def argmax(logits, device, dim): max_id = torch.argmax(logits, dim=dim, keepdim=True) one_hot = torch.zeros_like(logits).to(device).scatter_(dim, max_id, 1) return one_hot @registry.register("schema_linking", "bilinear_matching") class BilinearLinking(nn.Module): def __init__( self, device, preproc, word_emb_size, num_latent_relations, hidden_size=300, recurrent_size=256, discrete_relation=True, norm_relation=True, symmetric_relation=False, combine_latent_relations=False, score_type="bilinear", learnable_embeddings=False, question_encoder=("shared-en-emb",), column_encoder=("shared-en-emb",), table_encoder=("shared-en-emb",), ): super().__init__() self.preproc = preproc self.vocab = preproc.vocab self.word_emb_size = word_emb_size self._device = device self.hidden_size = hidden_size self.discrete_relation = discrete_relation self.norm_relation = norm_relation self.num_latent_relations = num_latent_relations self.relations2id = preproc.relations2id self.recurrent_size = recurrent_size self.dropout = 0.0 score_funcs = { "bilinear": lambda: energys.Bilinear( hidden_size, num_latent_relations, include_id=True ), "mlp": lambda: 
energys.MLP(hidden_size, num_latent_relations), } # build modules if learnable_embeddings: self.en_learnable_words = self.vocab else: self.en_learnable_words = None shared_modules = { "shared-en-emb": embedders.LookupEmbeddings( self._device, self.vocab, self.preproc.word_emb, self.word_emb_size, learnable_words=self.en_learnable_words, ), } if self.preproc.use_ch_vocab: self.ch_vocab = preproc.ch_vocab if learnable_embeddings: self.ch_learnable_words = self.ch_vocab else: self.ch_learnable_words = None shared_modules["shared-ch-emb"] = embedders.LookupEmbeddings( self._device, self.ch_vocab, self.preproc.ch_word_emb, self.preproc.ch_word_emb.dim, learnable_words=self.ch_learnable_words, ) shared_modules["ch-bilstm"] = lstm.BiLSTM( input_size=self.preproc.ch_word_emb.dim, output_size=self.recurrent_size, dropout=self.dropout, use_native=False, summarize=False, ) shared_modules["ch-bilstm-native"] = lstm.BiLSTM( input_size=self.preproc.ch_word_emb.dim, output_size=self.recurrent_size, dropout=self.dropout, use_native=True, summarize=False, ) self.question_encoder = self._build_modules( question_encoder, shared_modules=shared_modules ) self.column_encoder = self._build_modules( column_encoder, shared_modules=shared_modules ) self.table_encoder = self._build_modules( table_encoder, shared_modules=shared_modules ) self.combine_latent_relations = combine_latent_relations if combine_latent_relations: self.string_link = StringLinking(device, preproc) self.symmetric_relation = symmetric_relation assert self.symmetric_relation if self.symmetric_relation: relations = ("qc", "qt") else: relations = ("qc", "cq", "tq", "qt") self.relation_score_dic = nn.ModuleDict( {k: score_funcs[score_type]() for k in relations} ) if discrete_relation: self.temperature = 1 # for gumbel if not norm_relation: # then norm q/col/tab self.null_q_token = nn.Parameter(torch.zeros([1, hidden_size])) self.null_c_token = nn.Parameter(torch.zeros([1, hidden_size])) self.null_t_token = nn.Parameter(torch.zeros([1, hidden_size])) def _build_modules(self, module_types, shared_modules=None): module_builder = { "en-emb": lambda: embedders.LookupEmbeddings( self._device, self.vocab, self.preproc.word_emb, self.word_emb_size, learnable_words=self.en_learnable_words, ), "bilstm": lambda: lstm.BiLSTM( input_size=self.word_emb_size, output_size=self.recurrent_size, dropout=self.dropout, summarize=False, use_native=False, ), "bilstm-native": lambda: lstm.BiLSTM( input_size=self.word_emb_size, output_size=self.recurrent_size, dropout=self.dropout, summarize=False, use_native=True, ), } modules = [] for module_type in module_types: if module_type in shared_modules: modules.append(shared_modules[module_type]) else: modules.append(module_builder[module_type]()) return torch.nn.Sequential(*modules) def compute_relation_score(self, x1, x2, boudaries, score_type): """ x1, x2: len * relation_emb_size """ x1, x2 = x1.unsqueeze(1), x2.unsqueeze(1) # len * 1 * emb_size len_1, _, rs = x1.size() len_2, _, rs = x2.size() _x1 = x1.expand(len_1, len_2, rs) _x2 = x2.expand(len_2, len_1, rs).transpose(0, 1) relation_scores = self.relation_score_dic[score_type](_x1, _x2) # TODO: optimize this code res = [] for s, e in zip(boudaries, boudaries[1:]): max_val, max_id = torch.max(relation_scores[:, s:e, :], dim=1, keepdim=True) res.append(max_val) res_v = torch.cat(res, dim=1) return res_v def normalize_relation_score(self, relation_scores): """ relation_scores: either dim_1 or dim_2 will be normalized """ if not self.norm_relation: norm_dim = 1 else: norm_dim 
= 2 if self.discrete_relation: device = relation_scores.device if self.training: r = gumbel.gumbel_softmax_sample( relation_scores, self.temperature, device, norm_dim ) else: r = argmax(relation_scores, device, norm_dim) else: r = torch.softmax(relation_scores, dim=norm_dim) return r def get_symmetric_relation(self, x1, x2, boudaries, score_type, ignore_null=True): x1_type, x2_type = score_type assert x1_type == "q" # qc, qt # pack the null token if not self.norm_relation: null_token_1 = getattr(self, f"null_{x1_type}_token") x1 = torch.cat([x1, null_token_1], 0) null_token_2 = getattr(self, f"null_{x2_type}_token") x2 = torch.cat([x2, null_token_2], 0) boudaries.append(boudaries[-1] + 1) relation_scores = self.compute_relation_score(x1, x2, boudaries, score_type) r1 = self.normalize_relation_score(relation_scores) r2 = self.normalize_relation_score(relation_scores.transpose(0, 1)) # unpack the null tokens if not self.norm_relation and ignore_null: r1 = r1[:-1, :-1, :] r2 = r2[:-1, :-1, :] return r1, r2 def get_q_ct_relations(self, desc, ignore_null, column_type): q_enc, _ = self.question_encoder([[desc["question"]]]) if column_type: c_enc, c_boudaries = self.column_encoder( [[col[1:] for col in desc["columns"]]] ) else: c_enc, c_boudaries = self.column_encoder([desc["columns"]]) t_enc, t_boudaries = self.table_encoder([desc["tables"]]) q_enc, c_enc, t_enc = q_enc.select(0), c_enc.select(0), t_enc.select(0) c_boudaries, t_boudaries = c_boudaries[0].tolist(), t_boudaries[0].tolist() qc_relation, cq_relation = self.get_symmetric_relation( q_enc, c_enc, c_boudaries, "qc", ignore_null=ignore_null ) qt_relation, tq_relation = self.get_symmetric_relation( q_enc, t_enc, t_boudaries, "qt", ignore_null=ignore_null ) return qc_relation, cq_relation, qt_relation, tq_relation def forward_unbatched(self, desc, ignore_null=True, column_type=True): qc_relation, cq_relation, qt_relation, tq_relation = self.get_q_ct_relations( desc, ignore_null, column_type ) ct_relation_np = get_schema_graph_from_relations(desc, self.relations2id) ct_relation = torch.LongTensor(ct_relation_np).to(self._device) if self.combine_latent_relations: r = self.string_link(desc) predefined_relation = r.predefined_relation else: predefined_relation = None relations = RelationMap( q_len=len(desc["question"]), c_len=len(desc["columns"]), t_len=len(desc["tables"]), predefined_relation=predefined_relation, qc_relation=qc_relation, cq_relation=cq_relation, qt_relation=qt_relation, tq_relation=tq_relation, ct_relation=ct_relation, ) return relations def forward(self, desc, ignore_null=True, column_type=True): return self.forward_unbatched( desc, ignore_null=ignore_null, column_type=column_type ) @registry.register("schema_linking", "sinkhorn_matching") class SinkhornLinking(BilinearLinking): def __init__( self, device, preproc, num_latent_relations, word_emb_size, recurrent_size=256, discrete_relation=True, norm_relation=True, symmetric_relation=False, combine_latent_relations=False, question_encoder=("shared-en-emb", "bilstm-native"), column_encoder=("shared-en-emb", "bilstm-native"), table_encoder=("shared-en-emb", "bilstm-native"), ): super().__init__( device=device, preproc=preproc, word_emb_size=word_emb_size, num_latent_relations=num_latent_relations, discrete_relation=False, norm_relation=False, symmetric_relation=True, hidden_size=recurrent_size, recurrent_size=recurrent_size, combine_latent_relations=combine_latent_relations, question_encoder=question_encoder, column_encoder=column_encoder, table_encoder=table_encoder, 
score_type="bilinear", learnable_embeddings=False, ) self.sh_temperature = 0.8 self.num_sh_it = 16 self.null_q_token = nn.Parameter(torch.zeros([1, recurrent_size])) self.null_c_token = nn.Parameter(torch.zeros([1, recurrent_size])) self.null_t_token = nn.Parameter(torch.zeros([1, recurrent_size])) def normalize_relation_score(self, relation_scores): x_len, _x_len, num_r = relation_scores.size() assert x_len == _x_len it_scores = relation_scores for _ in range(self.num_sh_it): it_scores = it_scores - torch.logsumexp(it_scores, dim=1, keepdim=True) it_scores = it_scores - torch.logsumexp(it_scores, dim=0, keepdim=True) prob_m = torch.exp(it_scores) return prob_m def compute_relation_score(self, x1, x2, score_type): """ x1, x2: len * relation_emb_size """ x1, x2 = x1.unsqueeze(1), x2.unsqueeze(1) # len * 1 * emb_size len_1, _, rs = x1.size() len_2, _, rs = x2.size() _x1 = x1.expand(len_1, len_2, rs) _x2 = x2.expand(len_2, len_1, rs).transpose(0, 1) relation_scores = self.relation_score_dic[score_type](_x1, _x2) return relation_scores def get_symmetric_relation(self, x1, x2, boudaries, score_type, ignore_null=True): q_type, ct_type = score_type assert q_type == "q" # qc, qt q_len, feat_dim = x1.size() ct_len, feat_dim = x2.size() pad_len = max(q_len, ct_len) + 1 null_token_q = getattr(self, f"null_{q_type}_token").expand(pad_len - q_len, -1) q_input = torch.cat([x1, null_token_q], 0) null_token_ct = getattr(self, f"null_{ct_type}_token").expand( pad_len - ct_len, -1 ) ct_input = torch.cat([x2, null_token_ct], 0) relation_scores = self.compute_relation_score(q_input, ct_input, score_type) q_ct_r = self.normalize_relation_score(relation_scores) # merge prob mass of ct m_q_ct_r_sum = [] m_q_ct_r_avg = [] for s, e in zip(boudaries, boudaries[1:]): sum_val = torch.sum(q_ct_r[:q_len, s:e, :], dim=1, keepdim=True) m_q_ct_r_sum.append(sum_val) avg_val = torch.mean(q_ct_r[:q_len, s:e, :], dim=1, keepdim=True) m_q_ct_r_avg.append(avg_val) m_q_ct_r = torch.cat(m_q_ct_r_sum, dim=1) m_q_ct_r_avg = torch.cat(m_q_ct_r_avg, dim=1) m_ct_q_r = m_q_ct_r_avg.transpose(0, 1) return m_q_ct_r, m_ct_q_r def merge_duplicates(self, items): # input: list of list of words new_item_list = [] new_item2id = {} id_map = [] for i, item in enumerate(items): t_item = tuple(item) if t_item not in new_item_list: new_item2id[t_item] = len(new_item_list) new_item_list.append(t_item) id_map.append(new_item2id[t_item]) return new_item_list, id_map def get_q_ct_relations(self, desc, ignore_null, column_type): # create mapping q_enc, _ = self.question_encoder([[desc["question"]]]) if column_type: raw_columns = [col[1:] for col in desc["columns"]] else: raw_columns = desc["columns"] new_columns, column_id_map = self.merge_duplicates(raw_columns) c_enc, c_boudaries = self.column_encoder([new_columns]) new_tables, table_id_map = self.merge_duplicates(desc["tables"]) t_enc, t_boudaries = self.table_encoder([new_tables]) # compute relations q_enc, c_enc, t_enc = q_enc.select(0), c_enc.select(0), t_enc.select(0) c_boudaries, t_boudaries = c_boudaries[0].tolist(), t_boudaries[0].tolist() m_qc_relation, m_cq_relation = self.get_symmetric_relation( q_enc, c_enc, c_boudaries, "qc", ignore_null=ignore_null ) m_qt_relation, m_tq_relation = self.get_symmetric_relation( q_enc, t_enc, t_boudaries, "qt", ignore_null=ignore_null ) # map it back column_id_map = torch.LongTensor(column_id_map).to(self._device) table_id_map = torch.LongTensor(table_id_map).to(self._device) qc_relation = m_qc_relation.index_select(1, column_id_map) cq_relation = 
m_cq_relation.index_select(0, column_id_map) qt_relation = m_qt_relation.index_select(1, table_id_map) tq_relation = m_tq_relation.index_select(0, table_id_map) return qc_relation, cq_relation, qt_relation, tq_relation
en
0.571015
Protocol: the graph is constructed based on four keys of desc: question, columns, tables **MIND THE ORDER OF SECTIONS** # build modules # for gumbel # then norm q/col/tab x1, x2: len * relation_emb_size # len * 1 * emb_size # TODO: optimize this code relation_scores: either dim_1 or dim_2 will be normalized # qc, qt # pack the null token # unpack the null tokens x1, x2: len * relation_emb_size # len * 1 * emb_size # qc, qt # merge prob mass of ct # input: list of list of words # create mapping # compute relations # map it back
2.149804
2
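The SinkhornLinking record above normalizes a padded square score tensor with log-domain Sinkhorn iterations (num_sh_it = 16, alternating logsumexp subtraction over rows and columns). A minimal standalone sketch of just that step, assuming only PyTorch; the shapes and iteration count mirror normalize_relation_score, while the random input is illustrative:

import torch

def sinkhorn(scores: torch.Tensor, num_it: int = 16) -> torch.Tensor:
    # scores: (n, n, num_relations); each relation slice converges toward a
    # doubly stochastic matrix (every row and every column sums to ~1).
    assert scores.size(0) == scores.size(1)
    log_p = scores
    for _ in range(num_it):
        log_p = log_p - torch.logsumexp(log_p, dim=1, keepdim=True)  # normalize rows
        log_p = log_p - torch.logsumexp(log_p, dim=0, keepdim=True)  # normalize columns
    return torch.exp(log_p)

prob = sinkhorn(torch.randn(5, 5, 3))
print(prob[:, :, 0].sum(dim=0))  # each column sum is close to 1

Working in log space keeps the iteration numerically stable, and unlike the plain softmax used in BilinearLinking it yields a soft matching that is normalized in both directions at once.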
Codeforces/A_Anti_Light_s_Cell_Guessing.py
anubhab-code/Competitive-Programming
0
6621702
<gh_stars>0 for _ in range(int(input())): a, b = map(int, input().split()) if a == 1 and b == 1: print(0) elif a == 1 or b == 1: print(1) else: print(2)
for _ in range(int(input())): a, b = map(int, input().split()) if a == 1 and b == 1: print(0) elif a == 1 or b == 1: print(1) else: print(2)
none
1
3.732288
4
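Assuming the usual Manhattan-distance formulation of this Codeforces problem (each question reveals the distance from a chosen cell to the hidden one), here is a hypothetical brute-force cross-check of the closed form above: 0 questions for a 1x1 grid, 1 for a single row or column, 2 otherwise. The corner probe positions are my assumption, not part of the submission:

from itertools import product

def identifiable(n, m, probes):
    # a probe set works iff the distance signature is unique for every cell
    sigs = {tuple(abs(x - a) + abs(y - b) for a, b in probes)
            for x, y in product(range(n), range(m))}
    return len(sigs) == n * m

assert identifiable(1, 1, [])                 # 0 questions: only one cell exists
assert identifiable(1, 5, [(0, 0)])           # single row: 1 question suffices
assert not identifiable(2, 2, [(0, 0)])       # 1 question is not enough in 2D
assert identifiable(3, 4, [(0, 0), (0, 3)])   # 2 corner probes pin down any cell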
commands/__init__.py
sebiTCR/Stocks-of-Razbia
3
6621703
<reponame>sebiTCR/Stocks-of-Razbia import typing as t from .exc import * from .converters import * from .cmd_utils import * def _long_name(name, parent): if parent is not None: return f"{parent.long_name} {name}" return name class Command: __slots__ = ("name", "long_name", "run", "args", "usage") def __init__(self, func, name=None, usage="", parent=None): name = name or func.__name__ long_name = _long_name(name, parent) self.name = name self.long_name = long_name self.usage = f"{{name}} {usage}" self.run = func self.args = handle_annotations_args(func) def __call__(self, ctx, *args: str): prepared_args = prepare_args(ctx, self, args) return self.run(ctx, *prepared_args) class Group: __slots__ = ("name", "long_name", "sub_commands") def __init__(self, name=None, parent=None): self.name = name self.long_name = _long_name(name, parent) self.sub_commands = {} async def __call__(self, ctx, *args): if not args: raise CommandError(f"Valid subcommands: {', '.join(self.remapped_sub_commands(ctx))}") sub_command_name, *rest = args sub_command_name = ctx.api.command_names.get((sub_command_name, self.name), sub_command_name) if sub_command_name not in self.sub_commands: raise CommandError(f"Valid subcommands: {', '.join(self.remapped_sub_commands(ctx))}") return await self.sub_commands[sub_command_name](ctx, *rest) def command(self, name=None, cls=Command, **kwargs): return command(name=name, cls=cls, registry=self.sub_commands, parent=self, **kwargs) def remapped_sub_commands(self, ctx): res = [ new_command_name for (new_command_name, group_name), old_command_name in ctx.api.command_names.items() if group_name == self.name and old_command_name in self.sub_commands ] if res: return res else: return self.sub_commands def command(*, name=None, cls=Command, registry: t.Dict[str, t.Callable] = None, **kwargs): def decorator(func): cmd = cls(func, name=name, **kwargs) if registry is not None: registry[cmd.name] = cmd return cmd return decorator def group(*, name=None, cls=Group, registry: t.Dict[str, t.Callable] = None, **kwargs): grp = cls(name=name, **kwargs) if registry is not None: registry[grp.name] = grp return grp
import typing as t from .exc import * from .converters import * from .cmd_utils import * def _long_name(name, parent): if parent is not None: return f"{parent.long_name} {name}" return name class Command: __slots__ = ("name", "long_name", "run", "args", "usage") def __init__(self, func, name=None, usage="", parent=None): name = name or func.__name__ long_name = _long_name(name, parent) self.name = name self.long_name = long_name self.usage = f"{{name}} {usage}" self.run = func self.args = handle_annotations_args(func) def __call__(self, ctx, *args: str): prepared_args = prepare_args(ctx, self, args) return self.run(ctx, *prepared_args) class Group: __slots__ = ("name", "long_name", "sub_commands") def __init__(self, name=None, parent=None): self.name = name self.long_name = _long_name(name, parent) self.sub_commands = {} async def __call__(self, ctx, *args): if not args: raise CommandError(f"Valid subcommands: {', '.join(self.remapped_sub_commands(ctx))}") sub_command_name, *rest = args sub_command_name = ctx.api.command_names.get((sub_command_name, self.name), sub_command_name) if sub_command_name not in self.sub_commands: raise CommandError(f"Valid subcommands: {', '.join(self.remapped_sub_commands(ctx))}") return await self.sub_commands[sub_command_name](ctx, *rest) def command(self, name=None, cls=Command, **kwargs): return command(name=name, cls=cls, registry=self.sub_commands, parent=self, **kwargs) def remapped_sub_commands(self, ctx): res = [ new_command_name for (new_command_name, group_name), old_command_name in ctx.api.command_names.items() if group_name == self.name and old_command_name in self.sub_commands ] if res: return res else: return self.sub_commands def command(*, name=None, cls=Command, registry: t.Dict[str, t.Callable] = None, **kwargs): def decorator(func): cmd = cls(func, name=name, **kwargs) if registry is not None: registry[cmd.name] = cmd return cmd return decorator def group(*, name=None, cls=Group, registry: t.Dict[str, t.Callable] = None, **kwargs): grp = cls(name=name, **kwargs) if registry is not None: registry[grp.name] = grp return grp
none
1
2.468929
2
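The record above registers commands by threading a registry dict through a decorator factory. A self-contained mini version of the same pattern, stripped of the repo's converters and cmd_utils dependencies, so all names here are illustrative:

import typing as t

REGISTRY: t.Dict[str, t.Callable] = {}

def command(*, name=None, registry: t.Dict[str, t.Callable] = None):
    def decorator(func):
        cmd_name = name or func.__name__
        if registry is not None:
            registry[cmd_name] = func  # same side effect as Command registration above
        return func
    return decorator

@command(name="ping", registry=REGISTRY)
def ping(ctx):
    return "pong"

print(REGISTRY["ping"](None))  # -> pong

Keeping the registry an explicit argument rather than a module global is what lets Group reuse the same command() helper for its own sub_commands dict.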
winregistry/winregistry.py
michael-rolfe/winregistry
18
6621704
<gh_stars>10-100 from datetime import datetime, timedelta from typing import Any, Optional, Union from winreg import ( KEY_READ, KEY_SET_VALUE, KEY_WRITE, ConnectRegistry, CreateKeyEx, DeleteKey, DeleteValue, EnumKey, EnumValue, HKEYType, OpenKey, QueryInfoKey, QueryValueEx, SetValueEx, ) from winregistry.consts import WinregType from winregistry.models import RegEntry, RegKey from winregistry.utils import get_access_key, parse_path class WinRegistry: def __init__( self, host: Optional[str] = None, ) -> None: self.host: Optional[str] = host self._client: Optional[HKEYType] = None self._handler = None self._root: Optional[HKEYType] = None def _get_handler( self, key: str, access: int, key_wow64_32key: bool, ) -> HKEYType: root, path = parse_path(key) access_key = get_access_key(access, key_wow64_32key) if not self._client or root != self._root: self._client = ConnectRegistry(self.host, root) key_handle = OpenKey( key=self._client, sub_key=path, reserved=0, access=access_key, ) return key_handle def close(self) -> None: if self._client: self._client.Close() def __enter__(self) -> "WinRegistry": return self def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore self.close() if exc_val: raise def read_entry( self, reg_key: str, name: str, key_wow64_32key: bool = False, ) -> RegEntry: handle = self._get_handler(reg_key, KEY_READ, key_wow64_32key) raw_value, raw_type = QueryValueEx(handle, name) return RegEntry( reg_key=reg_key, name=name, value=raw_value, type=WinregType(raw_type), host=self.host, ) def write_entry( self, reg_key: str, name: str, value: Any = None, reg_type: Union[WinregType, int] = WinregType.REG_SZ, key_wow64_32key: bool = False, ) -> None: if isinstance(reg_type, int): reg_type = WinregType(reg_type) handle = self._get_handler(reg_key, KEY_SET_VALUE, key_wow64_32key) SetValueEx(handle, name, 0, reg_type.value, value) def delete_entry( self, key: str, name: str, key_wow64_32key: bool = False, ) -> None: handle = self._get_handler(key, KEY_SET_VALUE, key_wow64_32key) DeleteValue(handle, name) def read_key( self, name: str, key_wow64_32key: bool = False, ) -> RegKey: handle = self._get_handler(name, KEY_READ, key_wow64_32key) keys_num, values_num, modify = QueryInfoKey(handle) modify_at = datetime(1601, 1, 1) + timedelta(microseconds=modify / 10) keys = list() entries = list() for key_i in range(0, keys_num): keys.append(EnumKey(handle, key_i)) for key_i in range(0, values_num): entry_name, value, raw_type = EnumValue(handle, key_i) entries.append( RegEntry( reg_key=name, name=entry_name, value=value, type=WinregType(raw_type), host=self.host, ) ) return RegKey( name=name, reg_keys=keys, entries=entries, modify_at=modify_at, ) def create_key( self, name: str, key_wow64_32key: bool = False, ) -> None: handler = None sub_keys = name.split("\\") i = 0 while i < len(sub_keys) and not handler: try: current = "\\".join(sub_keys[: len(sub_keys) - i]) handler = self._get_handler(current, KEY_WRITE, key_wow64_32key) except FileNotFoundError: i += 1 before_index = len(sub_keys) - i tail = "\\".join(sub_keys[before_index:]) CreateKeyEx( key=handler, # type: ignore sub_key=tail, reserved=0, access=get_access_key(KEY_WRITE), ) def delete_key( self, name: str, key_wow64_32key: bool = False, ) -> None: parental, key_name = name.rsplit(sep="\\", maxsplit=1) handle = self._get_handler(parental, KEY_WRITE, key_wow64_32key) DeleteKey(handle, key_name) def delete_key_tree( self, name: str, key_wow64_32key: bool = False, ) -> None: handle = self._get_handler(name, KEY_READ, key_wow64_32key) 
keys_num, values_num, modify = QueryInfoKey(handle) # pylint: disable=unused-variable for key_i in range(0, keys_num): key = EnumKey(handle, key_i) self.delete_key_tree(f"{name}\\{key}", key_wow64_32key) handle.Close() self.delete_key(name, key_wow64_32key)
from datetime import datetime, timedelta from typing import Any, Optional, Union from winreg import ( KEY_READ, KEY_SET_VALUE, KEY_WRITE, ConnectRegistry, CreateKeyEx, DeleteKey, DeleteValue, EnumKey, EnumValue, HKEYType, OpenKey, QueryInfoKey, QueryValueEx, SetValueEx, ) from winregistry.consts import WinregType from winregistry.models import RegEntry, RegKey from winregistry.utils import get_access_key, parse_path class WinRegistry: def __init__( self, host: Optional[str] = None, ) -> None: self.host: Optional[str] = host self._client: Optional[HKEYType] = None self._handler = None self._root: Optional[HKEYType] = None def _get_handler( self, key: str, access: int, key_wow64_32key: bool, ) -> HKEYType: root, path = parse_path(key) access_key = get_access_key(access, key_wow64_32key) if not self._client or root != self._root: self._client = ConnectRegistry(self.host, root) key_handle = OpenKey( key=self._client, sub_key=path, reserved=0, access=access_key, ) return key_handle def close(self) -> None: if self._client: self._client.Close() def __enter__(self) -> "WinRegistry": return self def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore self.close() if exc_val: raise def read_entry( self, reg_key: str, name: str, key_wow64_32key: bool = False, ) -> RegEntry: handle = self._get_handler(reg_key, KEY_READ, key_wow64_32key) raw_value, raw_type = QueryValueEx(handle, name) return RegEntry( reg_key=reg_key, name=name, value=raw_value, type=WinregType(raw_type), host=self.host, ) def write_entry( self, reg_key: str, name: str, value: Any = None, reg_type: Union[WinregType, int] = WinregType.REG_SZ, key_wow64_32key: bool = False, ) -> None: if isinstance(reg_type, int): reg_type = WinregType(reg_type) handle = self._get_handler(reg_key, KEY_SET_VALUE, key_wow64_32key) SetValueEx(handle, name, 0, reg_type.value, value) def delete_entry( self, key: str, name: str, key_wow64_32key: bool = False, ) -> None: handle = self._get_handler(key, KEY_SET_VALUE, key_wow64_32key) DeleteValue(handle, name) def read_key( self, name: str, key_wow64_32key: bool = False, ) -> RegKey: handle = self._get_handler(name, KEY_READ, key_wow64_32key) keys_num, values_num, modify = QueryInfoKey(handle) modify_at = datetime(1601, 1, 1) + timedelta(microseconds=modify / 10) keys = list() entries = list() for key_i in range(0, keys_num): keys.append(EnumKey(handle, key_i)) for key_i in range(0, values_num): entry_name, value, raw_type = EnumValue(handle, key_i) entries.append( RegEntry( reg_key=name, name=entry_name, value=value, type=WinregType(raw_type), host=self.host, ) ) return RegKey( name=name, reg_keys=keys, entries=entries, modify_at=modify_at, ) def create_key( self, name: str, key_wow64_32key: bool = False, ) -> None: handler = None sub_keys = name.split("\\") i = 0 while i < len(sub_keys) and not handler: try: current = "\\".join(sub_keys[: len(sub_keys) - i]) handler = self._get_handler(current, KEY_WRITE, key_wow64_32key) except FileNotFoundError: i += 1 before_index = len(sub_keys) - i tail = "\\".join(sub_keys[before_index:]) CreateKeyEx( key=handler, # type: ignore sub_key=tail, reserved=0, access=get_access_key(KEY_WRITE), ) def delete_key( self, name: str, key_wow64_32key: bool = False, ) -> None: parental, key_name = name.rsplit(sep="\\", maxsplit=1) handle = self._get_handler(parental, KEY_WRITE, key_wow64_32key) DeleteKey(handle, key_name) def delete_key_tree( self, name: str, key_wow64_32key: bool = False, ) -> None: handle = self._get_handler(name, KEY_READ, key_wow64_32key) keys_num, 
values_num, modify = QueryInfoKey(handle) # pylint: disable=unused-variable for key_i in range(0, keys_num): key = EnumKey(handle, key_i) self.delete_key_tree(f"{name}\\{key}", key_wow64_32key) handle.Close() self.delete_key(name, key_wow64_32key)
en
0.383714
# type: ignore # type: ignore # pylint: disable=unused-variable
2.093836
2
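A hypothetical usage sketch of the WinRegistry class above (Windows only). The methods are exactly the ones defined in the record; the HKLM root alias accepted by parse_path and the sample key path are assumptions on my part:

from winregistry import WinRegistry

with WinRegistry() as reg:
    reg.create_key(r"HKLM\SOFTWARE\MySampleKey")   # walks up until an existing parent is found
    reg.write_entry(r"HKLM\SOFTWARE\MySampleKey", "greeting", "hello")  # REG_SZ by default
    entry = reg.read_entry(r"HKLM\SOFTWARE\MySampleKey", "greeting")
    print(entry.value)                             # -> "hello"
    reg.delete_key_tree(r"HKLM\SOFTWARE\MySampleKey")  # recursive delete via EnumKey, as above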
diplomacy_research/models/draw/tests/draw_model_test_setup.py
wwongkamjan/dipnet_press
39
6621705
# ============================================================================== # Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # ============================================================================== """ Generic class to tests for draw model correctness """ from tornado import gen from tornado.ioloop import IOLoop from diplomacy import Game from diplomacy_research.models.datasets.queue_dataset import QueueDataset from diplomacy_research.models.state_space import extract_state_proto, extract_phase_history_proto, \ extract_possible_orders_proto from diplomacy_research.utils.cluster import process_fetches_dict class DrawModelTestSetup(): """ Creates a testable setup to test a model and a constructor """ def __init__(self, policy_model_ctor, value_model_ctor, draw_model_ctor, dataset_builder, adapter_ctor, load_policy_args, load_value_args, load_draw_args): """ Constructor :param policy_model_ctor: The policy model constructor to create the policy. :param value_model_ctor: The value model constructor to create the value model. :param draw_model_ctor: The draw model constructor to create the draw model. :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods :param adaptor_ctor: The policy adapter constructor to create the policy adapter :param load_policy_args: Reference to the callable function required to load policy args :param load_value_args: Reference to the callable function required to load value args :param load_draw_args: Reference to the callable function required to load draw args :type policy_model_ctor: diplomacy_research.models.policy.base_policy_model.BasePolicyModel.__class__ :type value_model_ctor: diplomacy_research.models.value.base_value_model.BaseValueModel.__class__ :type draw_model_ctor: diplomacy_research.models.draw.base_draw_model.BaseDrawModel.__class__ :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type adapter_ctor: diplomacy_research.models.policy.base_policy_adapter.BasePolicyAdapter.__class__ """ # pylint: disable=too-many-arguments # Parsing new flags args = load_policy_args() if load_value_args is not None: args += load_value_args() args += load_draw_args() self.hparams = self.parse_flags(args) # Other attributes self.graph = None self.sess = None self.adapter = None self.queue_dataset = None self.policy_model_ctor = policy_model_ctor self.value_model_ctor = value_model_ctor self.draw_model_ctor = draw_model_ctor self.dataset_builder = dataset_builder self.adapter_ctor = adapter_ctor def build_model(self): """ Builds the model """ from diplomacy_research.utils.tensorflow import tf graph = tf.Graph() with graph.as_default(): # Creating dataset self.queue_dataset = QueueDataset(batch_size=self.hparams['batch_size'], dataset_builder=self.dataset_builder) # Creating model and validating model = self.policy_model_ctor(self.queue_dataset, self.hparams) if self.value_model_ctor is not None: model = 
self.value_model_ctor(model, self.queue_dataset, self.hparams) model = self.draw_model_ctor(model, self.queue_dataset, self.hparams) model.finalize_build() model.validate() self.graph = graph self.sess = tf.Session(graph=graph) @staticmethod def parse_flags(args): """ Parse flags without calling tf.app.run() """ define = {'bool': lambda x: bool(x), # pylint: disable=unnecessary-lambda 'int': lambda x: int(x), # pylint: disable=unnecessary-lambda 'str': lambda x: str(x), # pylint: disable=unnecessary-lambda 'float': lambda x: float(x), # pylint: disable=unnecessary-lambda '---': lambda x: x} # pylint: disable=unnecessary-lambda # Keeping a dictionary of parse args to overwrite if provided multiple times flags = {} for arg in args: arg_type, arg_name, arg_value, _ = arg flags[arg_name] = define[arg_type](arg_value) if arg_type == '---' and arg_name in flags: del flags[arg_name] return flags def run_tests(self): """ Run all tests """ IOLoop.current().run_sync(self.run_tests_async) @gen.coroutine def run_tests_async(self): """ Run tests in an asynchronous IO Loop """ self.build_model() self.adapter = self.adapter_ctor(self.queue_dataset, self.graph, session=self.sess) yield self.test_get_draw_prob() @gen.coroutine def test_get_draw_prob(self): """ Checks if the .get_draw_prob method works """ game = Game() state_proto = extract_state_proto(game) phase_history_proto = extract_phase_history_proto(game) possible_orders_proto = extract_possible_orders_proto(game) locs = ['PAR', 'MAR', 'BUR'] kwargs = {'player_seed': 0, 'noise': 0., 'temperature': 1., 'dropout_rate': 0.} # Temperature == 1. # With and without prefetching for use_prefetching in (False, True): if not use_prefetching: _, policy_details = yield self.adapter.get_orders(locs, state_proto, 'FRANCE', phase_history_proto, possible_orders_proto, **kwargs) else: fetches = yield self.adapter.get_orders(locs, state_proto, 'FRANCE', phase_history_proto, possible_orders_proto, prefetch=True, **kwargs) fetches = yield process_fetches_dict(self.queue_dataset, fetches) _, policy_details = yield self.adapter.get_orders(locs, state_proto, 'FRANCE', phase_history_proto, possible_orders_proto, fetches=fetches, **kwargs) assert policy_details['draw_action'] in (True, False) assert 0. < policy_details['draw_prob'] < 1.
# ============================================================================== # Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # ============================================================================== """ Generic class to tests for draw model correctness """ from tornado import gen from tornado.ioloop import IOLoop from diplomacy import Game from diplomacy_research.models.datasets.queue_dataset import QueueDataset from diplomacy_research.models.state_space import extract_state_proto, extract_phase_history_proto, \ extract_possible_orders_proto from diplomacy_research.utils.cluster import process_fetches_dict class DrawModelTestSetup(): """ Creates a testable setup to test a model and a constructor """ def __init__(self, policy_model_ctor, value_model_ctor, draw_model_ctor, dataset_builder, adapter_ctor, load_policy_args, load_value_args, load_draw_args): """ Constructor :param policy_model_ctor: The policy model constructor to create the policy. :param value_model_ctor: The value model constructor to create the value model. :param draw_model_ctor: The draw model constructor to create the draw model. :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods :param adaptor_ctor: The policy adapter constructor to create the policy adapter :param load_policy_args: Reference to the callable function required to load policy args :param load_value_args: Reference to the callable function required to load value args :param load_draw_args: Reference to the callable function required to load draw args :type policy_model_ctor: diplomacy_research.models.policy.base_policy_model.BasePolicyModel.__class__ :type value_model_ctor: diplomacy_research.models.value.base_value_model.BaseValueModel.__class__ :type draw_model_ctor: diplomacy_research.models.draw.base_draw_model.BaseDrawModel.__class__ :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type adapter_ctor: diplomacy_research.models.policy.base_policy_adapter.BasePolicyAdapter.__class__ """ # pylint: disable=too-many-arguments # Parsing new flags args = load_policy_args() if load_value_args is not None: args += load_value_args() args += load_draw_args() self.hparams = self.parse_flags(args) # Other attributes self.graph = None self.sess = None self.adapter = None self.queue_dataset = None self.policy_model_ctor = policy_model_ctor self.value_model_ctor = value_model_ctor self.draw_model_ctor = draw_model_ctor self.dataset_builder = dataset_builder self.adapter_ctor = adapter_ctor def build_model(self): """ Builds the model """ from diplomacy_research.utils.tensorflow import tf graph = tf.Graph() with graph.as_default(): # Creating dataset self.queue_dataset = QueueDataset(batch_size=self.hparams['batch_size'], dataset_builder=self.dataset_builder) # Creating model and validating model = self.policy_model_ctor(self.queue_dataset, self.hparams) if self.value_model_ctor is not None: model = 
self.value_model_ctor(model, self.queue_dataset, self.hparams) model = self.draw_model_ctor(model, self.queue_dataset, self.hparams) model.finalize_build() model.validate() self.graph = graph self.sess = tf.Session(graph=graph) @staticmethod def parse_flags(args): """ Parse flags without calling tf.app.run() """ define = {'bool': lambda x: bool(x), # pylint: disable=unnecessary-lambda 'int': lambda x: int(x), # pylint: disable=unnecessary-lambda 'str': lambda x: str(x), # pylint: disable=unnecessary-lambda 'float': lambda x: float(x), # pylint: disable=unnecessary-lambda '---': lambda x: x} # pylint: disable=unnecessary-lambda # Keeping a dictionary of parse args to overwrite if provided multiple times flags = {} for arg in args: arg_type, arg_name, arg_value, _ = arg flags[arg_name] = define[arg_type](arg_value) if arg_type == '---' and arg_name in flags: del flags[arg_name] return flags def run_tests(self): """ Run all tests """ IOLoop.current().run_sync(self.run_tests_async) @gen.coroutine def run_tests_async(self): """ Run tests in an asynchronous IO Loop """ self.build_model() self.adapter = self.adapter_ctor(self.queue_dataset, self.graph, session=self.sess) yield self.test_get_draw_prob() @gen.coroutine def test_get_draw_prob(self): """ Checks if the .get_draw_prob method works """ game = Game() state_proto = extract_state_proto(game) phase_history_proto = extract_phase_history_proto(game) possible_orders_proto = extract_possible_orders_proto(game) locs = ['PAR', 'MAR', 'BUR'] kwargs = {'player_seed': 0, 'noise': 0., 'temperature': 1., 'dropout_rate': 0.} # Temperature == 1. # With and without prefetching for use_prefetching in (False, True): if not use_prefetching: _, policy_details = yield self.adapter.get_orders(locs, state_proto, 'FRANCE', phase_history_proto, possible_orders_proto, **kwargs) else: fetches = yield self.adapter.get_orders(locs, state_proto, 'FRANCE', phase_history_proto, possible_orders_proto, prefetch=True, **kwargs) fetches = yield process_fetches_dict(self.queue_dataset, fetches) _, policy_details = yield self.adapter.get_orders(locs, state_proto, 'FRANCE', phase_history_proto, possible_orders_proto, fetches=fetches, **kwargs) assert policy_details['draw_action'] in (True, False) assert 0. < policy_details['draw_prob'] < 1.
en
0.635688
# ============================================================================== # Copyright 2019 - <NAME> # # NOTICE: Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # ============================================================================== Generic class to test for draw model correctness Creates a testable setup to test a model and a constructor Constructor :param policy_model_ctor: The policy model constructor to create the policy. :param value_model_ctor: The value model constructor to create the value model. :param draw_model_ctor: The draw model constructor to create the draw model. :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods :param adapter_ctor: The policy adapter constructor to create the policy adapter :param load_policy_args: Reference to the callable function required to load policy args :param load_value_args: Reference to the callable function required to load value args :param load_draw_args: Reference to the callable function required to load draw args :type policy_model_ctor: diplomacy_research.models.policy.base_policy_model.BasePolicyModel.__class__ :type value_model_ctor: diplomacy_research.models.value.base_value_model.BaseValueModel.__class__ :type draw_model_ctor: diplomacy_research.models.draw.base_draw_model.BaseDrawModel.__class__ :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder :type adapter_ctor: diplomacy_research.models.policy.base_policy_adapter.BasePolicyAdapter.__class__ # pylint: disable=too-many-arguments # Parsing new flags # Other attributes Builds the model # Creating dataset # Creating model and validating Parse flags without calling tf.app.run() # pylint: disable=unnecessary-lambda # pylint: disable=unnecessary-lambda # pylint: disable=unnecessary-lambda # pylint: disable=unnecessary-lambda # pylint: disable=unnecessary-lambda # Keeping a dictionary of parse args to overwrite if provided multiple times Run all tests Run tests in an asynchronous IO Loop Checks if the .get_draw_prob method works # Temperature == 1. # With and without prefetching
1.96442
2
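parse_flags in the record above turns 4-tuples of (type, name, value, _) into a flags dict, with a '---' entry deleting a previously set flag. A worked standalone example of that convention; the logic is restated inline (importing the test setup pulls in tornado and diplomacy) and the flag names are illustrative:

def parse_flags(args):
    # same semantics as DrawModelTestSetup.parse_flags, using plain builtins
    define = {'bool': bool, 'int': int, 'str': str, 'float': float, '---': lambda x: x}
    flags = {}
    for arg_type, arg_name, arg_value, _ in args:
        flags[arg_name] = define[arg_type](arg_value)
        if arg_type == '---' and arg_name in flags:
            del flags[arg_name]
    return flags

flags = parse_flags([
    ('int', 'batch_size', '32', None),
    ('float', 'learning_rate', '1e-3', None),
    ('bool', 'use_v2', 1, None),
    ('---', 'use_v2', None, None),   # '---' removes the flag set just above
])
assert flags == {'batch_size': 32, 'learning_rate': 0.001}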
tests/test_mapper.py
jcklie/wikimapper
69
6621706
import pytest from tests.fixtures import * BAVARIAN_PARAMS = [ pytest.param("Stoaboog", "Q168327"), pytest.param("Wechslkrod", "Q243242"), pytest.param("Wickiana", "Q2567666"), pytest.param("Ulrich_Zwingli", "Q123034"), pytest.param("Jingstes_Gricht", "Q1821239"), pytest.param("Sånkt_Johann_im_Pongau", "Q251022", id="Has special character"), pytest.param("Quadrátkilometa", "Q25343", id="Has redirect"), pytest.param("D'_boarische_Woocha", "Q20616808", id="Has special character"), pytest.param("I am not in the Wiki", None, id="Title not in the wiki"), pytest.param("Pergentinus_und_Laurentinus", None, id="In the index, but not mapped"), ] @pytest.mark.parametrize("page_title, expected", BAVARIAN_PARAMS) def test_title_to_id(bavarian_wiki_mapper, page_title: str, expected: str): mapper = bavarian_wiki_mapper wikidata_id = mapper.title_to_id(page_title) assert wikidata_id == expected @pytest.mark.parametrize("page_title, expected", BAVARIAN_PARAMS) def test_url_to_id(bavarian_wiki_mapper, page_title: str, expected: str): mapper = bavarian_wiki_mapper url = "https://bar.wikipedia.org/wiki/" + page_title wikidata_id = mapper.url_to_id(url) assert wikidata_id == expected @pytest.mark.parametrize( "wikidata_id, expected", [ ("Q1027119", ["Gallesium", "Gallese", "Gallesium_(Titularbistum)"]), ("Q102904", ["Vulkanologie", "Vuikanologie"]), ("Q10296976", ["Stootsfiahra_52", "Liste_der_Staatsoberhäupter_52"]), ("12345678909876543210", []), ], ) def test_id_to_titles(bavarian_wiki_mapper, wikidata_id: str, expected: str): mapper = bavarian_wiki_mapper titles = mapper.id_to_titles(wikidata_id) assert set(titles) == set(expected)
import pytest from tests.fixtures import * BAVARIAN_PARAMS = [ pytest.param("Stoaboog", "Q168327"), pytest.param("Wechslkrod", "Q243242"), pytest.param("Wickiana", "Q2567666"), pytest.param("Ulrich_Zwingli", "Q123034"), pytest.param("Jingstes_Gricht", "Q1821239"), pytest.param("Sånkt_Johann_im_Pongau", "Q251022", id="Has special character"), pytest.param("Quadrátkilometa", "Q25343", id="Has redirect"), pytest.param("D'_boarische_Woocha", "Q20616808", id="Has special character"), pytest.param("I am not in the Wiki", None, id="Title not in the wiki"), pytest.param("Pergentinus_und_Laurentinus", None, id="In the index, but not mapped"), ] @pytest.mark.parametrize("page_title, expected", BAVARIAN_PARAMS) def test_title_to_id(bavarian_wiki_mapper, page_title: str, expected: str): mapper = bavarian_wiki_mapper wikidata_id = mapper.title_to_id(page_title) assert wikidata_id == expected @pytest.mark.parametrize("page_title, expected", BAVARIAN_PARAMS) def test_url_to_id(bavarian_wiki_mapper, page_title: str, expected: str): mapper = bavarian_wiki_mapper url = "https://bar.wikipedia.org/wiki/" + page_title wikidata_id = mapper.url_to_id(url) assert wikidata_id == expected @pytest.mark.parametrize( "wikidata_id, expected", [ ("Q1027119", ["Gallesium", "Gallese", "Gallesium_(Titularbistum)"]), ("Q102904", ["Vulkanologie", "Vuikanologie"]), ("Q10296976", ["Stootsfiahra_52", "Liste_der_Staatsoberhäupter_52"]), ("12345678909876543210", []), ], ) def test_id_to_titles(bavarian_wiki_mapper, wikidata_id: str, expected: str): mapper = bavarian_wiki_mapper titles = mapper.id_to_titles(wikidata_id) assert set(titles) == set(expected)
none
1
2.444057
2
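The tests above imply the public WikiMapper API: title_to_id, url_to_id, and id_to_titles over a prebuilt SQLite index. A usage sketch; the Bavarian index filename is an assumption:

from wikimapper import WikiMapper

mapper = WikiMapper("index_barwiki-latest.db")  # assumed index file built with wikimapper
print(mapper.title_to_id("Stoaboog"))                               # -> "Q168327"
print(mapper.url_to_id("https://bar.wikipedia.org/wiki/Stoaboog"))  # same lookup via URL
print(mapper.id_to_titles("Q102904"))                               # -> ["Vulkanologie", "Vuikanologie"]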
utils/formatters.py
NicholasJohansan/EP5
0
6621707
<gh_stars>0 def seconds_to_time(seconds): days = (seconds // (60*60*24)), "day" seconds %= (60*60*24) hours = (seconds // (60*60)), "hour" seconds %= (60*60) minutes = (seconds // (60)), "minute" seconds = seconds % 60, "second" _time = [] for u, m in [days, hours, minutes, seconds]: if u: if u > 1: m += "s" _time.append(f"{u} {m}") return ", ".join(_time)
def seconds_to_time(seconds): days = (seconds // (60*60*24)), "day" seconds %= (60*60*24) hours = (seconds // (60*60)), "hour" seconds %= (60*60) minutes = (seconds // (60)), "minute" seconds = seconds % 60, "second" _time = [] for u, m in [days, hours, minutes, seconds]: if u: if u > 1: m += "s" _time.append(f"{u} {m}") return ", ".join(_time)
none
1
3.713325
4
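Quick hand-verified checks for seconds_to_time above; note the zero-seconds edge case, where the function returns an empty string because no unit is non-zero:

assert seconds_to_time(90061) == "1 day, 1 hour, 1 minute, 1 second"  # 86400 + 3600 + 60 + 1
assert seconds_to_time(3600) == "1 hour"    # zero-valued units are skipped
assert seconds_to_time(120) == "2 minutes"  # plural "s" is appended when u > 1
assert seconds_to_time(0) == ""             # edge case: nothing to report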
nucypher/tests/blockchain/eth/contracts/main/policy_manager/test_policy_manager.py
kanzeparov/NuCypher
0
6621708
""" This file is part of nucypher. nucypher is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. nucypher is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with nucypher. If not, see <https://www.gnu.org/licenses/>. """ import os import pytest from eth_tester.exceptions import TransactionFailed from web3.contract import Contract NULL_ADDR = '0x' + '0' * 40 CLIENT_FIELD = 0 RATE_FIELD = 1 FIRST_REWARD_FIELD = 2 START_PERIOD_FIELD = 3 LAST_PERIOD_FIELD = 4 DISABLED_FIELD = 5 REWARD_FIELD = 0 REWARD_RATE_FIELD = 1 LAST_MINED_PERIOD_FIELD = 2 MIN_REWARD_RATE_FIELD = 3 secret = (123456).to_bytes(32, byteorder='big') secret2 = (654321).to_bytes(32, byteorder='big') POLICY_ID_LENGTH = 16 policy_id = os.urandom(POLICY_ID_LENGTH) policy_id_2 = os.urandom(POLICY_ID_LENGTH) policy_id_3 = os.urandom(POLICY_ID_LENGTH) rate = 20 number_of_periods = 10 value = rate * number_of_periods @pytest.mark.slow def test_create_revoke(testerchain, escrow, policy_manager): creator, client, bad_node, node1, node2, node3, *everyone_else = testerchain.interface.w3.eth.accounts client_balance = testerchain.interface.w3.eth.getBalance(client) policy_created_log = policy_manager.events.PolicyCreated.createFilter(fromBlock='latest') arrangement_revoked_log = policy_manager.events.ArrangementRevoked.createFilter(fromBlock='latest') policy_revoked_log = policy_manager.events.PolicyRevoked.createFilter(fromBlock='latest') arrangement_refund_log = policy_manager.events.RefundForArrangement.createFilter(fromBlock='latest') policy_refund_log = policy_manager.events.RefundForPolicy.createFilter(fromBlock='latest') # Check registered nodes assert 0 < policy_manager.functions.nodes(node1).call()[LAST_MINED_PERIOD_FIELD] assert 0 < policy_manager.functions.nodes(node2).call()[LAST_MINED_PERIOD_FIELD] assert 0 < policy_manager.functions.nodes(node3).call()[LAST_MINED_PERIOD_FIELD] assert 0 == policy_manager.functions.nodes(bad_node).call()[LAST_MINED_PERIOD_FIELD] # Try to create policy for bad (unregistered) node with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id, 1, 0, [bad_node])\ .transact({'from': client, 'value': value}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id, 1, 0, [node1, bad_node])\ .transact({'from': client, 'value': value}) testerchain.wait_for_receipt(tx) # Try to create policy with no ETH with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id, 1, 0, [node1]).transact({'from': client}) testerchain.wait_for_receipt(tx) # Create policy period = escrow.functions.getCurrentPeriod().call() tx = policy_manager.functions.createPolicy(policy_id, number_of_periods, 0, [node1])\ .transact({'from': client, 'value': value, 'gas_price': 0}) testerchain.wait_for_receipt(tx) # Check balances and policy info assert value == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - 200 == testerchain.interface.w3.eth.getBalance(client) policy = policy_manager.functions.policies(policy_id).call() 
assert client == policy[CLIENT_FIELD] assert rate == policy[RATE_FIELD] assert 0 == policy[FIRST_REWARD_FIELD] assert period + 1 == policy[START_PERIOD_FIELD] assert period + 10 == policy[LAST_PERIOD_FIELD] assert not policy[DISABLED_FIELD] assert 1 == policy_manager.functions.getArrangementsLength(policy_id).call() assert node1 == policy_manager.functions.getArrangementInfo(policy_id, 0).call()[0] events = policy_created_log.get_all_entries() assert 1 == len(events) event_args = events[0]['args'] assert policy_id == event_args['policyId'] assert client == event_args['client'] # Can't create policy with the same id with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id, number_of_periods, 0, [node1])\ .transact({'from': client, 'value': value}) testerchain.wait_for_receipt(tx) # Only policy owner can revoke policy with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokePolicy(policy_id).transact({'from': creator}) testerchain.wait_for_receipt(tx) tx = policy_manager.functions.revokePolicy(policy_id).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert policy_manager.functions.policies(policy_id).call()[DISABLED_FIELD] events = policy_revoked_log.get_all_entries() assert 1 == len(events) event_args = events[0]['args'] assert policy_id == event_args['policyId'] assert client == event_args['client'] assert value == event_args['value'] events = arrangement_revoked_log.get_all_entries() assert 1 == len(events) event_args = events[0]['args'] assert policy_id == event_args['policyId'] assert client == event_args['client'] assert node1 == event_args['node'] assert value == event_args['value'] # Can't revoke again because policy and all arrangements are disabled with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokePolicy(policy_id).transact({'from': client}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id, node1).transact({'from': client}) testerchain.wait_for_receipt(tx) # Create new policy period = escrow.functions.getCurrentPeriod().call() tx = policy_manager.functions.createPolicy(policy_id_2, number_of_periods, 0, [node1, node2, node3])\ .transact({'from': client, 'value': 6 * value, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 6 * value == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - 6 * value == testerchain.interface.w3.eth.getBalance(client) policy = policy_manager.functions.policies(policy_id_2).call() assert client == policy[CLIENT_FIELD] assert 2 * rate == policy[RATE_FIELD] assert 0 == policy[FIRST_REWARD_FIELD] assert period + 1 == policy[START_PERIOD_FIELD] assert period + 10 == policy[LAST_PERIOD_FIELD] assert not policy[DISABLED_FIELD] events = policy_created_log.get_all_entries() assert 2 == len(events) event_args = events[1]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] # Can't revoke nonexistent arrangement with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, testerchain.interface.w3.eth.accounts[6])\ .transact({'from': client}) testerchain.wait_for_receipt(tx) # Can't revoke null arrangement (also it's nonexistent) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, NULL_ADDR).transact({'from': client}) 
testerchain.wait_for_receipt(tx) # Revoke only one arrangement tx = policy_manager.functions.revokeArrangement(policy_id_2, node1).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 4 * value == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - 4 * value == testerchain.interface.w3.eth.getBalance(client) assert not policy_manager.functions.policies(policy_id_2).call()[DISABLED_FIELD] events = arrangement_revoked_log.get_all_entries() assert 2 == len(events) event_args = events[1]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] assert node1 == event_args['node'] assert 2 * value == event_args['value'] # Can't revoke again because arrangement is disabled with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, node1).transact({'from': client}) testerchain.wait_for_receipt(tx) # Can't revoke null arrangement (it's nonexistent) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, NULL_ADDR).transact({'from': client}) testerchain.wait_for_receipt(tx) # Revoke policy with remaining arrangements tx = policy_manager.functions.revokePolicy(policy_id_2).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 0 == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance == testerchain.interface.w3.eth.getBalance(client) assert policy_manager.functions.policies(policy_id_2).call()[DISABLED_FIELD] events = arrangement_revoked_log.get_all_entries() assert 4 == len(events) event_args = events[2]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] assert node2 == event_args['node'] assert 2 * value == event_args['value'] event_args = events[3]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] assert node3 == event_args['node'] assert 2 * value == event_args['value'] events = policy_revoked_log.get_all_entries() assert 2 == len(events) event_args = events[1]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] assert 4 * value == event_args['value'] # Can't revoke policy again because policy and all arrangements are disabled with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokePolicy(policy_id_2).transact({'from': client}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, node1).transact({'from': client}) testerchain.wait_for_receipt(tx) # Can't create policy with wrong ETH value - when reward is not calculated by formula: # numberOfNodes * (firstPartialReward + rewardRate * numberOfPeriods) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 10, 0, [node1]).transact({'from': client, 'value': 11}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 10, 1, [node1]).transact({'from': client, 'value': 22}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 10, 1, [node1]).transact({'from': client, 'value': 11}) testerchain.wait_for_receipt(tx) # Set minimum reward rate for nodes tx = policy_manager.functions.setMinRewardRate(10).transact({'from': 
node1}) testerchain.wait_for_receipt(tx) tx = policy_manager.functions.setMinRewardRate(20).transact({'from': node2}) testerchain.wait_for_receipt(tx) assert 10 == policy_manager.functions.nodes(node1).call()[MIN_REWARD_RATE_FIELD] assert 20 == policy_manager.functions.nodes(node2).call()[MIN_REWARD_RATE_FIELD] # Try to create policy with low rate with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 1, 0, [node1])\ .transact({'from': client, 'value': 5}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 1, 0, [node1, node2])\ .transact({'from': client, 'value': 30}) testerchain.wait_for_receipt(tx) # Create new policy with payment for the first period period = escrow.functions.getCurrentPeriod().call() tx = policy_manager.functions.createPolicy(policy_id_3, number_of_periods, int(0.5 * rate), [node1, node2, node3])\ .transact({'from': client, 'value': int((0.5 * rate + rate * number_of_periods) * 3), 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 3 * value + 1.5 * rate == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - int(3 * value + 1.5 * rate) == testerchain.interface.w3.eth.getBalance(client) policy = policy_manager.functions.policies(policy_id_3).call() assert client == policy[CLIENT_FIELD] assert rate == policy[RATE_FIELD] assert 0.5 * rate == policy[FIRST_REWARD_FIELD] assert period + 1 == policy[START_PERIOD_FIELD] assert period + 10 == policy[LAST_PERIOD_FIELD] assert not policy[DISABLED_FIELD] events = policy_created_log.get_all_entries() assert 3 == len(events) event_args = events[2]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] # Revoke only one arrangement tx = policy_manager.functions.revokeArrangement(policy_id_3, node1).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 2 * value + rate == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - (2 * value + rate) == testerchain.interface.w3.eth.getBalance(client) assert not policy_manager.functions.policies(policy_id_3).call()[DISABLED_FIELD] events = arrangement_revoked_log.get_all_entries() assert 5 == len(events) event_args = events[4]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] assert node1 == event_args['node'] assert value + 0.5 * rate == event_args['value'] # Revoke policy tx = policy_manager.functions.revokePolicy(policy_id_3).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 0 == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance == testerchain.interface.w3.eth.getBalance(client) assert policy_manager.functions.policies(policy_id_3).call()[DISABLED_FIELD] events = arrangement_revoked_log.get_all_entries() assert 7 == len(events) event_args = events[5]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] assert node2 == event_args['node'] assert value + 0.5 * rate == event_args['value'] event_args = events[6]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] assert node3 == event_args['node'] assert value + 0.5 * rate == event_args['value'] events = policy_revoked_log.get_all_entries() assert 3 == len(events) event_args = events[2]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] 
assert 2 * value + rate == event_args['value'] events = arrangement_refund_log.get_all_entries() assert 0 == len(events) events = policy_refund_log.get_all_entries() assert 0 == len(events) @pytest.mark.slow def test_upgrading(testerchain): creator = testerchain.interface.w3.eth.accounts[0] secret_hash = testerchain.interface.w3.sha3(secret) secret2_hash = testerchain.interface.w3.sha3(secret2) # Deploy contracts escrow1, _ = testerchain.interface.deploy_contract('MinersEscrowForPolicyMock', 1) escrow2, _ = testerchain.interface.deploy_contract('MinersEscrowForPolicyMock', 1) address1 = escrow1.address address2 = escrow2.address contract_library_v1, _ = testerchain.interface.deploy_contract('PolicyManager', address1) dispatcher, _ = testerchain.interface.deploy_contract('Dispatcher', contract_library_v1.address, secret_hash) # Deploy second version of the contract contract_library_v2, _ = testerchain.interface.deploy_contract('PolicyManagerV2Mock', address2) contract = testerchain.interface.w3.eth.contract( abi=contract_library_v2.abi, address=dispatcher.address, ContractFactoryClass=Contract) # Upgrade to the second version assert address1 == contract.functions.escrow().call() tx = dispatcher.functions.upgrade(contract_library_v2.address, secret, secret2_hash).transact({'from': creator}) testerchain.wait_for_receipt(tx) # Check constructor and storage values assert contract_library_v2.address == dispatcher.functions.target().call() assert address2 == contract.functions.escrow().call() # Check new ABI tx = contract.functions.setValueToCheck(3).transact({'from': creator}) testerchain.wait_for_receipt(tx) assert 3 == contract.functions.valueToCheck().call() # Can't upgrade to the previous version or to the bad version contract_library_bad, _ = testerchain.interface.deploy_contract('PolicyManagerBad', address2) with pytest.raises((TransactionFailed, ValueError)): tx = dispatcher.functions.upgrade(contract_library_v1.address, secret2, secret_hash)\ .transact({'from': creator}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = dispatcher.functions.upgrade(contract_library_bad.address, secret2, secret_hash)\ .transact({'from': creator}) testerchain.wait_for_receipt(tx) # But can rollback tx = dispatcher.functions.rollback(secret2, secret_hash).transact({'from': creator}) testerchain.wait_for_receipt(tx) assert contract_library_v1.address == dispatcher.functions.target().call() assert address1 == contract.functions.escrow().call() # After rollback new ABI is unavailable with pytest.raises((TransactionFailed, ValueError)): tx = contract.functions.setValueToCheck(2).transact({'from': creator}) testerchain.wait_for_receipt(tx) # Try to upgrade to the bad version with pytest.raises((TransactionFailed, ValueError)): tx = dispatcher.functions.upgrade(contract_library_bad.address, secret, secret2_hash)\ .transact({'from': creator}) testerchain.wait_for_receipt(tx)
""" This file is part of nucypher. nucypher is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. nucypher is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with nucypher. If not, see <https://www.gnu.org/licenses/>. """ import os import pytest from eth_tester.exceptions import TransactionFailed from web3.contract import Contract NULL_ADDR = '0x' + '0' * 40 CLIENT_FIELD = 0 RATE_FIELD = 1 FIRST_REWARD_FIELD = 2 START_PERIOD_FIELD = 3 LAST_PERIOD_FIELD = 4 DISABLED_FIELD = 5 REWARD_FIELD = 0 REWARD_RATE_FIELD = 1 LAST_MINED_PERIOD_FIELD = 2 MIN_REWARD_RATE_FIELD = 3 secret = (123456).to_bytes(32, byteorder='big') secret2 = (654321).to_bytes(32, byteorder='big') POLICY_ID_LENGTH = 16 policy_id = os.urandom(POLICY_ID_LENGTH) policy_id_2 = os.urandom(POLICY_ID_LENGTH) policy_id_3 = os.urandom(POLICY_ID_LENGTH) rate = 20 number_of_periods = 10 value = rate * number_of_periods @pytest.mark.slow def test_create_revoke(testerchain, escrow, policy_manager): creator, client, bad_node, node1, node2, node3, *everyone_else = testerchain.interface.w3.eth.accounts client_balance = testerchain.interface.w3.eth.getBalance(client) policy_created_log = policy_manager.events.PolicyCreated.createFilter(fromBlock='latest') arrangement_revoked_log = policy_manager.events.ArrangementRevoked.createFilter(fromBlock='latest') policy_revoked_log = policy_manager.events.PolicyRevoked.createFilter(fromBlock='latest') arrangement_refund_log = policy_manager.events.RefundForArrangement.createFilter(fromBlock='latest') policy_refund_log = policy_manager.events.RefundForPolicy.createFilter(fromBlock='latest') # Check registered nodes assert 0 < policy_manager.functions.nodes(node1).call()[LAST_MINED_PERIOD_FIELD] assert 0 < policy_manager.functions.nodes(node2).call()[LAST_MINED_PERIOD_FIELD] assert 0 < policy_manager.functions.nodes(node3).call()[LAST_MINED_PERIOD_FIELD] assert 0 == policy_manager.functions.nodes(bad_node).call()[LAST_MINED_PERIOD_FIELD] # Try to create policy for bad (unregistered) node with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id, 1, 0, [bad_node])\ .transact({'from': client, 'value': value}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id, 1, 0, [node1, bad_node])\ .transact({'from': client, 'value': value}) testerchain.wait_for_receipt(tx) # Try to create policy with no ETH with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id, 1, 0, [node1]).transact({'from': client}) testerchain.wait_for_receipt(tx) # Create policy period = escrow.functions.getCurrentPeriod().call() tx = policy_manager.functions.createPolicy(policy_id, number_of_periods, 0, [node1])\ .transact({'from': client, 'value': value, 'gas_price': 0}) testerchain.wait_for_receipt(tx) # Check balances and policy info assert value == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - 200 == testerchain.interface.w3.eth.getBalance(client) policy = policy_manager.functions.policies(policy_id).call() 
assert client == policy[CLIENT_FIELD] assert rate == policy[RATE_FIELD] assert 0 == policy[FIRST_REWARD_FIELD] assert period + 1 == policy[START_PERIOD_FIELD] assert period + 10 == policy[LAST_PERIOD_FIELD] assert not policy[DISABLED_FIELD] assert 1 == policy_manager.functions.getArrangementsLength(policy_id).call() assert node1 == policy_manager.functions.getArrangementInfo(policy_id, 0).call()[0] events = policy_created_log.get_all_entries() assert 1 == len(events) event_args = events[0]['args'] assert policy_id == event_args['policyId'] assert client == event_args['client'] # Can't create policy with the same id with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id, number_of_periods, 0, [node1])\ .transact({'from': client, 'value': value}) testerchain.wait_for_receipt(tx) # Only policy owner can revoke policy with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokePolicy(policy_id).transact({'from': creator}) testerchain.wait_for_receipt(tx) tx = policy_manager.functions.revokePolicy(policy_id).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert policy_manager.functions.policies(policy_id).call()[DISABLED_FIELD] events = policy_revoked_log.get_all_entries() assert 1 == len(events) event_args = events[0]['args'] assert policy_id == event_args['policyId'] assert client == event_args['client'] assert value == event_args['value'] events = arrangement_revoked_log.get_all_entries() assert 1 == len(events) event_args = events[0]['args'] assert policy_id == event_args['policyId'] assert client == event_args['client'] assert node1 == event_args['node'] assert value == event_args['value'] # Can't revoke again because policy and all arrangements are disabled with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokePolicy(policy_id).transact({'from': client}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id, node1).transact({'from': client}) testerchain.wait_for_receipt(tx) # Create new policy period = escrow.functions.getCurrentPeriod().call() tx = policy_manager.functions.createPolicy(policy_id_2, number_of_periods, 0, [node1, node2, node3])\ .transact({'from': client, 'value': 6 * value, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 6 * value == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - 6 * value == testerchain.interface.w3.eth.getBalance(client) policy = policy_manager.functions.policies(policy_id_2).call() assert client == policy[CLIENT_FIELD] assert 2 * rate == policy[RATE_FIELD] assert 0 == policy[FIRST_REWARD_FIELD] assert period + 1 == policy[START_PERIOD_FIELD] assert period + 10 == policy[LAST_PERIOD_FIELD] assert not policy[DISABLED_FIELD] events = policy_created_log.get_all_entries() assert 2 == len(events) event_args = events[1]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] # Can't revoke nonexistent arrangement with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, testerchain.interface.w3.eth.accounts[6])\ .transact({'from': client}) testerchain.wait_for_receipt(tx) # Can't revoke null arrangement (also it's nonexistent) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, NULL_ADDR).transact({'from': client}) 
testerchain.wait_for_receipt(tx) # Revoke only one arrangement tx = policy_manager.functions.revokeArrangement(policy_id_2, node1).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 4 * value == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - 4 * value == testerchain.interface.w3.eth.getBalance(client) assert not policy_manager.functions.policies(policy_id_2).call()[DISABLED_FIELD] events = arrangement_revoked_log.get_all_entries() assert 2 == len(events) event_args = events[1]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] assert node1 == event_args['node'] assert 2 * value == event_args['value'] # Can't revoke again because arrangement is disabled with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, node1).transact({'from': client}) testerchain.wait_for_receipt(tx) # Can't revoke null arrangement (it's nonexistent) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, NULL_ADDR).transact({'from': client}) testerchain.wait_for_receipt(tx) # Revoke policy with remaining arrangements tx = policy_manager.functions.revokePolicy(policy_id_2).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 0 == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance == testerchain.interface.w3.eth.getBalance(client) assert policy_manager.functions.policies(policy_id_2).call()[DISABLED_FIELD] events = arrangement_revoked_log.get_all_entries() assert 4 == len(events) event_args = events[2]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] assert node2 == event_args['node'] assert 2 * value == event_args['value'] event_args = events[3]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] assert node3 == event_args['node'] assert 2 * value == event_args['value'] events = policy_revoked_log.get_all_entries() assert 2 == len(events) event_args = events[1]['args'] assert policy_id_2 == event_args['policyId'] assert client == event_args['client'] assert 4 * value == event_args['value'] # Can't revoke policy again because policy and all arrangements are disabled with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokePolicy(policy_id_2).transact({'from': client}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.revokeArrangement(policy_id_2, node1).transact({'from': client}) testerchain.wait_for_receipt(tx) # Can't create policy with wrong ETH value - when reward is not calculated by formula: # numberOfNodes * (firstPartialReward + rewardRate * numberOfPeriods) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 10, 0, [node1]).transact({'from': client, 'value': 11}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 10, 1, [node1]).transact({'from': client, 'value': 22}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 10, 1, [node1]).transact({'from': client, 'value': 11}) testerchain.wait_for_receipt(tx) # Set minimum reward rate for nodes tx = policy_manager.functions.setMinRewardRate(10).transact({'from': 
node1}) testerchain.wait_for_receipt(tx) tx = policy_manager.functions.setMinRewardRate(20).transact({'from': node2}) testerchain.wait_for_receipt(tx) assert 10 == policy_manager.functions.nodes(node1).call()[MIN_REWARD_RATE_FIELD] assert 20 == policy_manager.functions.nodes(node2).call()[MIN_REWARD_RATE_FIELD] # Try to create policy with low rate with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 1, 0, [node1])\ .transact({'from': client, 'value': 5}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = policy_manager.functions.createPolicy(policy_id_3, 1, 0, [node1, node2])\ .transact({'from': client, 'value': 30}) testerchain.wait_for_receipt(tx) # Create new policy with payment for the first period period = escrow.functions.getCurrentPeriod().call() tx = policy_manager.functions.createPolicy(policy_id_3, number_of_periods, int(0.5 * rate), [node1, node2, node3])\ .transact({'from': client, 'value': int((0.5 * rate + rate * number_of_periods) * 3), 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 3 * value + 1.5 * rate == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - int(3 * value + 1.5 * rate) == testerchain.interface.w3.eth.getBalance(client) policy = policy_manager.functions.policies(policy_id_3).call() assert client == policy[CLIENT_FIELD] assert rate == policy[RATE_FIELD] assert 0.5 * rate == policy[FIRST_REWARD_FIELD] assert period + 1 == policy[START_PERIOD_FIELD] assert period + 10 == policy[LAST_PERIOD_FIELD] assert not policy[DISABLED_FIELD] events = policy_created_log.get_all_entries() assert 3 == len(events) event_args = events[2]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] # Revoke only one arrangement tx = policy_manager.functions.revokeArrangement(policy_id_3, node1).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 2 * value + rate == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance - (2 * value + rate) == testerchain.interface.w3.eth.getBalance(client) assert not policy_manager.functions.policies(policy_id_3).call()[DISABLED_FIELD] events = arrangement_revoked_log.get_all_entries() assert 5 == len(events) event_args = events[4]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] assert node1 == event_args['node'] assert value + 0.5 * rate == event_args['value'] # Revoke policy tx = policy_manager.functions.revokePolicy(policy_id_3).transact({'from': client, 'gas_price': 0}) testerchain.wait_for_receipt(tx) assert 0 == testerchain.interface.w3.eth.getBalance(policy_manager.address) assert client_balance == testerchain.interface.w3.eth.getBalance(client) assert policy_manager.functions.policies(policy_id_3).call()[DISABLED_FIELD] events = arrangement_revoked_log.get_all_entries() assert 7 == len(events) event_args = events[5]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] assert node2 == event_args['node'] assert value + 0.5 * rate == event_args['value'] event_args = events[6]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] assert node3 == event_args['node'] assert value + 0.5 * rate == event_args['value'] events = policy_revoked_log.get_all_entries() assert 3 == len(events) event_args = events[2]['args'] assert policy_id_3 == event_args['policyId'] assert client == event_args['client'] 
assert 2 * value + rate == event_args['value'] events = arrangement_refund_log.get_all_entries() assert 0 == len(events) events = policy_refund_log.get_all_entries() assert 0 == len(events) @pytest.mark.slow def test_upgrading(testerchain): creator = testerchain.interface.w3.eth.accounts[0] secret_hash = testerchain.interface.w3.sha3(secret) secret2_hash = testerchain.interface.w3.sha3(secret2) # Deploy contracts escrow1, _ = testerchain.interface.deploy_contract('MinersEscrowForPolicyMock', 1) escrow2, _ = testerchain.interface.deploy_contract('MinersEscrowForPolicyMock', 1) address1 = escrow1.address address2 = escrow2.address contract_library_v1, _ = testerchain.interface.deploy_contract('PolicyManager', address1) dispatcher, _ = testerchain.interface.deploy_contract('Dispatcher', contract_library_v1.address, secret_hash) # Deploy second version of the contract contract_library_v2, _ = testerchain.interface.deploy_contract('PolicyManagerV2Mock', address2) contract = testerchain.interface.w3.eth.contract( abi=contract_library_v2.abi, address=dispatcher.address, ContractFactoryClass=Contract) # Upgrade to the second version assert address1 == contract.functions.escrow().call() tx = dispatcher.functions.upgrade(contract_library_v2.address, secret, secret2_hash).transact({'from': creator}) testerchain.wait_for_receipt(tx) # Check constructor and storage values assert contract_library_v2.address == dispatcher.functions.target().call() assert address2 == contract.functions.escrow().call() # Check new ABI tx = contract.functions.setValueToCheck(3).transact({'from': creator}) testerchain.wait_for_receipt(tx) assert 3 == contract.functions.valueToCheck().call() # Can't upgrade to the previous version or to the bad version contract_library_bad, _ = testerchain.interface.deploy_contract('PolicyManagerBad', address2) with pytest.raises((TransactionFailed, ValueError)): tx = dispatcher.functions.upgrade(contract_library_v1.address, secret2, secret_hash)\ .transact({'from': creator}) testerchain.wait_for_receipt(tx) with pytest.raises((TransactionFailed, ValueError)): tx = dispatcher.functions.upgrade(contract_library_bad.address, secret2, secret_hash)\ .transact({'from': creator}) testerchain.wait_for_receipt(tx) # But can rollback tx = dispatcher.functions.rollback(secret2, secret_hash).transact({'from': creator}) testerchain.wait_for_receipt(tx) assert contract_library_v1.address == dispatcher.functions.target().call() assert address1 == contract.functions.escrow().call() # After rollback new ABI is unavailable with pytest.raises((TransactionFailed, ValueError)): tx = contract.functions.setValueToCheck(2).transact({'from': creator}) testerchain.wait_for_receipt(tx) # Try to upgrade to the bad version with pytest.raises((TransactionFailed, ValueError)): tx = dispatcher.functions.upgrade(contract_library_bad.address, secret, secret2_hash)\ .transact({'from': creator}) testerchain.wait_for_receipt(tx)
en
0.852929
This file is part of nucypher. nucypher is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. nucypher is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with nucypher. If not, see <https://www.gnu.org/licenses/>. # Check registered nodes # Try to create policy for bad (unregistered) node # Try to create policy with no ETH # Create policy # Check balances and policy info # Can't create policy with the same id # Only policy owner can revoke policy # Can't revoke again because policy and all arrangements are disabled # Create new policy # Can't revoke nonexistent arrangement # Can't revoke null arrangement (also it's nonexistent) # Revoke only one arrangement # Can't revoke again because arrangement is disabled # Can't revoke null arrangement (it's nonexistent) # Revoke policy with remaining arrangements # Can't revoke policy again because policy and all arrangements are disabled # Can't create policy with wrong ETH value - when reward is not calculated by formula: # numberOfNodes * (firstPartialReward + rewardRate * numberOfPeriods) # Set minimum reward rate for nodes # Try to create policy with low rate # Create new policy with payment for the first period # Revoke only one arrangement # Revoke policy # Deploy contracts # Deploy second version of the contract # Upgrade to the second version # Check constructor and storage values # Check new ABI # Can't upgrade to the previous version or to the bad version # But can rollback # After rollback new ABI is unavailable # Try to upgrade to the bad version
1.886816
2
src/utils.py
SamadiPour/iran-ips
33
6621709
<gh_stars>10-100
import ipaddress


def save_to_file(path: str, content: str):
    with open(path, "w") as fp:
        fp.write(content)


def convert_to_ip_network(ip: str):
    try:
        return ipaddress.ip_network(ip)
    except ValueError:
        return None


# -- Old method to remove subnets --
# -- I will remove it when I'm sure about the new method --
# def remove_subnet_ips(ip_list: list[ipaddress.IPv4Network]):
#     subset_indexes = set()
#
#     for i in range(0, len(ip_list)):
#         for j in range(i + 1, len(ip_list)):
#             x = ip_list[i]
#             y = ip_list[j]
#
#             if x.subnet_of(y):
#                 subset_indexes.add(i)
#                 continue
#             elif y.subnet_of(x):
#                 subset_indexes.add(j)
#
#     for i in sorted(subset_indexes, reverse=True):
#         del ip_list[i]


def remove_subnet_ips(ip_list: list[ipaddress.IPv4Network]):
    i = -1
    while True:
        # iterate over the list to compare ip[i] with the others,
        # then add one to i and iterate again until the end
        list_len = len(ip_list)
        i += 1

        # if i is out of range, then break
        if i >= list_len:
            break

        # make an empty set of subset_indexes and iterate over the list
        subset_indexes = set()
        for j in range(i + 1, list_len):
            x = ip_list[i]
            y = ip_list[j]

            # compare x and y to find a subnet and add it to subset_indexes
            if x.subnet_of(y):
                subset_indexes.add(i)
                continue
            elif y.subnet_of(x):
                subset_indexes.add(j)

        # step back by one if [i] itself is going to be deleted
        if i in subset_indexes:
            i -= 1

        # remove the subset_indexes from the list
        for x in sorted(subset_indexes, reverse=True):
            del ip_list[x]


def get_private_ip_networks():
    return [
        ipaddress.ip_network("0.0.0.0/8"),
        ipaddress.ip_network("10.0.0.0/8"),
        ipaddress.ip_network("100.64.0.0/10"),
        ipaddress.ip_network("127.0.0.0/8"),
        ipaddress.ip_network("169.254.0.0/16"),
        ipaddress.ip_network("172.16.0.0/12"),
        ipaddress.ip_network("192.0.0.0/24"),
        ipaddress.ip_network("192.0.2.0/24"),
        ipaddress.ip_network("192.88.99.0/24"),
        ipaddress.ip_network("192.168.0.0/16"),
        ipaddress.ip_network("198.18.0.0/15"),
        ipaddress.ip_network("198.51.100.0/24"),
        ipaddress.ip_network("203.0.113.0/24"),
        ipaddress.ip_network("224.0.0.0/3"),
    ]


def get_private_ip_networks_str():
    return map(str, get_private_ip_networks())
import ipaddress


def save_to_file(path: str, content: str):
    with open(path, "w") as fp:
        fp.write(content)


def convert_to_ip_network(ip: str):
    try:
        return ipaddress.ip_network(ip)
    except ValueError:
        return None


# -- Old method to remove subnets --
# -- I will remove it when I'm sure about the new method --
# def remove_subnet_ips(ip_list: list[ipaddress.IPv4Network]):
#     subset_indexes = set()
#
#     for i in range(0, len(ip_list)):
#         for j in range(i + 1, len(ip_list)):
#             x = ip_list[i]
#             y = ip_list[j]
#
#             if x.subnet_of(y):
#                 subset_indexes.add(i)
#                 continue
#             elif y.subnet_of(x):
#                 subset_indexes.add(j)
#
#     for i in sorted(subset_indexes, reverse=True):
#         del ip_list[i]


def remove_subnet_ips(ip_list: list[ipaddress.IPv4Network]):
    i = -1
    while True:
        # iterate over the list to compare ip[i] with the others,
        # then add one to i and iterate again until the end
        list_len = len(ip_list)
        i += 1

        # if i is out of range, then break
        if i >= list_len:
            break

        # make an empty set of subset_indexes and iterate over the list
        subset_indexes = set()
        for j in range(i + 1, list_len):
            x = ip_list[i]
            y = ip_list[j]

            # compare x and y to find a subnet and add it to subset_indexes
            if x.subnet_of(y):
                subset_indexes.add(i)
                continue
            elif y.subnet_of(x):
                subset_indexes.add(j)

        # step back by one if [i] itself is going to be deleted
        if i in subset_indexes:
            i -= 1

        # remove the subset_indexes from the list
        for x in sorted(subset_indexes, reverse=True):
            del ip_list[x]


def get_private_ip_networks():
    return [
        ipaddress.ip_network("0.0.0.0/8"),
        ipaddress.ip_network("10.0.0.0/8"),
        ipaddress.ip_network("100.64.0.0/10"),
        ipaddress.ip_network("127.0.0.0/8"),
        ipaddress.ip_network("169.254.0.0/16"),
        ipaddress.ip_network("172.16.0.0/12"),
        ipaddress.ip_network("192.0.0.0/24"),
        ipaddress.ip_network("192.0.2.0/24"),
        ipaddress.ip_network("192.88.99.0/24"),
        ipaddress.ip_network("192.168.0.0/16"),
        ipaddress.ip_network("198.18.0.0/15"),
        ipaddress.ip_network("198.51.100.0/24"),
        ipaddress.ip_network("203.0.113.0/24"),
        ipaddress.ip_network("224.0.0.0/3"),
    ]


def get_private_ip_networks_str():
    return map(str, get_private_ip_networks())
en
0.548403
# -- Old method to remove subnets --
# -- I will remove it when I'm sure about the new method --
# def remove_subnet_ips(ip_list: list[ipaddress.IPv4Network]):
#     subset_indexes = set()
#
#     for i in range(0, len(ip_list)):
#         for j in range(i + 1, len(ip_list)):
#             x = ip_list[i]
#             y = ip_list[j]
#
#             if x.subnet_of(y):
#                 subset_indexes.add(i)
#                 continue
#             elif y.subnet_of(x):
#                 subset_indexes.add(j)
#
#     for i in sorted(subset_indexes, reverse=True):
#         del ip_list[i]
# iterate over the list to compare ip[i] with the others, then add one to i and iterate again until the end
# if i is out of range, then break
# make an empty set of subset_indexes and iterate over the list
# compare x and y to find a subnet and add it to subset_indexes
# step back by one if [i] itself is going to be deleted
# remove the subset_indexes from the list
3.282284
3
card.py
frrmack/CallofCthulhu
1
6621710
import random as rnd import numpy.random poisson = numpy.random.poisson from util import * from layout import * from graphics import CardImage, WoundTokenImage class Card: def __init__(self, name, imageFileName=None): self.name = name self.owner = None self.controller = None self.type = None self.position = None self.attached = [] self.struggles = [] self.subtypes = [] self.keywords = [] self.set = None self.number = None self.actions = [] self.disrupts = [] self.responses = [] self.forcedResponses = [] self.state = ['ready', 'exhausted'][0] # imageFiles can be given as a string or # a sequence of strings (filenames) if imageFileName != None: self.image = CardImage(imageFileName) else: self.image = None def __repr__(self): text = genericCardColor(self.name) if self.isExhausted(): text = boldColor('[Exh]') + text return text #-- Information def isInState(self, state): if self.state == state: return True else: return False def isOnBoard(self, board): return board.contains(self) def isInHand(self, Player): if self in Player.hand: return True else: return False def isExhausted(self): return self.isInState('exhausted') def isReady(self): return self.isInState('ready') def isInsane(self): return self.isInState('insane') #-- Actions def enterGame(self, player): self.controller = player if self.category == "character": self.woundTokenBag = [] for i in range(6): token = WoundToken(screen=self.owner.game.screen) self.woundTokenBag.append(token) def exhaust(self, draw=False): if not self.isReady(): raise RuleError("You can only exhaust a ready character") else: self.state = 'exhausted' x,y = self.image.pos x -= toInt( (CARDHEIGHT - CARDWIDTH)/2. ) y += toInt( (CARDHEIGHT - CARDWIDTH)/2. ) pos = x,y self.image.clear() self.image.turnLeft() for card in self.attached: card.image.clear() card.image.turnLeft() if draw: self.draw(pos) self.owner.screen.update() def ready(self, draw=False): if not self.isExhausted(): raise RuleError("You can only ready an exhausted character") else: self.state = 'ready' x,y = self.image.pos x += toInt( (CARDHEIGHT - CARDWIDTH)/2. ) y -= toInt( (CARDHEIGHT - CARDWIDTH)/2. ) pos = x,y self.image.clear() self.image.turnRight() for card in self.attached: card.image.clear() card.image.turnRight() if draw: self.image.draw(pos) self.owner.screen.update() def attach(self, card): if self.isInsane(): msg = "You cannot attach a card to an insane character." 
raise GameError(msg) if self.isExhausted(): card.image.turnLeft() self.attached.append(card) def getAttachedTo(self, card): card.attach(self) def die(self): for i in range(len(self.attached)): card = self.attached.pop() card.owner.discardPile.add(card) card.controller = None self.position.remove(self) self.owner.discardPile.add(self) self.controller = None #-- Graphics def setScreen(self, screen): self.screen = screen self.image.addToScreen(screen) return self def draw(self, pos): x,y = pos N = len(self.attached) for i in range(N-1,-1,-1): if self.isReady(): apos = x,y-RESOURCEBAR*(i+1) elif not self.image.turned180: apos = x-RESOURCEBAR*(i+1), y else: apos = x+RESOURCEBAR*(i+1), y self.attached[i].draw(apos) self.image.draw(pos) class Character(Card): category = 'character' def __init__(self, name, imageFileName=None, cost=0, terror=0, combat=0, arcane=0, investigation=0, skill=0, randomize = False, *args, **kwargs): Card.__init__(self, name, imageFileName, *args, **kwargs) self.state = ['ready','exhausted', 'insane'][0] self.printedTerror = self.terror = terror self.printedCombat = self.combat = combat self.printedArcane = self.arcane = arcane self.printedInvestigation = self.investigation = investigation self.printedSkill = self.skill = skill self.attached = [] self.wounds = 0 self.toughness = 0 if randomize: self.name = rnd.choice(['Aberrant ', 'Abominable ', 'Abysmal ', 'Blasphemous ', 'Cosmic ', 'Corrupt ', 'Disfigured ', 'Dark ', 'Diseased ', 'Deathless ', 'Endless ', 'Festering ', 'Fish-like ', 'Ghoulish ', 'Hideous ', 'Infected ', 'Ichorous ', 'Jabbering ', 'Lurking ', 'Mad ', 'Malevolent ', 'Mutilated ', 'Nameless ', 'Obscene ', 'Pagan ', 'Pale ', 'Repulsive ', 'Sickly ', 'Stagnant ', 'Unspeakable ', 'Viscous ', 'Warped ', 'Withered ', 'Weird ']) + \ rnd.choice(['Tentacle', 'Detective', 'Creature', 'Particle', 'Old Man', 'Horror', 'Abomination', 'Vapour', 'Serpent', 'Thing', 'Shadow', 'Earth', 'Investigator', 'Sorcerer', 'Insect', 'Criminal', 'Worshipper', 'Spawn', 'Ghoul', 'Humanoid', 'Creeper', 'Critters', 'Slime', 'Fish', 'Corpse']) self.printedTerror = self.terror = poisson(0.7) % 3 self.printedCombat = self.combat = poisson(0.7) % 3 self.printedArcane = self.arcane = poisson(0.7) % 3 self.printedInvestigation = self.investigation = poisson(0.7) self.printedCost = self.cost = self.terror + self.combat + \ self.arcane + self.investigation + rnd.randint(0,1) self.printedSkill = self.skill = poisson((trunc(self.cost-1.5, bottom=0))) + rnd.randint(0,1) def __repr__(self): text = '[%i] ' % self.cost + \ self.name + ' ' +\ self.terror * '[T]' +\ self.combat * '[C]' +\ self.arcane * '[A]' +\ self.investigation * '[I]' +\ ' ' + str(self.skill) + 's' text = genericCardColor(text) if self.isExhausted(): text = boldColor('[Exh]') + text elif self.isInsane(): text = boldColor('[Ins]') + text return text #-- Information def isInsane(self): return self.isInState('insane') def canGoInsane(self): if 'Willpower' in self.keywords or self.terror >0 or self.isInsane(): return False else: return True def canBeWounded(self): if 'Invulnerable' in self.keywords: return False else: return True #-- Actions def goInsane(self, draw=False): if self.isInsane(): raise RuleError("This character is already insane.") else: x,y = self.image.pos x -= toInt( (CARDHEIGHT - CARDWIDTH)/2. ) y += toInt( (CARDHEIGHT - CARDWIDTH)/2. 
)
            pos = x,y
            self.image.clear()
            for card in self.attached:
                card.image.clear()
            if not self.isExhausted():
                self.image.turnLeft()
                for card in self.attached:
                    card.image.turnLeft()
            self.image.flipCard()
            for card in self.attached:
                card.image.flipCard()
            if draw:
                self.draw(pos)
            self.state = 'insane'
            # lose attachments
            for i in range(len(self.attached)):
                card = self.attached.pop()
                card.owner.discardPile.add(card)
            # lose toughness
            self.tempToughness = self.toughness
            self.toughness = 0
            if self.wounds > self.toughness:
                print(genericCardColor(self.name) + " loses toughness due to going insane and dies.")
                self.die()

    def restore(self, draw=False):
        if not self.isInsane():
            raise RuleError("You can only restore insane characters")
        else:
            self.state = 'exhausted'
            pos = self.image.pos
            self.image.clear()
            self.image.flipCard()
            # for card in self.attached:
            #     card.image.clear()
            #     card.image.flipCard()
            if hasattr(self, "tempToughness"):
                self.toughness = self.tempToughness
            if draw:
                self.draw(pos)
                self.owner.screen.update()

    def wound(self, draw=False):
        self.wounds += 1
        if self.wounds > self.toughness:
            self.die()
        else:
            self.image.surface = scale(self.image.orgSurface, size=self.image.regularSize)
            self.image.bigSurface = scale(self.image.orgSurface, size=self.image.zoomSize)
            for i in range(self.wounds):
                token = self.woundTokenBag[i]
                pos = WOUNDPOS[self.wounds][i]
                self.image.drawOn(token.image.surface, pos)
                pos = 3*pos[0], 3*pos[1]
                self.image.drawOn(scale(token.image.orgSurface, size=(3*TOKENEDGE, 3*TOKENEDGE)),
                                  pos, targetSurface=self.image.bigSurface)
            if self.isExhausted():
                self.image.turnLeft()
            if draw:
                self.image.redraw()


class Event(Card):
    category = 'event'

    def __init__(self, name, imageFileName=None, randomize=False, *args, **kwargs):
        Card.__init__(self, name, imageFileName, *args, **kwargs)
        if randomize:
            self.name = rnd.choice(['Atrocious', 'Arabic', 'Fantastic', 'Loathsome',
                                    'Remorseless', 'Protoplasmic', 'Irrational', 'Horrible',
                                    'Destructive', 'Decay', 'Incriminating', 'Deadly',
                                    'Slow', 'Empowering', 'Arcane']) +\
                ' Event ' + rnd.choice(['I', 'II', 'III', 'IV', 'V', 'VI',
                                        'VII', 'IX', 'X', 'XI', 'XII'])
        self.cost = trunc( poisson(1.2) + rnd.randint(0,1), top=4 )

    def __repr__(self):
        return genericCardColor('[%i] ' % self.cost + self.name + ' ')


class Support(Card):
    category = 'support'

    def __init__(self, name, imageFileName=None, randomize=False, *args, **kwargs):
        Card.__init__(self, name, imageFileName, *args, **kwargs)
        if randomize:
            self.name = rnd.choice(['Baboonish', 'Beastly', 'Cackling', 'Hapless',
                                    'Effusive', 'Weak', 'Jaded', 'Crystalline',
                                    'Worm-like', 'Gun', 'Corpulent', 'Baneful',
                                    'Gangrenous', 'Insane', 'Incredible']) +\
                ' Support ' + rnd.choice(['I', 'II', 'III', 'IV', 'V', 'VI',
                                          'VII', 'IX', 'X', 'XI', 'XII'])
        self.cost = trunc( poisson(0.9) + rnd.randint(0,1), top=4 )

    def __repr__(self):
        text = genericCardColor('[%i] ' % self.cost + self.name + ' ')
        if self.isExhausted():
            text = boldColor('[Exh]') + text
        return text


class WoundToken(object):
    def __init__(self, screen=None):
        self.image = WoundTokenImage(screen=screen)

    def draw(self, pos):
        self.image.draw(pos)
import random as rnd import numpy.random poisson = numpy.random.poisson from util import * from layout import * from graphics import CardImage, WoundTokenImage class Card: def __init__(self, name, imageFileName=None): self.name = name self.owner = None self.controller = None self.type = None self.position = None self.attached = [] self.struggles = [] self.subtypes = [] self.keywords = [] self.set = None self.number = None self.actions = [] self.disrupts = [] self.responses = [] self.forcedResponses = [] self.state = ['ready', 'exhausted'][0] # imageFiles can be given as a string or # a sequence of strings (filenames) if imageFileName != None: self.image = CardImage(imageFileName) else: self.image = None def __repr__(self): text = genericCardColor(self.name) if self.isExhausted(): text = boldColor('[Exh]') + text return text #-- Information def isInState(self, state): if self.state == state: return True else: return False def isOnBoard(self, board): return board.contains(self) def isInHand(self, Player): if self in Player.hand: return True else: return False def isExhausted(self): return self.isInState('exhausted') def isReady(self): return self.isInState('ready') def isInsane(self): return self.isInState('insane') #-- Actions def enterGame(self, player): self.controller = player if self.category == "character": self.woundTokenBag = [] for i in range(6): token = WoundToken(screen=self.owner.game.screen) self.woundTokenBag.append(token) def exhaust(self, draw=False): if not self.isReady(): raise RuleError("You can only exhaust a ready character") else: self.state = 'exhausted' x,y = self.image.pos x -= toInt( (CARDHEIGHT - CARDWIDTH)/2. ) y += toInt( (CARDHEIGHT - CARDWIDTH)/2. ) pos = x,y self.image.clear() self.image.turnLeft() for card in self.attached: card.image.clear() card.image.turnLeft() if draw: self.draw(pos) self.owner.screen.update() def ready(self, draw=False): if not self.isExhausted(): raise RuleError("You can only ready an exhausted character") else: self.state = 'ready' x,y = self.image.pos x += toInt( (CARDHEIGHT - CARDWIDTH)/2. ) y -= toInt( (CARDHEIGHT - CARDWIDTH)/2. ) pos = x,y self.image.clear() self.image.turnRight() for card in self.attached: card.image.clear() card.image.turnRight() if draw: self.image.draw(pos) self.owner.screen.update() def attach(self, card): if self.isInsane(): msg = "You cannot attach a card to an insane character." 
raise GameError(msg) if self.isExhausted(): card.image.turnLeft() self.attached.append(card) def getAttachedTo(self, card): card.attach(self) def die(self): for i in range(len(self.attached)): card = self.attached.pop() card.owner.discardPile.add(card) card.controller = None self.position.remove(self) self.owner.discardPile.add(self) self.controller = None #-- Graphics def setScreen(self, screen): self.screen = screen self.image.addToScreen(screen) return self def draw(self, pos): x,y = pos N = len(self.attached) for i in range(N-1,-1,-1): if self.isReady(): apos = x,y-RESOURCEBAR*(i+1) elif not self.image.turned180: apos = x-RESOURCEBAR*(i+1), y else: apos = x+RESOURCEBAR*(i+1), y self.attached[i].draw(apos) self.image.draw(pos) class Character(Card): category = 'character' def __init__(self, name, imageFileName=None, cost=0, terror=0, combat=0, arcane=0, investigation=0, skill=0, randomize = False, *args, **kwargs): Card.__init__(self, name, imageFileName, *args, **kwargs) self.state = ['ready','exhausted', 'insane'][0] self.printedTerror = self.terror = terror self.printedCombat = self.combat = combat self.printedArcane = self.arcane = arcane self.printedInvestigation = self.investigation = investigation self.printedSkill = self.skill = skill self.attached = [] self.wounds = 0 self.toughness = 0 if randomize: self.name = rnd.choice(['Aberrant ', 'Abominable ', 'Abysmal ', 'Blasphemous ', 'Cosmic ', 'Corrupt ', 'Disfigured ', 'Dark ', 'Diseased ', 'Deathless ', 'Endless ', 'Festering ', 'Fish-like ', 'Ghoulish ', 'Hideous ', 'Infected ', 'Ichorous ', 'Jabbering ', 'Lurking ', 'Mad ', 'Malevolent ', 'Mutilated ', 'Nameless ', 'Obscene ', 'Pagan ', 'Pale ', 'Repulsive ', 'Sickly ', 'Stagnant ', 'Unspeakable ', 'Viscous ', 'Warped ', 'Withered ', 'Weird ']) + \ rnd.choice(['Tentacle', 'Detective', 'Creature', 'Particle', 'Old Man', 'Horror', 'Abomination', 'Vapour', 'Serpent', 'Thing', 'Shadow', 'Earth', 'Investigator', 'Sorcerer', 'Insect', 'Criminal', 'Worshipper', 'Spawn', 'Ghoul', 'Humanoid', 'Creeper', 'Critters', 'Slime', 'Fish', 'Corpse']) self.printedTerror = self.terror = poisson(0.7) % 3 self.printedCombat = self.combat = poisson(0.7) % 3 self.printedArcane = self.arcane = poisson(0.7) % 3 self.printedInvestigation = self.investigation = poisson(0.7) self.printedCost = self.cost = self.terror + self.combat + \ self.arcane + self.investigation + rnd.randint(0,1) self.printedSkill = self.skill = poisson((trunc(self.cost-1.5, bottom=0))) + rnd.randint(0,1) def __repr__(self): text = '[%i] ' % self.cost + \ self.name + ' ' +\ self.terror * '[T]' +\ self.combat * '[C]' +\ self.arcane * '[A]' +\ self.investigation * '[I]' +\ ' ' + str(self.skill) + 's' text = genericCardColor(text) if self.isExhausted(): text = boldColor('[Exh]') + text elif self.isInsane(): text = boldColor('[Ins]') + text return text #-- Information def isInsane(self): return self.isInState('insane') def canGoInsane(self): if 'Willpower' in self.keywords or self.terror >0 or self.isInsane(): return False else: return True def canBeWounded(self): if 'Invulnerable' in self.keywords: return False else: return True #-- Actions def goInsane(self, draw=False): if self.isInsane(): raise RuleError("This character is already insane.") else: x,y = self.image.pos x -= toInt( (CARDHEIGHT - CARDWIDTH)/2. ) y += toInt( (CARDHEIGHT - CARDWIDTH)/2. 
)
            pos = x,y
            self.image.clear()
            for card in self.attached:
                card.image.clear()
            if not self.isExhausted():
                self.image.turnLeft()
                for card in self.attached:
                    card.image.turnLeft()
            self.image.flipCard()
            for card in self.attached:
                card.image.flipCard()
            if draw:
                self.draw(pos)
            self.state = 'insane'
            # lose attachments
            for i in range(len(self.attached)):
                card = self.attached.pop()
                card.owner.discardPile.add(card)
            # lose toughness
            self.tempToughness = self.toughness
            self.toughness = 0
            if self.wounds > self.toughness:
                print(genericCardColor(self.name) + " loses toughness due to going insane and dies.")
                self.die()

    def restore(self, draw=False):
        if not self.isInsane():
            raise RuleError("You can only restore insane characters")
        else:
            self.state = 'exhausted'
            pos = self.image.pos
            self.image.clear()
            self.image.flipCard()
            # for card in self.attached:
            #     card.image.clear()
            #     card.image.flipCard()
            if hasattr(self, "tempToughness"):
                self.toughness = self.tempToughness
            if draw:
                self.draw(pos)
                self.owner.screen.update()

    def wound(self, draw=False):
        self.wounds += 1
        if self.wounds > self.toughness:
            self.die()
        else:
            self.image.surface = scale(self.image.orgSurface, size=self.image.regularSize)
            self.image.bigSurface = scale(self.image.orgSurface, size=self.image.zoomSize)
            for i in range(self.wounds):
                token = self.woundTokenBag[i]
                pos = WOUNDPOS[self.wounds][i]
                self.image.drawOn(token.image.surface, pos)
                pos = 3*pos[0], 3*pos[1]
                self.image.drawOn(scale(token.image.orgSurface, size=(3*TOKENEDGE, 3*TOKENEDGE)),
                                  pos, targetSurface=self.image.bigSurface)
            if self.isExhausted():
                self.image.turnLeft()
            if draw:
                self.image.redraw()


class Event(Card):
    category = 'event'

    def __init__(self, name, imageFileName=None, randomize=False, *args, **kwargs):
        Card.__init__(self, name, imageFileName, *args, **kwargs)
        if randomize:
            self.name = rnd.choice(['Atrocious', 'Arabic', 'Fantastic', 'Loathsome',
                                    'Remorseless', 'Protoplasmic', 'Irrational', 'Horrible',
                                    'Destructive', 'Decay', 'Incriminating', 'Deadly',
                                    'Slow', 'Empowering', 'Arcane']) +\
                ' Event ' + rnd.choice(['I', 'II', 'III', 'IV', 'V', 'VI',
                                        'VII', 'IX', 'X', 'XI', 'XII'])
        self.cost = trunc( poisson(1.2) + rnd.randint(0,1), top=4 )

    def __repr__(self):
        return genericCardColor('[%i] ' % self.cost + self.name + ' ')


class Support(Card):
    category = 'support'

    def __init__(self, name, imageFileName=None, randomize=False, *args, **kwargs):
        Card.__init__(self, name, imageFileName, *args, **kwargs)
        if randomize:
            self.name = rnd.choice(['Baboonish', 'Beastly', 'Cackling', 'Hapless',
                                    'Effusive', 'Weak', 'Jaded', 'Crystalline',
                                    'Worm-like', 'Gun', 'Corpulent', 'Baneful',
                                    'Gangrenous', 'Insane', 'Incredible']) +\
                ' Support ' + rnd.choice(['I', 'II', 'III', 'IV', 'V', 'VI',
                                          'VII', 'IX', 'X', 'XI', 'XII'])
        self.cost = trunc( poisson(0.9) + rnd.randint(0,1), top=4 )

    def __repr__(self):
        text = genericCardColor('[%i] ' % self.cost + self.name + ' ')
        if self.isExhausted():
            text = boldColor('[Exh]') + text
        return text


class WoundToken(object):
    def __init__(self, screen=None):
        self.image = WoundTokenImage(screen=screen)

    def draw(self, pos):
        self.image.draw(pos)
en
0.405233
# imageFiles can be given as a string or # a sequence of strings (filenames) #-- Information #-- Actions #-- Graphics #-- Information #-- Actions # lose attachments # lose toughness # for card in self.attached: # card.image.clear() # card.image.flipCard()
2.959853
3
easyGuiCheck.py
piotrbla/pyExamples
0
6621711
from easygui import * msgbox("ttt", "eeee")
from easygui import * msgbox("ttt", "eeee")
none
1
1.286172
1
setup.py
OdelinCharron/pycNic
0
6621712
<gh_stars>0 # -*- coding: utf-8 -*- from setuptools import setup, find_packages with open('README.md') as f: readme = f.read() with open('LICENSE') as f: license0 = f.read() setup( name='pycnic', version='0.0.1', description='Automated quiz for mri images', long_description=readme, author='<NAME>, <NAME>', author_email='<EMAIL>', url='https://github.com/OdelinCharron/pycnic', license=license0, packages=find_packages(exclude=('tests', 'docs', 'examples')), scripts=['pycNic'] )
# -*- coding: utf-8 -*- from setuptools import setup, find_packages with open('README.md') as f: readme = f.read() with open('LICENSE') as f: license0 = f.read() setup( name='pycnic', version='0.0.1', description='Automated quiz for mri images', long_description=readme, author='<NAME>, <NAME>', author_email='<EMAIL>', url='https://github.com/OdelinCharron/pycnic', license=license0, packages=find_packages(exclude=('tests', 'docs', 'examples')), scripts=['pycNic'] )
en
0.769321
# -*- coding: utf-8 -*-
1.161057
1
networkx/algorithms/shortest_paths/greedy_search.py
a-vitale/networkx
0
6621713
"""Shortest paths and path lengths using the Greedy algorithm. """ from heapq import heappush, heappop from itertools import count import networkx as nx from networkx.algorithms.shortest_paths.weighted import _weight_function def greedy_path(G, source, target, heuristic = None, weight = "weight"): #if source or target is not in G -> error msg if source not in G or target not in G: msg = f"Either source {source} or target {target} is not in G" raise nx.NodeNotFound(msg) #if heuristic is none -> define the default heuristic function: h = 0 if heuristic is None: def heuristic(u, v): return 0 #assegno a push la funzione heappush e a pop la funzione heappop. #A wieght invece assegno la funzione _weight_function(G, weight) #passando come parametro G e weight (input della funzione greedy_search) push = heappush pop = heappop getWeight = _weight_function(G, weight) #variabile di gestione dell'euristica nell'ordinamento (in caso di parità) c = count() #la fringe mantiene: priorità, nodo corrente (nel caso base è la radice), varibaile di gestione e il parent fringe = [(0, next(c), source, None)] #struttura dati che memorizza i parent dei nodi visitati explored = {} #struttura che mantiene i pesi dei cammini parziali weights = {} while queue: _, __, curNode, parent = pop(fringe) #caso base: target raggiunto. Costruzione del path tramite i parent if curNode == target: path = [curNode] node = parent while node is not None: path.append(node) node = explored[node] path.reverse() return path curWeight = weights[parent] + getWeight(parent, curNode, weight) if curNode in explored: if explored[curNode] is None: continue if curWeight > weights[curNode]: continue explored[curNode] = parent weights[curNode] = curWeight for neighbor, _ in G[curNode].items(): if neighbor not in explored: weights[neighbor] = weights[curNode] + getWeight(curnNode, neighbor, weight) heuristicValue = heuristic(neighbor, target) push(fringe, (heuristicValue, next(c), neighbor, curNode))
"""Shortest paths and path lengths using the Greedy algorithm. """ from heapq import heappush, heappop from itertools import count import networkx as nx from networkx.algorithms.shortest_paths.weighted import _weight_function def greedy_path(G, source, target, heuristic = None, weight = "weight"): #if source or target is not in G -> error msg if source not in G or target not in G: msg = f"Either source {source} or target {target} is not in G" raise nx.NodeNotFound(msg) #if heuristic is none -> define the default heuristic function: h = 0 if heuristic is None: def heuristic(u, v): return 0 #assegno a push la funzione heappush e a pop la funzione heappop. #A wieght invece assegno la funzione _weight_function(G, weight) #passando come parametro G e weight (input della funzione greedy_search) push = heappush pop = heappop getWeight = _weight_function(G, weight) #variabile di gestione dell'euristica nell'ordinamento (in caso di parità) c = count() #la fringe mantiene: priorità, nodo corrente (nel caso base è la radice), varibaile di gestione e il parent fringe = [(0, next(c), source, None)] #struttura dati che memorizza i parent dei nodi visitati explored = {} #struttura che mantiene i pesi dei cammini parziali weights = {} while queue: _, __, curNode, parent = pop(fringe) #caso base: target raggiunto. Costruzione del path tramite i parent if curNode == target: path = [curNode] node = parent while node is not None: path.append(node) node = explored[node] path.reverse() return path curWeight = weights[parent] + getWeight(parent, curNode, weight) if curNode in explored: if explored[curNode] is None: continue if curWeight > weights[curNode]: continue explored[curNode] = parent weights[curNode] = curWeight for neighbor, _ in G[curNode].items(): if neighbor not in explored: weights[neighbor] = weights[curNode] + getWeight(curnNode, neighbor, weight) heuristicValue = heuristic(neighbor, target) push(fringe, (heuristicValue, next(c), neighbor, curNode))
en
0.936215
Shortest paths and path lengths using the Greedy (best-first) algorithm. # if source or target is not in G -> error msg # if heuristic is None -> use the default heuristic function: h = 0 # bind push/pop to the heap helpers; _weight_function(G, weight) builds a # callable (u, v, edge_data) -> numeric weight from the weight argument # passed to greedy_path # counter used to break ties in the heap ordering (equal heuristic values) # each fringe entry holds: priority, tie-breaking counter, current node # (initially the source) and its parent # maps each expanded node to its parent # stores the weight of the partial path that reached each node # base case: target reached; rebuild the path by following the parents # do not override the parent of the starting node # skip paths that are no better than one already recorded # exhausted the fringe without reaching the target (mirrors networkx's convention)
3.715657
4
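A minimal usage sketch for the greedy_path record above. The graph, node labels and edge weights are illustrative assumptions, and nx.astar_path appears only because it shares the same call shape; none of this is part of the original record:

import networkx as nx

# Tiny weighted graph (made-up data) to exercise a greedy_path-style call.
G = nx.Graph()
G.add_weighted_edges_from([("a", "b", 1), ("b", "c", 2), ("a", "c", 5)])

# greedy_path(G, "a", "c", heuristic=lambda u, v: 0) expands in heap order;
# for comparison, networkx's built-in A* with h = 0 reduces to Dijkstra:
print(nx.astar_path(G, "a", "c", heuristic=lambda u, v: 0, weight="weight"))
# -> ['a', 'b', 'c'] (total weight 3, cheaper than the direct edge of weight 5)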
Huawei2018CodeCraft-GSCPGZ/submit/predictor.py
polossk/DSML-Logs
2
6621714
# -*- coding: utf-8 -*- import preprocessor import regTree import scanner class SolidServer(object): def __init__(self, config): self.rest_cpu = config.cpu self.rest_mem = config.mem self.haveFlavors = {} def addVM(self, flavor): # add one virtual machine if flavor in self.haveFlavors.keys(): self.haveFlavors[flavor] += 1 else: self.haveFlavors[flavor] = 1 self.rest_cpu -= flavor.cpu self.rest_mem -= flavor.mem def validate(self, flavor): # if rest resources is fit for new vm return self.rest_cpu >= flavor.cpu and self.rest_mem >= flavor.mem def predict_vm(ecsDataPath, inputFilePath): result = [] raw = preprocessor.TrainingSet(ecsDataPath, inputFilePath) test_flavor = raw.get_flavorID() pred = [] train_X, train_y, test_feat = raw.getTrainFeat() for hoge in test_flavor: train_flavorID = regTree.makeFlavorTrain(hoge - 1, train_X, train_y) temp_regTree = regTree.build(train_flavorID) pred.append(round(regTree.predict(temp_regTree, test_feat))) flavors, server = raw.flavors, raw.server box = [SolidServer(server)] i = raw.flvNum - 1 while i >= 0: temp_flavor = flavors[i] temp_pred = pred[i] while temp_pred > 0: j = 0 while j < len(box): if box[j].validate(temp_flavor): box[j].addVM(temp_flavor) break else: j += 1 if j == len(box): tmp = SolidServer(raw.server) tmp.addVM(temp_flavor) box.append(tmp) temp_pred -= 1 i -= 1 res = sum(pred) result.append(str(int(res)) + '\n') for k in range(raw.flvNum): msg = 'flavor{0} {1}\n'.format(str(test_flavor[k]), int(pred[k])) result.append(msg) result.append('\n' + str(len(box)) + '\n') for x in range(len(box)): result.append(str(x + 1)) for key in box[x].haveFlavors.keys(): msg = ' {0} {1}'.format(key.name, str(box[x].haveFlavors[key])) result.append(msg) if x < len(box) - 1: result.append('\n') return result
# -*- coding: utf-8 -*- import preprocessor import regTree import scanner class SolidServer(object): def __init__(self, config): self.rest_cpu = config.cpu self.rest_mem = config.mem self.haveFlavors = {} def addVM(self, flavor): # add one virtual machine if flavor in self.haveFlavors.keys(): self.haveFlavors[flavor] += 1 else: self.haveFlavors[flavor] = 1 self.rest_cpu -= flavor.cpu self.rest_mem -= flavor.mem def validate(self, flavor): # if rest resources is fit for new vm return self.rest_cpu >= flavor.cpu and self.rest_mem >= flavor.mem def predict_vm(ecsDataPath, inputFilePath): result = [] raw = preprocessor.TrainingSet(ecsDataPath, inputFilePath) test_flavor = raw.get_flavorID() pred = [] train_X, train_y, test_feat = raw.getTrainFeat() for hoge in test_flavor: train_flavorID = regTree.makeFlavorTrain(hoge - 1, train_X, train_y) temp_regTree = regTree.build(train_flavorID) pred.append(round(regTree.predict(temp_regTree, test_feat))) flavors, server = raw.flavors, raw.server box = [SolidServer(server)] i = raw.flvNum - 1 while i >= 0: temp_flavor = flavors[i] temp_pred = pred[i] while temp_pred > 0: j = 0 while j < len(box): if box[j].validate(temp_flavor): box[j].addVM(temp_flavor) break else: j += 1 if j == len(box): tmp = SolidServer(raw.server) tmp.addVM(temp_flavor) box.append(tmp) temp_pred -= 1 i -= 1 res = sum(pred) result.append(str(int(res)) + '\n') for k in range(raw.flvNum): msg = 'flavor{0} {1}\n'.format(str(test_flavor[k]), int(pred[k])) result.append(msg) result.append('\n' + str(len(box)) + '\n') for x in range(len(box)): result.append(str(x + 1)) for key in box[x].haveFlavors.keys(): msg = ' {0} {1}'.format(key.name, str(box[x].haveFlavors[key])) result.append(msg) if x < len(box) - 1: result.append('\n') return result
en
0.636685
# -*- coding: utf-8 -*- # add one virtual machine # if rest resources is fit for new vm
2.509841
3
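The packing phase in the record above is a plain first-fit heuristic over (cpu, mem) demands. A stripped-down, self-contained sketch of the same idea; the capacities and demands below are made-up numbers, not values from the record:

# First-fit packing: place each demand into the first server with room,
# opening a new server when none fits.
def first_fit(demands, cap_cpu=56, cap_mem=128):
    servers = []  # each entry is a mutable [free_cpu, free_mem] pair
    for cpu, mem in demands:
        for srv in servers:
            if srv[0] >= cpu and srv[1] >= mem:
                srv[0] -= cpu
                srv[1] -= mem
                break
        else:
            servers.append([cap_cpu - cpu, cap_mem - mem])
    return len(servers)

print(first_fit([(16, 32), (8, 16), (32, 64), (16, 64)]))  # -> 2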
src/langumo/configuration.py
fossabot/langumo
7
6621715
<filename>src/langumo/configuration.py import yaml from typing import Dict, Any _default_configuration = { 'workspace': 'tmp', 'outputs': { 'vocabulary': 'build/vocab.txt', 'train-corpus': 'build/corpus.train.txt', 'eval-corpus': 'build/corpus.eval.txt', }, 'build': { 'parsing': { 'num-workers': 1, 'language': 'en', 'newline-token': '[NEWLINE]', 'min-length': 0, 'max-length': 1024 }, 'splitting': { 'validation-ratio': 0.1 }, 'tokenization': { 'subset-size': 1000000000, 'vocab-size': 32000, 'limit-alphabet': 1000, 'unk-token': '[UNK]', 'special-tokens': ['[START]', '[END]', '[PAD]', '[NEWLINE]'] } } } def _update_default_dict(data: Dict, default: Dict): for key in default: if key not in data: data[key] = default[key] elif isinstance(data[key], dict) and isinstance(default[key], dict): _update_default_dict(data[key], default[key]) class BuildConfig: def __init__(self, path: str): with open(path, 'r') as fp: self.config_dict = yaml.safe_load(fp) if 'langumo' not in self.config_dict: raise RuntimeError('build configuration file should contain ' '`langumo` namespace.') self.config_dict = self.config_dict['langumo'] # Update default configuration values. _update_default_dict(self.config_dict, _default_configuration) def __getitem__(self, key: str) -> Any: current = self.config_dict for namespace in key.split('.'): current = current[namespace] return current def __contains__(self, key: str) -> bool: current = self.config_dict for namespace in key.split('.'): if namespace not in current: return False current = current[namespace] return True
<filename>src/langumo/configuration.py import yaml from typing import Dict, Any _default_configuration = { 'workspace': 'tmp', 'outputs': { 'vocabulary': 'build/vocab.txt', 'train-corpus': 'build/corpus.train.txt', 'eval-corpus': 'build/corpus.eval.txt', }, 'build': { 'parsing': { 'num-workers': 1, 'language': 'en', 'newline-token': '[NEWLINE]', 'min-length': 0, 'max-length': 1024 }, 'splitting': { 'validation-ratio': 0.1 }, 'tokenization': { 'subset-size': 1000000000, 'vocab-size': 32000, 'limit-alphabet': 1000, 'unk-token': '[UNK]', 'special-tokens': ['[START]', '[END]', '[PAD]', '[NEWLINE]'] } } } def _update_default_dict(data: Dict, default: Dict): for key in default: if key not in data: data[key] = default[key] elif isinstance(data[key], dict) and isinstance(default[key], dict): _update_default_dict(data[key], default[key]) class BuildConfig: def __init__(self, path: str): with open(path, 'r') as fp: self.config_dict = yaml.safe_load(fp) if 'langumo' not in self.config_dict: raise RuntimeError('build configuration file should contain ' '`langumo` namespace.') self.config_dict = self.config_dict['langumo'] # Update default configuration values. _update_default_dict(self.config_dict, _default_configuration) def __getitem__(self, key: str) -> Any: current = self.config_dict for namespace in key.split('.'): current = current[namespace] return current def __contains__(self, key: str) -> bool: current = self.config_dict for namespace in key.split('.'): if namespace not in current: return False current = current[namespace] return True
en
0.113986
# Update default configuration values.
2.252023
2
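The recursive default-merge in the record above fills in only the keys a user's YAML omits. A self-contained sketch of those semantics with plain dicts; the keys and values are illustrative:

# User data wins; defaults only fill holes, recursing through nested dicts.
defaults = {"build": {"parsing": {"language": "en", "min-length": 0}}}
user = {"build": {"parsing": {"min-length": 16}}}

def merge(data, default):
    for key in default:
        if key not in data:
            data[key] = default[key]
        elif isinstance(data[key], dict) and isinstance(default[key], dict):
            merge(data[key], default[key])

merge(user, defaults)
print(user)  # {'build': {'parsing': {'min-length': 16, 'language': 'en'}}}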
app.py
What-to-watch/wtw-ml-model
0
6621716
import os import threading from surprise import dump from training import train from db_util import get_db_conn_url from flask_sqlalchemy import SQLAlchemy from flask import Flask, request app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = f"postgresql://{get_db_conn_url()}" db = SQLAlchemy(app) from sqlalchemy import text model_dump_path = os.path.normpath("model_dump") @app.route('/topN/<user_id>') def getTopN(user_id): n = request.args.get('n') if n is None: n = 5 else: n = int(n) model = get_model() query = text("SELECT id FROM movies WHERE id NOT IN (SELECT movie_id FROM ratings WHERE user_id = :user_id)") result = db.engine.execute(query, user_id=user_id).fetchall() movies = map(lambda row: { "movie_id": row[0], "prediction":model.predict(user_id, row[0]).est}, result) movies = sorted(movies, key=lambda m: m["prediction"], reverse=True) return { "movies":movies[:n] } @app.route('/train') def train_route(): train_task = threading.Thread(target=train, args=(model_dump_path,)) train_task.start() return { "status": "Started" } @app.route('/predict') def predict(): model = get_model() req = request.get_json() return { "prediction": model.predict(req['user'], req['movie']).est } def get_model(): path = model_dump_path if os.path.isfile(path): _, loaded_algo = dump.load(path) return loaded_algo else: algo = train(path) return algo
import os import threading from surprise import dump from training import train from db_util import get_db_conn_url from flask_sqlalchemy import SQLAlchemy from flask import Flask, request app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = f"postgresql://{get_db_conn_url()}" db = SQLAlchemy(app) from sqlalchemy import text model_dump_path = os.path.normpath("model_dump") @app.route('/topN/<user_id>') def getTopN(user_id): n = request.args.get('n') if n is None: n = 5 else: n = int(n) model = get_model() query = text("SELECT id FROM movies WHERE id NOT IN (SELECT movie_id FROM ratings WHERE user_id = :user_id)") result = db.engine.execute(query, user_id=user_id).fetchall() movies = map(lambda row: { "movie_id": row[0], "prediction":model.predict(user_id, row[0]).est}, result) movies = sorted(movies, key=lambda m: m["prediction"], reverse=True) return { "movies":movies[:n] } @app.route('/train') def train_route(): train_task = threading.Thread(target=train, args=(model_dump_path,)) train_task.start() return { "status": "Started" } @app.route('/predict') def predict(): model = get_model() req = request.get_json() return { "prediction": model.predict(req['user'], req['movie']).est } def get_model(): path = model_dump_path if os.path.isfile(path): _, loaded_algo = dump.load(path) return loaded_algo else: algo = train(path) return algo
none
1
2.443059
2
export.py
pdufter/pytorch-sgns
0
6621717
import torch import pickle import os vectors = torch.load("pts/sgns.pt") with open("data/idx2word.dat", "rb") as fin: vocab = pickle.load(fin) os.makedirs("export") with open("export/ovectors.txt", "w") as fp: for row in vectors['embedding.ovectors.weight'].cpu().numpy(): fp.write("\t".join([str(x) for x in row]) + "\n") with open("export/ivectors.txt", "w") as fp: for row in vectors['embedding.ivectors.weight'].cpu().numpy(): fp.write("\t".join([str(x) for x in row]) + "\n") with open("export/words.txt", "w") as fp: for token in vocab: fp.write("{}\n".format(token))
import torch import pickle import os vectors = torch.load("pts/sgns.pt") with open("data/idx2word.dat", "rb") as fin: vocab = pickle.load(fin) os.makedirs("export") with open("export/ovectors.txt", "w") as fp: for row in vectors['embedding.ovectors.weight'].cpu().numpy(): fp.write("\t".join([str(x) for x in row]) + "\n") with open("export/ivectors.txt", "w") as fp: for row in vectors['embedding.ivectors.weight'].cpu().numpy(): fp.write("\t".join([str(x) for x in row]) + "\n") with open("export/words.txt", "w") as fp: for token in vocab: fp.write("{}\n".format(token))
none
1
2.486494
2
PythonExercicios/ex088.py
cedricgenaro/Python
0
6621718
<reponame>cedricgenaro/Python
from random import randint
from time import sleep

numeros = []
jogos = []
print('-'*30)
print(f'{"JOGA NA MEGA SENA":^30}')
print('-'*30)
quant = int(input('Quantos jogos você quer que eu sorteie? '))
print('-='*3, f'SORTEANDO {quant} JOGOS', '-='*3)
for c in range(0, quant):
    while True:
        numero = randint(1, 60)
        if numero not in numeros:
            numeros.append(numero)
        if len(numeros) == 6:
            numeros.sort()
            break
    jogos.append(numeros[:])
    numeros.clear()
for i, j in enumerate(jogos):
    sleep(1.5)
    print(f'Jogo {i+1}', f':{j}')
print(f'{"BOA SORTE":-^30}')
from random import randint
from time import sleep

numeros = []
jogos = []
print('-'*30)
print(f'{"JOGA NA MEGA SENA":^30}')
print('-'*30)
quant = int(input('Quantos jogos você quer que eu sorteie? '))
print('-='*3, f'SORTEANDO {quant} JOGOS', '-='*3)
for c in range(0, quant):
    while True:
        numero = randint(1, 60)
        if numero not in numeros:
            numeros.append(numero)
        if len(numeros) == 6:
            numeros.sort()
            break
    jogos.append(numeros[:])
    numeros.clear()
for i, j in enumerate(jogos):
    sleep(1.5)
    print(f'Jogo {i+1}', f':{j}')
print(f'{"BOA SORTE":-^30}')
none
1
3.600544
4
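The rejection loop in the record above draws six distinct numbers in 1..60; the standard-library equivalent is random.sample, shown here as an aside rather than a change to the record:

from random import sample

# One lottery ticket: six distinct numbers in 1..60, sorted.
jogo = sorted(sample(range(1, 61), 6))
print(jogo)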
calculos.py
admelix/ejemplo-claudio
0
6621719
from sympy import *

'''
Here you can create functions that collect results from the web and, with that,
start splitting the content of the site depending on the calculation you want
to show. An example is what follows below.

Using latex, you can show a result as an image. Unicode renders it as if it
were ASCII.

Also, instead of a web page, you can start using Visual Studio Code to run
your calculations, draw all kinds of plots, build statistics and anything else
you want. It is a very powerful language for mathematical computation and I
personally use it a lot when building statistics and showing them on the web
with pandas.
'''

x, y, z = symbols('x y z')
# init_session(use_latex=True) prints the result as a kind of image
init_printing(use_unicode=True)


def muestra():
    # print_mathml(Integral(sqrt(1/x), x))
    # ecuacion = Eq(1 + x, x**2)
    # res = solve(ecuacion, x)
    return print_mathml(Integral(sqrt(1/x), x))
from sympy import *

'''
Here you can create functions that collect results from the web and, with that,
start splitting the content of the site depending on the calculation you want
to show. An example is what follows below.

Using latex, you can show a result as an image. Unicode renders it as if it
were ASCII.

Also, instead of a web page, you can start using Visual Studio Code to run
your calculations, draw all kinds of plots, build statistics and anything else
you want. It is a very powerful language for mathematical computation and I
personally use it a lot when building statistics and showing them on the web
with pandas.
'''

x, y, z = symbols('x y z')
# init_session(use_latex=True) prints the result as a kind of image
init_printing(use_unicode=True)


def muestra():
    # print_mathml(Integral(sqrt(1/x), x))
    # ecuacion = Eq(1 + x, x**2)
    # res = solve(ecuacion, x)
    return print_mathml(Integral(sqrt(1/x), x))
en
0.956077
Here you can create functions that collect results from the web and, with that, start splitting the content of the site depending on the calculation you want to show. An example is what follows below. Using latex, you can show a result as an image. Unicode renders it as if it were ASCII. Also, instead of a web page, you can start using Visual Studio Code to run your calculations, draw all kinds of plots, build statistics and anything else you want. It is a very powerful language for mathematical computation and I personally use it a lot when building statistics and showing them on the web with pandas. # init_session(use_latex=True) prints the result as a kind of image # print_mathml(Integral(sqrt(1/x), x)) # ecuacion = Eq(1 + x, x**2) # res = solve(ecuacion, x)
3.386191
3
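A small, self-contained sketch of the solver usage the commented-out lines in the record above gesture at; the equation is illustrative:

from sympy import symbols, Eq, solve

x = symbols('x')
# Solve 1 + x = x**2, i.e. the golden-ratio quadratic x**2 - x - 1 = 0.
print(solve(Eq(1 + x, x**2), x))  # roots (1 - sqrt(5))/2 and (1 + sqrt(5))/2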
cms/atlascasestudies/migrations/0004_atlascasestudycategoryrelationship.py
rkhleics/nhs-ei.website
1
6621720
# Generated by Django 3.1.2 on 2020-11-17 18:00 from django.db import migrations, models import django.db.models.deletion import modelcluster.fields class Migration(migrations.Migration): dependencies = [ ('categories', '0009_region_setting'), ('atlascasestudies', '0003_atlascasestudyregionrelationship'), ] operations = [ migrations.CreateModel( name='AtlasCaseStudyCategoryRelationship', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('atlas_case_study', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='atlas_case_study_category_relationship', to='atlascasestudies.atlascasestudy')), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='categories.category')), ], ), ]
# Generated by Django 3.1.2 on 2020-11-17 18:00 from django.db import migrations, models import django.db.models.deletion import modelcluster.fields class Migration(migrations.Migration): dependencies = [ ('categories', '0009_region_setting'), ('atlascasestudies', '0003_atlascasestudyregionrelationship'), ] operations = [ migrations.CreateModel( name='AtlasCaseStudyCategoryRelationship', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('atlas_case_study', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='atlas_case_study_category_relationship', to='atlascasestudies.atlascasestudy')), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='categories.category')), ], ), ]
en
0.743845
# Generated by Django 3.1.2 on 2020-11-17 18:00
1.618463
2
docs/conf.py
cjw296/mush
0
6621721
# -*- coding: utf-8 -*- import os, pkginfo, datetime on_rtd = os.environ.get('READTHEDOCS', None) == 'True' pkg_info = pkginfo.Develop(os.path.join(os.path.dirname(__file__),'..')) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', ] intersphinx_mapping = dict( python = ('http://docs.python.org/dev', None), testfixtures = ('http://pythonhosted.org/testfixtures', None), ) # General source_suffix = '.txt' master_doc = 'index' project = pkg_info.name first_year = 2013 current_year = datetime.datetime.now().year copyright = (str(current_year) if current_year==first_year else ('%s-%s'%(first_year,current_year)))+' <NAME>' version = release = pkg_info.version exclude_trees = ['_build'] unused_docs = ['description'] pygments_style = 'sphinx' # Options for HTML output if on_rtd: html_theme = 'default' else: html_theme = 'classic' htmlhelp_basename = project+'doc' # Options for LaTeX output latex_documents = [ ('index',project+'.tex', project+u' Documentation', 'Simplistix Ltd', 'manual'), ]
# -*- coding: utf-8 -*- import os, pkginfo, datetime on_rtd = os.environ.get('READTHEDOCS', None) == 'True' pkg_info = pkginfo.Develop(os.path.join(os.path.dirname(__file__),'..')) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', ] intersphinx_mapping = dict( python = ('http://docs.python.org/dev', None), testfixtures = ('http://pythonhosted.org/testfixtures', None), ) # General source_suffix = '.txt' master_doc = 'index' project = pkg_info.name first_year = 2013 current_year = datetime.datetime.now().year copyright = (str(current_year) if current_year==first_year else ('%s-%s'%(first_year,current_year)))+' <NAME>' version = release = pkg_info.version exclude_trees = ['_build'] unused_docs = ['description'] pygments_style = 'sphinx' # Options for HTML output if on_rtd: html_theme = 'default' else: html_theme = 'classic' htmlhelp_basename = project+'doc' # Options for LaTeX output latex_documents = [ ('index',project+'.tex', project+u' Documentation', 'Simplistix Ltd', 'manual'), ]
en
0.535605
# -*- coding: utf-8 -*- # General # Options for HTML output # Options for LaTeX output
1.902127
2
firestoreservice.py
tbarford/bg_streamlit_demo
0
6621722
<reponame>tbarford/bg_streamlit_demo
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
##python3 script created by tBarford on 20220203
##
##
##File Description: Firebase Firestore Service - CRUD functions for BG golf app MVP
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
import firebase_admin
from firebase_admin import credentials, firestore
import streamlit as st
import json
#import asyncio as io
#import concurrent

class FirestoreService():

    def __init__(self):
        #instantiate firebase
        credentialJson = json.loads(st.secrets["textkey"])
        _credential = credentials.Certificate(credentialJson)
        try:
            _root = firebase_admin.get_app()
        except ValueError:
            _root = firebase_admin.initialize_app(_credential)
        self.db = firestore.client()
        #self.db = firestore.AsyncClient(credentials=_root.credential.get_credential(), project=_root.project_id)

    ## Read Operations
    def getShaftList(self, shaftType: str):
        docRef = self.db.collection('demo_db').document(shaftType).collections()
        shaftList = [shaft.id for shaft in docRef]
        return shaftList

    def getStiffness(self, shaftType: str, shaft: str):
        docRef = self.db.collection('demo_db').document(shaftType).collection(shaft).stream()
        stiffness = [doc.id for doc in docRef]
        return stiffness

    def getEI(self, shaftType: str, shaft: str, stiffness: str):
        docRef = self.db.collection('demo_db').document(shaftType).collection(shaft).document(stiffness)
        eiDict = docRef.get().to_dict()
        return (eiDict['lengths'], eiDict['ei'])

    def getFq(self, shaftType: str, shaft: str, stiffness: str):
        docRef = self.db.collection('demo_db').document(shaftType).collection(shaft).document(stiffness)
        fqDict = docRef.get().to_dict()
        return (fqDict['lengths'], fqDict['measuredFq'])

## Write Operations ##
def writeToDb(db, docRef, dictToWrite: dict):
    docRef.set(dictToWrite)

def updateDbEntry(db, docRef, dictToWrite: dict):
    docRef.update(dictToWrite)

def writeToolData(db, shaft: str, shaftEntry: int, measurement: int, toolData: list):
    docRef = db.collection(u'eiDb').document(shaft).collection(u'fq_measure_'+str(shaftEntry)).document(str(measurement))
    docRef.set({u'tool_data': toolData})

def updateFields(db):
    docRef = db.collection(u'title').document(u'field')
    docRef.update({})

def readFromDb(db, docRef):
    try:
        doc = docRef.get()
        docDict = doc.to_dict()
        print(docDict)
    except Exception as e:
        print(e)

def deleteFromDb(db):
    try:
        docRef = db.collection(u'title').document(u'field')
        doc = docRef.delete()
    except Exception as e:
        print(e)

def main():
    db = FirestoreService()
    #wdict = db.efficientGet('wood').to_dict()
    idict = db.getShaftList('iron')
    print(idict)

if __name__ == '__main__':
    main()
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
##python3 script created by tBarford on 20220203
##
##
##File Description: Firebase Firestore Service - CRUD functions for BG golf app MVP
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
import firebase_admin
from firebase_admin import credentials, firestore
import streamlit as st
import json
#import asyncio as io
#import concurrent

class FirestoreService():

    def __init__(self):
        #instantiate firebase
        credentialJson = json.loads(st.secrets["textkey"])
        _credential = credentials.Certificate(credentialJson)
        try:
            _root = firebase_admin.get_app()
        except ValueError:
            _root = firebase_admin.initialize_app(_credential)
        self.db = firestore.client()
        #self.db = firestore.AsyncClient(credentials=_root.credential.get_credential(), project=_root.project_id)

    ## Read Operations
    def getShaftList(self, shaftType: str):
        docRef = self.db.collection('demo_db').document(shaftType).collections()
        shaftList = [shaft.id for shaft in docRef]
        return shaftList

    def getStiffness(self, shaftType: str, shaft: str):
        docRef = self.db.collection('demo_db').document(shaftType).collection(shaft).stream()
        stiffness = [doc.id for doc in docRef]
        return stiffness

    def getEI(self, shaftType: str, shaft: str, stiffness: str):
        docRef = self.db.collection('demo_db').document(shaftType).collection(shaft).document(stiffness)
        eiDict = docRef.get().to_dict()
        return (eiDict['lengths'], eiDict['ei'])

    def getFq(self, shaftType: str, shaft: str, stiffness: str):
        docRef = self.db.collection('demo_db').document(shaftType).collection(shaft).document(stiffness)
        fqDict = docRef.get().to_dict()
        return (fqDict['lengths'], fqDict['measuredFq'])

## Write Operations ##
def writeToDb(db, docRef, dictToWrite: dict):
    docRef.set(dictToWrite)

def updateDbEntry(db, docRef, dictToWrite: dict):
    docRef.update(dictToWrite)

def writeToolData(db, shaft: str, shaftEntry: int, measurement: int, toolData: list):
    docRef = db.collection(u'eiDb').document(shaft).collection(u'fq_measure_'+str(shaftEntry)).document(str(measurement))
    docRef.set({u'tool_data': toolData})

def updateFields(db):
    docRef = db.collection(u'title').document(u'field')
    docRef.update({})

def readFromDb(db, docRef):
    try:
        doc = docRef.get()
        docDict = doc.to_dict()
        print(docDict)
    except Exception as e:
        print(e)

def deleteFromDb(db):
    try:
        docRef = db.collection(u'title').document(u'field')
        doc = docRef.delete()
    except Exception as e:
        print(e)

def main():
    db = FirestoreService()
    #wdict = db.efficientGet('wood').to_dict()
    idict = db.getShaftList('iron')
    print(idict)

if __name__ == '__main__':
    main()
en
0.362089
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~## ##python3 script created by tBarford on 20220203 ## ## ##File Description: Firebase Firestore Service - CRUD functions for BG golf app MVP ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~## #import asyncio as io #import concurrent #instantiate firebase #self.db = firestore.AsyncClient(credentials=_root.credential.get_credential(), project=_root.project_id) ## Read Operations ## Write Operations ## #wdict = db.efficientGet('wood').to_dict()
2.245442
2
rfb_mc/restrictive_formula_module.py
Meterius/rfb-mc
0
6621723
from abc import ABC, abstractmethod
from typing import Generic, TypeVar, Hashable, Any, Dict, Type

from rfb_mc.types import Params

RestrictiveFormulaParams = TypeVar("RestrictiveFormulaParams", bound=Hashable)
# parameter that determines all formula generation related values

RestrictiveFormulaProperties = TypeVar("RestrictiveFormulaProperties")
# properties of the restrictive formula that are determined by the parameters

RestrictiveFormulaInstanceParams = TypeVar("RestrictiveFormulaInstanceParams")
# values that parameterize a specific instance generated by the module and will be used by
# the runner implementations to reconstruct the formula in the format of whichever solver is used


class RestrictiveFormulaModule(
    Generic[RestrictiveFormulaParams, RestrictiveFormulaProperties, RestrictiveFormulaInstanceParams],
    ABC,
):
    @classmethod
    @abstractmethod
    def get_guid(cls) -> str:
        """
        UID of the restrictive formula module that needs to be deterministic and
        unique across all other restrictive formula module implementations.
        An abbreviation of the name that is unlikely to be reused is suggested and possibly a version
        number in order to differentiate between different generations of the same module.
        """

        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def encode_restrictive_formula_params(cls, params: RestrictiveFormulaParams) -> Any:
        """
        Encodes the formula parameters into a native python type.
        """

        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def decode_restrictive_formula_params(cls, params: Any) -> RestrictiveFormulaParams:
        """
        Decodes the formula parameters from the native python type generated by the encoder.
        """

        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def get_restrictive_formula_properties(
        cls,
        params: Params,
        restrictive_formula_params: RestrictiveFormulaParams,
    ) -> RestrictiveFormulaProperties:
        """
        Returns properties that the restrictive formula generation possesses with the given parameters.
        """

        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def generate_restrictive_formula_instance_params(
        cls,
        params: Params,
        restrictive_formula_params: RestrictiveFormulaParams,
        q: int,
    ) -> RestrictiveFormulaInstanceParams:
        """
        Generate the restrictive formula instance params for the given params
        using randomness from the random runner class.
        """

        raise NotImplementedError()


restrictive_formula_module_map: Dict[str, Type[RestrictiveFormulaModule]] = {}
"""
Map from restrictive formula module uid to module class.
"""


def register_restrictive_formula_module(rfm: Type[RestrictiveFormulaModule]):
    restrictive_formula_module_map[rfm.get_guid()] = rfm


def get_restrictive_formula_module(uid: str) -> Type[RestrictiveFormulaModule]:
    if uid in restrictive_formula_module_map:
        return restrictive_formula_module_map[uid]
    else:
        raise RuntimeError(f"Restrictive formula module \"{uid}\" is required but is not registered")
from abc import ABC, abstractmethod
from typing import Generic, TypeVar, Hashable, Any, Dict, Type

from rfb_mc.types import Params

RestrictiveFormulaParams = TypeVar("RestrictiveFormulaParams", bound=Hashable)
# parameter that determines all formula generation related values

RestrictiveFormulaProperties = TypeVar("RestrictiveFormulaProperties")
# properties of the restrictive formula that are determined by the parameters

RestrictiveFormulaInstanceParams = TypeVar("RestrictiveFormulaInstanceParams")
# values that parameterize a specific instance generated by the module and will be used by
# the runner implementations to reconstruct the formula in the format of whichever solver is used


class RestrictiveFormulaModule(
    Generic[RestrictiveFormulaParams, RestrictiveFormulaProperties, RestrictiveFormulaInstanceParams],
    ABC,
):
    @classmethod
    @abstractmethod
    def get_guid(cls) -> str:
        """
        UID of the restrictive formula module that needs to be deterministic and
        unique across all other restrictive formula module implementations.
        An abbreviation of the name that is unlikely to be reused is suggested and possibly a version
        number in order to differentiate between different generations of the same module.
        """

        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def encode_restrictive_formula_params(cls, params: RestrictiveFormulaParams) -> Any:
        """
        Encodes the formula parameters into a native python type.
        """

        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def decode_restrictive_formula_params(cls, params: Any) -> RestrictiveFormulaParams:
        """
        Decodes the formula parameters from the native python type generated by the encoder.
        """

        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def get_restrictive_formula_properties(
        cls,
        params: Params,
        restrictive_formula_params: RestrictiveFormulaParams,
    ) -> RestrictiveFormulaProperties:
        """
        Returns properties that the restrictive formula generation possesses with the given parameters.
        """

        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def generate_restrictive_formula_instance_params(
        cls,
        params: Params,
        restrictive_formula_params: RestrictiveFormulaParams,
        q: int,
    ) -> RestrictiveFormulaInstanceParams:
        """
        Generate the restrictive formula instance params for the given params
        using randomness from the random runner class.
        """

        raise NotImplementedError()


restrictive_formula_module_map: Dict[str, Type[RestrictiveFormulaModule]] = {}
"""
Map from restrictive formula module uid to module class.
"""


def register_restrictive_formula_module(rfm: Type[RestrictiveFormulaModule]):
    restrictive_formula_module_map[rfm.get_guid()] = rfm


def get_restrictive_formula_module(uid: str) -> Type[RestrictiveFormulaModule]:
    if uid in restrictive_formula_module_map:
        return restrictive_formula_module_map[uid]
    else:
        raise RuntimeError(f"Restrictive formula module \"{uid}\" is required but is not registered")
en
0.827305
# parameter that determines all formula generation related values # properties of the restrictive formula that are determined by the parameters # values that parameterize a specific instance generated by the module and will be used by # the runner implementations to reconstruct the formula in the format of whichever solver is used UID of the restrictive formula module that needs to be deterministic and unique across all other restrictive formula module implementations. An abbreviation of the name that is unlikely to be reused is suggested and possibly a version number in order to differentiate between different generations of the same module. Encodes the formula parameters into a native python type. Decodes the formula parameters from the native python type generated by the encoder. Returns properties that the restrictive formula generation possesses with the given parameters. Generate the restrictive formula instance params for the given params using randomness from the random runner class. Map from restrictive formula module uid to module class.
2.731478
3
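A minimal registration sketch for the module registry in the record above; the DummyRfm subclass is hypothetical and only stubs the abstract interface to exercise register/get:

# Hypothetical stub module (not a real restrictive formula implementation).
class DummyRfm(RestrictiveFormulaModule):
    @classmethod
    def get_guid(cls) -> str:
        return "dummy-rfm-v1"

    @classmethod
    def encode_restrictive_formula_params(cls, params):
        return params

    @classmethod
    def decode_restrictive_formula_params(cls, params):
        return params

    @classmethod
    def get_restrictive_formula_properties(cls, params, restrictive_formula_params):
        return None

    @classmethod
    def generate_restrictive_formula_instance_params(cls, params, restrictive_formula_params, q):
        return None

register_restrictive_formula_module(DummyRfm)
assert get_restrictive_formula_module("dummy-rfm-v1") is DummyRfm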
_not_in_use/tests/text_alignment_tests.py
righthan/policy_diffusion
33
6621724
import random
import matplotlib.pyplot as plt
import time
import numpy as np
from compiler.ast import flatten
from alignment.sequence import Sequence
from alignment.vocabulary import Vocabulary
from alignment.sequencealigner import SimpleScoring, LocalSequenceAligner
from utils.general_utils import find_subsequence
from text_alignment import *

#function from python package for testing results
def seqToAlign(a, b, matchScore = 3, mismatchScore = -1, gapScore = -2):
    '''
    args:
        a: list of words
        b: list of words
        matchScore: num
        mismatchScore: num
        gapScore: num
    Returns:
        o/w returns list of tuples with score and top alignments
    Description:
        helper function for finding alignments given a list of words
    '''
    # Create a vocabulary and encode the sequences.
    a = a[0]
    b = b[0]
    seq1 = Sequence(a)
    seq2 = Sequence(b)
    v = Vocabulary()
    aEncoded = v.encodeSequence(seq1)
    bEncoded = v.encodeSequence(seq2)

    # Create a scoring and align the sequences using local aligner.
    scoring = SimpleScoring(matchScore, mismatchScore)
    aligner = LocalSequenceAligner(scoring, gapScore)
    score, encodeds = aligner.align(aEncoded, bEncoded, backtrace=True)
    alignments = [v.decodeSequenceAlignment(encoded) for encoded in encodeds]

    return [(a.score, list(a.first), list(a.second)) for a in alignments]

#testing functions
def create_doc_test_cases():
    #tests
    t1 = [['a']*100]
    t2 = [['b']*50 + ['a','a','b']*50]

    s1 = [[1]*100]
    s2 = [[2]*50 + [1,1,2]*50]

    v1 = [np.array([0, 1, 2, 3, 4, 7, 6, 3, 2, 1, 3])]
    v2 = [np.array([0, 1, 2, 3, 4, 4, 5, 2, 1, 2, 2])]

    w1 = [np.array([7, 6, 3, 2, 1, 3, 0, 1, 2, 3, 4])]
    w2 = [np.array([4, 5, 2, 1, 2, 2, 0, 1, 2, 3, 4])]

    tests = [(t1,t2), (s1,s2), (v1,v2), (w1,w2),
             ([np.random.choice(5, 30)], [np.random.choice(5, 30)]),
             ([np.array([1, 2, 0, 0, 1, 2, 3, 0, 1, 3, 0, 4, 3, 3, 0, 3, 0, 2, 0, 4, 3, 4, 2,
                         1, 1, 1, 1, 1, 0, 1])],
              [np.array([2, 0, 3, 1, 2, 4, 0, 1, 3, 0, 1, 4, 1, 3, 1, 4, 0, 0, 1, 2, 4, 0, 0,
                         2, 4, 1, 3, 2, 2, 4])])]

    return tests

#LocalAligner algorithm tests
def LocalAligner_unit_tests():

    def test_alignment(t1,t2):
        f = LocalAligner()
        alignment = f.align(t1,t2) #default score is 3,-1,-2
        score, l, r = alignment.alignments[0]

        #find score of recovered alignment
        align_score = f.alignment_score(l,r)

        #run package algorithm
        alignments = seqToAlign(t1,t2) #default score is 3,-1,-2

        if score == align_score and score == alignments[0][0]:
            print 'package, backtraced alignment, and alignment matrix consistent'
        else:
            print 'dp_alg_score: ' + str(score)
            print 'alignment_score: ' + str(align_score)
            print 'package_score: ' + str(alignments[0][0])

    #tests
    tests = create_doc_test_cases()

    for test in tests:
        z1, z2 = test
        test_alignment(z1,z2)

        f = LocalAligner()
        alignment = f.align(z1,z2) #default score is 3,-1,-2
        score, l, r = alignment.alignments[0]

        #run package algorithm
        alignments = seqToAlign(z1,z2) #default score is 3,-1,-2

        l_true, r_true = alignments[0][1:]
        for i in range(len(l)):
            if l[i] != l_true[i]:
                print 'not same sequence'
                break
        for i in range(len(r)):
            if r[i] != r_true[i]:
                print 'not same sequence'
                break

def test_alignment(t1,t2, algorithm):
    f = algorithm()
    alignment = f.align(t1,t2) #default score is 3,-1,-2
    score, l, r = alignment.alignments[0]

    #find score of recovered alignment
    align_score = f.alignment_score(l,r)

    if score == align_score:
        print 'backtraced alignment and alignment matrix consistent'
    else:
        print 'backtraced alignment and alignment matrix not consistent'
        print 'dp_alg_score: ' + str(score)
        print 'alignment_score: ' + str(align_score)
        print 'left_alignment: ', l
        print 'right_alignment: ', r

def generic_doc_unit_test(algorithm):
    tests = create_doc_test_cases()
    for test in tests:
        z1, z2 = test
        test_alignment(z1,z2, algorithm)

def LocalAligner_speed_test():
    input_sizes = [np.exp2(p) for p in range(2,7)]

    average_our_times = []
    average_package_times = []

    for input_size in input_sizes:
        print input_size
        v1 = [np.random.randint(0,10,input_size)]
        v2 = [np.random.randint(0,10,input_size)]

        our_times = []
        package_times = []
        f = LocalAligner()
        for i in range(2):
            t1 = time.time()
            f.align(v1,v2)
            our_times.append(time.time()-t1)

            t2 = time.time()
            seqToAlign(v1,v2)
            package_times.append(time.time()-t2)

        average_our_times.append(np.mean(our_times))
        average_package_times.append(np.mean(package_times))

    plt.plot(input_sizes, average_package_times, color = 'b', label = 'package')
    plt.plot(input_sizes, average_our_times, color = 'r', label = 'our implementation')
    plt.legend(loc='upper right')
    plt.xlabel('input size')
    plt.ylim(0,0.02)
    plt.show()

def generic_doc_speed_test(algorithm):
    '''
    compares speed of algorithm to local alignment algorithm
    '''
    input_sizes = [np.exp2(p) for p in range(2,7)]

    average_alg_times = []
    average_local_times = []

    for input_size in input_sizes:
        print input_size
        v1 = [np.random.randint(0,10,input_size)]
        v2 = [np.random.randint(0,10,input_size)]

        local_times = []
        alg_times = []
        f = LocalAligner()
        g = algorithm()
        for i in range(2):
            t1 = time.time()
            f.align(v1,v2)
            local_times.append(time.time()-t1)

            t2 = time.time()
            g.align(v1,v2)
            alg_times.append(time.time()-t2)

        average_local_times.append(np.mean(local_times))
        average_alg_times.append(np.mean(alg_times))

    return average_local_times, average_alg_times

def doc_test_alignment_indices(algorithm):
    #tests
    tests = create_doc_test_cases()

    good_job = True
    for test in tests:
        left_text, right_text = test
        try:
            left_text[0] = left_text[0].tolist()
            right_text[0] = right_text[0].tolist()
        except:
            pass

        f = algorithm()
        Alignment = f.align(left_text, right_text)

        left, right = clean_alignment(Alignment.alignments[0])

        left_start, left_end = find_subsequence(left, flatten(left_text))
        right_start, right_end = find_subsequence(right, flatten(right_text))

        if Alignment.alignment_indices[0]['left_start'] != left_start or \
            Alignment.alignment_indices[0]['left_end'] != left_end or \
            Alignment.alignment_indices[0]['right_start'] != right_start or \
            Alignment.alignment_indices[0]['right_end'] != right_end:

            print 'alignment length: ', len(left)
            print 'indices are messed up'
            print 'left_start: ', Alignment.alignment_indices[0]['left_start']
            print 'true left_start: ', left_start
            print 'left_end: ', Alignment.alignment_indices[0]['left_end']
            print 'true left_end', left_end
            print '\n'
            print 'right_start: ', Alignment.alignment_indices[0]['right_start']
            print 'true right_start: ', right_start
            print 'right_end: ', Alignment.alignment_indices[0]['right_end']
            print 'true right_end: ', right_end
            print '\n'
            good_job = False

    if good_job:
        print 'indices worked'

#SectionLocalAlignment Tests
def create_section_tests():
    tests = create_doc_test_cases()

    #convert tests into sections so
    #that it makes sense for case
    left_test = []
    right_test = []
    for test1, test2 in tests:
        left_test.append(list(test1[0]))
        right_test.append(list(test2[0]))

    return left_test, right_test

def section_unit_tests(Algorithm):
    left_test, right_test = create_section_tests()

    f = Algorithm()
    Alignment = f.align(left_test, [flatten(right_test)])

    good_job = True
    for score, left, right in Alignment.alignments:
        true_score = f.alignment_score(left, right)
        if true_score != score:
            print 'left: ', left
            print 'right: ', right
            print 'true alignment score: ', true_score
            print 'calculated score: ', score
            good_job = False

    if good_job:
        print "calculated alignment scores correctly"

def section_speed_test():
    input_sizes = [np.exp2(p) for p in range(2,9)]

    average_local_times = []
    average_section_times = []

    for input_size in input_sizes:
        print input_size
        v1 = [np.random.randint(0,10,input_size)]
        v2 = [np.random.randint(0,10,input_size)]

        cut1 = random.randint(0,len(v1))
        cut2 = random.randint(cut1,len(v2))
        cut3 = random.randint(cut2,len(v2))
        w1 = [v1[0][:cut1], v1[0][cut1:cut2], v1[0][cut2:cut3]]

        local_times = []
        section_times = []
        for i in range(2):
            t1 = time.time()
            f = LocalAligner()
            f.align(v1,v2)
            local_times.append(time.time()-t1)

            t2 = time.time()
            f = LocalAligner()
            f.align(w1,v2)
            section_times.append(time.time()-t2)

        average_local_times.append(np.mean(local_times))
        average_section_times.append(np.mean(section_times))

    plt.plot(input_sizes, average_section_times, color = 'b', label = 'section local alignment')
    plt.plot(input_sizes, average_local_times, color = 'r', label = 'local alignment')
    plt.legend(loc='upper right')
    plt.xlabel('input size')
    plt.ylim(0,0.02)
    plt.show()

def section_test_alignment_indices():
    left_test, right_test = create_section_tests()

    left_test_flattened = flatten(left_test)
    right_test_flattened = flatten(right_test)

    f = LocalAligner()
    Alignment = f.align(left_test, [right_test_flattened])

    good_job = True
    for i in range(len(Alignment.alignments)):
        left, right = clean_alignment(Alignment.alignments[i])
        print 'alignment length: ', len(left)

        left_start, left_end = find_subsequence(left, left_test_flattened)
        right_start, right_end = find_subsequence(right, right_test_flattened)

        if Alignment.alignment_indices[i]['left_start'] != left_start or \
            Alignment.alignment_indices[i]['left_end'] != left_end or \
            Alignment.alignment_indices[i]['right_start'] != right_start or \
            Alignment.alignment_indices[i]['right_end'] != right_end:

            print 'indices are messed up: '
            print 'left_start: ', Alignment.alignment_indices[i]['left_start']
            print 'true left_start: ', left_start
            print 'left_end: ', Alignment.alignment_indices[i]['left_end']
            print 'true left_end', left_end
            print '\n'
            print 'right_start: ', Alignment.alignment_indices[i]['right_start']
            print 'true right_start: ', right_start
            print 'right_end: ', Alignment.alignment_indices[i]['right_end']
            print 'true right_end: ', right_end
            print '\n'
            good_job = False

    if good_job:
        print 'indices worked'

############################################################
##helper functions
def clean_alignment(alignment):
    '''
    arg:
        alignment object
    returns:
        2 list of alignment words without the alignment symbol
    '''
    keep1 = []
    keep2 = []
    for item in alignment[1]:
        if item != '-':
            keep1.append(item)
    for item in alignment[2]:
        if item != '-':
            keep2.append(item)

    return (keep1, keep2)

if __name__ == '__main__':
    print "running LocalAligner unit tests.... \n"
    LocalAligner_unit_tests()

    print "running LocalAligner speed tests.... \n"
    LocalAligner_speed_test()

    print "running LocalAligner index tests.... \n"
    doc_test_alignment_indices(LocalAligner)

    print "running AffineLocalAligner unit tests.... \n"
    generic_doc_unit_test(AffineLocalAligner)

    print "running AffineLocalAligner speed tests.... \n"
    generic_doc_speed_test(AffineLocalAligner)

    print "running section unit tests for localaligner.... \n"
    section_unit_tests(LocalAligner)

    print "running section unit tests for affinealigner.... \n"
    section_unit_tests(AffineLocalAligner)

    print "running section speed tests.... \n"
    section_speed_test()

    print 'running test on keeping track of indices for section algorithm..... \n'
    section_test_alignment_indices()

    print 'running speed test on Word2VecLocalAligner.... \n'
import random
import matplotlib.pyplot as plt
import time
import numpy as np
from compiler.ast import flatten
from alignment.sequence import Sequence
from alignment.vocabulary import Vocabulary
from alignment.sequencealigner import SimpleScoring, LocalSequenceAligner
from utils.general_utils import find_subsequence
from text_alignment import *

#function from python package for testing results
def seqToAlign(a, b, matchScore = 3, mismatchScore = -1, gapScore = -2):
    '''
    args:
        a: list of words
        b: list of words
        matchScore: num
        mismatchScore: num
        gapScore: num
    Returns:
        o/w returns list of tuples with score and top alignments
    Description:
        helper function for finding alignments given a list of words
    '''
    # Create a vocabulary and encode the sequences.
    a = a[0]
    b = b[0]
    seq1 = Sequence(a)
    seq2 = Sequence(b)
    v = Vocabulary()
    aEncoded = v.encodeSequence(seq1)
    bEncoded = v.encodeSequence(seq2)

    # Create a scoring and align the sequences using local aligner.
    scoring = SimpleScoring(matchScore, mismatchScore)
    aligner = LocalSequenceAligner(scoring, gapScore)
    score, encodeds = aligner.align(aEncoded, bEncoded, backtrace=True)
    alignments = [v.decodeSequenceAlignment(encoded) for encoded in encodeds]

    return [(a.score, list(a.first), list(a.second)) for a in alignments]

#testing functions
def create_doc_test_cases():
    #tests
    t1 = [['a']*100]
    t2 = [['b']*50 + ['a','a','b']*50]

    s1 = [[1]*100]
    s2 = [[2]*50 + [1,1,2]*50]

    v1 = [np.array([0, 1, 2, 3, 4, 7, 6, 3, 2, 1, 3])]
    v2 = [np.array([0, 1, 2, 3, 4, 4, 5, 2, 1, 2, 2])]

    w1 = [np.array([7, 6, 3, 2, 1, 3, 0, 1, 2, 3, 4])]
    w2 = [np.array([4, 5, 2, 1, 2, 2, 0, 1, 2, 3, 4])]

    tests = [(t1,t2), (s1,s2), (v1,v2), (w1,w2),
             ([np.random.choice(5, 30)], [np.random.choice(5, 30)]),
             ([np.array([1, 2, 0, 0, 1, 2, 3, 0, 1, 3, 0, 4, 3, 3, 0, 3, 0, 2, 0, 4, 3, 4, 2,
                         1, 1, 1, 1, 1, 0, 1])],
              [np.array([2, 0, 3, 1, 2, 4, 0, 1, 3, 0, 1, 4, 1, 3, 1, 4, 0, 0, 1, 2, 4, 0, 0,
                         2, 4, 1, 3, 2, 2, 4])])]

    return tests

#LocalAligner algorithm tests
def LocalAligner_unit_tests():

    def test_alignment(t1,t2):
        f = LocalAligner()
        alignment = f.align(t1,t2) #default score is 3,-1,-2
        score, l, r = alignment.alignments[0]

        #find score of recovered alignment
        align_score = f.alignment_score(l,r)

        #run package algorithm
        alignments = seqToAlign(t1,t2) #default score is 3,-1,-2

        if score == align_score and score == alignments[0][0]:
            print 'package, backtraced alignment, and alignment matrix consistent'
        else:
            print 'dp_alg_score: ' + str(score)
            print 'alignment_score: ' + str(align_score)
            print 'package_score: ' + str(alignments[0][0])

    #tests
    tests = create_doc_test_cases()

    for test in tests:
        z1, z2 = test
        test_alignment(z1,z2)

        f = LocalAligner()
        alignment = f.align(z1,z2) #default score is 3,-1,-2
        score, l, r = alignment.alignments[0]

        #run package algorithm
        alignments = seqToAlign(z1,z2) #default score is 3,-1,-2

        l_true, r_true = alignments[0][1:]
        for i in range(len(l)):
            if l[i] != l_true[i]:
                print 'not same sequence'
                break
        for i in range(len(r)):
            if r[i] != r_true[i]:
                print 'not same sequence'
                break

def test_alignment(t1,t2, algorithm):
    f = algorithm()
    alignment = f.align(t1,t2) #default score is 3,-1,-2
    score, l, r = alignment.alignments[0]

    #find score of recovered alignment
    align_score = f.alignment_score(l,r)

    if score == align_score:
        print 'backtraced alignment and alignment matrix consistent'
    else:
        print 'backtraced alignment and alignment matrix not consistent'
        print 'dp_alg_score: ' + str(score)
        print 'alignment_score: ' + str(align_score)
        print 'left_alignment: ', l
        print 'right_alignment: ', r

def generic_doc_unit_test(algorithm):
    tests = create_doc_test_cases()
    for test in tests:
        z1, z2 = test
        test_alignment(z1,z2, algorithm)

def LocalAligner_speed_test():
    input_sizes = [np.exp2(p) for p in range(2,7)]

    average_our_times = []
    average_package_times = []

    for input_size in input_sizes:
        print input_size
        v1 = [np.random.randint(0,10,input_size)]
        v2 = [np.random.randint(0,10,input_size)]

        our_times = []
        package_times = []
        f = LocalAligner()
        for i in range(2):
            t1 = time.time()
            f.align(v1,v2)
            our_times.append(time.time()-t1)

            t2 = time.time()
            seqToAlign(v1,v2)
            package_times.append(time.time()-t2)

        average_our_times.append(np.mean(our_times))
        average_package_times.append(np.mean(package_times))

    plt.plot(input_sizes, average_package_times, color = 'b', label = 'package')
    plt.plot(input_sizes, average_our_times, color = 'r', label = 'our implementation')
    plt.legend(loc='upper right')
    plt.xlabel('input size')
    plt.ylim(0,0.02)
    plt.show()

def generic_doc_speed_test(algorithm):
    '''
    compares speed of algorithm to local alignment algorithm
    '''
    input_sizes = [np.exp2(p) for p in range(2,7)]

    average_alg_times = []
    average_local_times = []

    for input_size in input_sizes:
        print input_size
        v1 = [np.random.randint(0,10,input_size)]
        v2 = [np.random.randint(0,10,input_size)]

        local_times = []
        alg_times = []
        f = LocalAligner()
        g = algorithm()
        for i in range(2):
            t1 = time.time()
            f.align(v1,v2)
            local_times.append(time.time()-t1)

            t2 = time.time()
            g.align(v1,v2)
            alg_times.append(time.time()-t2)

        average_local_times.append(np.mean(local_times))
        average_alg_times.append(np.mean(alg_times))

    return average_local_times, average_alg_times

def doc_test_alignment_indices(algorithm):
    #tests
    tests = create_doc_test_cases()

    good_job = True
    for test in tests:
        left_text, right_text = test
        try:
            left_text[0] = left_text[0].tolist()
            right_text[0] = right_text[0].tolist()
        except:
            pass

        f = algorithm()
        Alignment = f.align(left_text, right_text)

        left, right = clean_alignment(Alignment.alignments[0])

        left_start, left_end = find_subsequence(left, flatten(left_text))
        right_start, right_end = find_subsequence(right, flatten(right_text))

        if Alignment.alignment_indices[0]['left_start'] != left_start or \
            Alignment.alignment_indices[0]['left_end'] != left_end or \
            Alignment.alignment_indices[0]['right_start'] != right_start or \
            Alignment.alignment_indices[0]['right_end'] != right_end:

            print 'alignment length: ', len(left)
            print 'indices are messed up'
            print 'left_start: ', Alignment.alignment_indices[0]['left_start']
            print 'true left_start: ', left_start
            print 'left_end: ', Alignment.alignment_indices[0]['left_end']
            print 'true left_end', left_end
            print '\n'
            print 'right_start: ', Alignment.alignment_indices[0]['right_start']
            print 'true right_start: ', right_start
            print 'right_end: ', Alignment.alignment_indices[0]['right_end']
            print 'true right_end: ', right_end
            print '\n'
            good_job = False

    if good_job:
        print 'indices worked'

#SectionLocalAlignment Tests
def create_section_tests():
    tests = create_doc_test_cases()

    #convert tests into sections so
    #that it makes sense for case
    left_test = []
    right_test = []
    for test1, test2 in tests:
        left_test.append(list(test1[0]))
        right_test.append(list(test2[0]))

    return left_test, right_test

def section_unit_tests(Algorithm):
    left_test, right_test = create_section_tests()

    f = Algorithm()
    Alignment = f.align(left_test, [flatten(right_test)])

    good_job = True
    for score, left, right in Alignment.alignments:
        true_score = f.alignment_score(left, right)
        if true_score != score:
            print 'left: ', left
            print 'right: ', right
            print 'true alignment score: ', true_score
            print 'calculated score: ', score
            good_job = False

    if good_job:
        print "calculated alignment scores correctly"

def section_speed_test():
    input_sizes = [np.exp2(p) for p in range(2,9)]

    average_local_times = []
    average_section_times = []

    for input_size in input_sizes:
        print input_size
        v1 = [np.random.randint(0,10,input_size)]
        v2 = [np.random.randint(0,10,input_size)]

        cut1 = random.randint(0,len(v1))
        cut2 = random.randint(cut1,len(v2))
        cut3 = random.randint(cut2,len(v2))
        w1 = [v1[0][:cut1], v1[0][cut1:cut2], v1[0][cut2:cut3]]

        local_times = []
        section_times = []
        for i in range(2):
            t1 = time.time()
            f = LocalAligner()
            f.align(v1,v2)
            local_times.append(time.time()-t1)

            t2 = time.time()
            f = LocalAligner()
            f.align(w1,v2)
            section_times.append(time.time()-t2)

        average_local_times.append(np.mean(local_times))
        average_section_times.append(np.mean(section_times))

    plt.plot(input_sizes, average_section_times, color = 'b', label = 'section local alignment')
    plt.plot(input_sizes, average_local_times, color = 'r', label = 'local alignment')
    plt.legend(loc='upper right')
    plt.xlabel('input size')
    plt.ylim(0,0.02)
    plt.show()

def section_test_alignment_indices():
    left_test, right_test = create_section_tests()

    left_test_flattened = flatten(left_test)
    right_test_flattened = flatten(right_test)

    f = LocalAligner()
    Alignment = f.align(left_test, [right_test_flattened])

    good_job = True
    for i in range(len(Alignment.alignments)):
        left, right = clean_alignment(Alignment.alignments[i])
        print 'alignment length: ', len(left)

        left_start, left_end = find_subsequence(left, left_test_flattened)
        right_start, right_end = find_subsequence(right, right_test_flattened)

        if Alignment.alignment_indices[i]['left_start'] != left_start or \
            Alignment.alignment_indices[i]['left_end'] != left_end or \
            Alignment.alignment_indices[i]['right_start'] != right_start or \
            Alignment.alignment_indices[i]['right_end'] != right_end:

            print 'indices are messed up: '
            print 'left_start: ', Alignment.alignment_indices[i]['left_start']
            print 'true left_start: ', left_start
            print 'left_end: ', Alignment.alignment_indices[i]['left_end']
            print 'true left_end', left_end
            print '\n'
            print 'right_start: ', Alignment.alignment_indices[i]['right_start']
            print 'true right_start: ', right_start
            print 'right_end: ', Alignment.alignment_indices[i]['right_end']
            print 'true right_end: ', right_end
            print '\n'
            good_job = False

    if good_job:
        print 'indices worked'

############################################################
##helper functions
def clean_alignment(alignment):
    '''
    arg:
        alignment object
    returns:
        2 list of alignment words without the alignment symbol
    '''
    keep1 = []
    keep2 = []
    for item in alignment[1]:
        if item != '-':
            keep1.append(item)
    for item in alignment[2]:
        if item != '-':
            keep2.append(item)

    return (keep1, keep2)

if __name__ == '__main__':
    print "running LocalAligner unit tests.... \n"
    LocalAligner_unit_tests()

    print "running LocalAligner speed tests.... \n"
    LocalAligner_speed_test()

    print "running LocalAligner index tests.... \n"
    doc_test_alignment_indices(LocalAligner)

    print "running AffineLocalAligner unit tests.... \n"
    generic_doc_unit_test(AffineLocalAligner)

    print "running AffineLocalAligner speed tests.... \n"
    generic_doc_speed_test(AffineLocalAligner)

    print "running section unit tests for localaligner.... \n"
    section_unit_tests(LocalAligner)

    print "running section unit tests for affinealigner.... \n"
    section_unit_tests(AffineLocalAligner)

    print "running section speed tests.... \n"
    section_speed_test()

    print 'running test on keeping track of indices for section algorithm..... \n'
    section_test_alignment_indices()

    print 'running speed test on Word2VecLocalAligner.... \n'
en
0.654581
#function from python package for testing results args: a: list of words b: list of words matchScore: num mismatchScore: num gapScore: num Returns: o/w returns list of tuples with score and top alignments Description: helper function for finding alignments given a list of words # Create a vocabulary and encode the sequences. # Create a scoring and align the sequences using local aligner. #testing functions #tests #LocalAligner algorithm tests #default score is 3,-1,-2 #find score of recovered alignment #run package algorithm #default score is 3,-1,-2 #tests #default score is 3,-1,-2 #run package algorithm #default score is 3,-1,-2 #default score is 3,-1,-2 #find score of recovered alignment compares speed of algorithm to local alignment algorithm #tests #SectionLocalAlignment Tests #convert tests into sections so #that it makes sense for case ############################################################ ##helper functions arg: alignment object returns: 2 list of alignment words without the alignment symbol
2.795443
3
dataporten/tests/test_api.py
frafra/django-dataporten
6
6621725
<filename>dataporten/tests/test_api.py import json import responses from django.test import TestCase from ..api import usergroups, userinfo from .utils import mock_usergroups_request, mock_userinfo_request class TestTypes(TestCase): """ Should hit type definitions in order to catch syntax errors """ from ..api import GroupJSON class TestUserInfo(TestCase): """ Test dataporten userinfo endpoint """ @responses.activate def test_valid_case(self): userinfo_dict = mock_userinfo_request() userinfo_return = userinfo('testac') self.assertEqual( userinfo_return, userinfo_dict['user'], ) class TestGroups(TestCase): """ Test dataporten groups endpoint """ @responses.activate def test_valid_case(self): groups_dict = mock_usergroups_request() userinfo_return = usergroups('testac') self.assertEqual( userinfo_return, groups_dict, )
<filename>dataporten/tests/test_api.py import json import responses from django.test import TestCase from ..api import usergroups, userinfo from .utils import mock_usergroups_request, mock_userinfo_request class TestTypes(TestCase): """ Should hit type definitions in order to catch syntax errors """ from ..api import GroupJSON class TestUserInfo(TestCase): """ Test dataporten userinfo endpoint """ @responses.activate def test_valid_case(self): userinfo_dict = mock_userinfo_request() userinfo_return = userinfo('testac') self.assertEqual( userinfo_return, userinfo_dict['user'], ) class TestGroups(TestCase): """ Test dataporten groups endpoint """ @responses.activate def test_valid_case(self): groups_dict = mock_usergroups_request() userinfo_return = usergroups('testac') self.assertEqual( userinfo_return, groups_dict, )
en
0.482835
Should hit type definitions in order to catch syntax errors Test dataporten userinfo endpoint Test dataporten groups endpoint
2.650389
3
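The helpers mock_userinfo_request and mock_usergroups_request imported above are not included in this record. A plausible sketch of one of them using the responses library's standard API (the endpoint URL and payload here are assumptions, not taken from the source):

import responses

def mock_userinfo_request():
    userinfo_dict = {'user': {'name': 'Test User', 'email': '[email protected]'}}  # assumed payload
    responses.add(
        responses.GET,
        'https://auth.dataporten.no/userinfo',  # assumed endpoint
        json=userinfo_dict,
        status=200,
    )
    return userinfo_dict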
main.py
SynnexMetrodata/OpenCV-CustomROI
0
6621726
<reponame>SynnexMetrodata/OpenCV-CustomROI import cv2 import numpy ROIEnabled = False ROIShape = [] def createROI(event, x, y, flag, param): global ROIEnabled, ROIShape if ROIEnabled: if event == cv2.EVENT_LBUTTONUP: ROIShape.append((x,y)) def drawROI(frame, roi): if not ROIEnabled: return start_point = (0,0) if roi: start_point = next(iter(roi)) else: return zero_point = start_point for item in roi: end_point = item cv2.line(frame, start_point, end_point, (0,255,0), 2) start_point = end_point cv2.line(frame, start_point, zero_point, (0,255,0), 2) point = numpy.array(roi) argma = point.argmax(axis=0) argmi = point.argmin(axis=0) (x1, y1) = (int(point[argmi[0]][0]), int(point[argmi[1]][1])) (x2, y2) = (int(point[argma[0]][0]), int(point[argma[1]][1])) print(x1, y1, x2, y2) cropped = frame[y1:y2,x1:x2].copy() pts = point - point.min(axis=0) mask = numpy.zeros(cropped.shape[:2], numpy.uint8) cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA) dst = cv2.bitwise_and(cropped, cropped, mask=mask) if cropped.shape[0] > 0 and cropped.shape[1] > 0: cv2.imshow("ROI", dst) if __name__ == "__main__": cap = cv2.VideoCapture(0) cv2.namedWindow("CustomROI") cv2.setMouseCallback("CustomROI", createROI) while cap.isOpened(): ret, frame = cap.read() if ret: drawROI(frame, ROIShape) cv2.imshow("CustomROI", frame) key = cv2.waitKey(1) & 0xFF if key == ord('q'): break if key == ord('r'): ROIEnabled = not ROIEnabled if key == ord('n'): ROIShape = [] cap.release() cv2.destroyAllWindows()
import cv2 import numpy ROIEnabled = False ROIShape = [] def createROI(event, x, y, flag, param): global ROIEnabled, ROIShape if ROIEnabled: if event == cv2.EVENT_LBUTTONUP: ROIShape.append((x,y)) def drawROI(frame, roi): if not ROIEnabled: return start_point = (0,0) if roi: start_point = next(iter(roi)) else: return zero_point = start_point for item in roi: end_point = item cv2.line(frame, start_point, end_point, (0,255,0), 2) start_point = end_point cv2.line(frame, start_point, zero_point, (0,255,0), 2) point = numpy.array(roi) argma = point.argmax(axis=0) argmi = point.argmin(axis=0) (x1, y1) = (int(point[argmi[0]][0]), int(point[argmi[1]][1])) (x2, y2) = (int(point[argma[0]][0]), int(point[argma[1]][1])) print(x1, y1, x2, y2) cropped = frame[y1:y2,x1:x2].copy() pts = point - point.min(axis=0) mask = numpy.zeros(cropped.shape[:2], numpy.uint8) cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA) dst = cv2.bitwise_and(cropped, cropped, mask=mask) if cropped.shape[0] > 0 and cropped.shape[1] > 0: cv2.imshow("ROI", dst) if __name__ == "__main__": cap = cv2.VideoCapture(0) cv2.namedWindow("CustomROI") cv2.setMouseCallback("CustomROI", createROI) while cap.isOpened(): ret, frame = cap.read() if ret: drawROI(frame, ROIShape) cv2.imshow("CustomROI", frame) key = cv2.waitKey(1) & 0xFF if key == ord('q'): break if key == ord('r'): ROIEnabled = not ROIEnabled if key == ord('n'): ROIShape = [] cap.release() cv2.destroyAllWindows()
none
1
2.581523
3
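One design note on drawROI above: the manual argmax/argmin arithmetic that derives the crop rectangle can be replaced by OpenCV's built-in bounding-box helper. A small sketch of the equivalent call, assuming the same roi list of (x, y) vertices:

import cv2
import numpy

pts = numpy.array(roi, dtype=numpy.int32)  # boundingRect expects int32/float32 points
x, y, w, h = cv2.boundingRect(pts)
cropped = frame[y:y + h, x:x + w].copy()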
LinInt.py
EdwardDantes/LinearInterpolation
0
6621727
# Python Program to give a Y value based on a requested X... Within the range of given (X,Y) data.
# Python Program to give a Y value based on a requested X... Within the range of given (X,Y) data.
en
0.75455
# Python Program to give a Y value based on a requested X... Within the range of given (X,Y) data.
2.970379
3
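This record carries only a header comment and no implementation. A minimal sketch of the linear interpolation the comment describes (the function name and signature are assumptions):

def lin_interp(x_pts, y_pts, x):
    """Return the y value at x by linear interpolation between the given (X, Y) data."""
    pts = sorted(zip(x_pts, y_pts))
    for (x0, y0), (x1, y1) in zip(pts, pts[1:]):
        if x0 <= x <= x1:
            if x1 == x0:  # duplicate x values: fall back to the first y
                return y0
            return y0 + (y1 - y0) * (x - x0) / (x1 - x0)
    raise ValueError("x is outside the range of the given data")

In practice numpy.interp(x, x_pts, y_pts) does the same job once the data are sorted by x.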
test/scripts/geometric_shapes.py
lmontaut/hpp-fcl
59
6621728
<gh_stars>10-100 # Data for compare_convex_box from gepetto.corbaserver import Client from gepetto import Quaternion def translate (tr, t, d): return [ tr[i] + d*t[i] for i in range(3) ] + tr[3:] cl = Client () try: cl.gui.getWindowID("fcl") except: cl.gui.createWindow("fcl") cl.gui.addBox ('fcl/b0', 2, 2, 2, [1,0,0,0.5]) cl.gui.addBox ('fcl/b1', 2, 2, 2, [0,1,0,0.5]) cl.gui.setWireFrameMode ('fcl/b1', "WIREFRAME") cl.gui.addBox ('fcl/b1_0', 2, 2, 2, [0,0 ,1,0.5]) cl.gui.addBox ('fcl/b1_1', 2, 2, 2, [0,0.5,1,0.5]) cl.gui.addSphere ("fcl/p0", 0.01, [1, 0, 1, 1]) cl.gui.addSphere ("fcl/p1", 0.01, [0, 1, 1, 1]) cl.gui.addArrow ("fcl/n0", 0.01, 1., [1, 0, 1, 1]) cl.gui.addArrow ("fcl/n1", 0.01, 1., [0, 1, 1, 1]) eps = 0. d0 = 1.5183589910964868 + eps n0 = [0.0310588, 0.942603, -0.332467] d1 = 1.7485932899646754 + eps n1 = [0.132426, -0.0219519, -0.99095] qn0 = Quaternion() qn1 = Quaternion() qn0.fromTwoVector([1,0,0], n0) qn1.fromTwoVector([1,0,0], n1) pb1 = [ 0.135584, 0.933659, 0.290395, 0.119895, 0.977832, -0.164725, 0.0483272 ] pb1_0 = translate (pb1, n0, d0) pb1_1 = translate (pb1, n1, -d1) cl.gui.applyConfiguration ("fcl/b1", pb1) cl.gui.applyConfiguration ("fcl/b1_0", pb1_0) cl.gui.applyConfiguration ("fcl/b1_1", pb1_1) cl.gui.applyConfigurations(["fcl/p0","fcl/p1"], [ [0.832569, 0.259513, -0.239598, 0,0,0,1], [-0.879579, 0.719545, 0.171906, 0,0,0,1] ]) cl.gui.applyConfigurations(["fcl/n0","fcl/n1"], [ ( 0.832569, 0.259513, -0.239598, ) + qn0.toTuple(), ( -0.879579, 0.719545, 0.171906, ) + qn1.toTuple() ]) cl.gui.refresh()
# Data for compare_convex_box from gepetto.corbaserver import Client from gepetto import Quaternion def translate (tr, t, d): return [ tr[i] + d*t[i] for i in range(3) ] + tr[3:] cl = Client () try: cl.gui.getWindowID("fcl") except: cl.gui.createWindow("fcl") cl.gui.addBox ('fcl/b0', 2, 2, 2, [1,0,0,0.5]) cl.gui.addBox ('fcl/b1', 2, 2, 2, [0,1,0,0.5]) cl.gui.setWireFrameMode ('fcl/b1', "WIREFRAME") cl.gui.addBox ('fcl/b1_0', 2, 2, 2, [0,0 ,1,0.5]) cl.gui.addBox ('fcl/b1_1', 2, 2, 2, [0,0.5,1,0.5]) cl.gui.addSphere ("fcl/p0", 0.01, [1, 0, 1, 1]) cl.gui.addSphere ("fcl/p1", 0.01, [0, 1, 1, 1]) cl.gui.addArrow ("fcl/n0", 0.01, 1., [1, 0, 1, 1]) cl.gui.addArrow ("fcl/n1", 0.01, 1., [0, 1, 1, 1]) eps = 0. d0 = 1.5183589910964868 + eps n0 = [0.0310588, 0.942603, -0.332467] d1 = 1.7485932899646754 + eps n1 = [0.132426, -0.0219519, -0.99095] qn0 = Quaternion() qn1 = Quaternion() qn0.fromTwoVector([1,0,0], n0) qn1.fromTwoVector([1,0,0], n1) pb1 = [ 0.135584, 0.933659, 0.290395, 0.119895, 0.977832, -0.164725, 0.0483272 ] pb1_0 = translate (pb1, n0, d0) pb1_1 = translate (pb1, n1, -d1) cl.gui.applyConfiguration ("fcl/b1", pb1) cl.gui.applyConfiguration ("fcl/b1_0", pb1_0) cl.gui.applyConfiguration ("fcl/b1_1", pb1_1) cl.gui.applyConfigurations(["fcl/p0","fcl/p1"], [ [0.832569, 0.259513, -0.239598, 0,0,0,1], [-0.879579, 0.719545, 0.171906, 0,0,0,1] ]) cl.gui.applyConfigurations(["fcl/n0","fcl/n1"], [ ( 0.832569, 0.259513, -0.239598, ) + qn0.toTuple(), ( -0.879579, 0.719545, 0.171906, ) + qn1.toTuple() ]) cl.gui.refresh()
en
0.360778
# Data for compare_convex_box
2.047245
2
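For reference, the Quaternion.fromTwoVector calls above build the rotation that takes one direction onto another. The standard construction, sketched independently of the gepetto API (the (w, x, y, z) component order is an assumption):

import numpy as np

def quat_from_two_vectors(u, v):
    """Unit quaternion (w, x, y, z) rotating direction u onto direction v."""
    u = np.asarray(u, dtype=float)
    v = np.asarray(v, dtype=float)
    u = u / np.linalg.norm(u)
    v = v / np.linalg.norm(v)
    w = 1.0 + np.dot(u, v)  # approaches 0 for antiparallel inputs, which need a special case
    q = np.array([w, *np.cross(u, v)])
    return q / np.linalg.norm(q)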
db_upgrade.py
aceokay/microblog
0
6621729
<gh_stars>0 #!flask/bin/python # If you have database migration support, then when you are ready to release # the new version of the app to your production server you just need to record # a new migration, copy the migration scripts to your production server and # run a simple script that applies the changes for you. The database upgrade # can be done with this little Python script. from migrate.versioning import api from config import SQLALCHEMY_DATABASE_URI from config import SQLALCHEMY_MIGRATE_REPO api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) print('Current database version: ' + str(v))
#!flask/bin/python # If you have database migration support, then when you are ready to release # the new version of the app to your production server you just need to record # a new migration, copy the migration scripts to your production server and # run a simple script that applies the changes for you. The database upgrade # can be done with this little Python script. from migrate.versioning import api from config import SQLALCHEMY_DATABASE_URI from config import SQLALCHEMY_MIGRATE_REPO api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) print('Current database version: ' + str(v))
en
0.897153
#!flask/bin/python # If you have database migration support, then when you are ready to release # the new version of the app to your production server you just need to record # a new migration, copy the migration scripts to your production server and # run a simple script that applies the changes for you. The database upgrade # can be done with this little Python script.
2.159249
2
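The upgrade script above has a natural counterpart that steps the schema back one revision. A sketch using the same sqlalchemy-migrate API (this downgrade script is not part of the record):

#!flask/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO

v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v - 1)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('Current database version: ' + str(v))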
util/util.py
KoryakovDmitry/TGRNet
55
6621730
<filename>util/util.py """This module contains simple helper functions """ from __future__ import print_function import torch import torch.distributed as dist import numpy as np from PIL import Image import os import pickle def tensor2im(input_image, imtype=np.uint8): """"Converts a Tensor array into a numpy image array. Parameters: input_image (tensor) -- the input image tensor array imtype (type) -- the desired type of the converted numpy array """ if not isinstance(input_image, np.ndarray): if isinstance(input_image, torch.Tensor): # get the data from a variable image_tensor = input_image.data else: return input_image if len(image_tensor.size()) == 4: image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array else: image_numpy = image_tensor.cpu().float().numpy() if image_numpy.shape[0] == 1: # grayscale to RGB image_numpy = np.tile(image_numpy, (3, 1, 1)) image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling else: # if it is a numpy array, do nothing image_numpy = input_image return image_numpy.astype(imtype) def diagnose_network(net, name='network'): """Calculate and print the mean of average absolute(gradients) Parameters: net (torch network) -- Torch network name (str) -- the name of the network """ mean = 0.0 count = 0 for param in net.parameters(): if param.grad is not None: mean += torch.mean(torch.abs(param.grad.data)) count += 1 if count > 0: mean = mean / count print(name) print(mean) def save_image(image_numpy, image_path): """Save a numpy image to the disk Parameters: image_numpy (numpy array) -- input numpy array image_path (str) -- the path of the image """ image_pil = Image.fromarray(image_numpy) image_pil.save(image_path) def print_numpy(x, val=True, shp=False): """Print the mean, min, max, median, std, and size of a numpy array Parameters: val (bool) -- if print the values of the numpy array shp (bool) -- if print the shape of the numpy array """ x = x.astype(np.float64) if shp: print('shape,', x.shape) if val: x = x.flatten() print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) def mkdirs(paths): """create empty directories if they don't exist Parameters: paths (str list) -- a list of directory paths """ if isinstance(paths, list) and not isinstance(paths, str): for path in paths: mkdir(path) else: mkdir(paths) def mkdir(path): """create a single empty directory if it didn't exist Parameters: path (str) -- a single directory path """ if not os.path.exists(path): os.makedirs(path) def reduce_dict(input_dict, average=True): """ """ world_size = get_world_size() if world_size < 2: return input_dict with torch.no_grad(): names = [] values = [] # sort the keys so that they are consistent across processes for k in sorted(input_dict.keys()): names.append(k) values.append(input_dict[k]) values = torch.stack(values, dim=0) dist.all_reduce(values) if average: values /= world_size reduced_dict = {k: v for k, v in zip(names, values)} return reduced_dict def all_gather(data): """ Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank """ world_size = get_world_size() if world_size == 1: return [data] # serialized to a Tensor buffer = pickle.dumps(data) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank local_size = 
torch.tensor([tensor.numel()], device="cuda") size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] dist.all_gather(size_list, local_size) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes tensor_list = [] for _ in size_list: tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) if local_size != max_size: padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") tensor = torch.cat((tensor, padding), dim=0) dist.all_gather(tensor_list, tensor) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def init_distributed_mode(args): if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ['LOCAL_RANK']) elif 'SLURM_PROCID' in os.environ: args.rank = int(os.environ['SLURM_PROCID']) args.gpu = args.gpu_ids[args.rank % torch.cuda.device_count()] else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}'.format( args.rank, args.dist_url), flush=True) torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.distributed.barrier() setup_for_distributed(args.rank == 0)
<filename>util/util.py """This module contains simple helper functions """ from __future__ import print_function import torch import torch.distributed as dist import numpy as np from PIL import Image import os import pickle def tensor2im(input_image, imtype=np.uint8): """"Converts a Tensor array into a numpy image array. Parameters: input_image (tensor) -- the input image tensor array imtype (type) -- the desired type of the converted numpy array """ if not isinstance(input_image, np.ndarray): if isinstance(input_image, torch.Tensor): # get the data from a variable image_tensor = input_image.data else: return input_image if len(image_tensor.size()) == 4: image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array else: image_numpy = image_tensor.cpu().float().numpy() if image_numpy.shape[0] == 1: # grayscale to RGB image_numpy = np.tile(image_numpy, (3, 1, 1)) image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling else: # if it is a numpy array, do nothing image_numpy = input_image return image_numpy.astype(imtype) def diagnose_network(net, name='network'): """Calculate and print the mean of average absolute(gradients) Parameters: net (torch network) -- Torch network name (str) -- the name of the network """ mean = 0.0 count = 0 for param in net.parameters(): if param.grad is not None: mean += torch.mean(torch.abs(param.grad.data)) count += 1 if count > 0: mean = mean / count print(name) print(mean) def save_image(image_numpy, image_path): """Save a numpy image to the disk Parameters: image_numpy (numpy array) -- input numpy array image_path (str) -- the path of the image """ image_pil = Image.fromarray(image_numpy) image_pil.save(image_path) def print_numpy(x, val=True, shp=False): """Print the mean, min, max, median, std, and size of a numpy array Parameters: val (bool) -- if print the values of the numpy array shp (bool) -- if print the shape of the numpy array """ x = x.astype(np.float64) if shp: print('shape,', x.shape) if val: x = x.flatten() print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) def mkdirs(paths): """create empty directories if they don't exist Parameters: paths (str list) -- a list of directory paths """ if isinstance(paths, list) and not isinstance(paths, str): for path in paths: mkdir(path) else: mkdir(paths) def mkdir(path): """create a single empty directory if it didn't exist Parameters: path (str) -- a single directory path """ if not os.path.exists(path): os.makedirs(path) def reduce_dict(input_dict, average=True): """ """ world_size = get_world_size() if world_size < 2: return input_dict with torch.no_grad(): names = [] values = [] # sort the keys so that they are consistent across processes for k in sorted(input_dict.keys()): names.append(k) values.append(input_dict[k]) values = torch.stack(values, dim=0) dist.all_reduce(values) if average: values /= world_size reduced_dict = {k: v for k, v in zip(names, values)} return reduced_dict def all_gather(data): """ Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank """ world_size = get_world_size() if world_size == 1: return [data] # serialized to a Tensor buffer = pickle.dumps(data) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank local_size = 
torch.tensor([tensor.numel()], device="cuda") size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] dist.all_gather(size_list, local_size) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes tensor_list = [] for _ in size_list: tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) if local_size != max_size: padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") tensor = torch.cat((tensor, padding), dim=0) dist.all_gather(tensor_list, tensor) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def init_distributed_mode(args): if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ['LOCAL_RANK']) elif 'SLURM_PROCID' in os.environ: args.rank = int(os.environ['SLURM_PROCID']) args.gpu = args.gpu_ids[args.rank % torch.cuda.device_count()] else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}'.format( args.rank, args.dist_url), flush=True) torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.distributed.barrier() setup_for_distributed(args.rank == 0)
en
0.646081
This module contains simple helper functions "Converts a Tensor array into a numpy image array. Parameters: input_image (tensor) -- the input image tensor array imtype (type) -- the desired type of the converted numpy array # get the data from a variable # convert it into a numpy array # grayscale to RGB # post-processing: tranpose and scaling # if it is a numpy array, do nothing Calculate and print the mean of average absolute(gradients) Parameters: net (torch network) -- Torch network name (str) -- the name of the network Save a numpy image to the disk Parameters: image_numpy (numpy array) -- input numpy array image_path (str) -- the path of the image Print the mean, min, max, median, std, and size of a numpy array Parameters: val (bool) -- if print the values of the numpy array shp (bool) -- if print the shape of the numpy array create empty directories if they don't exist Parameters: paths (str list) -- a list of directory paths create a single empty directory if it didn't exist Parameters: path (str) -- a single directory path # sort the keys so that they are consistent across processes Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank # serialized to a Tensor # obtain Tensor size of each rank # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes This function disables printing when not in master process
2.970401
3
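A typical way the reduce_dict and is_main_process helpers above are combined is to average per-rank loss values before logging. A short usage sketch (the loss names and tensors are placeholders):

# inside a distributed training step
loss_dict = {'loss_cls': loss_cls, 'loss_box': loss_box}  # placeholder loss tensors
loss_dict_reduced = reduce_dict(loss_dict)  # values now averaged over all ranks
if is_main_process():
    print({k: v.item() for k, v in loss_dict_reduced.items()})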
PRODHAC.py
LyddonBeni/PRODACH
0
6621731
# -*- coding: utf-8 -*- __author__ = 'LyddonBeni' import numpy as np from matplotlib import pyplot as plt import scipy as sc import numpy as np print (""" ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ UNIVERSIDAD NACIONAL DE HUANCAVELICA FACULTAD DE CIENCIAS DE INGENIERÍA ESCUELA ACADÉMICA PROFESIONAL DE CIVIL ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ ░░░░░░ DISEÑO DE ALCANTARILLA Y CANALES ░░░░ ░░░░ PARA EL DISEÑO HIDRAULICO DE ALCANTARILLAS ░░░ ░ DE PROYECTOS VIALES EN LA REGION DE HUANCAVELICA ░ ===== PROYECTO DE TESIS ===== * AUTOR : <NAME>, <NAME> * ASESOR : <NAME>, Ivan """) ################ DATA GENERAL ###################### def fv(Tipo): if Tipo == 1: print (" Canal De Tipo Rectangular") b = float(input("\nBase del Canal(m): ") ) Z1 = Z2 = 0. error = 0.000001 y,va,con, cont = 0.8,1.,0.,60. while va > error: C1=(Qe*n/Sc**(0.5))**(3./2.) C2=pow(1+Z1**2,0.5)+pow(1+Z2**2,0.5) Z = (Z1+Z2)/2. m1 = (b*y+Z*y**2)**(5./2.)/(C2*y+b)-C1 m2 = 5./2.*(b+y*2*Z)*(b*y+Z*y**2)**(3./2.)/(C2*y+b)-C2*(b*y+Z*y**2)**(5./2.)/(C2*y+b)**2 yi = y - m1/m2 va = abs(y-yi) y = yi yc = y #print y con = con + 1 if con > cont: break A = b*yc + 0.5*Z1*yc**2 + 0.5*Z2*yc**2 T = b V = Qe/A return yc,A,V,T elif Tipo == 2: print (" Canal De Tipo Triangular") b = 0. Z1 = float(input("\nTalud del Canal Izquierda(m): ") ) Z2 = float(input("\nTalud del Canal Derecha(m): ") ) error = 0.000001 y,va,con, cont = 0.8,1.,0.,60. while va > error: C1=(Qe*n/Sc**(0.5))**(3./2.) C2=pow(1+Z1**2,0.5)+pow(1+Z2**2,0.5) Z = (Z1+Z2)/2. m1 = (b*y+Z*y**2)**(5./2.)/(C2*y+b)-C1 m2 = 5./2.*(b+y*2*Z)*(b*y+Z*y**2)**(3./2.)/(C2*y+b)-C2*(b*y+Z*y**2)**(5./2.)/(C2*y+b)**2 yi = y - m1/m2 va = abs(y-yi) y = yi yc = y #print y con = con + 1 if con > cont: break A = b*yc + 0.5*Z1*yc**2 + 0.5*Z2*yc**2 T = y*(Z1+Z2) V = Qe/A return yc,A,V,T elif Tipo == 3: print (" Canal De Tipo Trapezoidal") b = float(input("\nBase del Canal(m): ") ) Z1 = float(input("\nTalud del Canal Izquierda(m): ") ) Z2 = float(input("\nTalud del Canal Derecha(m): ") ) error = 0.000001 y,va,con, cont = 0.8,1.,0.,60. while va > error: C1=(Qe*n/Sc**(0.5))**(3./2.) C2=pow(1+Z1**2,0.5)+pow(1+Z2**2,0.5) Z = (Z1+Z2)/2. m1 = (b*y+Z*y**2)**(5./2.)/(C2*y+b)-C1 m2 = 5./2.*(b+y*2*Z)*(b*y+Z*y**2)**(3./2.)/(C2*y+b)-C2*(b*y+Z*y**2)**(5./2.)/(C2*y+b)**2 yi = y - m1/m2 va = abs(y-yi) y = yi yc = y #print y con = con + 1 if con > cont: break A = b*yc + 0.5*Z1*yc**2 + 0.5*Z2*yc**2 V = Qe/A T = b+y*Z1+y*Z2 return yc,A,V,T print ("\n1.Tipo Rectangular") print ("2.Tipo Triangular") print ("3.Tipo Trapezoidal") Tipo = int(input(u"Que Tipo de Canal Va a Ingresar: ") ) Qe = float(input(u"Ingrese Caudal de Diseño(m3/s): ") ) n = float(input(u"Ingrese Coeficiente de Manning: ") ) Sc = float(input(u"Ingrese Pendiente del Canal: ") ) y,A,V,T = fv(Tipo) print ("\n1.Tipo Circular") print ("2.Tipo Abovedado") Culv = int(input(u"Que Tipo de Alcantarilla Va a Ingresar: ") ) # Calculo del Diametro de la Alcantarilla: g = 9.81 # Gravedad Diam = [] val = [] for i in range(30): Diam = np.append(Diam,[12+3*i]) Diam = Diam/100. 
if Culv == 1: print (" Alcantarilla Tipo Circular") CotaA = float(input(u"Ingrese Cota del canal antes de Transicion: ") ) Cober = float(input(u"Ingrese Cobertura de Carretera: ") ) Borde = float(input(u"Ingrese Borde de Alcantarilla: ") ) Talud = float(input(u"Ingrese Talud de la Carretera: ") ) Lon = float(input(u"Ingrese Ancho del camino: ") ) Ss = float(input(u"Ingrese Pendiente de Alcantarilla;\: ") ) n = float(input(u"Ingrese Rugosidad de Alcantarilla;\: ") ) Aa = Qe/2.5 D =pow((4/np.pi)*Aa,0.5) for i in range(30): if D >= Diam[i]: val = np.append(val,[Diam[i]-D]) Dc = np.max(val) Dc = D + Dc Ad = np.pi*Dc**2/4 Vc = Qe/Ad hv = Vc**2/(2*g) NAEA = CotaA+y CotaB = NAEA -1.5*hv-Dc CotaF = CotaB+D+Cober CotaE = CotaA+Borde+y Lal = 2*Talud*(CotaF-CotaE)+Lon VZ = Lal*Ss CotaC = CotaB - VZ Sen = (Vc*n/1.) #CAUDAL, TALUD, RUGOSIDAD, PENDIENTE Q,Z,Z1,n,S=0.071357,0.5,0.8,0.014,0.012 #CALCULATE def f(Q,Z,Z1,n,S,y): A=y**2*((Z**2+1.)**0.5+(Z1**2+1.)**0.5-(Z+Z1)*0.5) P=2*y*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5))-y*(Z1+Z) k=Q*n/pow(S,0.5) fy=pow(A,5./3.)*pow(P,-2./3.)-k dA=y*2*((Z**2+1.)**0.5+(Z1**2+1.)**0.5-(Z+Z1)*0.5) dP=2*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5))-(Z1+Z) dfy=5./3.*A**(2./3.)*P**(-2./3.)*dA - 2./3.*pow(A,5./3.)*pow(P,-5./3.)*dP return fy,dfy,y y,Imax=0.5,40 Tol=1E-8 # Tolerancia Para la Iteraciones E,cont=4,0 print ("\t-------------------------------------------------------------") print ("\t N° y_i f(y_i) f'(y_i) y_(i+1) Error") print ("\t-------------------------------------------------------------") while (E>=Tol): fy,dfy,y=f(Q,Z,Z1,n,S,y) y1=y-fy/dfy cont+=1 E=np.abs(y-y1) print ("\t %.0f %.5f %.5f %.5f %.5f %.5f"%(cont,y,fy,dfy,y1,np.abs(y-y1))) y=y1 if (cont>=Imax): break print ("\nTIRANTE (y): ", round(y,4), "m") A=y**2*((Z**2+1.)**0.5+(Z1**2+1.)**0.5-(Z+Z1)*0.5) print ("Area: ",round(A,4),"m^2") P=2*y*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5))-y*(Z1+Z) print ("Perimetro Mojado: ",round(P,4),"m") print ("Velocidad: ",round(Q/A,4),"m/s") b=(2*A-y**2*(Z+Z1))/(2*y) Tt=b+y*Z+y*Z1 print ("\nBase de la Seccion: ",b) print ("Espejo de Agua: ",Tt) #print "Base Calculado: ",P-y*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5)) F=(Q/A)/pow((9.8106*A/Tt),0.5) if F == 1: print ("Numero de Froude: ",round(F,3)) print ("Esta en un Regimen Critico esta en crisis") elif 0 < F < 1: print ("Numero de Froude: ",round(F,3)) print ("Esta en un Regimen Sub Critico, se trata de un rio") elif 1 < F: print ("Numero de Froude: ",round(F,3)) print ("Esta en un Regimen Super Critico, se trata de un torrente") print ("Borde Libre segun Boureau of Reclamation: ",0.30)
# -*- coding: utf-8 -*- __author__ = 'LyddonBeni' import numpy as np from matplotlib import pyplot as plt import scipy as sc import numpy as np print (""" ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ UNIVERSIDAD NACIONAL DE HUANCAVELICA FACULTAD DE CIENCIAS DE INGENIERÍA ESCUELA ACADÉMICA PROFESIONAL DE CIVIL ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ ░░░░░░ DISEÑO DE ALCANTARILLA Y CANALES ░░░░ ░░░░ PARA EL DISEÑO HIDRAULICO DE ALCANTARILLAS ░░░ ░ DE PROYECTOS VIALES EN LA REGION DE HUANCAVELICA ░ ===== PROYECTO DE TESIS ===== * AUTOR : <NAME>, <NAME> * ASESOR : <NAME>, Ivan """) ################ DATA GENERAL ###################### def fv(Tipo): if Tipo == 1: print (" Canal De Tipo Rectangular") b = float(input("\nBase del Canal(m): ") ) Z1 = Z2 = 0. error = 0.000001 y,va,con, cont = 0.8,1.,0.,60. while va > error: C1=(Qe*n/Sc**(0.5))**(3./2.) C2=pow(1+Z1**2,0.5)+pow(1+Z2**2,0.5) Z = (Z1+Z2)/2. m1 = (b*y+Z*y**2)**(5./2.)/(C2*y+b)-C1 m2 = 5./2.*(b+y*2*Z)*(b*y+Z*y**2)**(3./2.)/(C2*y+b)-C2*(b*y+Z*y**2)**(5./2.)/(C2*y+b)**2 yi = y - m1/m2 va = abs(y-yi) y = yi yc = y #print y con = con + 1 if con > cont: break A = b*yc + 0.5*Z1*yc**2 + 0.5*Z2*yc**2 T = b V = Qe/A return yc,A,V,T elif Tipo == 2: print (" Canal De Tipo Triangular") b = 0. Z1 = float(input("\nTalud del Canal Izquierda(m): ") ) Z2 = float(input("\nTalud del Canal Derecha(m): ") ) error = 0.000001 y,va,con, cont = 0.8,1.,0.,60. while va > error: C1=(Qe*n/Sc**(0.5))**(3./2.) C2=pow(1+Z1**2,0.5)+pow(1+Z2**2,0.5) Z = (Z1+Z2)/2. m1 = (b*y+Z*y**2)**(5./2.)/(C2*y+b)-C1 m2 = 5./2.*(b+y*2*Z)*(b*y+Z*y**2)**(3./2.)/(C2*y+b)-C2*(b*y+Z*y**2)**(5./2.)/(C2*y+b)**2 yi = y - m1/m2 va = abs(y-yi) y = yi yc = y #print y con = con + 1 if con > cont: break A = b*yc + 0.5*Z1*yc**2 + 0.5*Z2*yc**2 T = y*(Z1+Z2) V = Qe/A return yc,A,V,T elif Tipo == 3: print (" Canal De Tipo Trapezoidal") b = float(input("\nBase del Canal(m): ") ) Z1 = float(input("\nTalud del Canal Izquierda(m): ") ) Z2 = float(input("\nTalud del Canal Derecha(m): ") ) error = 0.000001 y,va,con, cont = 0.8,1.,0.,60. while va > error: C1=(Qe*n/Sc**(0.5))**(3./2.) C2=pow(1+Z1**2,0.5)+pow(1+Z2**2,0.5) Z = (Z1+Z2)/2. m1 = (b*y+Z*y**2)**(5./2.)/(C2*y+b)-C1 m2 = 5./2.*(b+y*2*Z)*(b*y+Z*y**2)**(3./2.)/(C2*y+b)-C2*(b*y+Z*y**2)**(5./2.)/(C2*y+b)**2 yi = y - m1/m2 va = abs(y-yi) y = yi yc = y #print y con = con + 1 if con > cont: break A = b*yc + 0.5*Z1*yc**2 + 0.5*Z2*yc**2 V = Qe/A T = b+y*Z1+y*Z2 return yc,A,V,T print ("\n1.Tipo Rectangular") print ("2.Tipo Triangular") print ("3.Tipo Trapezoidal") Tipo = int(input(u"Que Tipo de Canal Va a Ingresar: ") ) Qe = float(input(u"Ingrese Caudal de Diseño(m3/s): ") ) n = float(input(u"Ingrese Coeficiente de Manning: ") ) Sc = float(input(u"Ingrese Pendiente del Canal: ") ) y,A,V,T = fv(Tipo) print ("\n1.Tipo Circular") print ("2.Tipo Abovedado") Culv = int(input(u"Que Tipo de Alcantarilla Va a Ingresar: ") ) # Calculo del Diametro de la Alcantarilla: g = 9.81 # Gravedad Diam = [] val = [] for i in range(30): Diam = np.append(Diam,[12+3*i]) Diam = Diam/100. 
if Culv == 1: print (" Alcantarilla Tipo Circular") CotaA = float(input(u"Ingrese Cota del canal antes de Transicion: ") ) Cober = float(input(u"Ingrese Cobertura de Carretera: ") ) Borde = float(input(u"Ingrese Borde de Alcantarilla: ") ) Talud = float(input(u"Ingrese Talud de la Carretera: ") ) Lon = float(input(u"Ingrese Ancho del camino: ") ) Ss = float(input(u"Ingrese Pendiente de Alcantarilla;\: ") ) n = float(input(u"Ingrese Rugosidad de Alcantarilla;\: ") ) Aa = Qe/2.5 D =pow((4/np.pi)*Aa,0.5) for i in range(30): if D >= Diam[i]: val = np.append(val,[Diam[i]-D]) Dc = np.max(val) Dc = D + Dc Ad = np.pi*Dc**2/4 Vc = Qe/Ad hv = Vc**2/(2*g) NAEA = CotaA+y CotaB = NAEA -1.5*hv-Dc CotaF = CotaB+D+Cober CotaE = CotaA+Borde+y Lal = 2*Talud*(CotaF-CotaE)+Lon VZ = Lal*Ss CotaC = CotaB - VZ Sen = (Vc*n/1.) #CAUDAL, TALUD, RUGOSIDAD, PENDIENTE Q,Z,Z1,n,S=0.071357,0.5,0.8,0.014,0.012 #CALCULATE def f(Q,Z,Z1,n,S,y): A=y**2*((Z**2+1.)**0.5+(Z1**2+1.)**0.5-(Z+Z1)*0.5) P=2*y*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5))-y*(Z1+Z) k=Q*n/pow(S,0.5) fy=pow(A,5./3.)*pow(P,-2./3.)-k dA=y*2*((Z**2+1.)**0.5+(Z1**2+1.)**0.5-(Z+Z1)*0.5) dP=2*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5))-(Z1+Z) dfy=5./3.*A**(2./3.)*P**(-2./3.)*dA - 2./3.*pow(A,5./3.)*pow(P,-5./3.)*dP return fy,dfy,y y,Imax=0.5,40 Tol=1E-8 # Tolerancia Para la Iteraciones E,cont=4,0 print ("\t-------------------------------------------------------------") print ("\t N° y_i f(y_i) f'(y_i) y_(i+1) Error") print ("\t-------------------------------------------------------------") while (E>=Tol): fy,dfy,y=f(Q,Z,Z1,n,S,y) y1=y-fy/dfy cont+=1 E=np.abs(y-y1) print ("\t %.0f %.5f %.5f %.5f %.5f %.5f"%(cont,y,fy,dfy,y1,np.abs(y-y1))) y=y1 if (cont>=Imax): break print ("\nTIRANTE (y): ", round(y,4), "m") A=y**2*((Z**2+1.)**0.5+(Z1**2+1.)**0.5-(Z+Z1)*0.5) print ("Area: ",round(A,4),"m^2") P=2*y*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5))-y*(Z1+Z) print ("Perimetro Mojado: ",round(P,4),"m") print ("Velocidad: ",round(Q/A,4),"m/s") b=(2*A-y**2*(Z+Z1))/(2*y) Tt=b+y*Z+y*Z1 print ("\nBase de la Seccion: ",b) print ("Espejo de Agua: ",Tt) #print "Base Calculado: ",P-y*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5)) F=(Q/A)/pow((9.8106*A/Tt),0.5) if F == 1: print ("Numero de Froude: ",round(F,3)) print ("Esta en un Regimen Critico esta en crisis") elif 0 < F < 1: print ("Numero de Froude: ",round(F,3)) print ("Esta en un Regimen Sub Critico, se trata de un rio") elif 1 < F: print ("Numero de Froude: ",round(F,3)) print ("Esta en un Regimen Super Critico, se trata de un torrente") print ("Borde Libre segun Boureau of Reclamation: ",0.30)
es
0.488589
# -*- coding: utf-8 -*- ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ UNIVERSIDAD NACIONAL DE HUANCAVELICA FACULTAD DE CIENCIAS DE INGENIERÍA ESCUELA ACADÉMICA PROFESIONAL DE CIVIL ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ ░░░░░░ DISEÑO DE ALCANTARILLA Y CANALES ░░░░ ░░░░ PARA EL DISEÑO HIDRAULICO DE ALCANTARILLAS ░░░ ░ DE PROYECTOS VIALES EN LA REGION DE HUANCAVELICA ░ ===== PROYECTO DE TESIS ===== * AUTOR : <NAME>, <NAME> * ASESOR : <NAME>, Ivan ################ DATA GENERAL ###################### #print y #print y #print y # Calculo del Diametro de la Alcantarilla: # Gravedad #CAUDAL, TALUD, RUGOSIDAD, PENDIENTE #CALCULATE # Tolerancia Para la Iteraciones #print "Base Calculado: ",P-y*(pow(Z1**2+1.,0.5)+pow(Z**2+1.,0.5))
3.506014
4
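The script above repeatedly solves Manning's equation, A**(5/3) * P**(-2/3) = Q*n/sqrt(S), for the normal depth with Newton-Raphson. The same iteration, sketched in English for the purely rectangular case:

def normal_depth_rectangular(Q, b, n, S, y=0.5, tol=1e-8, max_iter=60):
    """Newton-Raphson solve of Manning's equation for a rectangular channel."""
    k = Q * n / S ** 0.5
    for _ in range(max_iter):
        A, P = b * y, b + 2 * y  # flow area and wetted perimeter
        f = A ** (5. / 3.) * P ** (-2. / 3.) - k
        df = (5. / 3.) * b * A ** (2. / 3.) * P ** (-2. / 3.) \
             - (4. / 3.) * A ** (5. / 3.) * P ** (-5. / 3.)
        y_new = y - f / df
        if abs(y_new - y) < tol:
            return y_new
        y = y_new
    return y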
genia/utils/download.py
m-stoeckel/pyramid-nested-ner
6
6621732
<reponame>m-stoeckel/pyramid-nested-ner<filename>genia/utils/download.py<gh_stars>1-10 from oauth2client.client import GoogleCredentials from pydrive.drive import GoogleDrive from pydrive.auth import GoogleAuth from google.colab import auth if __name__ == "__main__": auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) bio_bert = ("1GJpGjQj6aZPV-EfbiQELpBkvlGtoKiyA", 'biobert_large_v1.1_pubmed.tar.gz') bio_nlp_vec = ("0BzMCqpcgEJgiUWs0ZnU0NlFTam8", 'bio_nlp_vec.tar.gz') for file_id, file_name in [bio_nlp_vec, bio_bert]: print(f'downloading {file_name}...') downloaded = drive.CreateFile({'id': file_id}) downloaded.GetContentFile(file_name)
from oauth2client.client import GoogleCredentials from pydrive.drive import GoogleDrive from pydrive.auth import GoogleAuth from google.colab import auth if __name__ == "__main__": auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) bio_bert = ("1GJpGjQj6aZPV-EfbiQELpBkvlGtoKiyA", 'biobert_large_v1.1_pubmed.tar.gz') bio_nlp_vec = ("0BzMCqpcgEJgiUWs0ZnU0NlFTam8", 'bio_nlp_vec.tar.gz') for file_id, file_name in [bio_nlp_vec, bio_bert]: print(f'downloading {file_name}...') downloaded = drive.CreateFile({'id': file_id}) downloaded.GetContentFile(file_name)
none
1
2.72649
3
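The hard-coded file ids above come from Google Drive share links of the form https://drive.google.com/file/d/<ID>/view. A small convenience sketch, not part of the record, for pulling the id out of such a link:

import re

def drive_id_from_url(url):
    """Extract the file id from a Google Drive share link."""
    m = re.search(r'/d/([\w-]+)', url) or re.search(r'[?&]id=([\w-]+)', url)
    if m is None:
        raise ValueError('no file id found in ' + url)
    return m.group(1)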
placement_alg/ModifiedDijkstra.py
nigsics/dcpmtool
3
6621733
<reponame>nigsics/dcpmtool # Copyright 2018 <NAME> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class ModifiedDijkstra(object): def __init__(self, g, wt="weight"): self.dist = {} # A map from nodes to their labels (float) self.predecessor = {} # A map from a node to a node self.g = g; self.wt = wt; edges = g.edges() # Set the value for infinite distance in the graph self.inf = 0.0; for e in edges: self.inf += abs(g[e[0]][e[1]][wt]); self.inf += 1.0; def getPath(self, source, dest, as_nodes = False): self.dist = {} # A map from nodes to their labels (float) self.predecessor = {} # A map from a node to a node # Initialize the distance labels to "infinity" vertices = self.g.nodes() for vertex in vertices: self.dist[vertex] = self.inf self.predecessor[vertex] = source # Further set up the distance from the source to itself and # to all one hops away. self.dist[source] = 0.0 if self.g.is_directed(): outEdges = self.g.out_edges([source]) else: outEdges = self.g.edges([source]) for edge in outEdges: self.dist[edge[1]] = self.g[edge[0]][edge[1]][self.wt] s = set(vertices) s.remove(source); currentMin = self._findMinNode(s) if currentMin == None: return None s.remove(currentMin) while currentMin != dest and (len(s) != 0) and currentMin != None: if self.g.is_directed(): outEdges = self.g.out_edges([currentMin]) else: outEdges = self.g.edges([currentMin]) for edge in outEdges: opposite = edge[1] if self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] < self.dist[opposite]: self.dist[opposite] = self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] self.predecessor[opposite] = currentMin s.add(opposite); currentMin = self._findMinNode(s) #print "Current min node {}, s = {}".format(currentMin, s) if currentMin == None: return None s.remove(currentMin) # Compute the path as a list of edges currentNode = dest; predNode = self.predecessor.get(dest); node_list = [dest] done = False path = [] while not done: path.append((predNode, currentNode)) currentNode = predNode predNode = self.predecessor[predNode] node_list.append(currentNode) done = currentNode == source node_list.reverse() if as_nodes: return node_list else: return path def _findMinNode(self, s): minNode = None minVal = self.inf for vertex in s: if self.dist[vertex] < minVal: minVal = self.dist[vertex] minNode = vertex return minNode
# Copyright 2018 <NAME> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class ModifiedDijkstra(object): def __init__(self, g, wt="weight"): self.dist = {} # A map from nodes to their labels (float) self.predecessor = {} # A map from a node to a node self.g = g; self.wt = wt; edges = g.edges() # Set the value for infinite distance in the graph self.inf = 0.0; for e in edges: self.inf += abs(g[e[0]][e[1]][wt]); self.inf += 1.0; def getPath(self, source, dest, as_nodes = False): self.dist = {} # A map from nodes to their labels (float) self.predecessor = {} # A map from a node to a node # Initialize the distance labels to "infinity" vertices = self.g.nodes() for vertex in vertices: self.dist[vertex] = self.inf self.predecessor[vertex] = source # Further set up the distance from the source to itself and # to all one hops away. self.dist[source] = 0.0 if self.g.is_directed(): outEdges = self.g.out_edges([source]) else: outEdges = self.g.edges([source]) for edge in outEdges: self.dist[edge[1]] = self.g[edge[0]][edge[1]][self.wt] s = set(vertices) s.remove(source); currentMin = self._findMinNode(s) if currentMin == None: return None s.remove(currentMin) while currentMin != dest and (len(s) != 0) and currentMin != None: if self.g.is_directed(): outEdges = self.g.out_edges([currentMin]) else: outEdges = self.g.edges([currentMin]) for edge in outEdges: opposite = edge[1] if self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] < self.dist[opposite]: self.dist[opposite] = self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] self.predecessor[opposite] = currentMin s.add(opposite); currentMin = self._findMinNode(s) #print "Current min node {}, s = {}".format(currentMin, s) if currentMin == None: return None s.remove(currentMin) # Compute the path as a list of edges currentNode = dest; predNode = self.predecessor.get(dest); node_list = [dest] done = False path = [] while not done: path.append((predNode, currentNode)) currentNode = predNode predNode = self.predecessor[predNode] node_list.append(currentNode) done = currentNode == source node_list.reverse() if as_nodes: return node_list else: return path def _findMinNode(self, s): minNode = None minVal = self.inf for vertex in s: if self.dist[vertex] < minVal: minVal = self.dist[vertex] minNode = vertex return minNode
en
0.870522
# Copyright 2018 <NAME> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A map from nodes to their labels (float) # A map from a node to a node # Set the value for infinite distance in the graph # A map from nodes to their labels (float) # A map from a node to a node # Initialize the distance labels to "infinity" # Further set up the distance from the source to itself and # to all one hops away. #print "Current min node {}, s = {}".format(currentMin, s) # Compute the path as a list of edges
2.843168
3
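The class above is written against the networkx graph API (g.nodes(), g.edges(), g[u][v][weight], is_directed, out_edges), so a minimal usage sketch looks like this:

import networkx as nx

g = nx.Graph()
g.add_edge('a', 'b', weight=1.0)
g.add_edge('b', 'c', weight=2.0)
g.add_edge('a', 'c', weight=4.0)

md = ModifiedDijkstra(g)
print(md.getPath('a', 'c', as_nodes=True))  # expected: ['a', 'b', 'c']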
test/scripts/sanity_test.py
bserdar/JovianDSS-KubernetesCSI
1
6621734
#!/usr/bin/python3 #from fabric.api import env, run from fabric import Connection import os import vagrant csiTestVM = "fedora29-csi-test-0.2" def cleanVM(root): v = vagrant.Vagrant(root=root) print(" - Cleaning VM ", root) try: v.destroy() except Exception as err: print(err) try: os.remove(root + "/Vagrantfile") except FileNotFoundError: pass def initVM(vmName, root): buildPath = root + "/build" v = vagrant.Vagrant(root=root) if not os.path.exists(root): os.makedirs(root) print(" - Setting up VM ", root) if not os.path.exists(buildPath): os.makedirs(buildPath) v.init(box_name=vmName) def copyBins(bins, root): cmd = "cp -R {0}/* {1}/build/".format(bins,root) print(" - Copying binaries: ", cmd) os.system(cmd) def runVM(root): v = vagrant.Vagrant(root=root) print(" - Starting VM ", root) v.up() def runPlugin(root): v = vagrant.Vagrant(root=root) # Start plugin cmd = "nohup /home/vagrant/build/jdss-csi-plugin --csi-address=127.0.0.1:15947 --soc-type=tcp --config ./build/controller-cfg.yaml >& /dev/null < /dev/null &" con = Connection(v.user_hostname_port(), connect_kwargs={ "key_filename": v.keyfile(), }) out = con.sudo(cmd) def runCSISanity(root): v = vagrant.Vagrant(root=root) # Run tests print("Starting sanity tests.") #out = v.ssh(command="/home/vagrant/go/src/csi-test/cmd/csi-sanity/csi-sanity -ginkgo.failFast -csi.endpoint 127.0.0.1:15947") cmd = "/home/vagrant/go/src/csi-test/cmd/csi-sanity/csi-sanity -ginkgo.failFast -csi.endpoint 127.0.0.1:15947" print("Running: ", cmd) con = Connection(v.user_hostname_port(), connect_kwargs={ "key_filename": v.keyfile(), }) out = con.run(cmd) def main(): root = "csi-sanity" cleanVM(root) initVM(csiTestVM,root) copyBins("bins", root) try: runVM(root) runPlugin(root) runCSISanity(root) except Exception as err: print(err) raise err cleanVM(root) print("Success!") if __name__ == "__main__": main()
#!/usr/bin/python3 #from fabric.api import env, run from fabric import Connection import os import vagrant csiTestVM = "fedora29-csi-test-0.2" def cleanVM(root): v = vagrant.Vagrant(root=root) print(" - Cleaning VM ", root) try: v.destroy() except Exception as err: print(err) try: os.remove(root + "/Vagrantfile") except FileNotFoundError: pass def initVM(vmName, root): buildPath = root + "/build" v = vagrant.Vagrant(root=root) if not os.path.exists(root): os.makedirs(root) print(" - Setting up VM ", root) if not os.path.exists(buildPath): os.makedirs(buildPath) v.init(box_name=vmName) def copyBins(bins, root): cmd = "cp -R {0}/* {1}/build/".format(bins,root) print(" - Copying binaries: ", cmd) os.system(cmd) def runVM(root): v = vagrant.Vagrant(root=root) print(" - Starting VM ", root) v.up() def runPlugin(root): v = vagrant.Vagrant(root=root) # Start plugin cmd = "nohup /home/vagrant/build/jdss-csi-plugin --csi-address=127.0.0.1:15947 --soc-type=tcp --config ./build/controller-cfg.yaml >& /dev/null < /dev/null &" con = Connection(v.user_hostname_port(), connect_kwargs={ "key_filename": v.keyfile(), }) out = con.sudo(cmd) def runCSISanity(root): v = vagrant.Vagrant(root=root) # Run tests print("Starting sanity tests.") #out = v.ssh(command="/home/vagrant/go/src/csi-test/cmd/csi-sanity/csi-sanity -ginkgo.failFast -csi.endpoint 127.0.0.1:15947") cmd = "/home/vagrant/go/src/csi-test/cmd/csi-sanity/csi-sanity -ginkgo.failFast -csi.endpoint 127.0.0.1:15947" print("Running: ", cmd) con = Connection(v.user_hostname_port(), connect_kwargs={ "key_filename": v.keyfile(), }) out = con.run(cmd) def main(): root = "csi-sanity" cleanVM(root) initVM(csiTestVM,root) copyBins("bins", root) try: runVM(root) runPlugin(root) runCSISanity(root) except Exception as err: print(err) raise err cleanVM(root) print("Success!") if __name__ == "__main__": main()
en
0.182335
#!/usr/bin/python3 #from fabric.api import env, run # Start plugin # Run tests #out = v.ssh(command="/home/vagrant/go/src/csi-test/cmd/csi-sanity/csi-sanity -ginkgo.failFast -csi.endpoint 127.0.0.1:15947")
2.08376
2
python/pymef90/mesh/__init__.py
jeanmichelscherer/mef90
9
6621735
<gh_stars>1-10 import sys sys.path.append(__path__[0]) from mef90EXODUS import * from mef90ABAQUS import * from mef90GMSH import * from mef90MSC import *
import sys sys.path.append(__path__[0]) from mef90EXODUS import * from mef90ABAQUS import * from mef90GMSH import * from mef90MSC import *
none
1
1.238229
1
examples/example_web_app/setup.py
aalhour/cookiecutter-aiohttp-sqlalchemy
46
6621736
<reponame>aalhour/cookiecutter-aiohttp-sqlalchemy<gh_stars>10-100 import os try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup, find_packages # Get the version from example_web_app import __version__ def get_long_description(): readme = "" with open('README.md', encoding='utf-8') as readme_file: readme = readme_file.read() return readme REQUIREMENTS_FOLDER = os.getenv('REQUIREMENTS_PATH', '') requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements.txt"), 'r')] test_requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements_dev.txt"), 'r')] setup( name='example_web_app', version='{version}'.format(version=__version__), description="An Example Web API project powered by Aiohttp and SQLAlchemy", long_description=get_long_description(), author="<NAME>", author_email='<EMAIL>', url='example.com/api/v1.0', packages=find_packages(), include_package_data=True, package_data={ "example_web_app": [ "docs/*", "templates/*", "static/*", "static/js/*", "static/css/*", ] }, install_requires=requirements, zip_safe=False, keywords="example_web_app", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: ISC License (ISCL)', 'Natural Language :: English', 'Programming Language :: Python :: 3', ], test_suite='tests', tests_require=test_requirements, entry_points={ 'console_scripts': [ 'run_example_web_app=example_web_app.app:run_app', 'init_example=example_web_app.init_example:init_example' ] } )
import os try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup, find_packages # Get the version from example_web_app import __version__ def get_long_description(): readme = "" with open('README.md', encoding='utf-8') as readme_file: readme = readme_file.read() return readme REQUIREMENTS_FOLDER = os.getenv('REQUIREMENTS_PATH', '') requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements.txt"), 'r')] test_requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements_dev.txt"), 'r')] setup( name='example_web_app', version='{version}'.format(version=__version__), description="An Example Web API project powered by Aiohttp and SQLAlchemy", long_description=get_long_description(), author="<NAME>", author_email='<EMAIL>', url='example.com/api/v1.0', packages=find_packages(), include_package_data=True, package_data={ "example_web_app": [ "docs/*", "templates/*", "static/*", "static/js/*", "static/css/*", ] }, install_requires=requirements, zip_safe=False, keywords="example_web_app", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: ISC License (ISCL)', 'Natural Language :: English', 'Programming Language :: Python :: 3', ], test_suite='tests', tests_require=test_requirements, entry_points={ 'console_scripts': [ 'run_example_web_app=example_web_app.app:run_app', 'init_example=example_web_app.init_example:init_example' ] } )
en
0.653712
# Get the version
1.717431
2
vorpy/integration/rungekutta.py
vdods/vorpy
3
6621737
<reponame>vdods/vorpy<filename>vorpy/integration/rungekutta.py """ Implements explicit Runge-Kutta integration methods, of ordinary (non-error-estimating) and error-estimating types. """ import abc import numpy as np import typing import vorpy.tensor class RungeKutta(metaclass=abc.ABCMeta): """ References: - Wikipedia RK article - https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods - List of RK methods - https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html - A Tenth-Order Runge-Kutta Method with Error Estimate by <NAME> - http://sce.uhcl.edu/feagin/courses/rk10.pdf - An Explicit Sixth-Order Runge-Kutta Formula By <NAME> - https://www.ams.org/journals/mcom/1968-22-102/S0025-5718-68-99876-1/S0025-5718-68-99876-1.pdf - Appendix A; Runge-Kutta Methods - https://www.uni-muenster.de/imperia/md/content/physik_tp/lectures/ss2017/numerische_Methoden_fuer_komplexe_Systeme_II/rkm-1.pdf """ @classmethod @abc.abstractmethod def order (cls) -> int: """ Should return the order of this method. If a method has order p, then its local truncation error will be on the order of O(dt^(p+1)). Note that there is no simple relationship between order and stage count. From https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods In general, if an explicit s-stage Runge–Kutta method has order p, then it can be proven that the number of stages must satisfy s >= p, and if p >= 5, then s >= p+1. However, it is not known whether these bounds are sharp in all cases. """ raise NotImplementedError('subclass must implement this in order to use it') # Note: @abc.abstractmethod should be the innermost decorator; # see https://docs.python.org/3/library/abc.html#abc.abstractmethod @classmethod @abc.abstractmethod def a (cls) -> np.ndarray: """ Returns the `a` part of the Butcher tableau of this RK method. See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods Return value should have shape """ raise NotImplementedError('subclass must implement this in order to use it') @classmethod @abc.abstractmethod def b (cls) -> np.ndarray: """ Returns the `b` part of the Butcher tableau of this RK method. See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods """ raise NotImplementedError('subclass must implement this in order to use it') @classmethod def b_star (cls) -> np.ndarray: """ Returns the `b*` part of the Butcher tableau of this RK method. See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods Note that a non-embedded Runge-Kutta method does not need to implement this. """ raise NotImplementedError('subclass must implement this in order to use it') @classmethod @abc.abstractmethod def c (cls) -> np.ndarray: """ Returns the `c` part of the Butcher tableau of this RK method. See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods """ raise NotImplementedError('subclass must implement this in order to use it') @classmethod @abc.abstractmethod def is_explicit_method (cls) -> bool: """ Should return true if this is an explicit method (meaning there are certain constraints on the Butcher tableau). Default is False (i.e. no constraint). 
See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods """ return False @classmethod @abc.abstractmethod def is_embedded_method (cls) -> bool: """ Should return true if this is an embedded method (meaning it uses a secondary, higher-order method to estimate the local truncation error). Default is False (i.e. no secondary, higher-order method). See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods """ return False @classmethod def validate_method_definition (cls) -> None: """ Will raise an exception if there is any inconsistency in the definition of a, b, c (i.e. the Butcher tableau) of this method. If cls.is_explicit_method returns True, then it will require that a is strictly lower-triangular. If all checks pass, no exception will be raised. """ a = cls.a() if len(a.shape) != 2 or a.shape[0] != a.shape[1]: raise TypeError(f'expected a to be a square matrix (but a.shape was {a.shape}') stage_count = a.shape[0] order = cls.order() if order >= 5: if not (stage_count >= order+1): raise ValueError(f'For a Runge-Kutta method of order >= 5, the number of stages must be >= order+1 (but in this case, order = {order} and stage_count = {stage_count}') else: if not (stage_count >= order): raise ValueError(f'For a Runge-Kutta method of order < 5, the number of stages must be >= order (but in this case, order = {order} and stage_count = {stage_count}') if cls.is_explicit_method(): for row in range(stage_count): for col in range(row,stage_count): if a[row,col] != 0.0: raise ValueError(f'expected a to be strictly lower-triangular because cls.is_explicit_method() was True (but a was\n{a}') b = cls.b() if len(b.shape) != 1 or b.shape[0] != stage_count: raise TypeError(f'expected b to be a vector having dimension {stage_count} (but b.shape was {b.shape})') if cls.is_embedded_method(): b_star = cls.b_star() if len(b_star.shape) != 1 or b_star.shape[0] != stage_count: raise TypeError(f'expected b_star to be a vector having dimension {stage_count} (but b_star.shape was {b_star.shape})') # The following 2 checks aren't rigorously backed up, but are just from an observation that # fewer stages seem to imply lower order. Thus b (which should produce the integrator of # order equal to cls.order()) should have a zero at least in its last component. if b_star[-1] == 0.0: raise ValueError(f'expected b_star to have a non-zero final component (but b_star was {b_star}') if b[-1] != 0.0: raise ValueError(f'expected b to have a zero final component (but b was {b}') c = cls.c() if len(c.shape) != 1 or c.shape[0] != stage_count: raise TypeError(f'expected c to be a vector having dimension {stage_count} (but c.shape was {c.shape})') if cls.is_explicit_method(): if c[0] != 0.0: raise ValueError(f'expected c[0] to be zero because cls.is_explicit_method() was true (but c[0] was {c[0]}') @classmethod def order (cls) -> int: """ Returns the order of the method, meaning that the local truncation error is on the order of O(dt^(order+1)), and the total accumulated error is on the order of O(dt^order). """ cls.validate_method_definition() return __order_of_vector(cls.b()) @classmethod def _stage_count (cls) -> int: cls.validate_method_definition() return cls.a().shape[0] class RungeKutta_Explicit(RungeKutta): """ NOTE: For the time being, it is assumed that the computed integration step of an embedded integrator will be the lower-order value, since the higher-order value is ostensibly used to estimate the local truncation error. 
TODO: Write tests that verify that error is of the claimed order. TODO: b - b_star is constant (per method), so pre-compute this. TODO: Depending on what the semantics of b_star actually are (which one of b or b_star is used to produce the result), maybe rename this to b_embedded? TODO: implement estimation of global error (presumably it's the sum of local truncation error) """ def __init__ ( self, *, vector_field:typing.Callable[[float,np.ndarray],np.ndarray], parameter_shape:typing.Sequence[int], ) -> None: if not all(s >= 0 for s in parameter_shape): raise ValueError(f'parameter_shape must have all nonnegative components (but was actually {parameter_shape}') self.validate_method_definition() self.__vector_field = vector_field self.__parameter_shape = parameter_shape self.__parameter_dimension = vorpy.tensor.dimension_of_shape(parameter_shape) self.__stage_count = self._stage_count() # Create and keep some np.ndarray instances for intermediate and result computations in order to avoid # memory allocation during integration. self.__k = np.zeros((self.__stage_count, self.__parameter_dimension), dtype=np.float64) # This is the time value input to the integrator's step function. self.t_now = 0.0 # This is the parameter value input to the integrator's step function. self.y_now = np.zeros(parameter_shape, dtype=np.float64) # This is the time value output to the integrator's step function (the result is stored here). self.t_next = 0.0 # This is the parameter value output to the integrator's step function (the result is stored here). self.y_next = np.zeros(parameter_shape, dtype=np.float64) # If this is an embedded method, create an array for storage of the [square of the] local truncation # error estimate. We use the square in order to avoid taking a square root during integration. if self.is_embedded_method(): self.ltee_squared = np.nan @classmethod def is_explicit_method (cls) -> bool: return True def set_inputs (self, t:float, y:np.ndarray) -> None: self.t_now = t self.y_now[:] = y def get_outputs (self) -> typing.Tuple[float, np.ndarray]: return self.t_next, self.y_next def get_local_truncation_error_estimate (self) -> float: """ Returns the local truncation error estimate of the last call to step. Note that this function calls numpy.sqrt, since the square of the LTEE is what is computed and stored, in order to avoid a call to sqrt during the integration step. To access the squared LTEE, just use the ltee_squared attribute directly. """ return np.sqrt(self.ltee_squared) def step (self, dt:float) -> None: """ Integrates the initial conditions (t,y) using timestep dt and RK method defined by a, b, c (i.e. the Butcher tableau of the method). Stores the updated t and y values in self.t and self.y. Returns self.t, self.y for convenience. Reference: - https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods """ a = self.a() b = self.b() if self.is_embedded_method(): b_star = self.b_star() c = self.c() # Because this is an explicit method, a[0,:] and c[0] are identically zero, so the first iteration # reduces to a simpler form. Flatten the result in order to make the index computations involving # __k simple. self.__k[0,:] = self.__vector_field(self.t_now, self.y_now).reshape(-1) # Do the rest of the iterations using the general form. for i in range(1, self.__stage_count): # Flatten the result in order to be able to assign to __k (which is a flattened parameter_shape). 
self.__k[i,:] = self.__vector_field( self.t_now + dt*c[i], self.y_now + dt*np.einsum('i,ij->j', a[i,0:i], self.__k[0:i,:]).reshape(*self.__parameter_shape), ).reshape(-1) self.t_next = self.t_now + dt self.y_next[:] = self.y_now + dt*np.einsum('i,ij->j', b, self.__k).reshape(*self.__parameter_shape) if self.is_embedded_method(): self.ltee_squared = (dt**2) * np.sum(np.einsum('i,ij->j', b - b_star, self.__k)**2) class RungeKutta_4(RungeKutta_Explicit): """ The original Runge-Kutta 4 method -- a 4th order method. Does not do any local truncation error estimation. Reference: - https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html """ # Define the Butcher tableau using class variables, so new np.ndarrays aren't created during the step function. __a = np.array([ [0.0, 0.0, 0.0, 0.0], [0.5, 0.0, 0.0, 0.0], [0.0, 0.5, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], ]) __b = np.array([1/6, 1/3, 1/3, 1/6]) __c = np.array([0.0, 0.5, 0.5, 1.0]) @classmethod def order (cls) -> int: return 4 @classmethod def a (cls) -> np.ndarray: return cls.__a @classmethod def b (cls) -> np.ndarray: return cls.__b @classmethod def c (cls) -> np.ndarray: return cls.__c @classmethod def is_embedded_method (cls) -> bool: return False class RungeKuttaFehlberg_4_5(RungeKutta_Explicit): """ Runge-Kutta-Fehlberg 4(5) method. This is a fourth-order RK method which uses a 5th order RK method to estimate the local truncation error. Reference: - https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html """ # Define the Butcher tableau using class variables, so new np.ndarrays aren't created during the step function. __a = np.array([ [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0], [ 1/4 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0], [ 3/32 , 9/32 , 0.0 , 0.0 , 0.0 , 0.0], [1932/2197, -7200/2197, 7296/2197, 0.0 , 0.0 , 0.0], [ 439/216 , -8.0 , 3680/513 , -845/4104, 0.0 , 0.0], [ -8/27 , 2.0 , -3544/2565, 1859/4104, -11/40, 0.0], ]) __b = np.array([25/216, 0.0, 1408/2565, 2197/4104, -1/5, 0]) __b_star = np.array([16/135, 0.0, 6656/12825, 28561/56430, -9/50, 2/55]) __c = np.array([0.0, 1/4, 3/8, 12/13, 1.0, 1/2]) @classmethod def order (cls) -> int: return 4 @classmethod def a (cls) -> np.ndarray: return cls.__a @classmethod def b (cls) -> np.ndarray: return cls.__b @classmethod def b_star (cls) -> np.ndarray: return cls.__b_star @classmethod def c (cls) -> np.ndarray: return cls.__c @classmethod def is_embedded_method (cls) -> bool: return True if __name__ == '__main__': def do_stuff_0 (): # Vector field of rigid counterclockwise rotation def V (t, y): return np.array([-y[1], y[0]]) #integrator = RungeKutta_4(vector_field=V, parameter_shape=(2,)) integrator = RungeKuttaFehlberg_4_5(vector_field=V, parameter_shape=(2,)) t = 0.0 y = np.array([1.0, 0.0]) dt = 0.1 t_max = 6.3 t_v = [t] y_v = [np.copy(y)] ltee_v = [0.0] while t < t_max: integrator.set_inputs(t, y) integrator.step(dt) t, y = integrator.get_outputs() t_v.append(t) y_v.append(np.copy(y)) ltee_v.append(np.sqrt(integrator.ltee_squared)) print(f'ltee_v = {ltee_v}') # Convert the list of np.ndarray to a full np.ndarray. 
y_t = np.array(y_v) import matplotlib.pyplot as plt def plot_stuff (): row_count = 1 col_count = 4 size = 5 fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(size*col_count,size*row_count)) axis = axis_vv[0][0] axis.set_title('position') axis.set_aspect('equal') axis.plot(y_t[:,0], y_t[:,1], '.') axis = axis_vv[0][1] axis.set_title('x') axis.plot(t_v, y_t[:,0], '.') axis = axis_vv[0][2] axis.set_title('y') axis.plot(t_v, y_t[:,1], '.') axis = axis_vv[0][3] axis.set_title('local trunc. err. est.') axis.semilogy(t_v, ltee_v, '.') fig.tight_layout() filename = 'runge-kutta.png' plt.savefig(filename, bbox_inches='tight') print('wrote to file "{0}"'.format(filename)) # VERY important to do this -- otherwise your memory will slowly fill up! # Not sure which one is actually sufficient -- apparently none of them are, YAY! plt.clf() plt.close(fig) plt.close('all') del fig del axis_vv plot_stuff() def do_stuff_1 (): import sympy as sp import vorpy import vorpy.symbolic import vorpy.symplectic np.set_printoptions(precision=20) # Define the Kepler problem and use it to test the integrator def phase_space_coordinates (): return np.array(sp.var('x,y,p_x,p_y')).reshape(2,2) def K (p): return np.dot(p.flat, p.flat) / 2 def U (q): return -1 / sp.sqrt(np.dot(q.flat, q.flat)) def H (qp): """Total energy -- should be conserved.""" return K(qp[1,...]) + U(qp[0,...]) def p_theta (qp): """Angular momentum -- should be conserved.""" x,y,p_x,p_y = qp.reshape(-1) return x*p_y - y*p_x # Determine the Hamiltonian vector field of H. qp = phase_space_coordinates() X_H = vorpy.symplectic.symplectic_gradient_of(H(qp), qp) print(f'X_H:\n{X_H}') print('X_H lambdification') X_H_fast = vorpy.symbolic.lambdified(X_H, qp, replacement_d={'array':'np.array', 'dtype=object':'dtype=np.float64'}, verbose=True) print('H lambdification') H_fast = vorpy.symbolic.lambdified(H(qp), qp, replacement_d={'sqrt':'np.sqrt'}, verbose=True) print('p_theta lambdification') p_theta_fast = vorpy.symbolic.lambdified(p_theta(qp), qp, verbose=True) t_initial = 0.0 qp_initial = np.array([[1.0,0.0],[0.0,0.5]]) H_initial = H_fast(qp_initial) p_theta_initial = p_theta_fast(qp_initial) print(f'H_initial = {H_initial}') print(f'p_theta_initial = {p_theta_initial}') #integrator = RungeKutta_4(vector_field=V, parameter_shape=(2,)) integrator = RungeKuttaFehlberg_4_5(vector_field=(lambda t,qp:X_H_fast(qp)), parameter_shape=vorpy.tensor.shape(qp_initial)) t = t_initial y = qp_initial dt = 0.01 t_max = 3.0 t_v = [t] y_v = [np.copy(y)] ltee_v = [0.0] while t < t_max: integrator.set_inputs(t, y) integrator.step(dt) t, y = integrator.get_outputs() t_v.append(t) y_v.append(np.copy(y)) ltee_v.append(np.sqrt(integrator.ltee_squared)) print(f'ltee_v = {ltee_v}') # Convert the list of np.ndarray to a full np.ndarray. 
qp_t = np.array(y_v) H_v = vorpy.apply_along_axes(H_fast, (1,2), (qp_t,)) H_error_v = vorpy.apply_along_axes(lambda qp:np.abs(H_fast(qp) - H_initial), (1,2), (qp_t,)) #print(f'H_v = {H_v}') #print(f'H_error_v = {H_error_v}') p_theta_v = vorpy.apply_along_axes(p_theta_fast, (1,2), (qp_t,)) p_theta_error_v = vorpy.apply_along_axes(lambda qp:np.abs(p_theta_fast(qp) - p_theta_initial), (1,2), (qp_t,)) #print(f'p_theta_v = {p_theta_v}') #print(f'p_theta_error_v = {p_theta_error_v}') import matplotlib.pyplot as plt def plot_stuff (): row_count = 1 col_count = 5 size = 5 fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(size*col_count,size*row_count)) axis = axis_vv[0][0] axis.set_title('position') axis.set_aspect('equal') axis.plot(qp_t[:,0,0], qp_t[:,0,1], '.') axis = axis_vv[0][1] axis.set_title('x and y') axis.plot(t_v, qp_t[:,0,0], '.') axis.plot(t_v, qp_t[:,0,1], '.') axis = axis_vv[0][2] axis.set_title('local trunc. err. est.') axis.semilogy(t_v, ltee_v, '.') axis = axis_vv[0][3] axis.set_title('H error') axis.semilogy(t_v, H_error_v, '.') axis = axis_vv[0][4] axis.set_title('p_theta error') axis.semilogy(t_v, p_theta_error_v, '.') fig.tight_layout() filename = 'runge-kutta-kepler.png' plt.savefig(filename, bbox_inches='tight') print('wrote to file "{0}"'.format(filename)) # VERY important to do this -- otherwise your memory will slowly fill up! # Not sure which one is actually sufficient -- apparently none of them are, YAY! plt.clf() plt.close(fig) plt.close('all') del fig del axis_vv plot_stuff() #do_stuff_0() do_stuff_1()
""" Implements explicit Runge-Kutta integration methods, of ordinary (non-error-estimating) and error-estimating types. """ import abc import numpy as np import typing import vorpy.tensor class RungeKutta(metaclass=abc.ABCMeta): """ References: - Wikipedia RK article - https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods - List of RK methods - https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html - A Tenth-Order Runge-Kutta Method with Error Estimate by <NAME> - http://sce.uhcl.edu/feagin/courses/rk10.pdf - An Explicit Sixth-Order Runge-Kutta Formula By <NAME> - https://www.ams.org/journals/mcom/1968-22-102/S0025-5718-68-99876-1/S0025-5718-68-99876-1.pdf - Appendix A; Runge-Kutta Methods - https://www.uni-muenster.de/imperia/md/content/physik_tp/lectures/ss2017/numerische_Methoden_fuer_komplexe_Systeme_II/rkm-1.pdf """ @classmethod @abc.abstractmethod def order (cls) -> int: """ Should return the order of this method. If a method has order p, then its local truncation error will be on the order of O(dt^(p+1)). Note that there is no simple relationship between order and stage count. From https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods In general, if an explicit s-stage Runge–Kutta method has order p, then it can be proven that the number of stages must satisfy s >= p, and if p >= 5, then s >= p+1. However, it is not known whether these bounds are sharp in all cases. """ raise NotImplementedError('subclass must implement this in order to use it') # Note: @abc.abstractmethod should be the innermost decorator; # see https://docs.python.org/3/library/abc.html#abc.abstractmethod @classmethod @abc.abstractmethod def a (cls) -> np.ndarray: """ Returns the `a` part of the Butcher tableau of this RK method. See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods Return value should have shape """ raise NotImplementedError('subclass must implement this in order to use it') @classmethod @abc.abstractmethod def b (cls) -> np.ndarray: """ Returns the `b` part of the Butcher tableau of this RK method. See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods """ raise NotImplementedError('subclass must implement this in order to use it') @classmethod def b_star (cls) -> np.ndarray: """ Returns the `b*` part of the Butcher tableau of this RK method. See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods Note that a non-embedded Runge-Kutta method does not need to implement this. """ raise NotImplementedError('subclass must implement this in order to use it') @classmethod @abc.abstractmethod def c (cls) -> np.ndarray: """ Returns the `c` part of the Butcher tableau of this RK method. See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods """ raise NotImplementedError('subclass must implement this in order to use it') @classmethod @abc.abstractmethod def is_explicit_method (cls) -> bool: """ Should return true if this is an explicit method (meaning there are certain constraints on the Butcher tableau). Default is False (i.e. no constraint). 
See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods
        """
        return False

    @classmethod
    @abc.abstractmethod
    def is_embedded_method (cls) -> bool:
        """
        Should return True if this is an embedded method (meaning it uses a secondary, higher-order method to estimate the local truncation error).  Default is False (i.e. no secondary, higher-order method).

        See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods
        """
        return False

    @classmethod
    def validate_method_definition (cls) -> None:
        """
        Will raise an exception if there is any inconsistency in the definition of a, b, c (i.e. the Butcher tableau) of this method.  If cls.is_explicit_method returns True, then it will require that a is strictly lower-triangular.  If all checks pass, no exception will be raised.
        """
        a = cls.a()
        if len(a.shape) != 2 or a.shape[0] != a.shape[1]:
            raise TypeError(f'expected a to be a square matrix (but a.shape was {a.shape})')

        stage_count = a.shape[0]
        order = cls.order()
        if order >= 5:
            if not (stage_count >= order+1):
                raise ValueError(f'For a Runge-Kutta method of order >= 5, the number of stages must be >= order+1 (but in this case, order = {order} and stage_count = {stage_count})')
        else:
            if not (stage_count >= order):
                raise ValueError(f'For a Runge-Kutta method of order < 5, the number of stages must be >= order (but in this case, order = {order} and stage_count = {stage_count})')

        if cls.is_explicit_method():
            for row in range(stage_count):
                for col in range(row,stage_count):
                    if a[row,col] != 0.0:
                        raise ValueError(f'expected a to be strictly lower-triangular because cls.is_explicit_method() was True (but a was\n{a})')

        b = cls.b()
        if len(b.shape) != 1 or b.shape[0] != stage_count:
            raise TypeError(f'expected b to be a vector having dimension {stage_count} (but b.shape was {b.shape})')

        if cls.is_embedded_method():
            b_star = cls.b_star()
            if len(b_star.shape) != 1 or b_star.shape[0] != stage_count:
                raise TypeError(f'expected b_star to be a vector having dimension {stage_count} (but b_star.shape was {b_star.shape})')
            # The following 2 checks aren't rigorously backed up, but are just from an observation that
            # fewer stages seem to imply lower order.  Thus b (which should produce the integrator of
            # order equal to cls.order()) should have a zero at least in its last component.
            if b_star[-1] == 0.0:
                raise ValueError(f'expected b_star to have a non-zero final component (but b_star was {b_star})')
            if b[-1] != 0.0:
                raise ValueError(f'expected b to have a zero final component (but b was {b})')

        c = cls.c()
        if len(c.shape) != 1 or c.shape[0] != stage_count:
            raise TypeError(f'expected c to be a vector having dimension {stage_count} (but c.shape was {c.shape})')
        if cls.is_explicit_method():
            if c[0] != 0.0:
                raise ValueError(f'expected c[0] to be zero because cls.is_explicit_method() was True (but c[0] was {c[0]})')

    @classmethod
    def _stage_count (cls) -> int:
        cls.validate_method_definition()
        return cls.a().shape[0]

class RungeKutta_Explicit(RungeKutta):
    """
    NOTE: For the time being, it is assumed that the computed integration step of an embedded integrator will be the lower-order value, since the higher-order value is ostensibly used to estimate the local truncation error.
TODO: Write tests that verify that error is of the claimed order.
    TODO: b - b_star is constant (per method), so pre-compute this.
    TODO: Depending on what the semantics of b_star actually are (which one of b or b_star is used to produce the result), maybe rename this to b_embedded?
    TODO: implement estimation of global error (presumably it's the sum of local truncation error)
    """

    def __init__ (
        self,
        *,
        vector_field:typing.Callable[[float,np.ndarray],np.ndarray],
        parameter_shape:typing.Sequence[int],
    ) -> None:
        if not all(s >= 0 for s in parameter_shape):
            raise ValueError(f'parameter_shape must have all nonnegative components (but was actually {parameter_shape})')

        self.validate_method_definition()

        self.__vector_field = vector_field
        self.__parameter_shape = parameter_shape
        self.__parameter_dimension = vorpy.tensor.dimension_of_shape(parameter_shape)
        self.__stage_count = self._stage_count()
        # Create and keep some np.ndarray instances for intermediate and result computations in order to avoid
        # memory allocation during integration.
        self.__k = np.zeros((self.__stage_count, self.__parameter_dimension), dtype=np.float64)
        # This is the time value input to the integrator's step function.
        self.t_now = 0.0
        # This is the parameter value input to the integrator's step function.
        self.y_now = np.zeros(parameter_shape, dtype=np.float64)
        # This is the time value output by the integrator's step function (the result is stored here).
        self.t_next = 0.0
        # This is the parameter value output by the integrator's step function (the result is stored here).
        self.y_next = np.zeros(parameter_shape, dtype=np.float64)
        # If this is an embedded method, create storage for the [square of the] local truncation
        # error estimate.  We use the square in order to avoid taking a square root during integration.
        if self.is_embedded_method():
            self.ltee_squared = np.nan

    @classmethod
    def is_explicit_method (cls) -> bool:
        return True

    def set_inputs (self, t:float, y:np.ndarray) -> None:
        self.t_now = t
        self.y_now[:] = y

    def get_outputs (self) -> typing.Tuple[float, np.ndarray]:
        return self.t_next, self.y_next

    def get_local_truncation_error_estimate (self) -> float:
        """
        Returns the local truncation error estimate of the last call to step.

        Note that this function calls numpy.sqrt, since the square of the LTEE is what is computed and stored, in order to avoid a call to sqrt during the integration step.  To access the squared LTEE, just use the ltee_squared attribute directly.
        """
        return np.sqrt(self.ltee_squared)

    def step (self, dt:float) -> None:
        """
        Integrates the initial conditions (t,y) using timestep dt and the RK method defined by a, b, c (i.e. the Butcher tableau of the method).

        Stores the updated t and y values in self.t_next and self.y_next; retrieve them via get_outputs.

        Reference:
        - https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods
        """
        a = self.a()
        b = self.b()
        if self.is_embedded_method():
            b_star = self.b_star()
        c = self.c()

        # Because this is an explicit method, a[0,:] and c[0] are identically zero, so the first iteration
        # reduces to a simpler form.  Flatten the result in order to make the index computations involving
        # __k simple.
        self.__k[0,:] = self.__vector_field(self.t_now, self.y_now).reshape(-1)
        # Do the rest of the iterations using the general form.
        for i in range(1, self.__stage_count):
            # Flatten the result in order to be able to assign to __k (which is a flattened parameter_shape).
self.__k[i,:] = self.__vector_field( self.t_now + dt*c[i], self.y_now + dt*np.einsum('i,ij->j', a[i,0:i], self.__k[0:i,:]).reshape(*self.__parameter_shape), ).reshape(-1) self.t_next = self.t_now + dt self.y_next[:] = self.y_now + dt*np.einsum('i,ij->j', b, self.__k).reshape(*self.__parameter_shape) if self.is_embedded_method(): self.ltee_squared = (dt**2) * np.sum(np.einsum('i,ij->j', b - b_star, self.__k)**2) class RungeKutta_4(RungeKutta_Explicit): """ The original Runge-Kutta 4 method -- a 4th order method. Does not do any local truncation error estimation. Reference: - https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html """ # Define the Butcher tableau using class variables, so new np.ndarrays aren't created during the step function. __a = np.array([ [0.0, 0.0, 0.0, 0.0], [0.5, 0.0, 0.0, 0.0], [0.0, 0.5, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], ]) __b = np.array([1/6, 1/3, 1/3, 1/6]) __c = np.array([0.0, 0.5, 0.5, 1.0]) @classmethod def order (cls) -> int: return 4 @classmethod def a (cls) -> np.ndarray: return cls.__a @classmethod def b (cls) -> np.ndarray: return cls.__b @classmethod def c (cls) -> np.ndarray: return cls.__c @classmethod def is_embedded_method (cls) -> bool: return False class RungeKuttaFehlberg_4_5(RungeKutta_Explicit): """ Runge-Kutta-Fehlberg 4(5) method. This is a fourth-order RK method which uses a 5th order RK method to estimate the local truncation error. Reference: - https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html """ # Define the Butcher tableau using class variables, so new np.ndarrays aren't created during the step function. __a = np.array([ [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0], [ 1/4 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0], [ 3/32 , 9/32 , 0.0 , 0.0 , 0.0 , 0.0], [1932/2197, -7200/2197, 7296/2197, 0.0 , 0.0 , 0.0], [ 439/216 , -8.0 , 3680/513 , -845/4104, 0.0 , 0.0], [ -8/27 , 2.0 , -3544/2565, 1859/4104, -11/40, 0.0], ]) __b = np.array([25/216, 0.0, 1408/2565, 2197/4104, -1/5, 0]) __b_star = np.array([16/135, 0.0, 6656/12825, 28561/56430, -9/50, 2/55]) __c = np.array([0.0, 1/4, 3/8, 12/13, 1.0, 1/2]) @classmethod def order (cls) -> int: return 4 @classmethod def a (cls) -> np.ndarray: return cls.__a @classmethod def b (cls) -> np.ndarray: return cls.__b @classmethod def b_star (cls) -> np.ndarray: return cls.__b_star @classmethod def c (cls) -> np.ndarray: return cls.__c @classmethod def is_embedded_method (cls) -> bool: return True if __name__ == '__main__': def do_stuff_0 (): # Vector field of rigid counterclockwise rotation def V (t, y): return np.array([-y[1], y[0]]) #integrator = RungeKutta_4(vector_field=V, parameter_shape=(2,)) integrator = RungeKuttaFehlberg_4_5(vector_field=V, parameter_shape=(2,)) t = 0.0 y = np.array([1.0, 0.0]) dt = 0.1 t_max = 6.3 t_v = [t] y_v = [np.copy(y)] ltee_v = [0.0] while t < t_max: integrator.set_inputs(t, y) integrator.step(dt) t, y = integrator.get_outputs() t_v.append(t) y_v.append(np.copy(y)) ltee_v.append(np.sqrt(integrator.ltee_squared)) print(f'ltee_v = {ltee_v}') # Convert the list of np.ndarray to a full np.ndarray. 
y_t = np.array(y_v) import matplotlib.pyplot as plt def plot_stuff (): row_count = 1 col_count = 4 size = 5 fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(size*col_count,size*row_count)) axis = axis_vv[0][0] axis.set_title('position') axis.set_aspect('equal') axis.plot(y_t[:,0], y_t[:,1], '.') axis = axis_vv[0][1] axis.set_title('x') axis.plot(t_v, y_t[:,0], '.') axis = axis_vv[0][2] axis.set_title('y') axis.plot(t_v, y_t[:,1], '.') axis = axis_vv[0][3] axis.set_title('local trunc. err. est.') axis.semilogy(t_v, ltee_v, '.') fig.tight_layout() filename = 'runge-kutta.png' plt.savefig(filename, bbox_inches='tight') print('wrote to file "{0}"'.format(filename)) # VERY important to do this -- otherwise your memory will slowly fill up! # Not sure which one is actually sufficient -- apparently none of them are, YAY! plt.clf() plt.close(fig) plt.close('all') del fig del axis_vv plot_stuff() def do_stuff_1 (): import sympy as sp import vorpy import vorpy.symbolic import vorpy.symplectic np.set_printoptions(precision=20) # Define the Kepler problem and use it to test the integrator def phase_space_coordinates (): return np.array(sp.var('x,y,p_x,p_y')).reshape(2,2) def K (p): return np.dot(p.flat, p.flat) / 2 def U (q): return -1 / sp.sqrt(np.dot(q.flat, q.flat)) def H (qp): """Total energy -- should be conserved.""" return K(qp[1,...]) + U(qp[0,...]) def p_theta (qp): """Angular momentum -- should be conserved.""" x,y,p_x,p_y = qp.reshape(-1) return x*p_y - y*p_x # Determine the Hamiltonian vector field of H. qp = phase_space_coordinates() X_H = vorpy.symplectic.symplectic_gradient_of(H(qp), qp) print(f'X_H:\n{X_H}') print('X_H lambdification') X_H_fast = vorpy.symbolic.lambdified(X_H, qp, replacement_d={'array':'np.array', 'dtype=object':'dtype=np.float64'}, verbose=True) print('H lambdification') H_fast = vorpy.symbolic.lambdified(H(qp), qp, replacement_d={'sqrt':'np.sqrt'}, verbose=True) print('p_theta lambdification') p_theta_fast = vorpy.symbolic.lambdified(p_theta(qp), qp, verbose=True) t_initial = 0.0 qp_initial = np.array([[1.0,0.0],[0.0,0.5]]) H_initial = H_fast(qp_initial) p_theta_initial = p_theta_fast(qp_initial) print(f'H_initial = {H_initial}') print(f'p_theta_initial = {p_theta_initial}') #integrator = RungeKutta_4(vector_field=V, parameter_shape=(2,)) integrator = RungeKuttaFehlberg_4_5(vector_field=(lambda t,qp:X_H_fast(qp)), parameter_shape=vorpy.tensor.shape(qp_initial)) t = t_initial y = qp_initial dt = 0.01 t_max = 3.0 t_v = [t] y_v = [np.copy(y)] ltee_v = [0.0] while t < t_max: integrator.set_inputs(t, y) integrator.step(dt) t, y = integrator.get_outputs() t_v.append(t) y_v.append(np.copy(y)) ltee_v.append(np.sqrt(integrator.ltee_squared)) print(f'ltee_v = {ltee_v}') # Convert the list of np.ndarray to a full np.ndarray. 
qp_t = np.array(y_v) H_v = vorpy.apply_along_axes(H_fast, (1,2), (qp_t,)) H_error_v = vorpy.apply_along_axes(lambda qp:np.abs(H_fast(qp) - H_initial), (1,2), (qp_t,)) #print(f'H_v = {H_v}') #print(f'H_error_v = {H_error_v}') p_theta_v = vorpy.apply_along_axes(p_theta_fast, (1,2), (qp_t,)) p_theta_error_v = vorpy.apply_along_axes(lambda qp:np.abs(p_theta_fast(qp) - p_theta_initial), (1,2), (qp_t,)) #print(f'p_theta_v = {p_theta_v}') #print(f'p_theta_error_v = {p_theta_error_v}') import matplotlib.pyplot as plt def plot_stuff (): row_count = 1 col_count = 5 size = 5 fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(size*col_count,size*row_count)) axis = axis_vv[0][0] axis.set_title('position') axis.set_aspect('equal') axis.plot(qp_t[:,0,0], qp_t[:,0,1], '.') axis = axis_vv[0][1] axis.set_title('x and y') axis.plot(t_v, qp_t[:,0,0], '.') axis.plot(t_v, qp_t[:,0,1], '.') axis = axis_vv[0][2] axis.set_title('local trunc. err. est.') axis.semilogy(t_v, ltee_v, '.') axis = axis_vv[0][3] axis.set_title('H error') axis.semilogy(t_v, H_error_v, '.') axis = axis_vv[0][4] axis.set_title('p_theta error') axis.semilogy(t_v, p_theta_error_v, '.') fig.tight_layout() filename = 'runge-kutta-kepler.png' plt.savefig(filename, bbox_inches='tight') print('wrote to file "{0}"'.format(filename)) # VERY important to do this -- otherwise your memory will slowly fill up! # Not sure which one is actually sufficient -- apparently none of them are, YAY! plt.clf() plt.close(fig) plt.close('all') del fig del axis_vv plot_stuff() #do_stuff_0() do_stuff_1()
en
0.756888
Implements explicit Runge-Kutta integration methods, of ordinary (non-error-estimating) and error-estimating types.
References:
- Wikipedia RK article - https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
- List of RK methods - https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html
- A Tenth-Order Runge-Kutta Method with Error Estimate by <NAME> - http://sce.uhcl.edu/feagin/courses/rk10.pdf
- An Explicit Sixth-Order Runge-Kutta Formula By <NAME> - https://www.ams.org/journals/mcom/1968-22-102/S0025-5718-68-99876-1/S0025-5718-68-99876-1.pdf
- Appendix A; Runge-Kutta Methods - https://www.uni-muenster.de/imperia/md/content/physik_tp/lectures/ss2017/numerische_Methoden_fuer_komplexe_Systeme_II/rkm-1.pdf
Should return the order of this method.  If a method has order p, then its local truncation error will be on the order of O(dt^(p+1)).
Note that there is no simple relationship between order and stage count.  From https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods
    In general, if an explicit s-stage Runge–Kutta method has order p, then it can be proven that the number of stages must satisfy s >= p, and if p >= 5, then s >= p+1.  However, it is not known whether these bounds are sharp in all cases.
# Note: @abc.abstractmethod should be the innermost decorator;
# see https://docs.python.org/3/library/abc.html#abc.abstractmethod
Returns the `a` part of the Butcher tableau of this RK method.  See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods
Return value should have shape (s, s), where s is the stage count of the method.
Returns the `b` part of the Butcher tableau of this RK method.  See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods
Returns the `b*` part of the Butcher tableau of this RK method.  See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods
Note that a non-embedded Runge-Kutta method does not need to implement this.
Returns the `c` part of the Butcher tableau of this RK method.  See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods
Should return True if this is an explicit method (meaning there are certain constraints on the Butcher tableau).  Default is False (i.e. no constraint).
See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods
Should return True if this is an embedded method (meaning it uses a secondary, higher-order method to estimate the local truncation error).  Default is False (i.e. no secondary, higher-order method).
See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Adaptive_Runge%E2%80%93Kutta_methods
Will raise an exception if there is any inconsistency in the definition of a, b, c (i.e. the Butcher tableau) of this method.  If cls.is_explicit_method returns True, then it will require that a is strictly lower-triangular.  If all checks pass, no exception will be raised.
# The following 2 checks aren't rigorously backed up, but are just from an observation that
# fewer stages seem to imply lower order.  Thus b (which should produce the integrator of
# order equal to cls.order()) should have a zero at least in its last component.
NOTE: For the time being, it is assumed that the computed integration step of an embedded integrator will be the lower-order value, since the higher-order value is ostensibly used to estimate the local truncation error.
TODO: Write tests that verify that error is of the claimed order.
TODO: b - b_star is constant (per method), so pre-compute this.
TODO: Depending on what the semantics of b_star actually are (which one of b or b_star is used to produce the result), maybe rename this to b_embedded?
TODO: implement estimation of global error (presumably it's the sum of local truncation error)
# Create and keep some np.ndarray instances for intermediate and result computations in order to avoid
# memory allocation during integration.
# This is the time value input to the integrator's step function.
# This is the parameter value input to the integrator's step function.
# This is the time value output by the integrator's step function (the result is stored here).
# This is the parameter value output by the integrator's step function (the result is stored here).
# If this is an embedded method, create storage for the [square of the] local truncation
# error estimate.  We use the square in order to avoid taking a square root during integration.
Returns the local truncation error estimate of the last call to step.
Note that this function calls numpy.sqrt, since the square of the LTEE is what is computed and stored, in order to avoid a call to sqrt during the integration step.  To access the squared LTEE, just use the ltee_squared attribute directly.
Integrates the initial conditions (t,y) using timestep dt and the RK method defined by a, b, c (i.e. the Butcher tableau of the method).
Stores the updated t and y values in self.t_next and self.y_next; retrieve them via get_outputs.
Reference:
- https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#Explicit_Runge%E2%80%93Kutta_methods
# Because this is an explicit method, a[0,:] and c[0] are identically zero, so the first iteration
# reduces to a simpler form.  Flatten the result in order to make the index computations involving
# __k simple.
# Do the rest of the iterations using the general form.
# Flatten the result in order to be able to assign to __k (which is a flattened parameter_shape).
The original Runge-Kutta 4 method -- a 4th order method.  Does not do any local truncation error estimation.
Reference:
- https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html
# Define the Butcher tableau using class variables, so new np.ndarrays aren't created during the step function.
Runge-Kutta-Fehlberg 4(5) method.  This is a fourth-order RK method which uses a 5th order RK method to estimate the local truncation error.
Reference:
- https://ipfs.io/ipfs/QmXoypizjW3WknFiJnKLwHCnL72vedxjQkDDP1mXWo6uco/wiki/List_of_Runge%E2%80%93Kutta_methods.html
# Define the Butcher tableau using class variables, so new np.ndarrays aren't created during the step function.
# Vector field of rigid counterclockwise rotation
#integrator = RungeKutta_4(vector_field=V, parameter_shape=(2,))
# Convert the list of np.ndarray to a full np.ndarray.
# VERY important to do this -- otherwise your memory will slowly fill up!
# Not sure which one is actually sufficient -- apparently none of them are, YAY!
# Define the Kepler problem and use it to test the integrator
Total energy -- should be conserved.
Angular momentum -- should be conserved.
# Determine the Hamiltonian vector field of H.
#integrator = RungeKutta_4(vector_field=V, parameter_shape=(2,)) # Convert the list of np.ndarray to a full np.ndarray. #print(f'H_v = {H_v}') #print(f'H_error_v = {H_error_v}') #print(f'p_theta_v = {p_theta_v}') #print(f'p_theta_error_v = {p_theta_error_v}') # VERY important to do this -- otherwise your memory will slowly fill up! # Not sure which one is actually sufficient -- apparently none of them are, YAY! #do_stuff_0()
2.73228
3
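The record above stores an embedded RKF4(5) integrator whose ltee_squared field is computed precisely so that callers can do adaptive step-size control, but both demo functions use a fixed dt. A minimal sketch of a standard accept/reject step-size loop built only on the class above; the function name integrate_adaptive, the tolerance, the safety factor 0.9, and the growth/shrink bounds are illustrative assumptions, not from the source.

# Hedged sketch: accept/reject step-size control driven by the local
# truncation error estimate of RungeKuttaFehlberg_4_5 (defined above).
# tol, the 0.9 safety factor, and the [0.1, 2.0] bounds are illustrative.
import numpy as np

def integrate_adaptive(integrator, t0, y0, t_max, dt=1e-2, tol=1e-8):
    t, y = t0, np.copy(y0)
    t_v, y_v = [t], [np.copy(y)]
    while t < t_max:
        dt_step = min(dt, t_max - t)            # don't overshoot t_max
        integrator.set_inputs(t, y)
        integrator.step(dt_step)
        ltee = integrator.get_local_truncation_error_estimate()
        if ltee <= tol:                          # accept the step
            t, y_out = integrator.get_outputs()
            y = np.copy(y_out)                   # y_next is an internal buffer
            t_v.append(t)
            y_v.append(np.copy(y))
        # For an order-4 method the local error scales like dt^5, hence 1/5.
        dt = dt_step * min(2.0, max(0.1, 0.9 * (tol / max(ltee, 1e-300))**0.2))
    return np.array(t_v), np.array(y_v)

# Usage mirroring do_stuff_0: rigid counterclockwise rotation.
t_v, y_v = integrate_adaptive(
    RungeKuttaFehlberg_4_5(vector_field=lambda t, y: np.array([-y[1], y[0]]),
                           parameter_shape=(2,)),
    0.0, np.array([1.0, 0.0]), 6.3)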
utils.py
Andre6o6/biometrics_project
0
6621738
<filename>utils.py import numpy as np import os, os.path from skimage import io def classify(clf, img, classes): ''' Find image among classes that is closest to img. ''' distances = np.array(list(map(lambda img2: clf.Distance(img, img2), classes))) distances = distances / np.sum(distances) return distances.argmin() def classify_many(clf, imgs, classes, vote='soft'): all_dist = [] for img in imgs: distances = np.array(list(map(lambda img2: clf.Distance(img, img2), classes))) distances = distances / np.sum(distances) all_dist.append(distances) if vote=='soft': return np.mean(all_dist, axis=0).argmin() else: votes = np.argmin(all_dist, axis=1) return np.bincount(votes).argmax() def rank(clf, img, label, classes): distances = np.array(list(map(lambda img2: clf.Distance(img, img2), classes))) distances = distances / np.sum(distances) dist = distances[label] r = sorted(distances).index(dist) return r def load_from_folders(path, from_idx=0, to_idx=11): ''' Load and split into train (images to be recognised) and test (class samples). ''' train = [] labels = [] test = [] i = 0 for dir in os.listdir(path)[from_idx:to_idx]: dir_path = os.path.join(path, dir) if os.path.isdir(dir_path): subj_images = [] for file in os.listdir(dir_path): file_path = os.path.join(dir_path, file) img = io.imread(file_path) subj_images.append(img) #random.shuffle(subj_images) test.append(subj_images[0]) # Save 1 image as an example of class train.extend(subj_images[1:]) # Add other images to train set ... labels.extend([i for x in subj_images[1:]]) # ... and save their class number i +=1 train = np.array(train) test = np.array(test) return train, labels, test
<filename>utils.py import numpy as np import os, os.path from skimage import io def classify(clf, img, classes): ''' Find image among classes that is closest to img. ''' distances = np.array(list(map(lambda img2: clf.Distance(img, img2), classes))) distances = distances / np.sum(distances) return distances.argmin() def classify_many(clf, imgs, classes, vote='soft'): all_dist = [] for img in imgs: distances = np.array(list(map(lambda img2: clf.Distance(img, img2), classes))) distances = distances / np.sum(distances) all_dist.append(distances) if vote=='soft': return np.mean(all_dist, axis=0).argmin() else: votes = np.argmin(all_dist, axis=1) return np.bincount(votes).argmax() def rank(clf, img, label, classes): distances = np.array(list(map(lambda img2: clf.Distance(img, img2), classes))) distances = distances / np.sum(distances) dist = distances[label] r = sorted(distances).index(dist) return r def load_from_folders(path, from_idx=0, to_idx=11): ''' Load and split into train (images to be recognised) and test (class samples). ''' train = [] labels = [] test = [] i = 0 for dir in os.listdir(path)[from_idx:to_idx]: dir_path = os.path.join(path, dir) if os.path.isdir(dir_path): subj_images = [] for file in os.listdir(dir_path): file_path = os.path.join(dir_path, file) img = io.imread(file_path) subj_images.append(img) #random.shuffle(subj_images) test.append(subj_images[0]) # Save 1 image as an example of class train.extend(subj_images[1:]) # Add other images to train set ... labels.extend([i for x in subj_images[1:]]) # ... and save their class number i +=1 train = np.array(train) test = np.array(test) return train, labels, test
en
0.912992
Find image among classes that is closest to img. Load and split into train (images to be recognised) and test (class samples). #random.shuffle(subj_images) # Save 1 image as an example of class # Add other images to train set ... # ... and save their class number
2.798695
3
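classify and rank above assume a clf object exposing Distance(img1, img2); the record never shows one. A toy usage sketch with a Euclidean-distance stand-in; the EuclideanClf class is hypothetical, chosen only to make the call pattern concrete.

# Hedged usage sketch for classify/rank above. EuclideanClf is a hypothetical
# stand-in for any object exposing Distance(img1, img2).
import numpy as np

class EuclideanClf:
    def Distance(self, img1, img2):
        return np.linalg.norm(img1.astype(float) - img2.astype(float))

clf = EuclideanClf()
classes = [np.zeros((4, 4)), np.ones((4, 4))]   # one exemplar image per class
probe = np.full((4, 4), 0.9)                    # closer to class 1's exemplar
print(classify(clf, probe, classes))            # -> 1
print(rank(clf, probe, 1, classes))             # -> 0 (correct class ranks first)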
openstack/image/v1/image.py
sivel/python-openstacksdk
1
6621739
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image import image_service from openstack import resource class Image(resource.Resource): resource_key = 'image' resources_key = 'images' base_path = '/images' service = image_service.ImageService() # capabilities allow_create = True allow_retrieve = True allow_update = True allow_delete = True allow_list = True # Properties checksum = resource.prop('checksum') container_format = resource.prop('container_format') copy_from = resource.prop('copy_from') disk_format = resource.prop('disk_format') is_public = resource.prop('is_public') location = resource.prop('location') min_disk = resource.prop('min_disk') min_ram = resource.prop('min_ram') name = resource.prop('name') owner = resource.prop('owner') properties = resource.prop('properties') protected = resource.prop('protected') size = resource.prop('size') status = resource.prop('status') created_at = resource.prop('created_at') updated_at = resource.prop('updated_at')
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.image import image_service from openstack import resource class Image(resource.Resource): resource_key = 'image' resources_key = 'images' base_path = '/images' service = image_service.ImageService() # capabilities allow_create = True allow_retrieve = True allow_update = True allow_delete = True allow_list = True # Properties checksum = resource.prop('checksum') container_format = resource.prop('container_format') copy_from = resource.prop('copy_from') disk_format = resource.prop('disk_format') is_public = resource.prop('is_public') location = resource.prop('location') min_disk = resource.prop('min_disk') min_ram = resource.prop('min_ram') name = resource.prop('name') owner = resource.prop('owner') properties = resource.prop('properties') protected = resource.prop('protected') size = resource.prop('size') status = resource.prop('status') created_at = resource.prop('created_at') updated_at = resource.prop('updated_at')
en
0.853206
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # capabilities # Properties
1.8402
2
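The Image class above is purely declarative: each resource.prop('...') presumably returns a descriptor that maps a Python attribute onto a wire-protocol key in the resource's backing attributes. An illustrative sketch of that descriptor technique; this is NOT the openstacksdk implementation and WireProp/DemoResource are invented names.

# Illustrative descriptor pattern behind resource.prop (an assumption about
# the technique, not the actual openstacksdk code).
class WireProp:
    def __init__(self, name):
        self.name = name                      # wire-protocol key, e.g. 'disk_format'

    def __get__(self, obj, owner):
        if obj is None:
            return self
        return obj._attrs.get(self.name)

    def __set__(self, obj, value):
        obj._attrs[self.name] = value


class DemoResource:
    disk_format = WireProp('disk_format')

    def __init__(self, attrs=None):
        self._attrs = dict(attrs or {})


demo = DemoResource({'disk_format': 'qcow2'})  # hypothetical API response body
print(demo.disk_format)                        # -> qcow2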
api/serializers.py
Waanneek/mediasoft_test
0
6621740
<filename>api/serializers.py<gh_stars>0
from datetime import time

from rest_framework import serializers

from api.models import *


class CitySerializer(serializers.ModelSerializer):
    id = serializers.IntegerField(read_only=True)

    class Meta:
        model = CityModel
        fields = ['id', 'name']


class StreetSerializer(serializers.ModelSerializer):
    id = serializers.IntegerField(read_only=True)

    class Meta:
        model = StreetModel
        fields = ['id', 'name', 'city']


class ShopSerializer(serializers.ModelSerializer):
    id = serializers.IntegerField(read_only=True)
    open = serializers.IntegerField(source='isOpen', read_only=True)
    open_time = serializers.TimeField(default=time(8), initial=time(8))
    close_time = serializers.TimeField(default=time(20), initial=time(20))
    city = serializers.SlugRelatedField(slug_field='name', queryset=CityModel.objects.all())
    street = serializers.SlugRelatedField(slug_field='name', queryset=StreetModel.objects.all())

    class Meta:
        model = ShopModel
        fields = ['id', 'name', 'city', 'street', 'building', 'open', 'open_time', 'close_time']
<filename>api/serializers.py<gh_stars>0
from datetime import time

from rest_framework import serializers

from api.models import *


class CitySerializer(serializers.ModelSerializer):
    id = serializers.IntegerField(read_only=True)

    class Meta:
        model = CityModel
        fields = ['id', 'name']


class StreetSerializer(serializers.ModelSerializer):
    id = serializers.IntegerField(read_only=True)

    class Meta:
        model = StreetModel
        fields = ['id', 'name', 'city']


class ShopSerializer(serializers.ModelSerializer):
    id = serializers.IntegerField(read_only=True)
    open = serializers.IntegerField(source='isOpen', read_only=True)
    open_time = serializers.TimeField(default=time(8), initial=time(8))
    close_time = serializers.TimeField(default=time(20), initial=time(20))
    city = serializers.SlugRelatedField(slug_field='name', queryset=CityModel.objects.all())
    street = serializers.SlugRelatedField(slug_field='name', queryset=StreetModel.objects.all())

    class Meta:
        model = ShopModel
        fields = ['id', 'name', 'city', 'street', 'building', 'open', 'open_time', 'close_time']
none
1
2.389356
2
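A minimal sketch of the standard Django REST Framework round trip with ShopSerializer above. The payload values are invented, and it assumes CityModel/StreetModel rows with the given names already exist, since both SlugRelatedField lookups resolve by name.

# Hedged usage sketch (standard DRF flow); payload values are invented and
# the named CityModel/StreetModel rows must already exist in the database.
payload = {
    'name': 'Corner Shop',
    'city': 'Moscow',        # resolved via slug_field='name'
    'street': 'Lenina',      # resolved via slug_field='name'
    'building': '12A',
}
serializer = ShopSerializer(data=payload)
if serializer.is_valid():
    shop = serializer.save()          # creates a ShopModel row
else:
    print(serializer.errors)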
triple_filtering/dictionaries.py
phongnt570/large-scale-csk-extraction
3
6621741
<reponame>phongnt570/large-scale-csk-extraction<filename>triple_filtering/dictionaries.py IGNORED_PO_PAIRS = { ("be", "seen"), ("be", "used"), ("be", "the fact"), ("be taken", "care"), ("make", "sense"), ("make", "sure"), ("make", "use"), ("pay", "attention"), ("be", " capable"), ("be", " able"), ("come in", " size"), ("play", " an important role"), ("play", " important role"), ("make", " difference"), ("do", " thing"), ("do", " the same"), ("take", " part"), ("give", " way"), ("give", " a way"), } IGNORED_OBJECTS = { "in", "on", "at", "of", "from", "by", "mine", "yours", "ours", "its", "theirs", "etc", "for etc", "of course", "other", "the one", "same", "the same", "the fact", "most cases", "likely", "ready", "present", "alive", "dead", "needed", "presented", "represented", "due", "on top", "the top", "top", "this", "that", "these", "those", "what", "which", "where", "when", "who", "whom", "how", "why", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "the first", "the second", "the third", "the fourth", "the fifth", "the sixth", "the seventh", "the eighth", "the ninth", "the tenth", "the best", "best", "better", "the worst", "worst", "worse", "all", "more", "less", "much", "many", "n/a", "be", "do", "have", "live", "die", "go", "make", "come", "help", "eat", "find", "see", "say", "know", "get", "let", "become", "allow", "ask", "the ability", "the right", "gone", "done", "born", "taken", "found", "known", "located", "seen", "used", "listed", "intended", "me", "myself", "us", "ourselves", "you", "yourself", "yourselves", "them", "themselves", "her", "herself", "him", "himself", "it", "itself", "make sure", "make history", "this case", "this situation", "the time", "this time", "today", "this year", "last year", "this month", "this week", "any way", "several", "damn", "other words", "the here", "the now", "the present", "the moment", "now", "here", "there", "the", "take place", "come to end", "such", "non", "on the left", "on the right", "play role", } IGNORED_PREDICATES = { "will", "would", "shall", "should", "may", "might", "must", "be like", "be defined in", "be defined by", "include", # NOTE: very noisy statements "star", "be directed by", }
IGNORED_PO_PAIRS = { ("be", "seen"), ("be", "used"), ("be", "the fact"), ("be taken", "care"), ("make", "sense"), ("make", "sure"), ("make", "use"), ("pay", "attention"), ("be", " capable"), ("be", " able"), ("come in", " size"), ("play", " an important role"), ("play", " important role"), ("make", " difference"), ("do", " thing"), ("do", " the same"), ("take", " part"), ("give", " way"), ("give", " a way"), } IGNORED_OBJECTS = { "in", "on", "at", "of", "from", "by", "mine", "yours", "ours", "its", "theirs", "etc", "for etc", "of course", "other", "the one", "same", "the same", "the fact", "most cases", "likely", "ready", "present", "alive", "dead", "needed", "presented", "represented", "due", "on top", "the top", "top", "this", "that", "these", "those", "what", "which", "where", "when", "who", "whom", "how", "why", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "the first", "the second", "the third", "the fourth", "the fifth", "the sixth", "the seventh", "the eighth", "the ninth", "the tenth", "the best", "best", "better", "the worst", "worst", "worse", "all", "more", "less", "much", "many", "n/a", "be", "do", "have", "live", "die", "go", "make", "come", "help", "eat", "find", "see", "say", "know", "get", "let", "become", "allow", "ask", "the ability", "the right", "gone", "done", "born", "taken", "found", "known", "located", "seen", "used", "listed", "intended", "me", "myself", "us", "ourselves", "you", "yourself", "yourselves", "them", "themselves", "her", "herself", "him", "himself", "it", "itself", "make sure", "make history", "this case", "this situation", "the time", "this time", "today", "this year", "last year", "this month", "this week", "any way", "several", "damn", "other words", "the here", "the now", "the present", "the moment", "now", "here", "there", "the", "take place", "come to end", "such", "non", "on the left", "on the right", "play role", } IGNORED_PREDICATES = { "will", "would", "shall", "should", "may", "might", "must", "be like", "be defined in", "be defined by", "include", # NOTE: very noisy statements "star", "be directed by", }
en
0.817959
# NOTE: very noisy statements
1.94071
2
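These sets are evidently consumed by a triple filter elsewhere in the repository. A sketch of how such a filter might apply them to (subject, predicate, object) triples; the function name keep_triple and the exact matching policy (including the check for the leading-space variants stored in IGNORED_PO_PAIRS) are assumptions, not taken from this repository.

# Hedged sketch of applying the dictionaries above; name and matching policy
# are assumptions.
def keep_triple(triple):
    subj, pred, obj = triple
    if pred in IGNORED_PREDICATES:
        return False
    if obj.strip() in IGNORED_OBJECTS:
        return False
    if (pred, obj) in IGNORED_PO_PAIRS or (pred, ' ' + obj.strip()) in IGNORED_PO_PAIRS:
        return False
    return True

triples = [('elephant', 'be', 'the fact'), ('elephant', 'have', 'trunk')]
print([t for t in triples if keep_triple(t)])   # -> [('elephant', 'have', 'trunk')]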
dtree/displaytree.py
mgbarsky/botany_citizen
0
6621742
<filename>dtree/displaytree.py<gh_stars>0
from PIL import Image, ImageDraw


def getwidth(tree):
    """Number of leaves under this node (used to size the image)."""
    if tree is None:
        return 0
    if tree.children is None:
        return 1
    width = 0
    for value in tree.children.values():
        width += getwidth(value)
    return width


def getdepth(tree):
    """Number of edges on the longest root-to-leaf path."""
    if tree is None:
        return 0
    if tree.children is None:
        return 0
    maxdepth = 0
    for value in tree.children.values():
        candidate = 1 + getdepth(value)
        if candidate > maxdepth:
            maxdepth = candidate
    return maxdepth


def drawnode(draw, tree, x, y, feature_dict):
    if tree.children is not None:
        # Get the left and right "endpoints" of this node
        numnodes = 0
        for value in tree.children.values():
            numnodes += getwidth(value)
        left = x - numnodes*75
        right = x + numnodes*75

        dividelen = (right - left)/(len(tree.children) + 1)

        # Draw the condition string
        draw.text((x-100, y-10), "{}?".format(feature_dict[tree.col]), (0, 0, 0))

        # Draw the result if there is one
        if tree.results is not None:
            txt = ' \n'.join(['%s:%d' % v for v in tree.results.items()])
            draw.text((x - 20, y), txt, (0, 0, 0))

        temp = 1
        for value in tree.children.values():
            # Draw a link to the branch
            draw.line((x, y, left + temp*dividelen, y + 100), fill=(255, 0, 0))

            # Draw the branch node
            drawnode(draw, value, left + temp*dividelen, y + 100, feature_dict)
            temp += 1
    else:
        txt = ' \n'.join(['%s:%d' % v for v in tree.results.items()])
        draw.text((x-20, y), txt, (0, 0, 0))


def drawtree(tree, feature_dict, jpeg='tree.jpg'):
    """Render the decision tree to a JPEG file sized to fit the tree."""
    w = getwidth(tree)*200
    h = getdepth(tree)*100 + 120

    img = Image.new('RGB', (w, h), (255, 255, 255))
    draw = ImageDraw.Draw(img)

    drawnode(draw, tree, w/2, 20, feature_dict)
    img.save(jpeg, 'JPEG')
<filename>dtree/displaytree.py<gh_stars>0
from PIL import Image, ImageDraw


def getwidth(tree):
    """Number of leaves under this node (used to size the image)."""
    if tree is None:
        return 0
    if tree.children is None:
        return 1
    width = 0
    for value in tree.children.values():
        width += getwidth(value)
    return width


def getdepth(tree):
    """Number of edges on the longest root-to-leaf path."""
    if tree is None:
        return 0
    if tree.children is None:
        return 0
    maxdepth = 0
    for value in tree.children.values():
        candidate = 1 + getdepth(value)
        if candidate > maxdepth:
            maxdepth = candidate
    return maxdepth


def drawnode(draw, tree, x, y, feature_dict):
    if tree.children is not None:
        # Get the left and right "endpoints" of this node
        numnodes = 0
        for value in tree.children.values():
            numnodes += getwidth(value)
        left = x - numnodes*75
        right = x + numnodes*75

        dividelen = (right - left)/(len(tree.children) + 1)

        # Draw the condition string
        draw.text((x-100, y-10), "{}?".format(feature_dict[tree.col]), (0, 0, 0))

        # Draw the result if there is one
        if tree.results is not None:
            txt = ' \n'.join(['%s:%d' % v for v in tree.results.items()])
            draw.text((x - 20, y), txt, (0, 0, 0))

        temp = 1
        for value in tree.children.values():
            # Draw a link to the branch
            draw.line((x, y, left + temp*dividelen, y + 100), fill=(255, 0, 0))

            # Draw the branch node
            drawnode(draw, value, left + temp*dividelen, y + 100, feature_dict)
            temp += 1
    else:
        txt = ' \n'.join(['%s:%d' % v for v in tree.results.items()])
        draw.text((x-20, y), txt, (0, 0, 0))


def drawtree(tree, feature_dict, jpeg='tree.jpg'):
    """Render the decision tree to a JPEG file sized to fit the tree."""
    w = getwidth(tree)*200
    h = getdepth(tree)*100 + 120

    img = Image.new('RGB', (w, h), (255, 255, 255))
    draw = ImageDraw.Draw(img)

    drawnode(draw, tree, w/2, 20, feature_dict)
    img.save(jpeg, 'JPEG')
en
0.796303
# Get the left and right "endpoints" of this node # Draw the condition string # Draw the result if there is one # Draw a link to the branch # Draw the branch node
2.889091
3
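drawnode only relies on three node attributes (col, children, results), so the module can be exercised end to end with a small stand-in node type. In this sketch, TreeNode is hypothetical (the repository's real decision-tree node type lives elsewhere) and the mushroom-flavored data is invented.

# Hedged usage sketch; TreeNode is a hypothetical stand-in inferred from the
# attributes drawnode reads: col, children (dict), results (dict at leaves).
class TreeNode:
    def __init__(self, col=-1, children=None, results=None):
        self.col = col
        self.children = children    # dict: branch value -> child node
        self.results = results      # dict: class label -> integer count

leaf_yes = TreeNode(results={'edible': 3})
leaf_no = TreeNode(results={'poisonous': 2})
root = TreeNode(col=0, children={'smooth': leaf_yes, 'spotted': leaf_no})
drawtree(root, feature_dict={0: 'cap texture'}, jpeg='demo_tree.jpg')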
client/python/pycrayon/version.py
pitoupitou/crayon
858
6621743
r""" Little utility to reveal the package version. Place in the root dir of the package. """ from pkg_resources import get_distribution __version__ = get_distribution(__name__.split('.')[0]).version
r""" Little utility to reveal the package version. Place in the root dir of the package. """ from pkg_resources import get_distribution __version__ = get_distribution(__name__.split('.')[0]).version
en
0.835573
Little utility to reveal the package version. Place in the root dir of the package.
1.500794
2
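The one-liner above resolves the version from installed package metadata: __name__.split('.')[0] maps a submodule name like 'pycrayon.version' to the top-level distribution name. A small sketch of that resolution; it assumes the package is installed so pkg_resources can find its metadata, and the printed version string is just an example.

# Hedged sketch of what the line above resolves to at import time.
from pkg_resources import get_distribution

module_name = 'pycrayon.version'
package_name = module_name.split('.')[0]       # -> 'pycrayon'
print(get_distribution(package_name).version)  # e.g. '0.5' (example value)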
mapper/property_mappers.py
healyou/godville_monitor
0
6621744
from abc import ABCMeta, abstractmethod
from typing import Dict, List


def convertDictKeysToList(dict: Dict) -> List:
    items: List = list()
    for key in dict.keys():
        items.append(key)
    return items


def getValueOrDefaultNone(dict: Dict, key: str) -> object:
    return dict.get(key, None)


# Abstract mapper for a property taken from a dictionary
class AbstractDictPropertyMapper(metaclass=ABCMeta):
    def __init__(self):
        pass

    @abstractmethod
    def mapObject(self, dictValue: Dict, key: str) -> object:
        pass


# Mapper for a plain value taken from a dictionary
class DefaultDictPropertyMapper(AbstractDictPropertyMapper):
    def __init__(self):
        super().__init__()

    def mapObject(self, dictValue: Dict, key: str) -> object:
        return getValueOrDefaultNone(dictValue, key)


# Mapper that returns None for the property
class NoneObjectDictPropertyMapper(AbstractDictPropertyMapper):
    def __init__(self):
        super().__init__()

    def mapObject(self, dictValue: Dict, key: str) -> object:
        return None


# Mapper for a list of objects
class ListDictPropertyMapper(AbstractDictPropertyMapper):
    def __init__(self):
        super().__init__()

    def mapObject(self, dictValue: Dict, key: str) -> object:
        return convertDictKeysToList(dictValue.get(key, dict()))
from abc import ABCMeta, abstractmethod
from typing import Dict, List


def convertDictKeysToList(dict: Dict) -> List:
    items: List = list()
    for key in dict.keys():
        items.append(key)
    return items


def getValueOrDefaultNone(dict: Dict, key: str) -> object:
    return dict.get(key, None)


# Abstract mapper for a property taken from a dictionary
class AbstractDictPropertyMapper(metaclass=ABCMeta):
    def __init__(self):
        pass

    @abstractmethod
    def mapObject(self, dictValue: Dict, key: str) -> object:
        pass


# Mapper for a plain value taken from a dictionary
class DefaultDictPropertyMapper(AbstractDictPropertyMapper):
    def __init__(self):
        super().__init__()

    def mapObject(self, dictValue: Dict, key: str) -> object:
        return getValueOrDefaultNone(dictValue, key)


# Mapper that returns None for the property
class NoneObjectDictPropertyMapper(AbstractDictPropertyMapper):
    def __init__(self):
        super().__init__()

    def mapObject(self, dictValue: Dict, key: str) -> object:
        return None


# Mapper for a list of objects
class ListDictPropertyMapper(AbstractDictPropertyMapper):
    def __init__(self):
        super().__init__()

    def mapObject(self, dictValue: Dict, key: str) -> object:
        return convertDictKeysToList(dictValue.get(key, dict()))
ru
0.996648
# Abstract mapper for a property taken from a dictionary
# Mapper for a plain value taken from a dictionary
# Mapper that returns None for the property
# Mapper for a list of objects
3.398466
3
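A short usage sketch for the three concrete mappers above, using only code from this record; the source dict and its keys are invented example data.

# Usage sketch for the mappers above; 'hp' and 'inventory' are example keys.
source = {'hp': 42, 'inventory': {'sword': 1, 'shield': 1}}

print(DefaultDictPropertyMapper().mapObject(source, 'hp'))         # -> 42
print(NoneObjectDictPropertyMapper().mapObject(source, 'hp'))      # -> None
print(ListDictPropertyMapper().mapObject(source, 'inventory'))     # -> ['sword', 'shield']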
aiida_sssp_workflow/calculations/wien2k_ref.py
aiidateam/aiida-sssp-workflow
0
6621745
# -*- coding: utf-8 -*-
"""
The strings below were copied from WIEN2k.txt of the calcDelta package version 3.1,
together with WIEN2k EOS results for the rare-earth nitrides (Lanthanides)
"""
WIEN2K_REN_REF = """
# Rare-Earth Nitrides (REN) WIEN2k EOS data (courtesy of M. Topsakal)
LaN 18.77799 122.037 4.461
CeN 16.19013 145.439 4.513
PrN 16.33072 120.290 4.025
NdN 16.57081 123.322 3.611
PmN 15.99888 136.555 0.567
SmN 15.96972 122.275 5.106
EuN 16.10033 117.219 4.435
GdN 15.53591 143.290 0.222
TbN 14.77379 140.135 4.069
DyN 14.60473 142.980 4.125
HoN 14.53329 151.149 4.315
ErN 14.25530 147.867 4.460
TmN 14.01555 143.253 4.493
YbN 13.86410 140.958 5.635
LuN 13.54987 170.558 3.892
"""

WIEN2K_REF = """
# WIEN2k version 13.1 calcDelta package version 3.1 -- calculations by <NAME>
H 17.3883 10.284 2.71
He 17.7708 0.847 7.71
Li 20.2191 13.839 3.34
Be 7.9099 122.903 3.04
B 7.2405 237.290 3.47
C 11.6366 208.991 3.58
N 28.8848 54.2195 3.7244
O 18.5590 51.378 3.89
F 19.1666 34.325 3.93
Ne 24.2492 1.406 14.44
Na 37.4686 7.472 3.77
Mg 22.9355 35.933 4.07
Al 16.4796 78.077 4.57
Si 20.4530 88.545 4.31
P 21.4709 68.208 4.35
S 17.1840 83.407 4.26
Cl 38.8889 19.081 4.34
Ar 52.3852 0.743 7.26
K 73.6793 3.574 4.59
Ca 42.1991 17.114 3.31
Sc 24.6196 54.393 3.42
Ti 17.3900 112.213 3.58
V 13.4520 181.674 3.75
Cr 11.7730 183.899 7.16
Mn 11.4473 118.632 -0.21
Fe 11.3436 197.652 5.80
Co 10.8635 216.39 4.36
Ni 10.8910 199.876 5.0059
Cu 11.9571 141.06 4.85
Zn 15.1947 74.54 5.27
Ga 20.3069 49.223 5.38
Ge 23.9148 59.128 4.99
As 22.5890 68.285 4.22
Se 29.7437 47.070 4.44
Br 39.4470 22.415 4.87
Kr 65.6576 0.671 9.86
Rb 90.8087 2.787 5.80
Sr 54.5272 11.256 3.49
Y 32.8442 41.593 3.02
Zr 23.3850 93.684 3.21
Nb 18.1368 171.270 3.55
Mo 15.7862 258.928 4.33
Tc 14.4366 299.149 4.46
Ru 13.7619 312.502 4.95
Rh 14.0396 257.824 5.32
Pd 15.3101 168.629 5.56
Ag 17.8471 90.148 5.42
Cd 22.8354 44.06 6.97
In 27.4710 34.937 4.78
Sn 36.8166 36.030 4.64
Sb 31.7296 50.367 4.52
Te 34.9765 44.787 4.69
I 50.2333 18.654 5.05
Xe 86.6814 0.548 6.34
Cs 117.080 1.982 2.14
Ba 63.1401 8.677 3.77
Lu 29.0544 46.384 2.94
Hf 22.5325 107.004 3.50
Ta 18.2856 195.147 3.71
W 16.1394 301.622 4.28
Re 14.9580 362.850 4.52
Os 14.2802 397.259 4.84
Ir 14.5004 347.680 5.18
Pt 15.6420 248.711 5.46
Au 17.9745 139.109 5.76
Hg 29.6124 8.05 8.90
Tl 31.3902 26.865 5.49
Pb 32.0028 39.544 4.53
Bi 36.9047 42.630 4.70
Po 37.5869 45.458 4.93
Rn 92.6852 0.564 8.62"""
# -*- coding: utf-8 -*-
"""
The strings below were copied from WIEN2k.txt of the calcDelta package version 3.1,
together with WIEN2k EOS results for the rare-earth nitrides (Lanthanides)
"""
WIEN2K_REN_REF = """
# Rare-Earth Nitrides (REN) WIEN2k EOS data (courtesy of M. Topsakal)
LaN 18.77799 122.037 4.461
CeN 16.19013 145.439 4.513
PrN 16.33072 120.290 4.025
NdN 16.57081 123.322 3.611
PmN 15.99888 136.555 0.567
SmN 15.96972 122.275 5.106
EuN 16.10033 117.219 4.435
GdN 15.53591 143.290 0.222
TbN 14.77379 140.135 4.069
DyN 14.60473 142.980 4.125
HoN 14.53329 151.149 4.315
ErN 14.25530 147.867 4.460
TmN 14.01555 143.253 4.493
YbN 13.86410 140.958 5.635
LuN 13.54987 170.558 3.892
"""

WIEN2K_REF = """
# WIEN2k version 13.1 calcDelta package version 3.1 -- calculations by <NAME>
H 17.3883 10.284 2.71
He 17.7708 0.847 7.71
Li 20.2191 13.839 3.34
Be 7.9099 122.903 3.04
B 7.2405 237.290 3.47
C 11.6366 208.991 3.58
N 28.8848 54.2195 3.7244
O 18.5590 51.378 3.89
F 19.1666 34.325 3.93
Ne 24.2492 1.406 14.44
Na 37.4686 7.472 3.77
Mg 22.9355 35.933 4.07
Al 16.4796 78.077 4.57
Si 20.4530 88.545 4.31
P 21.4709 68.208 4.35
S 17.1840 83.407 4.26
Cl 38.8889 19.081 4.34
Ar 52.3852 0.743 7.26
K 73.6793 3.574 4.59
Ca 42.1991 17.114 3.31
Sc 24.6196 54.393 3.42
Ti 17.3900 112.213 3.58
V 13.4520 181.674 3.75
Cr 11.7730 183.899 7.16
Mn 11.4473 118.632 -0.21
Fe 11.3436 197.652 5.80
Co 10.8635 216.39 4.36
Ni 10.8910 199.876 5.0059
Cu 11.9571 141.06 4.85
Zn 15.1947 74.54 5.27
Ga 20.3069 49.223 5.38
Ge 23.9148 59.128 4.99
As 22.5890 68.285 4.22
Se 29.7437 47.070 4.44
Br 39.4470 22.415 4.87
Kr 65.6576 0.671 9.86
Rb 90.8087 2.787 5.80
Sr 54.5272 11.256 3.49
Y 32.8442 41.593 3.02
Zr 23.3850 93.684 3.21
Nb 18.1368 171.270 3.55
Mo 15.7862 258.928 4.33
Tc 14.4366 299.149 4.46
Ru 13.7619 312.502 4.95
Rh 14.0396 257.824 5.32
Pd 15.3101 168.629 5.56
Ag 17.8471 90.148 5.42
Cd 22.8354 44.06 6.97
In 27.4710 34.937 4.78
Sn 36.8166 36.030 4.64
Sb 31.7296 50.367 4.52
Te 34.9765 44.787 4.69
I 50.2333 18.654 5.05
Xe 86.6814 0.548 6.34
Cs 117.080 1.982 2.14
Ba 63.1401 8.677 3.77
Lu 29.0544 46.384 2.94
Hf 22.5325 107.004 3.50
Ta 18.2856 195.147 3.71
W 16.1394 301.622 4.28
Re 14.9580 362.850 4.52
Os 14.2802 397.259 4.84
Ir 14.5004 347.680 5.18
Pt 15.6420 248.711 5.46
Au 17.9745 139.109 5.76
Hg 29.6124 8.05 8.90
Tl 31.3902 26.865 5.49
Pb 32.0028 39.544 4.53
Bi 36.9047 42.630 4.70
Po 37.5869 45.458 4.93
Rn 92.6852 0.564 8.62"""
en
0.303248
# -*- coding: utf-8 -*- Reference strings copied from WIEN2k.txt of the calcDelta package, version 3.1, together with WIEN2K EOS results for the rare-earth nitrides (lanthanides). # Rare-earth nitrides (REN) WIEN2K EOS data (courtesy of M. Topsakal) LaN 18.77799 122.037 4.461 CeN 16.19013 145.439 4.513 PrN 16.33072 120.290 4.025 NdN 16.57081 123.322 3.611 PmN 15.99888 136.555 0.567 SmN 15.96972 122.275 5.106 EuN 16.10033 117.219 4.435 GdN 15.53591 143.290 0.222 TbN 14.77379 140.135 4.069 DyN 14.60473 142.980 4.125 HoN 14.53329 151.149 4.315 ErN 14.25530 147.867 4.460 TmN 14.01555 143.253 4.493 YbN 13.86410 140.958 5.635 LuN 13.54987 170.558 3.892 # WIEN2k version 13.1 calcDelta package version 3.1 -- calculations by <NAME> H 17.3883 10.284 2.71 He 17.7708 0.847 7.71 Li 20.2191 13.839 3.34 Be 7.9099 122.903 3.04 B 7.2405 237.290 3.47 C 11.6366 208.991 3.58 N 28.8848 54.2195 3.7244 O 18.5590 51.378 3.89 F 19.1666 34.325 3.93 Ne 24.2492 1.406 14.44 Na 37.4686 7.472 3.77 Mg 22.9355 35.933 4.07 Al 16.4796 78.077 4.57 Si 20.4530 88.545 4.31 P 21.4709 68.208 4.35 S 17.1840 83.407 4.26 Cl 38.8889 19.081 4.34 Ar 52.3852 0.743 7.26 K 73.6793 3.574 4.59 Ca 42.1991 17.114 3.31 Sc 24.6196 54.393 3.42 Ti 17.3900 112.213 3.58 V 13.4520 181.674 3.75 Cr 11.7730 183.899 7.16 Mn 11.4473 118.632 -0.21 Fe 11.3436 197.652 5.80 Co 10.8635 216.39 4.36 Ni 10.8910 199.876 5.0059 Cu 11.9571 141.06 4.85 Zn 15.1947 74.54 5.27 Ga 20.3069 49.223 5.38 Ge 23.9148 59.128 4.99 As 22.5890 68.285 4.22 Se 29.7437 47.070 4.44 Br 39.4470 22.415 4.87 Kr 65.6576 0.671 9.86 Rb 90.8087 2.787 5.80 Sr 54.5272 11.256 3.49 Y 32.8442 41.593 3.02 Zr 23.3850 93.684 3.21 Nb 18.1368 171.270 3.55 Mo 15.7862 258.928 4.33 Tc 14.4366 299.149 4.46 Ru 13.7619 312.502 4.95 Rh 14.0396 257.824 5.32 Pd 15.3101 168.629 5.56 Ag 17.8471 90.148 5.42 Cd 22.8354 44.06 6.97 In 27.4710 34.937 4.78 Sn 36.8166 36.030 4.64 Sb 31.7296 50.367 4.52 Te 34.9765 44.787 4.69 I 50.2333 18.654 5.05 Xe 86.6814 0.548 6.34 Cs 117.080 1.982 2.14 Ba 63.1401 8.677 3.77 Lu 29.0544 46.384 2.94 Hf 22.5325 107.004 3.50 Ta 18.2856 195.147 3.71 W 16.1394 301.622 4.28 Re 14.9580 362.850 4.52 Os 14.2802 397.259 4.84 Ir 14.5004 347.680 5.18 Pt 15.6420 248.711 5.46 Au 17.9745 139.109 5.76 Hg 29.6124 8.05 8.90 Tl 31.3902 26.865 5.49 Pb 32.0028 39.544 4.53 Bi 36.9047 42.630 4.70 Po 37.5869 45.458 4.93 Rn 92.6852 0.564 8.62
1.466005
1
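The three columns in both reference strings follow the calcDelta convention: equilibrium volume per atom (Å³), bulk modulus B0 (GPa), and its dimensionless pressure derivative B1. A minimal parsing sketch; the function name and the returned dict layout are illustrative choices, not part of the module above:

def parse_eos_table(raw):
    """Parse a WIEN2K reference string into {symbol: (V0, B0, B1)}."""
    table = {}
    for line in raw.splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue  # skip blank lines and comment headers
        symbol, v0, b0, b1 = line.split()
        table[symbol] = (float(v0), float(b0), float(b1))
    return table

# e.g. parse_eos_table(WIEN2K_REN_REF)['LaN'] == (18.77799, 122.037, 4.461)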
blockchain/block.py
bajcmartinez/blockchainpy
17
6621746
<filename>blockchain/block.py import hashlib from api.schema.block import BlockSchema from time import time class Block: def __init__(self, index, transactions, nonce, previous_hash): """ Constructs a new block :param index: :param transactions: :param nonce: :param previous_hash: """ self.index = index self.timestamp = time() self.transactions = transactions self.nonce = nonce self.previous_hash = previous_hash self.hash = self.hash_block() def serialize(self, ignore=None): """ Serializes a block into a string :param ignore: field names to exclude from the serialized output :return: """ if ignore is None: ignore = [] block_params = {x: self.__dict__[x] for x in self.__dict__ if x not in ignore} return BlockSchema().dumps(block_params) def hash_block(self): """ Calculates the hash of the block :return: """ sha = hashlib.sha256() sha.update(self.serialize(['hash']).encode('utf-8')) return sha.hexdigest()
<filename>blockchain/block.py import hashlib from api.schema.block import BlockSchema from time import time class Block: def __init__(self, index, transactions, nonce, previous_hash): """ Constructs a new block :param index: :param transactions: :param nonce: :param previous_hash: """ self.index = index self.timestamp = time() self.transactions = transactions self.nonce = nonce self.previous_hash = previous_hash self.hash = self.hash_block() def serialize(self, ignore=None): """ Serializes a block into a string :param ignore: field names to exclude from the serialized output :return: """ if ignore is None: ignore = [] block_params = {x: self.__dict__[x] for x in self.__dict__ if x not in ignore} return BlockSchema().dumps(block_params) def hash_block(self): """ Calculates the hash of the block :return: """ sha = hashlib.sha256() sha.update(self.serialize(['hash']).encode('utf-8')) return sha.hexdigest()
en
0.723556
Constructs a new block :param index: :param transactions: :param nonce: :param previous_hash: Serializes a block into a string :param ignore: field names to exclude from the serialized output :return: Calculates the hash of the block :return:
3.156212
3
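A short usage sketch of the Block class above, assuming the repository's BlockSchema can dump these fields; the empty transaction lists are placeholders. The point it illustrates: each hash covers every field except `hash` itself, so editing a stored block breaks the `previous_hash` link recorded by its successor.

from blockchain.block import Block

genesis = Block(index=0, transactions=[], nonce=0, previous_hash='0' * 64)
second = Block(index=1, transactions=[], nonce=0, previous_hash=genesis.hash)

assert second.previous_hash == genesis.hash  # the link a verifier re-checks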
AtCoder/Practice/Beginner/2-2-3/ABC007B.py
scnsh/CompetitiveProgramming
1
6621747
A = input() if A == 'a': print('-1') else: print('a')
A = input() if A == 'a': print('-1') else: print('a')
none
1
3.784126
4
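The one-line solution above leans on a small fact about lexicographic order: 'a' precedes every other nonempty lowercase string (any other string either starts with a later letter or extends 'a', and a proper prefix sorts first), so 'a' is a valid answer unless the input itself is 'a', in which case -1 signals that no smaller string exists. A quick check of that ordering:

assert 'a' < 'b' and 'a' < 'aa'  # 'a' sorts before any other lowercase string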
src/stepseries/step400.py
ponoor/python-step-series
0
6621748
<filename>src/stepseries/step400.py #!/usr/bin/env python # -*- coding: utf-8 -*- """4-axis stepper motor driver with an Ethernet interface.""" from .stepXXX import STEPXXX class STEP400(STEPXXX): """Send and receive data from a STEP400 motor driver. Note: It is recommended to create a default message handler for this driver. Here is an example: >>> from stepseries.step400 import STEP400 >>> >>> def default_handler(message) -> None: ... print(message) ... >>> driver = STEP400(0, '10.1.21.56') # Your IP and DIP ID here >>> driver.on(None, default_handler) Args: id (`int`): The id set by the DIP switches on the device. address (`str`): The IP address of the device. Defaults to `10.0.0.100`. port (`int`): The local port the device is listening on. Defaults to `50000`. server_address (`str`): The IP address of the server (this machine). Should always be `0.0.0.0`. Defaults to `0.0.0.0`. server_port (`int`): The port the server is listening on. Defaults to `50100`. add_id_to_args (`bool`): Whether to add `id` to `address` and `server_port` (the default behavior on the device). Defaults to `True`. """
<filename>src/stepseries/step400.py #!/usr/bin/env python # -*- coding: utf-8 -*- """4-axis stepper motor driver with an Ethernet interface.""" from .stepXXX import STEPXXX class STEP400(STEPXXX): """Send and receive data from a STEP400 motor driver. Note: It is recommended to create a default message handler for this driver. Here is an example: >>> from stepseries.step400 import STEP400 >>> >>> def default_handler(message) -> None: ... print(message) ... >>> driver = STEP400(0, '10.1.21.56') # Your IP and DIP ID here >>> driver.on(None, default_handler) Args: id (`int`): The id set by the DIP switches on the device. address (`str`): The IP address of the device. Defaults to `10.0.0.100`. port (`int`): The local port the device is listening on. Defaults to `50000`. server_address (`str`): The IP address of the server (this machine). Should always be `0.0.0.0`. Defaults to `0.0.0.0`. server_port (`int`): The port the server is listening on. Defaults to `50100`. add_id_to_args (`bool`): Whether to add `id` to `address` and `server_port` (the default behavior on the device). Defaults to `True`. """
en
0.676152
#!/usr/bin/env python # -*- coding: utf-8 -*- 4-axis stepper motor driver with an Ethernet interface. Send and receive data from a STEP400 motor driver. Note: It is recommended to create a default message handler for this driver. Here is an example: >>> from stepseries.step400 import STEP400 >>> >>> def default_handler(message) -> None: ... print(message) ... >>> driver = STEP400(0, '10.1.21.56') # Your IP and DIP ID here >>> driver.on(None, default_handler) Args: id (`int`): The id set by the DIP switches on the device. address (`str`): The IP address of the device. Defaults to `10.0.0.100`. port (`int`): The local port the device is listening on. Defaults to `50000`. server_address (`str`): The IP address of the server (this machine). Should always be `0.0.0.0`. Defaults to `0.0.0.0`. server_port (`int`): The port the server is listening on. Defaults to `50100`. add_id_to_args (`bool`): Whether to add `id` to `address` and `server_port` (the default behavior on the device). Defaults to `True`.
3.234877
3
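A minimal construction sketch built only from what the docstring above documents; the id and network values are placeholders for whatever your DIP switches and LAN assign, and it assumes the documented parameters are accepted as keywords:

from stepseries.step400 import STEP400

driver = STEP400(2, '10.0.0.100', server_port=50100)
driver.on(None, print)  # catch-all handler, as the docstring recommends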
front/definitions/exceptions.py
zhammer/morning-cd-front
0
6621749
"""Several expected exceptions in the front service.""" class FrontException(Exception): """Base exception for front exceptions.""" class ListensError(FrontException): """Exception raised upon encountering an error in the listens domain.""" class MusicError(FrontException): """Exception raised upon encountering an error in the music domain.""" class SunlightError(FrontException): """Exception raised upon encountering an error in the sunlight domain."""
"""Several expected exceptions in the front service.""" class FrontException(Exception): """Base exception for front exceptions.""" class ListensError(FrontException): """Exception raised upon encountering an error in the listens domain.""" class MusicError(FrontException): """Exception raised upon encountering an error in the music domain.""" class SunlightError(FrontException): """Exception raised upon encountering an error in the sunlight domain."""
en
0.704531
Several expected exceptions in the front service. Base exception for front exceptions. Exception raised upon encountering an error in the listens domain. Exception raised upon encountering an error in the music domain. Exception raised upon encountering an error in the sunlight domain.
2.015136
2
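One consequence of this hierarchy: callers can handle the domains they care about individually and still keep a single catch-all for any other front-service failure. A sketch (fetch_listens is a hypothetical caller, not part of this module):

from front.definitions.exceptions import FrontException, ListensError

def fetch_listens():
    raise ListensError('listens backend unavailable')  # simulated failure

try:
    fetch_listens()
except ListensError:
    print('retrying the listens service')    # domain-specific recovery
except FrontException:
    print('unhandled front-service error')   # everything else in the family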
projects/fizzbuzz/python/fizz_buzz.py
jthompson22/methods
0
6621750
def fizz_buzz(num):
    # Multiples of 3 -> "fizz", 5 -> "buzz", 7 -> "bazz"; combined multiples
    # concatenate the words (e.g. 15 -> "fizzbuzz", 105 -> "fizzbuzzbazz").
    string = ''
    for i in range(1, num + 1):
        if i % 3 == 0 and i % 5 == 0 and i % 7 == 0:
            string += "fizzbuzzbazz"
        elif i % 3 == 0 and i % 5 == 0:
            string += "fizzbuzz"
        elif i % 3 == 0 and i % 7 == 0:
            string += "fizzbazz"
        elif i % 5 == 0 and i % 7 == 0:
            string += "buzzbazz"
        elif i % 7 == 0:
            string += "bazz"
        elif i % 3 == 0:
            string += "fizz"
        elif i % 5 == 0:
            string += "buzz"
        else:
            string += str(i)
        string += "\n"
    return string

def main():
    # Keep prompting until the user enters -1.
    while True:
        print("Type in a value between 1 and 100. Use -1 to quit --->")
        i = int(input())
        print("Your value is " + str(i))
        if 1 <= i <= 100:
            print(fizz_buzz(i))
        elif i == -1:
            print("Quit")
            break
        else:
            print("Try another number")

if __name__ == "__main__":
    main()
def fizz_buzz(num):
    # Multiples of 3 -> "fizz", 5 -> "buzz", 7 -> "bazz"; combined multiples
    # concatenate the words (e.g. 15 -> "fizzbuzz", 105 -> "fizzbuzzbazz").
    string = ''
    for i in range(1, num + 1):
        if i % 3 == 0 and i % 5 == 0 and i % 7 == 0:
            string += "fizzbuzzbazz"
        elif i % 3 == 0 and i % 5 == 0:
            string += "fizzbuzz"
        elif i % 3 == 0 and i % 7 == 0:
            string += "fizzbazz"
        elif i % 5 == 0 and i % 7 == 0:
            string += "buzzbazz"
        elif i % 7 == 0:
            string += "bazz"
        elif i % 3 == 0:
            string += "fizz"
        elif i % 5 == 0:
            string += "buzz"
        else:
            string += str(i)
        string += "\n"
    return string

def main():
    # Keep prompting until the user enters -1.
    while True:
        print("Type in a value between 1 and 100. Use -1 to quit --->")
        i = int(input())
        print("Your value is " + str(i))
        if 1 <= i <= 100:
            print(fizz_buzz(i))
        elif i == -1:
            print("Quit")
            break
        else:
            print("Try another number")

if __name__ == "__main__":
    main()
ru
0.280954
#print("fizzbuzzbazz") #print("fizzbuzz") #print("fizzbazz") #print("buzzbazz") #print("bazz") #print("fizz") #print("buzz") #print(i)
3.957291
4
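A few spot checks of the 3/5/7 rules implemented above, written against the cleaned-up version (whose __main__ guard makes the module importable); note that fizz_buzz itself accepts values beyond the 1-100 range that main() prompts for, and 105 = 3 * 5 * 7 is the first number that triggers all three words:

from fizz_buzz import fizz_buzz  # module name assumed from the file path

out = fizz_buzz(105).splitlines()
assert out[1] == '2'               # non-multiples pass through unchanged
assert out[14] == 'fizzbuzz'       # 15 is divisible by 3 and 5
assert out[34] == 'buzzbazz'       # 35 is divisible by 5 and 7
assert out[104] == 'fizzbuzzbazz'  # 105 is divisible by 3, 5 and 7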