Schema (column name: dtype)

hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
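The rows below follow this schema, one value per field. As a reading aid, here is a minimal sketch of how a shard with these columns could be loaded and filtered. The file name data.parquet, the Parquet storage format, and the pandas/pyarrow stack are all assumptions; the dump itself does not say how the rows are stored.

import pandas as pd  # assumes pandas with a Parquet engine (e.g. pyarrow) is installed

# Hypothetical shard name; the dump does not specify the on-disk layout.
df = pd.read_parquet("data.parquet")

# One row per source file: repo metadata, the raw file in `content`,
# and the qsc_* quality signals listed in the schema above.
print(df.dtypes)

# Illustrative filter over the quality signals (thresholds are made up):
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
    & (df["avg_line_length"] < 100.0)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "alphanum_fraction"]].head())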
c71a546240f7c071174fd45a93cc36d20aa838b4
5,388
py
Python
barbican/common/resources.py
stanzikratel/barbican-2
10fae57c1cae3e140c19069a48f562d62ca53663
[ "Apache-2.0" ]
null
null
null
barbican/common/resources.py
stanzikratel/barbican-2
10fae57c1cae3e140c19069a48f562d62ca53663
[ "Apache-2.0" ]
null
null
null
barbican/common/resources.py
stanzikratel/barbican-2
10fae57c1cae3e140c19069a48f562d62ca53663
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shared business logic.
"""
from barbican.common import exception
from barbican.common import utils
from barbican.common import validators
from barbican.model import models

LOG = utils.getLogger(__name__)


def get_or_create_tenant(keystone_id, tenant_repo):
    """Returns tenant with matching keystone_id.

    Creates it if it does not exist.

    :param keystone_id: The external-to-Barbican ID for this tenant.
    :param tenant_repo: Tenant repository.
    :return: Tenant model instance
    """
    tenant = tenant_repo.find_by_keystone_id(keystone_id,
                                             suppress_exception=True)
    if not tenant:
        LOG.debug('Creating tenant for {0}'.format(keystone_id))
        tenant = models.Tenant()
        tenant.keystone_id = keystone_id
        tenant.status = models.States.ACTIVE
        tenant_repo.create_from(tenant)
    return tenant


def create_secret(data, tenant, crypto_manager,
                  secret_repo, tenant_secret_repo, datum_repo, kek_repo,
                  ok_to_generate=False):
    """Common business logic to create a secret."""
    time_keeper = utils.TimeKeeper('Create Secret Resource')
    new_secret = models.Secret(data)
    time_keeper.mark('after Secret model create')
    new_datum = None
    content_type = data.get('payload_content_type',
                            'application/octet-stream')

    if 'payload' in data:
        payload = data.get('payload')
        content_encoding = data.get('payload_content_encoding')

        LOG.debug('Encrypting payload...')
        new_datum = crypto_manager.encrypt(payload,
                                           content_type,
                                           content_encoding,
                                           new_secret,
                                           tenant,
                                           kek_repo,
                                           enforce_text_only=True)
        time_keeper.mark('after encrypt')
    elif ok_to_generate:
        LOG.debug('Generating new secret...')
        # TODO(atiwari): With new typed Order API proposal
        # we need to translate new_secret to meta
        # currently it is working as meta will have same attributes
        new_datum = crypto_manager. \
            generate_symmetric_encryption_key(new_secret,
                                              content_type,
                                              tenant,
                                              kek_repo)
        time_keeper.mark('after secret generate')
    else:
        LOG.debug('Creating metadata only for the new secret. '
                  'A subsequent PUT is required')

    # Create Secret entities in datastore.
    secret_repo.create_from(new_secret)
    time_keeper.mark('after Secret datastore create')
    new_assoc = models.TenantSecret()
    time_keeper.mark('after TenantSecret model create')
    new_assoc.tenant_id = tenant.id
    new_assoc.secret_id = new_secret.id
    new_assoc.role = "admin"
    new_assoc.status = models.States.ACTIVE
    tenant_secret_repo.create_from(new_assoc)
    time_keeper.mark('after TenantSecret datastore create')
    if new_datum:
        new_datum.secret_id = new_secret.id
        datum_repo.create_from(new_datum)
        time_keeper.mark('after Datum datastore create')

    time_keeper.dump()
    return new_secret


def create_encrypted_datum(secret, payload,
                           content_type, content_encoding,
                           tenant, crypto_manager, datum_repo, kek_repo):
    """Modifies the secret to add the plain_text secret information.

    :param secret: the secret entity to associate the secret data to
    :param payload: secret data to store
    :param content_type: payload content mime type
    :param content_encoding: payload content encoding
    :param tenant: the tenant (entity) who owns the secret
    :param crypto_manager: the crypto plugin manager
    :param datum_repo: the encrypted datum repository
    :param kek_repo: the KEK metadata repository
    :retval The response body, None if N/A
    """
    if not payload:
        raise exception.NoDataToProcess()

    if validators.secret_too_big(payload):
        raise exception.LimitExceeded()

    if secret.encrypted_data:
        raise ValueError('Secret already has encrypted data stored for it.')

    # Encrypt payload
    LOG.debug('Encrypting secret payload...')
    new_datum = crypto_manager.encrypt(payload,
                                       content_type,
                                       content_encoding,
                                       secret,
                                       tenant,
                                       kek_repo)
    datum_repo.create_from(new_datum)

    return new_datum
37.416667
76
0.625464
627
5,388
5.189793
0.30941
0.027658
0.030117
0.040873
0.172711
0.05839
0.041795
0.041795
0.041795
0.041795
0
0.003482
0.306978
5,388
143
77
37.678322
0.86797
0.285264
0
0.17284
0
0
0.135005
0.012807
0
0
0
0.006993
0
1
0.037037
false
0
0.049383
0
0.123457
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c71ac734d6782f901c4c5400d878122dd11ea416
567
py
Python
7/prime.py
redfast00/euler
98fc49a1fcb8b49415cc4384952a6447378bd4f4
[ "MIT" ]
null
null
null
7/prime.py
redfast00/euler
98fc49a1fcb8b49415cc4384952a6447378bd4f4
[ "MIT" ]
null
null
null
7/prime.py
redfast00/euler
98fc49a1fcb8b49415cc4384952a6447378bd4f4
[ "MIT" ]
null
null
null
from math import sqrt


def stream_primes(num):
    primes = []
    candidate = 2
    for i in range(num):
        prime = next_prime(primes, candidate)
        primes.append(prime)
        candidate = prime + 1
        yield prime


def next_prime(primes, candidate):
    while True:
        for prime in primes:
            if candidate % prime == 0:
                break
            elif prime > sqrt(candidate):
                return candidate
        else:
            return candidate
        candidate += 1


for prime in stream_primes(10001):
    print(prime)
22.68
45
0.560847
64
567
4.90625
0.453125
0.143312
0.095541
0.152866
0
0
0
0
0
0
0
0.02507
0.366843
567
24
46
23.625
0.849582
0
0
0.095238
0
0
0
0
0
0
0
0
0
1
0.095238
false
0
0.047619
0
0.238095
0.047619
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c71be407b214b6130f22496ab986a3ca003cfe56
777
py
Python
app/utils.py
HealYouDown/flo-league
c729cad1daddfb89e997c101bd2da505b7137d98
[ "MIT" ]
null
null
null
app/utils.py
HealYouDown/flo-league
c729cad1daddfb89e997c101bd2da505b7137d98
[ "MIT" ]
3
2021-05-03T19:05:11.000Z
2021-06-12T09:43:02.000Z
app/utils.py
HealYouDown/flo-league
c729cad1daddfb89e997c101bd2da505b7137d98
[ "MIT" ]
null
null
null
import datetime

from app.models import Log
from flask_login import current_user

from app.extensions import db


# https://stackoverflow.com/questions/6558535/find-the-date-for-the-first-monday-after-a-given-date
def next_weekday(
    d: datetime.datetime = datetime.datetime.utcnow(),
    weekday: int = 0,
) -> datetime.datetime:
    days_ahead = weekday - d.weekday()
    if days_ahead <= 0:  # Target day already happened this week
        days_ahead += 7

    # Flatten the current time to just the date
    date = datetime.datetime(d.year, d.month, d.day)

    return date + datetime.timedelta(days_ahead)


def add_moderator_log(log_text: str) -> None:
    db.session.add(Log(
        moderator_id=current_user.id,
        message=log_text,
    ))
    db.session.commit()
28.777778
99
0.705277
112
777
4.776786
0.535714
0.149533
0.08972
0
0
0
0
0
0
0
0
0.015848
0.187902
777
26
100
29.884615
0.832013
0.227799
0
0
0
0
0
0
0
0
0
0
0
1
0.105263
false
0
0.210526
0
0.368421
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c71c6e80583baf2cb3846a4c3d378463d41f4b27
9,582
py
Python
packages/gtmcore/gtmcore/environment/conda.py
gigabackup/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
60
2018-09-26T15:46:00.000Z
2021-10-10T02:37:14.000Z
packages/gtmcore/gtmcore/environment/conda.py
gigabackup/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
1,706
2018-09-26T16:11:22.000Z
2021-08-20T13:37:59.000Z
packages/gtmcore/gtmcore/environment/conda.py
griffinmilsap/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
11
2019-03-14T13:23:51.000Z
2022-01-25T01:29:16.000Z
from typing import List, Dict
import json

from gtmcore.http import ConcurrentRequestManager, ConcurrentRequest
from gtmcore.environment.packagemanager import PackageManager, PackageResult, PackageMetadata
from gtmcore.container import container_for_context
from gtmcore.labbook import LabBook
from gtmcore.logging import LMLogger

logger = LMLogger.get_logger()


class CondaPackageManagerBase(PackageManager):
    """Class to implement the conda package manager
    """

    def __init__(self):
        # String to be set in child classes indicating which python version you are checking. Typically should be
        # either python 3.6* or python 2.7*
        self.python_depends_str = None

        # String of the name of the conda environment (e.g. py36 or py27, as created via container build)
        self.python_env = None

        # Note, currently we hard code channel config. Future changes to support the user specifying channels
        # will modify this behavior
        self.channel_priority = ['conda-forge', 'anaconda']

        self.request_mgr = ConcurrentRequestManager()

    def list_versions(self, package_name: str, labbook: LabBook, username: str) -> List[str]:
        """Method to list all available versions of a package based on the package name

        Args:
            package_name: Name of the package to query
            labbook: Subject LabBook
            username: username of current user

        Returns:
            list(str): Version strings
        """
        # Check for package in channels, picking out version by priority
        request_list = list()
        for channel in self.channel_priority:
            request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{package_name}",
                                                  headers={'Accept': 'application/json'}))

        responses = self.request_mgr.resolve_many(request_list)

        versions = None
        for response in responses:
            if response.status_code != 200:
                continue
            versions = response.json.get('versions')
            break

        if not versions:
            raise ValueError(f"Package {package_name} not found in channels {' ,'.join(self.channel_priority)}.")

        versions.reverse()
        return versions

    def list_installed_packages(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
        """Method to get a list of all packages that are currently installed

        Note, this will return results for the computer/container in which it is executed. To get the properties of
        a LabBook container, a docker exec command would be needed from the Gigantum application container.

        return format is a list of dicts with the format (name: <package name>, version: <version string>)

        Returns:
            list
        """
        project_container = container_for_context(username, labbook=labbook)
        result = project_container.run_container("conda list --no-pip --json", wait_for_output=True)
        if result:
            data = json.loads(result)
            if data:
                return [{"name": x['name'], 'version': x['version']} for x in data]
            else:
                return []

    def validate_packages(self, package_list: List[Dict[str, str]], labbook: LabBook, username: str) \
            -> List[PackageResult]:
        """Method to validate a list of packages, and if needed fill in any missing versions

        Should check both the provided package name and version. If the version is omitted, it should be generated
        from the latest version.

        Args:
            package_list(list): A list of dictionaries of packages to validate
            labbook(str): The labbook instance
            username(str): The username for the logged in user

        Returns:
            namedtuple: namedtuple indicating if the package and version are valid
        """
        result = list()

        # Check for package in channels, picking out version by priority
        request_list = list()
        for pkg in package_list:
            for channel in self.channel_priority:
                request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{pkg['package']}",
                                                      headers={'Accept': 'application/json'}))

        responses = self.request_mgr.resolve_many(request_list)

        # Repack into groups by package
        responses_per_package = list(zip(*(iter(responses),) * len(self.channel_priority)))

        for package, responses in zip(package_list, responses_per_package):
            versions = None
            latest_version = None
            for response in responses:
                if response.status_code != 200:
                    continue
                versions = response.json.get('versions')
                latest_version = response.json.get('latest_version')
                break

            if not versions:
                # Package is not found
                result.append(PackageResult(package=package['package'], version=package.get('version'), error=True))
                continue

            if package.get('version'):
                # Package has been set, so validate it
                if package.get('version') in versions:
                    # Both package name and version are valid
                    result.append(PackageResult(package=package['package'], version=package.get('version'),
                                                error=False))
                else:
                    # The package version is not in the list, so invalid
                    result.append(PackageResult(package=package['package'], version=package.get('version'),
                                                error=True))
            else:
                # You need to look up the latest version since not included
                result.append(PackageResult(package=package['package'], version=str(latest_version), error=False))

        return result

    def get_packages_metadata(self, package_list: List[str], labbook: LabBook, username: str) -> List[PackageMetadata]:
        """Method to get package metadata

        Args:
            package_list: List of package names
            labbook(str): The labbook instance
            username(str): The username for the logged in user

        Returns:
            list
        """
        def _extract_metadata(data):
            """Extraction method to pull out the docs URL and description"""
            latest_val = data.get('latest_version')
            description_val = data.get('summary').strip()
            docs_val = data.get('doc_url')
            if not docs_val:
                docs_val = data.get('html_url')
            return latest_val, description_val, docs_val

        # Check for package in channels, picking out version by priority
        request_list = list()
        for pkg in package_list:
            for channel in self.channel_priority:
                request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{pkg}",
                                                      headers={'Accept': 'application/json'},
                                                      extraction_function=_extract_metadata))

        responses = self.request_mgr.resolve_many(request_list)

        # Repack into groups by package
        responses_per_package = list(zip(*(iter(responses),) * len(self.channel_priority)))

        result = list()
        for package, responses in zip(package_list, responses_per_package):
            data = None
            for response in responses:
                if response.status_code == 200:
                    data = response.extracted_json
                    break

            if data:
                latest_version, description, docs_url = data
                result.append(PackageMetadata(package_manager="conda", package=package, latest_version=latest_version,
                                              description=description, docs_url=docs_url))
            else:
                result.append(PackageMetadata(package_manager="conda", package=package, latest_version=None,
                                              description=None, docs_url=None))

        return result

    def generate_docker_install_snippet(self, packages: List[Dict[str, str]], single_line: bool = False) -> List[str]:
        """Method to generate a docker snippet to install 1 or more packages

        Note: Because conda be so slow to solve environments with conda-forge included, always single line it.

        Args:
            packages(list(dict)): A list of package names and versions to install
            single_line(bool): If true, collapse

        Returns:
            list
        """
        package_strings = [f"{x['name']}={x['version']}" for x in packages]

        if single_line:
            return [f"RUN conda install -yq {' '.join(package_strings)}"]
        else:
            return [f"RUN conda install -yq {' '.join(package_strings)}"]


class Conda3PackageManager(CondaPackageManagerBase):
    """Class to implement the conda3 package manager
    """

    def __init__(self):
        super().__init__()
        self.python_depends_str = 'python 3.6*'
        self.python_env = 'py36'


class Conda2PackageManager(CondaPackageManagerBase):
    """Class to implement the conda2 package manager
    """

    def __init__(self):
        super().__init__()
        self.python_depends_str = 'python 2.7*'
        self.python_env = 'py27'
40.774468
120
0.611668
1,073
9,582
5.340168
0.215284
0.026876
0.023211
0.017452
0.411518
0.369983
0.353229
0.343979
0.343979
0.329319
0
0.004512
0.306095
9,582
234
121
40.948718
0.857272
0.274995
0
0.474576
0
0
0.101608
0.016075
0
0
0
0
0
1
0.076271
false
0
0.059322
0
0.228814
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c71da90915f08f68f935060eea6dba44dc3beaac
1,147
py
Python
netchos/io/io_mpl_to_px.py
brainets/netchos
ccfcd2ec85894adffbd20fbc67410dbdacfe6812
[ "BSD-3-Clause" ]
11
2021-04-20T19:45:23.000Z
2021-11-17T15:18:33.000Z
netchos/io/io_mpl_to_px.py
brainets/netchos
ccfcd2ec85894adffbd20fbc67410dbdacfe6812
[ "BSD-3-Clause" ]
3
2021-04-26T09:01:42.000Z
2021-06-30T12:09:15.000Z
netchos/io/io_mpl_to_px.py
brainets/netchos
ccfcd2ec85894adffbd20fbc67410dbdacfe6812
[ "BSD-3-Clause" ]
2
2021-05-06T20:28:46.000Z
2021-05-24T10:36:44.000Z
"""Conversion of Matplotlib / Seaborn inputs to plotly.""" import os.path as op from pkg_resources import resource_filename import json def mpl_to_px_inputs(inputs, plt_types=None): """Convert typical matplotlib inputs to plotly to simplify API. Parameters ---------- inputs : dict Dictionary of inputs plt_types : string or list or None Sub select some plotting types (e.g heatmap, line etc.). If None, all types are used Returns ------- outputs : dict Dictionary of converted inputs """ # load reference table file = op.join(op.dirname(__file__), "io_mpl_to_px.json") with open(file, 'r') as f: table = json.load(f) # go through the desired plotting types for conversion if plt_types is None: plt_types = list(table.keys()) if isinstance(plt_types, str): plt_types = [plt_types] ref = {} for plt_type in plt_types: ref.update(table[plt_type]) # convert inputs outputs = {} for k, v in inputs.items(): if k in ref.keys(): k = ref[k] outputs[k] = v return outputs
25.488889
77
0.62075
159
1,147
4.339623
0.490566
0.092754
0.04058
0
0
0
0
0
0
0
0
0
0.280732
1,147
44
78
26.068182
0.836364
0.401046
0
0
0
0
0.0288
0
0
0
0
0
0
1
0.05
false
0
0.15
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c71f19c3cf33a6be263067d8b8a273844fc916bd
3,337
py
Python
openstack_dashboard/dashboards/admin/volume_types/qos_specs/forms.py
hemantsonawane95/horizon-apelby
01a5e72219aeca8c1451701ee85e232ed0618751
[ "Apache-2.0" ]
null
null
null
openstack_dashboard/dashboards/admin/volume_types/qos_specs/forms.py
hemantsonawane95/horizon-apelby
01a5e72219aeca8c1451701ee85e232ed0618751
[ "Apache-2.0" ]
null
null
null
openstack_dashboard/dashboards/admin/volume_types/qos_specs/forms.py
hemantsonawane95/horizon-apelby
01a5e72219aeca8c1451701ee85e232ed0618751
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

from django.urls import reverse
from django.utils.translation import gettext_lazy as _

from horizon import exceptions
from horizon import forms
from horizon import messages

from openstack_dashboard import api

KEY_NAME_REGEX = re.compile(r"^[a-zA-Z0-9-_:. /]+$", re.UNICODE)
KEY_ERROR_MESSAGES = {
    'invalid': _("The key must match the following the regex: "
                 "'^[a-zA-Z0-9-_:. /]'")}


class CreateKeyValuePair(forms.SelfHandlingForm):
    # this if for creating a spec key-value pair for an existing QOS Spec
    key = forms.RegexField(max_length=255, label=_("Key"),
                           regex=KEY_NAME_REGEX,
                           error_messages=KEY_ERROR_MESSAGES)
    value = forms.CharField(max_length=255, label=_("Value"))

    def handle(self, request, data):
        qos_spec_id = self.initial['qos_spec_id']
        try:
            # first retrieve current value of specs
            specs = api.cinder.qos_spec_get(request, qos_spec_id)
            # now add new key-value pair to list of specs
            specs.specs[data['key']] = data['value']
            api.cinder.qos_spec_set_keys(request,
                                         qos_spec_id,
                                         specs.specs)
            msg = _('Created spec "%s".') % data['key']
            messages.success(request, msg)
            return True
        except Exception:
            redirect = reverse("horizon:admin:volume_types:index")
            exceptions.handle(request,
                              _("Unable to create spec."),
                              redirect=redirect)


class EditKeyValuePair(forms.SelfHandlingForm):
    value = forms.CharField(max_length=255, label=_("Value"))

    # update the backend with the new qos spec value
    def handle(self, request, data):
        key = self.initial['key']
        qos_spec_id = self.initial['qos_spec_id']

        # build up new 'specs' object with all previous values plus new value
        try:
            # first retrieve current value of specs
            specs = api.cinder.qos_spec_get_keys(request,
                                                 qos_spec_id,
                                                 raw=True)
            specs.specs[key] = data['value']
            api.cinder.qos_spec_set_keys(request,
                                         qos_spec_id,
                                         specs.specs)
            msg = _('Saved spec "%s".') % key
            messages.success(request, msg)
            return True
        except Exception:
            redirect = reverse("horizon:admin:volume_types:index")
            exceptions.handle(request,
                              _("Unable to edit spec."),
                              redirect=redirect)
39.72619
77
0.585556
393
3,337
4.834606
0.389313
0.051579
0.037895
0.033684
0.375263
0.364737
0.336842
0.336842
0.263158
0.263158
0
0.007583
0.328139
3,337
83
78
40.204819
0.839875
0.247228
0
0.509434
0
0
0.114274
0.025662
0
0
0
0
0
1
0.037736
false
0
0.132075
0
0.301887
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c71fc189fa6f73122afbe242bbfd89bd9a8a50ea
9,050
py
Python
data_structure/const_tree.py
alipay/StructuredLM_RTDT
6edf2acf8747e17015523d78b6c580431a4f7b5c
[ "Apache-2.0" ]
42
2021-06-01T07:07:12.000Z
2022-03-18T02:38:53.000Z
data_structure/const_tree.py
alipay/StructuredLM_RTDT
6edf2acf8747e17015523d78b6c580431a4f7b5c
[ "Apache-2.0" ]
1
2021-12-15T03:50:24.000Z
2021-12-15T08:46:56.000Z
data_structure/const_tree.py
alipay/StructuredLM_RTDT
6edf2acf8747e17015523d78b6c580431a4f7b5c
[ "Apache-2.0" ]
7
2021-06-02T02:28:01.000Z
2022-01-14T06:59:29.000Z
# coding=utf-8
# Copyright (c) 2021 Ant Group

import sys

LABEL_SEP = '@'
INDENT_STRING1 = '│   '
INDENT_STRING2 = '├──'
EMPTY_TOKEN = '___EMPTY___'


def print_tree(const_tree, indent=0, out=sys.stdout):
    for i in range(indent - 1):
        out.write(INDENT_STRING1)
    if indent > 0:
        out.write(INDENT_STRING2)
    out.write(const_tree.tag)
    if not isinstance(const_tree.children[0], ConstTree):
        out.write(f' {const_tree.children[0].string}\n')
    else:
        out.write('\n')
        for child in const_tree.children:
            print_tree(child, indent + 1, out)


def _make_tree(string, make_leaf_fn, make_internal_fn):
    tokens = string.replace('(', ' ( ').replace(')', ' ) ').split()
    index, stack = 0, []
    lexicons = []
    root = None
    while index < len(tokens):
        token = tokens[index]
        index += 1
        if token == ')':
            if not stack:
                raise ConstTreeParserError('redundant ")" at token ' + str(index))
            node = stack.pop()
            if not stack:
                root = node
            else:
                stack[-1].children.append(node)
        elif token == '(':
            tag = tokens[index]
            index += 1
            stack.append(make_internal_fn(tag))
        else:
            if not stack:
                raise ConnectionError('??? at pos ' + str(index))
            new_token = []
            while token != ')':
                if not token != '(':
                    raise Exception('bracket error')
                new_token.append(token)
                token = tokens[index]
                index += 1
            # is lexicon
            leaf_node = make_leaf_fn('_'.join(new_token))
            lexicons.append(leaf_node)
            postag_node = stack.pop()
            postag_node.children.append(leaf_node)
            if not stack:
                root = postag_node
            else:
                stack[-1].children.append(postag_node)
    if not root or stack:
        raise ConstTreeParserError('missing ")".')
    return root, lexicons


class ConstTreeParserError(Exception):
    pass


class Lexicon:
    __slots__ = ('string', 'span', 'parent')

    def __init__(self, string, span=None):
        self.string = string
        self.span = span

    def __str__(self):
        return f'<Lexicon {self.string}>'

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return self.string == other.string

    def __hash__(self):
        return hash(self.string) + 2

    @property
    def tag(self):
        return self.string

    def to_string(self, quote_lexicon):
        if quote_lexicon:
            return f'"{self.string}"'
        return self.string


class ConstTree:
    __slots__ = ('children', 'tag', 'span', 'index', 'parent', 'attrs')

    ROOT_LABEL = 'ROOT'

    def __init__(self, tag, children=None, span=None):
        self.tag = tag
        self.children = children if children is not None else []
        self.span = span
        self.index = None

    def __str__(self):
        child_string = ' + '.join(child.tag for child in self.children)
        return f'{self.span} {self.tag} => {child_string}'

    def __repr__(self):
        return str(self)

    def __getitem__(self, index):
        if isinstance(index, int):
            return self.children[index]
        if isinstance(index, str):
            for child in self.children:
                if isinstance(child, ConstTree) and child.tag == index.upper():
                    return child
        raise KeyError

    def to_string(self, quote_lexicon=False):
        child_string = ' '.join(child.to_string(quote_lexicon) for child in self.children)
        return f'({self.tag} {child_string})'

    @staticmethod
    def from_string(string):
        """Construct ConstTree from parenthesis representation.

        :param string: string of parenthesis representation
        :return: ConstTree root and all leaf Lexicons
        """
        tree, lexicons = _make_tree(string, Lexicon, ConstTree)
        for index, lexicon in enumerate(lexicons):
            lexicon.span = index, index + 1
        tree.populate_spans_internal()
        return tree, lexicons

    def traverse_postorder(self):
        for child in self.children:
            if isinstance(child, ConstTree):
                yield from child.traverse_postorder()
        yield self

    def traverse_postorder_with_lexicons(self):
        for child in self.children:
            if isinstance(child, ConstTree):
                yield from child.traverse_postorder_with_lexicons()
            else:
                yield child
        yield self

    def generate_preterminals(self):
        for child in self.children:
            if isinstance(child, ConstTree):
                yield from child.generate_preterminals()
        for child in self.children:
            if isinstance(child, Lexicon):
                yield self

    def generate_lexicons(self):
        for child in self.children:
            if isinstance(child, ConstTree):
                yield from child.generate_lexicons()
        for child in self.children:
            if isinstance(child, Lexicon):
                yield child

    def is_binary_tree(self):
        if isinstance(self.children[0], Lexicon):
            return True
        return len(self.children) <= 2 and all(child.is_binary_tree() for child in self.children)

    def condensed_unary_chain(self, include_preterminal=True, remove_root=None):
        if self.tag == remove_root:
            assert len(self.children) == 1
            return self.children[0].condensed_unary_chain(include_preterminal=include_preterminal)

        if len(self.children) > 1:
            return ConstTree(self.tag,
                             children=list(child.condensed_unary_chain() for child in self.children),
                             span=self.span)

        if isinstance(self.children[0], Lexicon):
            return ConstTree((self.tag if include_preterminal else EMPTY_TOKEN),
                             children=list(self.children),
                             span=self.span)

        assert isinstance(self.children[0], ConstTree)
        node = self
        new_tag = self.tag
        while len(node.children) == 1 and isinstance(node.children[0], ConstTree):
            node = node.children[0]
            if include_preterminal or isinstance(node.children[0], ConstTree):
                new_tag += LABEL_SEP + node.tag
        if len(node.children) == 1:
            children = list(node.children)
        else:
            children = list(child.condensed_unary_chain() for child in node.children)
        return ConstTree(new_tag, children=children, span=self.span)

    def expanded_unary_chain(self, add_root=None):
        if isinstance(self.children[0], Lexicon):
            children = list(self.children)
        else:
            children = list(child.expanded_unary_chain() for child in self.children)
        tags = self.tag.split(LABEL_SEP)
        for tag in reversed(tags):
            children = [ConstTree(tag, children=children, span=self.span)]
        root = children[0]
        if add_root:
            root = ConstTree(add_root, children=[root])
        return root

    def calculate_span(self):
        self.span = self.children[0].span[0], self.children[-1].span[1]

    def populate_spans_internal(self):
        for child in self.children:
            if isinstance(child, ConstTree):
                child.populate_spans_internal()
        self.calculate_span()

    def add_postorder_index(self):
        for index, node in enumerate(self.traverse_postorder()):
            node.index = index

    def add_parents(self, parent=None):
        self.parent = parent
        for child in self.children:
            if isinstance(child, ConstTree):
                child.add_parents(self)

    def is_ancestor_of(self, other):
        other = other.parent
        while other is not None and other is not self:
            other = other.parent
        return other is not None

    def generate_path_to_root(self, include_self=False):
        node = self
        if not include_self:
            node = self.parent
        while node is not None:
            yield node
            node = node.parent

    def lowest_common_ancestor(self, other):
        path = list(other.generate_path_to_root())
        for node in self.generate_path_to_root():
            try:
                return path[path.index(node)]
            except ValueError:
                pass

    def remove_nodes(self, filter):
        _children = []
        for c in self.children:
            if isinstance(c, ConstTree):
                if filter(c):
                    pass
                else:
                    filtered_node = c.remove_nodes(filter)
                    _children.append(filtered_node)
            else:
                _children.append(c)
        return ConstTree(self.tag, _children)
30.782313
98
0.575912
1,028
9,050
4.899805
0.140078
0.069089
0.031765
0.038912
0.290054
0.225928
0.185428
0.161604
0.135398
0.107604
0
0.006736
0.327403
9,050
293
99
30.887372
0.820108
0.022652
0
0.276786
0
0
0.032573
0.003745
0
0
0
0
0.008929
1
0.133929
false
0.013393
0.004464
0.026786
0.272321
0.008929
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c71fcfdd300a9f0f56bf5188a7e7a694d05f3faa
4,098
py
Python
tests/test_minimize.py
The-Ludwig/iminuit
8eef7b711846d6c8db9fe1fc883f6fa0977eb514
[ "MIT" ]
null
null
null
tests/test_minimize.py
The-Ludwig/iminuit
8eef7b711846d6c8db9fe1fc883f6fa0977eb514
[ "MIT" ]
null
null
null
tests/test_minimize.py
The-Ludwig/iminuit
8eef7b711846d6c8db9fe1fc883f6fa0977eb514
[ "MIT" ]
null
null
null
import pytest
from iminuit import minimize
import numpy as np
from numpy.testing import assert_allclose, assert_equal

opt = pytest.importorskip("scipy.optimize")


def func(x, *args):
    c = args[0] if args else 1
    return c + x[0] ** 2 + (x[1] - 1) ** 2 + (x[2] - 2) ** 2


def grad(x, *args):
    return 2 * (x - (0, 1, 2))


def test_simple():
    result = minimize(func, (1, 1, 1))
    assert_allclose(result.x, (0, 1, 2), atol=1e-8)
    assert_allclose(result.fun, 1)
    assert result.nfev > 0
    assert result.njev == 0


def test_gradient():
    result = minimize(func, (1, 1, 1), jac=grad)
    assert_allclose(result.x, (0, 1, 2), atol=1e-8)
    assert_allclose(result.fun, 1)
    assert result.nfev > 0
    assert result.njev > 0


def test_args():
    result = minimize(func, np.ones(3), args=(5,))
    assert_allclose(result.x, (0, 1, 2), atol=1e-8)
    assert_allclose(result.fun, 5)
    assert result.nfev > 0
    assert result.njev == 0


def test_callback():
    trace = []
    result = minimize(func, np.ones(3), callback=lambda x: trace.append(x.copy()))
    assert_allclose(result.x, (0, 1, 2), atol=1e-8)
    assert_allclose(result.fun, 1)
    assert result.nfev == len(trace)
    assert_allclose(trace[0], np.ones(3), atol=1e-2)
    assert_allclose(trace[-1], result.x, atol=1e-2)


def test_tol():
    ref = np.ones(2)

    def rosen(par):
        x, y = par
        return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2

    r1 = minimize(rosen, (0, 0), tol=1)
    r2 = minimize(rosen, (0, 0), tol=1e-6)

    assert max(np.abs(r2.x - ref)) < max(np.abs(r1.x - ref))


def test_disp(capsys):
    minimize(lambda x: x ** 2, 0)
    assert capsys.readouterr()[0] == ""
    minimize(lambda x: x ** 2, 0, options={"disp": True})
    assert capsys.readouterr()[0] != ""


def test_hessinv():
    r = minimize(func, (1, 1, 1))
    href = np.zeros((3, 3))
    for i in range(3):
        href[i, i] = 0.5
    assert_allclose(r.hess_inv, href, atol=1e-8)


def test_unsupported():
    with pytest.raises(ValueError):
        minimize(func, (1, 1, 1), constraints=[])
    with pytest.raises(ValueError):
        minimize(func, (1, 1, 1), jac=True)


def test_call_limit():
    ref = minimize(func, (1, 1, 1))

    with pytest.warns(UserWarning):
        r1 = minimize(func, (1, 1, 1), options={"maxiter": 1})
    assert r1.nfev < ref.nfev
    assert not r1.success
    assert "Call limit" in r1.message

    with pytest.warns(DeprecationWarning):
        r2 = minimize(func, (1, 1, 1), options={"maxfev": 1})
    assert not r2.success
    assert r2.nfev == r1.nfev

    r3 = minimize(func, (1, 1, 1), options={"maxfun": 1})
    assert not r3.success
    assert r3.nfev == r1.nfev


def test_eps():
    ref = minimize(func, (1, 1, 1))
    r = minimize(func, (1, 1, 1), options={"eps": 1e-10})
    assert np.any(ref.x != r.x)
    assert_allclose(r.x, ref.x, atol=1e-9)


def test_bad_function():
    class Fcn:
        n = 0

        def __call__(self, x):
            self.n += 1
            return x ** 2 + 1e-4 * (self.n % 3)

    r = minimize(Fcn(), [1], options={"maxfun": 100000000})
    assert not r.success
    assert "Estimated distance to minimum too large" in r.message


def test_bounds():
    r1 = minimize(func, (1.5, 1.7, 1.5), bounds=opt.Bounds((1, 1.5, 1), (2, 2, 2)))
    assert r1.success
    assert_allclose(r1.x, (1, 1.5, 2), atol=1e-2)
    r2 = minimize(func, (1.5, 1.7, 1.5), bounds=((1, 2), (1.5, 2), (1, 2)))
    assert r2.success
    assert_equal(r1.x, r2.x)


def test_method_warn():
    with pytest.raises(ValueError):
        minimize(func, (1.5, 1.7, 1.5), method="foo")


def test_hess_warn():
    with pytest.warns(UserWarning):
        minimize(func, (1.5, 1.7, 1.5), hess=True)


def test_unreliable_uncertainties():
    r = minimize(func, (1.5, 1.7, 1.5), options={"stra": 0})
    assert (
        r.message
        == "Optimization terminated successfully, but uncertainties are unrealiable."
    )


def test_simplex():
    r = minimize(func, (1.5, 1.7, 1.5), method="simplex", tol=1e-4)
    assert r.success
    assert_allclose(r.x, (0, 1, 2), atol=2e-3)
26.269231
85
0.59346
658
4,098
3.630699
0.18997
0.020929
0.092507
0.064462
0.38468
0.366262
0.251151
0.240268
0.232733
0.149854
0
0.071769
0.231576
4,098
155
86
26.43871
0.686885
0
0
0.171171
0
0
0.044168
0
0
0
0
0
0.36036
1
0.18018
false
0
0.045045
0.009009
0.279279
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c72190831a83ec1b623a951d123f7148309fad86
2,468
py
Python
murtanto/parsing.py
amandatv20/botfb
2be3ce0265fd86f48f24d2b496d36fd346e49d29
[ "MIT" ]
1
2021-03-24T13:54:33.000Z
2021-03-24T13:54:33.000Z
murtanto/parsing.py
amandatv20/botfb
2be3ce0265fd86f48f24d2b496d36fd346e49d29
[ "MIT" ]
2
2020-06-15T08:10:55.000Z
2020-06-16T15:03:19.000Z
murtanto/parsing.py
amandatv20/botfb
2be3ce0265fd86f48f24d2b496d36fd346e49d29
[ "MIT" ]
null
null
null
# coded by: salism3
# 23 - 05 - 2020 23:18 (Malam Takbir)

from bs4 import BeautifulSoup as parser
from . import sorting
import re


def to_bs4(html):
    return parser(html, "html.parser")


def refsrc(html):
    return True if re.search(r'http.+\Wrefsrc', html) else False


def parsing_href(html, href, one=False, bs4_class=False):
    data = to_bs4(html)
    if one:
        data = data.find("a", href=lambda x: x and href in x)
        if not bs4_class and data != None:
            data = sorting.to_mbasic(data["href"])
    else:
        data = data.find_all("a", href=lambda x: x and href in x)
        if not bs4_class:
            data = [sorting.to_mbasic(x["href"]) for x in data]
    return data


def parsing_href_regex(html, pattern, one=False, bs4_class=False):
    data = to_bs4(html)
    if one:
        data = data.find("a", href=lambda x: x and re.search(pattern, x))
        if not bs4_class and data != None:
            data = sorting.to_mbasic(data["href"])
    else:
        data = data.find_all("a", href=lambda x: x and re.search(pattern, x))
        if not bs4_class:
            data = [sorting.to_mbasic(x["href"]) for x in data]
    return data


def getMyName(html):
    data = to_bs4(html).find("title").text
    return data


def getName(html):
    data = to_bs4(html).find("title").text
    return data


def getMyId(html):
    data = to_bs4(html).find("a", href=lambda x: "/allactivity" in x)["href"]
    data = re.search(r"/\d+/?", data).group().replace("/", "")
    return data


def getHiddenInput(html, post_action):
    rv = {}
    data = to_bs4(html).find("form", action=lambda x: post_action in x)
    data = data.find_all("input", {"type": "hidden", "name": True, "value": True})
    for x in data:
        rv[x["name"]] = x["value"]
    return rv


def friendRequestParser(html):
    confirm = parsing_href(html, "?confirm=")
    reject = parsing_href(html, "?delete=")
    rv = list(zip(confirm, reject))
    next = parsing_href(html, "?ppk=", one=True)
    return {"items": rv, "next": next}


def listFriendParser(html):
    data = parsing_href(html, "fref=fr_tab", bs4_class=True)
    nama = [x.text for x in data]
    id_ = [re.search(r"\w[\w.]+", x["href"].replace("/", "").replace("profile.php?id=", "")).group() for x in data]
    img = [x["src"] for x in to_bs4(html).find_all("img", alt=lambda x: x and "profile picture" in x)]
    if "/allactivity?" in html:
        del img[0]
    next = parsing_href(html, "unit_cursor=", one=True)
    return {"items": list(zip(nama, id_, img)), "next": next, "html": html}
31.641026
113
0.636548
395
2,468
3.883544
0.235443
0.026076
0.046936
0.050847
0.396349
0.377445
0.363755
0.363755
0.363755
0.363755
0
0.01506
0.192869
2,468
78
114
31.641026
0.75502
0.021475
0
0.344262
0
0
0.10488
0
0
0
0
0
0
1
0.163934
false
0
0.04918
0.032787
0.377049
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c724bce6559444b809161c07169a0eaf827f8a70
1,125
py
Python
leetcode/0506_relative_ranks.py
chaosWsF/Python-Practice
ff617675b6bcd125933024bb4c246b63a272314d
[ "BSD-2-Clause" ]
null
null
null
leetcode/0506_relative_ranks.py
chaosWsF/Python-Practice
ff617675b6bcd125933024bb4c246b63a272314d
[ "BSD-2-Clause" ]
null
null
null
leetcode/0506_relative_ranks.py
chaosWsF/Python-Practice
ff617675b6bcd125933024bb4c246b63a272314d
[ "BSD-2-Clause" ]
null
null
null
""" Given scores of N athletes, find their relative ranks and the people with the top three highest scores, who will be awarded medals: "Gold Medal", "Silver Medal" and "Bronze Medal". Example 1: Input: [5, 4, 3, 2, 1] Output: ["Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"] Explanation: The first three athletes got the top three highest scores, so they got "Gold Medal", "Silver Medal" and "Bronze Medal". For the left two athletes, you just need to output their relative ranks according to their scores. Note: N is a positive integer and won't exceed 10,000. All the scores of athletes are guaranteed to be unique. """ class Solution: def findRelativeRanks(self, nums): scores_rank = sorted(nums, reverse=True) d = {} for i, score in enumerate(scores_rank): if i == 0: d[score] = 'Gold Medal' elif i == 1: d[score] = 'Silver Medal' elif i == 2: d[score] = 'Bronze Medal' else: d[score] = str(i + 1) return [d[x] for x in nums]
32.142857
84
0.593778
161
1,125
4.136646
0.509317
0.054054
0.067568
0.09009
0.174174
0.102102
0.102102
0
0
0
0
0.021823
0.307556
1,125
34
85
33.088235
0.833119
0.584
0
0
0
0
0.073913
0
0
0
0
0
0
1
0.071429
false
0
0
0
0.214286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c72537aa56e0fec5c2e19ae544ffe17dd652b46b
727
py
Python
link_prob_show.py
Rheinwalt/spatial-effects-networks
7b77a22b45341b024a57e1759b7b61cd91d90849
[ "MIT" ]
3
2018-12-21T20:19:18.000Z
2021-01-02T12:58:56.000Z
link_prob_show.py
rick-foo/spatial-effects-networks
7b77a22b45341b024a57e1759b7b61cd91d90849
[ "MIT" ]
null
null
null
link_prob_show.py
rick-foo/spatial-effects-networks
7b77a22b45341b024a57e1759b7b61cd91d90849
[ "MIT" ]
2
2020-09-03T14:18:37.000Z
2021-10-01T18:06:42.000Z
import sys
import numpy as np
from sern import *

ids, lon, lat = np.loadtxt('nodes', unpack=True)
links = np.loadtxt('links', dtype='int')
A, b = AdjacencyMatrix(ids, links)
lon, lat = lon[b], lat[b]
n = A.shape[0]

# LinkProbability expects A as triu
A = A[np.triu_indices(n, 1)]

# play around with the scale, maybe you don't need log binning?
D, x = IntegerDistances(lat, lon, scale=50)
p = LinkProbability(A, D)

from matplotlib import pyplot as pl

pl.plot(p, 'bo')
pl.ylabel('Link probability given distance')
pl.xlabel('Bin number')
pl.savefig('link_prob_bin.png')
pl.close('all')

pl.semilogx(x, p, 'bo')
pl.ylabel('Link probability given distance')
pl.xlabel('Distance [km]')
pl.savefig('link_prob_distance.png')
25.964286
63
0.707015
123
727
4.138211
0.528455
0.023576
0.019646
0.043222
0.184676
0.184676
0.184676
0.184676
0.184676
0.184676
0
0.00639
0.138927
727
27
64
26.925926
0.806709
0.130674
0
0.095238
0
0
0.228935
0.034976
0
0
0
0
0
1
0
false
0
0.190476
0
0.190476
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c7268aa939534725180b033986da1a690622e70b
3,899
py
Python
controller/components/app.py
isabella232/flight-lab
bd666b1d2bcec6f928a2e8da9f13fd5dae21319f
[ "Apache-2.0" ]
15
2018-10-18T07:50:46.000Z
2021-10-21T03:40:55.000Z
controller/components/app.py
google/flight-lab
bd666b1d2bcec6f928a2e8da9f13fd5dae21319f
[ "Apache-2.0" ]
9
2018-09-17T23:00:02.000Z
2019-01-22T21:08:04.000Z
controller/components/app.py
isabella232/flight-lab
bd666b1d2bcec6f928a2e8da9f13fd5dae21319f
[ "Apache-2.0" ]
12
2019-01-07T12:43:37.000Z
2021-10-21T03:40:44.000Z
# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for components related to running apps."""

import subprocess
import threading

from components import base
from protos import controller_pb2
from utils import app


class AppComponent(base.Component):
  """Component to run command-line based app on any platform.

  This component can start app, restart app upon crash, and stop app.

  Events:
    "status_changed": when status of the app is changed.
      Args:
        app_component: instance of this class.
  """

  def __init__(self, proto, *args, **kwargs):
    """Initializes the component.

    Args:
      proto: flightlab.App proto defining app details and options.
    """
    super(AppComponent, self).__init__(proto, *args, **kwargs)
    self._app = app.Application(
        name=self.name,
        bin_path=self.settings.executable_path,
        arguments=(list(self.settings.arguments)
                   if self.settings.arguments else []),
        working_dir=self.settings.working_dir,
        restart_on_crash=(self.settings.restart_on_crash
                          if self.settings.restart_on_crash else False),
        env=(self.settings.env if self.settings.env else None))
    self._app.on('started', self._on_app_started)
    self._app.on('stopped', self._on_app_stopped)
    self._monitor = threading.Timer(1, self._check_status)
    self._monitor.start()

  def close(self):
    if self._monitor:
      self._monitor.cancel()
      self._monitor = None
    self._app.stop()

  def _check_status(self):
    if self._app.has_running_instance():
      component_status = controller_pb2.Component.ON
      app_status = controller_pb2.App.RUNNING
    else:
      component_status = controller_pb2.Component.OFF
      app_status = controller_pb2.App.NOT_RUNNING
    if (self.proto.status != component_status
        or self.settings.status != app_status):
      self.proto.status = component_status
      self.settings.status = app_status
      self.emit('status_changed', self)

  def _start(self):
    self.logger.info('[App - {0}] Starting...'.format(self.name))
    self._app.start()

  def _stop(self):
    self.logger.info('[App - {0}] Stopping...'.format(self.name))
    self._app.stop()

  def _restart(self):
    self._stop()
    self._start()

  def _on_app_started(self, app):
    self.logger.info('[App - {0}] Started.'.format(self.name))
    self.settings.status = controller_pb2.App.RUNNING
    self.proto.status = controller_pb2.Component.ON
    self.emit('status_changed', self)

  def _on_app_stopped(self, app):
    self.logger.info('[App - {0}] Stopped.'.format(self.name))
    self.settings.status = controller_pb2.App.NOT_RUNNING
    self.proto.status = controller_pb2.Component.OFF
    self.emit('status_changed', self)


class CommandLineComponent(base.Component):
  """Component to run command-line based apps on any platform."""

  def _start(self):
    for cmd in self.settings.when_on:
      self.logger.info('[{0}] Running: {1}'.format(self.name, cmd))
      ret = subprocess.call(cmd)
      self.logger.info('[{0}] Done (return code={1})'.format(self.name, ret))

  def _stop(self):
    for cmd in self.settings.when_off:
      self.logger.info('[{0}] Running: {1}'.format(self.name, cmd))
      ret = subprocess.call(cmd)
      self.logger.info('[{0}] Done (return code={1})'.format(self.name, ret))
33.612069
77
0.691459
536
3,899
4.878731
0.276119
0.064245
0.058126
0.04283
0.423709
0.299426
0.225621
0.151434
0.118547
0.081836
0
0.009485
0.188766
3,899
116
78
33.612069
0.817262
0.257758
0
0.220588
0
0
0.082511
0
0
0
0
0
0
1
0.147059
false
0
0.073529
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c72c87715b18d844a4d1e6b4c82ec44a40f2bde2
2,810
py
Python
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py
felipeek/bullet3
6a59241074720e9df119f2f86bc01765917feb1e
[ "Zlib" ]
9,136
2015-01-02T00:41:45.000Z
2022-03-31T15:30:02.000Z
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py
felipeek/bullet3
6a59241074720e9df119f2f86bc01765917feb1e
[ "Zlib" ]
2,424
2015-01-05T08:55:58.000Z
2022-03-30T19:34:55.000Z
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py
felipeek/bullet3
6a59241074720e9df119f2f86bc01765917feb1e
[ "Zlib" ]
2,921
2015-01-02T10:19:30.000Z
2022-03-31T02:48:42.000Z
"""Randomize the minitaur_gym_alternating_leg_env when reset() is called. The randomization include swing_offset, extension_offset of all legs that mimics bent legs, desired_pitch from user input, battery voltage and motor damping. """ import os, inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(os.path.dirname(currentdir)) parentdir = os.path.dirname(os.path.dirname(parentdir)) os.sys.path.insert(0, parentdir) import numpy as np import tf.compat.v1 as tf from pybullet_envs.minitaur.envs import env_randomizer_base # Absolute range. NUM_LEGS = 4 BATTERY_VOLTAGE_RANGE = (14.8, 16.8) MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01) class MinitaurAlternatingLegsEnvRandomizer(env_randomizer_base.EnvRandomizerBase): """A randomizer that changes the minitaur_gym_alternating_leg_env.""" def __init__(self, perturb_swing_bound=0.1, perturb_extension_bound=0.1, perturb_desired_pitch_bound=0.01): super(MinitaurAlternatingLegsEnvRandomizer, self).__init__() self.perturb_swing_bound = perturb_swing_bound self.perturb_extension_bound = perturb_extension_bound self.perturb_desired_pitch_bound = perturb_desired_pitch_bound def randomize_env(self, env): perturb_magnitude = np.random.uniform(low=-self.perturb_swing_bound, high=self.perturb_swing_bound, size=NUM_LEGS) env.set_swing_offset(perturb_magnitude) tf.logging.info("swing_offset: {}".format(perturb_magnitude)) perturb_magnitude = np.random.uniform(low=-self.perturb_extension_bound, high=self.perturb_extension_bound, size=NUM_LEGS) env.set_extension_offset(perturb_magnitude) tf.logging.info("extension_offset: {}".format(perturb_magnitude)) perturb_magnitude = np.random.uniform(low=-self.perturb_desired_pitch_bound, high=self.perturb_desired_pitch_bound) env.set_desired_pitch(perturb_magnitude) tf.logging.info("desired_pitch: {}".format(perturb_magnitude)) randomized_battery_voltage = np.random.uniform(BATTERY_VOLTAGE_RANGE[0], BATTERY_VOLTAGE_RANGE[1]) env.minitaur.SetBatteryVoltage(randomized_battery_voltage) tf.logging.info("battery_voltage: {}".format(randomized_battery_voltage)) randomized_motor_damping = np.random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0], MOTOR_VISCOUS_DAMPING_RANGE[1]) env.minitaur.SetMotorViscousDamping(randomized_motor_damping) tf.logging.info("motor_damping: {}".format(randomized_motor_damping))
45.322581
86
0.70605
331
2,810
5.661631
0.268882
0.058698
0.034685
0.064034
0.351121
0.233191
0.139274
0.101921
0.077908
0.077908
0
0.010772
0.207117
2,810
61
87
46.065574
0.830341
0.11032
0
0.046512
0
0
0.035772
0
0
0
0
0
0
1
0.046512
false
0
0.093023
0
0.162791
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c72ca1c8b4319d09d601fa708b5ddc14cb8e0859
14,704
py
Python
pygsti/modelmembers/states/tensorprodstate.py
pyGSTi-Developers/pyGSTi
bfedc1de4d604f14b0f958615776fb80ddb59e33
[ "Apache-2.0" ]
73
2016-01-28T05:02:05.000Z
2022-03-30T07:46:33.000Z
pygsti/modelmembers/states/tensorprodstate.py
pyGSTi-Developers/pyGSTi
bfedc1de4d604f14b0f958615776fb80ddb59e33
[ "Apache-2.0" ]
113
2016-02-25T15:32:18.000Z
2022-03-31T13:18:13.000Z
pygsti/modelmembers/states/tensorprodstate.py
pyGSTi-Developers/pyGSTi
bfedc1de4d604f14b0f958615776fb80ddb59e33
[ "Apache-2.0" ]
41
2016-03-15T19:32:07.000Z
2022-02-16T10:22:05.000Z
""" The TensorProductState class and supporting functionality. """ #*************************************************************************************************** # Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** import functools as _functools import itertools as _itertools import numpy as _np from pygsti.modelmembers.states.state import State as _State from pygsti.modelmembers import modelmember as _modelmember, term as _term from pygsti.baseobjs import statespace as _statespace from pygsti.tools import listtools as _lt from pygsti.tools import matrixtools as _mt class TensorProductState(_State): """ A state vector that is a tensor-product of other state vectors. Parameters ---------- factors : list of States a list of the component states to take the tensor product of. state_space : StateSpace, optional The state space for this operation. """ def __init__(self, factors, state_space): assert(len(factors) > 0), "Must have at least one factor!" self.factors = factors # do *not* copy - needs to reference common objects evotype = self.factors[0]._evotype rep = evotype.create_tensorproduct_state_rep([f._rep for f in factors], state_space) _State.__init__(self, rep, evotype) self.init_gpindices() # initialize our gpindices based on sub-members self._update_rep() # initializes rep data #Note: no to_memoized_dict needed, as ModelMember version does all we need. @classmethod def _from_memoized_dict(cls, mm_dict, serial_memo): state_space = _statespace.StateSpace.from_nice_serialization(mm_dict['state_space']) factors = [serial_memo[i] for i in mm_dict['submembers']] return cls(factors, state_space) def submembers(self): """ Get the ModelMember-derived objects contained in this one. Returns ------- list """ return self.factors # factor POVM object def _update_rep(self): self._rep.reps_have_changed() @property def parameter_labels(self): """ An array of labels (usually strings) describing this model member's parameters. """ vl = _np.empty(self.num_params, dtype=object) for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices): vl[factor_local_inds] = factor_state.parameter_labels return vl def to_dense(self, on_space='minimal', scratch=None): """ Return this state vector as a (dense) numpy array. The memory in `scratch` maybe used when it is not-None. Parameters ---------- on_space : {'minimal', 'Hilbert', 'HilbertSchmidt'} The space that the returned dense operation acts upon. For unitary matrices and bra/ket vectors, use `'Hilbert'`. For superoperator matrices and super-bra/super-ket vectors use `'HilbertSchmidt'`. `'minimal'` means that `'Hilbert'` is used if possible given this operator's evolution type, and otherwise `'HilbertSchmidt'` is used. scratch : numpy.ndarray, optional scratch space available for use. Returns ------- numpy.ndarray """ return self._rep.to_dense(on_space) def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False): """ Get the `order`-th order Taylor-expansion terms of this state vector. 
This function either constructs or returns a cached list of the terms at the given order. Each term is "rank-1", meaning that it is a state preparation followed by or POVM effect preceded by actions on a density matrix `rho` of the form: `rho -> A rho B` The coefficients of these terms are typically polynomials of the State's parameters, where the polynomial's variable indices index the *global* parameters of the State's parent (usually a :class:`Model`) , not the State's local parameter array (i.e. that returned from `to_vector`). Parameters ---------- order : int The order of terms to get. max_polynomial_vars : int, optional maximum number of variables the created polynomials can have. return_coeff_polys : bool Whether a parallel list of locally-indexed (using variable indices corresponding to *this* object's parameters rather than its parent's) polynomial coefficients should be returned as well. Returns ------- terms : list A list of :class:`RankOneTerm` objects. coefficients : list Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the output of :method:`Polynomial.compact`. """ terms = [] fnq = [int(round(_np.log2(f.dim))) // 2 for f in self.factors] # num of qubits per factor # assumes density matrix evolution total_nQ = sum(fnq) # total number of qubits for p in _lt.partition_into(order, len(self.factors)): factor_lists = [self.factors[i].taylor_order_terms(pi, max_polynomial_vars) for i, pi in enumerate(p)] # When possible, create COLLAPSED factor_lists so each factor has just a single # (State) pre & post op, which can be formed into the new terms' # TensorProdState ops. # - DON'T collapse stabilizer states & clifford ops - can't for POVMs collapsible = False # bool(self._evotype =="svterm") # need to use reps for collapsing now... TODO? if collapsible: factor_lists = [[t.collapse_vec() for t in fterms] for fterms in factor_lists] for factors in _itertools.product(*factor_lists): # create a term with a TensorProdState - Note we always create # "prep"-mode vectors, since even when self._prep_or_effect == "effect" these # vectors are created with factor (prep- or effect-type) States not factor POVMs # we workaround this by still allowing such "prep"-mode # TensorProdStates to be represented as effects (i.e. in torep('effect'...) works) coeff = _functools.reduce(lambda x, y: x.mult(y), [f.coeff for f in factors]) pre_rep = self._evotype.create_tensorproduct_state_rep( [f.pre_state for f in factors if (f.pre_state is not None)], self.state_space) post_rep = self._evotype.create_tensorproduct_state_rep( [f.post_state for f in factors if (f.post_state is not None)], self.state_space) term = _term.RankOnePolynomialPrepTerm.create_from(coeff, pre_rep, post_rep, self._evotype, self.state_space) if not collapsible: # then may need to add more ops. 
Assume factor ops are clifford gates # Embed each factors ops according to their target qubit(s) and just daisy chain them ss = _statespace.QubitSpace(total_nQ); curQ = 0 for f, nq in zip(factors, fnq): targetLabels = tuple(range(curQ, curQ + nq)); curQ += nq term._rep.pre_ops.extend([self._evotype.create_embedded_rep(ss, targetLabels, op) for op in f.pre_ops]) # embed and add ops term._rep.post_ops.extend([self._evotype.create_embedded_rep(ss, targetLabels, op) for op in f.post_ops]) # embed and add ops terms.append(term) if return_coeff_polys: def _decompose_indices(x): return tuple(_modelmember._decompose_gpindices( self.gpindices, _np.array(x, _np.int64))) poly_coeffs = [t.coeff.map_indices(_decompose_indices) for t in terms] # with *local* indices tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs] if len(tapes) > 0: vtape = _np.concatenate([t[0] for t in tapes]) ctape = _np.concatenate([t[1] for t in tapes]) else: vtape = _np.empty(0, _np.int64) ctape = _np.empty(0, complex) coeffs_as_compact_polys = (vtape, ctape) #self.local_term_poly_coeffs[order] = coeffs_as_compact_polys #FUTURE? return terms, coeffs_as_compact_polys else: return terms # Cache terms in FUTURE? @property def num_params(self): """ Get the number of independent parameters which specify this state vector. Returns ------- int the number of independent parameters. """ return len(self.gpindices_as_array()) def to_vector(self): """ Get the state vector parameters as an array of values. Returns ------- numpy array The parameters as a 1D array with length num_params(). """ v = _np.empty(self.num_params, 'd') for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices): v[factor_local_inds] = factor_state.to_vector() return v def from_vector(self, v, close=False, dirty_value=True): """ Initialize the state vector using a 1D array of parameters. Parameters ---------- v : numpy array The 1D vector of state vector parameters. Length must == num_params() close : bool, optional Whether `v` is close to this state vector's current set of parameters. Under some circumstances, when this is true this call can be completed more quickly. dirty_value : bool, optional The value to set this object's "dirty flag" to before exiting this call. This is passed as an argument so it can be updated *recursively*. Leave this set to `True` unless you know what you're doing. Returns ------- None """ for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices): factor_state.from_vector(v[factor_local_inds], close, dirty_value) #Update representation, which may be a dense matrix or # just fast-kron arrays or a stabilizer state. self._update_rep() # TODO - how does this apply to state reps?? def deriv_wrt_params(self, wrt_filter=None): """ The element-wise derivative this state vector. Construct a matrix whose columns are the derivatives of the state vector with respect to a single param. Thus, each column is of length dimension and there is one column per state vector parameter. An empty 2D array in the StaticState case (num_params == 0). Parameters ---------- wrt_filter : list or numpy.ndarray List of parameter indices to take derivative with respect to. (None means to use all the this operation's parameters.) 
Returns ------- numpy array Array of derivatives, shape == (dimension, num_params) """ typ = self.factors[0].to_dense(on_space='minimal').dtype if len(self.factors) > 0 else 'd' #HACK to deal with fact that output of to_dense is really what is differentiated # but this may not match self.dim == self.state_space.dim, e.g. for pure state vecs. dims = [len(fct.to_dense(on_space='minimal')) for fct in self.factors] dim = int(_np.product(dims)) derivMx = _np.zeros((dim, self.num_params), typ) #Product rule to compute jacobian # loop over the spamvec/povm we differentiate wrt: for i, (fct, fct_local_inds, fct_dim) in enumerate(zip(self.factors, self._submember_rpindices, dims)): vec = fct if vec.num_params == 0: continue # no contribution deriv = vec.deriv_wrt_params(None) # TODO: use filter?? / make relative to this gate... deriv.shape = (fct_dim, vec.num_params) if i > 0: # factors before ith pre = self.factors[0].to_dense(on_space='minimal') for vecA in self.factors[1:i]: pre = _np.kron(pre, vecA.to_dense(on_space='minimal')) deriv = _np.kron(pre[:, None], deriv) # add a dummy 1-dim to 'pre' and do kron properly... if i + 1 < len(self.factors): # factors after ith post = self.factors[i + 1].to_dense(on_space='minimal') for vecA in self.factors[i + 2:]: post = _np.kron(post, vecA.to_dense(on_space='minimal')) deriv = _np.kron(deriv, post[:, None]) # add a dummy 1-dim to 'post' and do kron properly... assert(fct_local_inds is not None), \ "Error: gpindices has not been initialized for factor %d - cannot compute derivative!" % i derivMx[:, fct_local_inds] += deriv derivMx.shape = (dim, self.num_params) # necessary? if wrt_filter is None: return derivMx else: return _np.take(derivMx, wrt_filter, axis=1) def has_nonzero_hessian(self): """ Whether this state vector has a non-zero Hessian with respect to its parameters. Returns ------- bool """ return False def __str__(self): s = "Tensor product %s vector with length %d\n" % (self._prep_or_effect, self.dim) #ar = self.to_dense() #s += _mt.mx_to_string(ar, width=4, prec=2) # factors are just other States s += " x ".join([_mt.mx_to_string(fct.to_dense(on_space='minimal'), width=4, prec=2) for fct in self.factors]) return s
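The Kronecker product rule that deriv_wrt_params implements above can be seen in miniature with plain numpy. This is an illustration under simplified assumptions (two factors, one parameter each), not pyGSTi code: for psi = a ⊗ b, the Jacobian column for a's parameter is kron(da, b) and the column for b's parameter is kron(a, db).

import numpy as np

# each factor has a single parameter; da, db are the per-factor derivatives
a = np.array([1.0, 0.0]); da = np.array([0.0, 1.0])
b = np.array([0.6, 0.8]); db = np.array([0.8, -0.6])

# product rule: d(a ⊗ b)/d(theta_a) = da ⊗ b, d(a ⊗ b)/d(theta_b) = a ⊗ db
jac = np.column_stack([np.kron(da, b), np.kron(a, db)])
print(jac.shape)  # (4, 2): dimension x num_params, as in deriv_wrt_params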
42.994152
118
0.609698
1,890
14,704
4.602646
0.251323
0.02529
0.014484
0.012875
0.124267
0.098402
0.080929
0.069663
0.054834
0.046557
0
0.00568
0.293526
14,704
341
119
43.120235
0.831729
0.45287
0
0.081967
0
0
0.033569
0
0
0
0
0.008798
0.016393
1
0.114754
false
0
0.065574
0.008197
0.295082
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c72e729bd791fda04d1f1bf87cc60496068da071
5,862
py
Python
amazing/maze.py
danieloconell/maze-solver
f60e476d827d59bfa17cd2148787332707846882
[ "MIT" ]
null
null
null
amazing/maze.py
danieloconell/maze-solver
f60e476d827d59bfa17cd2148787332707846882
[ "MIT" ]
2
2021-06-08T19:35:19.000Z
2021-09-08T00:44:59.000Z
amazing/maze.py
danieloconell/amazing
f60e476d827d59bfa17cd2148787332707846882
[ "MIT" ]
null
null
null
from .exceptions import MazeNotSolved, AlgorithmNotFound
from .dijkstra import Dijkstra
from .astar import Astar

from functools import wraps
import warnings

from daedalus import Maze as _maze
from PIL import Image

warnings.simplefilter("once", UserWarning)


class Maze:
    """
    Create a maze and solve it.

    Available algorithms:
        dijkstra
        astar (WIP)

    Steps:
      1. Create maze using the daedalus library.
      2. Convert maze to graph.
      3. Solve maze with algorithm.
    """

    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)
    RED = (255, 0, 0)

    def __init__(self, width, height, algorithm="dijkstra"):
        """Set algorithm to be used when solving.

        Args:
            algorithm (str) to be used when solving maze
            width (int) of maze in pixels
            height (int) of maze in pixels
        """
        self.algorithm = algorithm
        if not width % 2 or not height % 2:
            warnings.warn(
                "Using even width or height, use odd numbers for optimal images"
            )
        self._create_maze(width, height)
        self._create_graph()
        self.width = width
        self.height = height

    def _create_maze(self, width, height):
        """Make maze to be solved and add border to maze.

        Args:
            width (int) of maze
            height (int) of maze
        """
        # create maze
        self.maze = _maze(width, height)
        self.maze.create_perfect()

        # define maze variables
        self.entrance = self.maze.entrance
        self.exit = self.maze.exit

        # add index to maze
        self.maze = {
            row_i: {item_i: item for item_i, item in enumerate(row)}
            for row_i, row in enumerate(self.maze)
        }

    def _create_graph(self):
        """Remove unnecessary states from maze and convert maze to graph to be solved."""
        self.graph = {}

        # convert to graph: every open cell maps to its open neighbours
        for column in self.maze.keys():
            for row in self.maze[column].keys():
                item = self.maze[column][row]
                if item != 1:
                    neighbours = []
                    try:
                        if self.maze[column][row - 1] != 1:
                            neighbours.append(["left", (column, row - 1)])
                    except KeyError:
                        pass
                    try:
                        if self.maze[column][row + 1] != 1:
                            neighbours.append(["right", (column, row + 1)])
                    except KeyError:
                        pass
                    try:
                        if self.maze[column - 1][row] != 1:
                            neighbours.append(["above", (column - 1, row)])
                    except KeyError:
                        pass
                    try:
                        if self.maze[column + 1][row] != 1:
                            neighbours.append(["below", (column + 1, row)])
                    except KeyError:
                        pass
                    self.graph[(column, row)] = {x[1]: 1 for x in neighbours}
        # TODO: remove unnecessary states

    def _maze_maker(file_name):
        def real_decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                data = []
                for row_i, row in enumerate(list(self.maze)):
                    for item_i, item in enumerate(self.maze[row].values()):
                        func(self, data, item, row_i=row_i, item_i=item_i)
                # save maze
                image = Image.new("RGB", (self.width, self.height))
                image.putdata(data)
                image.save(file_name)
            return wrapper
        return real_decorator

    @_maze_maker("maze.png")
    def save(self, data, item, row_i=None, item_i=None):
        """Save maze locally as an image."""
        # walls (truthy cells) are drawn black, passages white
        if item:
            data.append(self.BLACK)
        else:
            data.append(self.WHITE)

    def solve(self):
        """
        Solve maze using specified algorithm.

        Returns:
            shortest path as a queue from start to finish of maze
        """
        if self.algorithm == "astar":
            algorithm = Astar()
        elif self.algorithm == "dijkstra":
            algorithm = Dijkstra()
        else:
            raise AlgorithmNotFound(
                f"Invalid algorithm: {self.algorithm}. See help({type(self).__name__}) for available algorithms."
            )
        # add nodes to graph
        for node in self.graph:
            algorithm.add_node(node, self.graph[node])
        # pydaedalus stores the y value before the x value, so both need to be reversed
        self.entrance = tuple(reversed(self.entrance))
        self.exit = tuple(reversed(self.exit))
        self.path = algorithm.shortest_path(self.entrance, self.exit)

    @_maze_maker("solution.png")
    def save_solution(self, data, item, row_i=None, item_i=None):
        """Save maze image and the shortest path."""
        if not hasattr(self, "path"):
            raise MazeNotSolved(
                f"Maze must be solved to save solution. Run {type(self).__name__}.solve() first."
            )
        if (row_i, item_i) in self.path:
            data.append(self.RED)
        elif item:
            data.append(self.BLACK)
        else:
            data.append(self.WHITE)

    def __str__(self):
        """Just cause it looks nice."""
        string = []
        for row in self.maze:
            string.append(["█" if item else " " for item in self.maze[row].values()])
        return "\n".join(["".join(line) for line in string])

    def __repr__(self):
        """Easier on the eyes."""
        return f"Maze(algorithm='{self.algorithm}', width={self.width}, height={self.height})"
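A hypothetical end-to-end run of the Maze class above (assumes the daedalus and Pillow packages are installed; 41x41 follows the odd-dimension advice in the constructor warning):

maze = Maze(41, 41, algorithm="dijkstra")
maze.save()            # writes maze.png
maze.solve()           # computes maze.path with Dijkstra
maze.save_solution()   # writes solution.png with the path drawn in red
print(maze)            # ASCII preview of the maze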
32.932584
114
0.525589
677
5,862
4.463811
0.248154
0.045003
0.027796
0.017207
0.224355
0.176042
0.134348
0.134348
0.134348
0.134348
0
0.010052
0.372057
5,862
177
115
33.118644
0.810649
0.166496
0
0.171171
0
0.009009
0.082586
0.023688
0
0
0
0.00565
0
1
0.099099
false
0
0.063063
0
0.234234
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c72eaa2b73efe739c3a50690c7c96660b59023bd
4,215
py
Python
config.py
FarbodFarhangfar/midi_player_python
924cd164b7867d294c761a70d06ab330fa1b8373
[ "MIT" ]
null
null
null
config.py
FarbodFarhangfar/midi_player_python
924cd164b7867d294c761a70d06ab330fa1b8373
[ "MIT" ]
null
null
null
config.py
FarbodFarhangfar/midi_player_python
924cd164b7867d294c761a70d06ab330fa1b8373
[ "MIT" ]
null
null
null
def get_note_dic():
    """Map note names (sharps and enharmonic flats) to semitone offsets from C."""
    _note_dic = {'C': 0, 'C#': 1, 'Db': 1, 'D': 2, 'D#': 3, 'Eb': 3,
                 'E': 4, 'F': 5, 'F#': 6, 'Gb': 6, 'G': 7, 'G#': 8,
                 'Ab': 8, 'A': 9, 'A#': 10, 'Bb': 10, 'B': 11}
    return _note_dic


def get_value_list():
    """Map note-length strings (decimal or fraction form) to numeric values."""
    values = {"16": 16, "8": 8, "4": 4, "2": 2, "1": 1,
              "0.5": 0.5, "1/2": 0.5,
              "0.25": 0.25, "1/4": 0.25,
              "0.125": 0.125, "1/8": 0.125,
              "0.0625": 0.0625, "1/16": 0.0625,
              "0.03125": 0.03125, "1/32": 0.03125}
    return values


def instruments(inst):
    """Return the General MIDI instrument-name -> program-number table.

    Note: the ``inst`` argument is currently unused; callers receive the
    whole mapping and index it themselves.
    """
    instruments_dict = {
        # Piano
        'Acoustic Grand Piano': '1', 'Bright Acoustic Piano': '2',
        'Electric Grand Piano': '3', 'Honky-tonk Piano': '4',
        'Electric Piano 1': '5', 'Electric Piano 2': '6',
        'Harpsichord': '7', 'Clavi': '8',
        # Chromatic Percussion
        'Celesta': '9', 'Glockenspiel': '10', 'Music Box': '11',
        'Vibraphone': '12', 'Marimba': '13', 'Xylophone': '14',
        'Tubular Bells': '15', 'Dulcimer': '16',
        # Organ
        'Drawbar Organ': '17', 'Percussive Organ': '18', 'Rock Organ': '19',
        'Church Organ': '20', 'Reed Organ': '21', 'Accordion': '22',
        'Harmonica': '23', 'Tango Accordion': '24',
        # Guitar
        'Acoustic Guitar (nylon)': '25', 'Acoustic Guitar (steel)': '26',
        'Electric Guitar (jazz)': '27', 'Electric Guitar (clean)': '28',
        'Electric Guitar (muted)': '29', 'Overdriven Guitar': '30',
        'Distortion Guitar': '31', 'Guitar Harmonics': '32',
        # Bass
        'Acoustic Bass': '33', 'Electric Bass (finger)': '34',
        'Electric Bass (pick)': '35', 'Fretless Bass': '36',
        'Slap Bass 1': '37', 'Slap Bass 2': '38',
        'Synth Bass 1': '39', 'Synth Bass 2': '40',
        # Strings
        'Violin': '41', 'Viola': '42', 'Cello': '43', 'Contrabass': '44',
        'Tremolo Strings': '45', 'Pizzicato Strings': '46',
        'Orchestral Harp': '47', 'Timpani': '48',
        # Ensemble
        'String Ensemble 1': '49', 'String Ensemble 2': '50',
        'Synth Strings 1': '51', 'Synth Strings 2': '52',
        'Choir Aahs': '53', 'Voice Oohs': '54',
        'Synth Choir': '55', 'Orchestra Hit': '56',
        # Brass
        'Trumpet': '57', 'Trombone': '58', 'Tuba': '59',
        'Muted Trumpet': '60', 'French Horn': '61', 'Brass Section': '62',
        'Synth Brass 1': '63', 'Synth Brass 2': '64',
        # Reed
        'Soprano Sax': '65', 'Alto Sax': '66', 'Tenor Sax': '67',
        'Baritone Sax': '68', 'Oboe': '69', 'English Horn': '70',
        'Bassoon': '71', 'Clarinet': '72',
        # Pipe
        'Piccolo': '73', 'Flute': '74', 'Recorder': '75', 'Pan Flute': '76',
        'Blown bottle': '77', 'Shakuhachi': '78', 'Whistle': '79',
        'Ocarina': '80',
        # Synth Lead
        'Lead 1 (square)': '81', 'Lead 2 (sawtooth)': '82',
        'Lead 3 (calliope)': '83', 'Lead 4 (chiff)': '84',
        'Lead 5 (charang)': '85', 'Lead 6 (voice)': '86',
        'Lead 7 (fifths)': '87', 'Lead 8 (bass + lead)': '88',
        # Synth Pad
        'Pad 1 (new age)': '89', 'Pad 2 (warm)': '90',
        'Pad 3 (polysynth)': '91', 'Pad 4 (choir)': '92',
        'Pad 5 (bowed)': '93', 'Pad 6 (metallic)': '94',
        'Pad 7 (halo)': '95', 'Pad 8 (sweep)': '96',
        # Synth Effects
        'FX 1 (rain)': '97', 'FX 2 (soundtrack)': '98',
        'FX 3 (crystal)': '99', 'FX 4 (atmosphere)': '100',
        'FX 5 (brightness)': '101', 'FX 6 (goblins)': '102',
        'FX 7 (echoes)': '103', 'FX 8 (sci-fi)': '104',
        # Ethnic
        'Sitar': '105', 'Banjo': '106', 'Shamisen': '107', 'Koto': '108',
        'Kalimba': '109', 'Bagpipe': '110', 'Fiddle': '111', 'Shanai': '112',
        # Percussive
        'Tinkle Bell': '113', 'Agogo': '114', 'Steel Drums': '115',
        'Woodblock': '116', 'Taiko Drum': '117', 'Melodic Tom': '118',
        'Synth Drum': '119', 'Reverse Cymbal': '120',
        # Sound effects
        'Guitar Fret Noise': '121', 'Breath Noise': '122', 'Seashore': '123',
        'Bird Tweet': '124', 'Telephone Ring': '125', 'Helicopter': '126',
        'Applause': '127'}
    return instruments_dict
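A minimal usage sketch for the note table above. The helper name note_to_midi is hypothetical (not part of config.py), and it assumes the common convention that C4 (middle C) maps to MIDI note 60:

from config import get_note_dic

def note_to_midi(name, octave):
    # hypothetical helper: 12 semitones per octave, C4 -> 60 convention assumed
    return 12 * (octave + 1) + get_note_dic()[name]

print(note_to_midi('C', 4))   # 60
print(note_to_midi('A', 4))   # 69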
38.669725
106
0.474496
523
4,215
3.804971
0.577438
0.010553
0.003015
0
0
0
0
0
0
0
0
0.137164
0.285647
4,215
108
107
39.027778
0.523746
0.034164
0
0
0
0
0.46596
0
0
0
0
0
0
1
0.042254
false
0
0.014085
0
0.098592
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c72f4c5b309a87813b09f64b422ca7519b3e740b
2,182
py
Python
roles/openshift_health_checker/library/ocutil.py
shgriffi/openshift-ansible
6313f519307cf50055589c3876d8bec398bbc4d4
[ "Apache-2.0" ]
164
2015-07-29T17:35:04.000Z
2021-12-16T16:38:04.000Z
roles/openshift_health_checker/library/ocutil.py
shgriffi/openshift-ansible
6313f519307cf50055589c3876d8bec398bbc4d4
[ "Apache-2.0" ]
3,634
2015-06-09T13:49:15.000Z
2022-03-23T20:55:44.000Z
roles/openshift_health_checker/library/ocutil.py
shgriffi/openshift-ansible
6313f519307cf50055589c3876d8bec398bbc4d4
[ "Apache-2.0" ]
250
2015-06-08T19:53:11.000Z
2022-03-01T04:51:23.000Z
#!/usr/bin/python
"""Interface to OpenShift oc command"""

import os
import shlex
import shutil
import subprocess

from ansible.module_utils.basic import AnsibleModule

ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    """Find and return oc binary file"""
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS

    oc_binary = 'oc'

    # Use shutil.which if it is available, otherwise fallback to a naive path search
    try:
        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
        if which_result is not None:
            oc_binary = which_result
    except AttributeError:
        for path in paths:
            if os.path.exists(os.path.join(path, oc_binary)):
                oc_binary = os.path.join(path, oc_binary)
                break

    return oc_binary


def main():
    """Module that executes commands on a remote OpenShift cluster"""

    module = AnsibleModule(
        argument_spec=dict(
            namespace=dict(type="str", required=False),
            config_file=dict(type="str", required=True),
            cmd=dict(type="str", required=True),
            extra_args=dict(type="list", default=[]),
        ),
    )

    cmd = [locate_oc_binary(), '--config', module.params["config_file"]]
    if module.params["namespace"]:
        cmd += ['-n', module.params["namespace"]]
    cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"]

    failed = True
    try:
        cmd_result = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
        failed = False
    except subprocess.CalledProcessError as exc:
        cmd_result = '[rc {}] {}\n{}'.format(exc.returncode, ' '.join(exc.cmd), exc.output)
    except OSError as exc:
        # we get this when 'oc' is not there
        cmd_result = str(exc)

    module.exit_json(
        changed=False,
        failed=failed,
        result=cmd_result,
    )


if __name__ == '__main__':
    main()
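A quick local check of the lookup logic above (hypothetical: importing ocutil requires ansible to be installed, since the module imports AnsibleModule at import time):

from ocutil import locate_oc_binary

# returns e.g. '/usr/local/bin/oc' if found on the extended search path,
# or the bare command name 'oc' if nothing is found
print(locate_oc_binary())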
29.486486
91
0.636114
283
2,182
4.770318
0.409894
0.059259
0.024444
0.042222
0.066667
0.032593
0
0
0
0
0
0.002405
0.237855
2,182
73
92
29.890411
0.809381
0.186068
0
0.042553
0
0
0.064387
0
0
0
0
0
0
1
0.042553
false
0
0.106383
0
0.170213
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c7300e0d4920ea9bf3233fb48ec01feb851a08ad
4,125
py
Python
code/network/__init__.py
michalochman/complex-networks
49337376e32fac253d8de9919d5acd00a9b566bb
[ "MIT" ]
null
null
null
code/network/__init__.py
michalochman/complex-networks
49337376e32fac253d8de9919d5acd00a9b566bb
[ "MIT" ]
null
null
null
code/network/__init__.py
michalochman/complex-networks
49337376e32fac253d8de9919d5acd00a9b566bb
[ "MIT" ]
null
null
null
import fractions


class Network(object):
    """Similarity metrics over a two-level mapping:
    network[link_type][key] -> {neighbour_key: weight}.

    Note: this module is written for Python 2 (itervalues/iteritems,
    fractions.gcd).
    """

    def __init__(self, network):
        self.network = network

    def degree(self, link_type, key):
        return len(self.network.get(link_type).get(key))

    def average_degree(self, link_type):
        degree = 0
        for link in self.network.get(link_type).itervalues():
            degree += len(link)
        return float(degree) / float(len(self.network.get(link_type)))

    def nn_degree(self, link_type, link_n_type, key):
        """Sum of the degrees of key's nearest neighbours, over key's own degree."""
        degree = self.degree(link_type, key)
        nn_degree = 0
        # iterate over key's neighbours (the original `self.network.get(link_type, key)`
        # treated `key` as a dict.get() default instead of looking up the neighbours)
        for n_key in self.network.get(link_type).get(key).itervalues():
            nn_degree += self.degree(link_n_type, n_key)
        return '%d/%d' % (nn_degree, degree)

    def jaccard_index(self, set_a, set_b):
        n = len(set_a & set_b)
        return float(n) / float(len(set_a) + len(set_b) - n)

    def jaccard_similarity(self, link_type, key_a, key_b, return_string=False):
        key_a = int(key_a)
        key_b = int(key_b)
        set_a = set(self.network.get(link_type).get(key_a).values())
        set_b = set(self.network.get(link_type).get(key_b).values())
        if return_string:
            intersection = len(set_a & set_b)
            union = len(set_a | set_b)
            gcd = fractions.gcd(intersection, union)
            return '%d/%d' % (intersection / gcd, union / gcd)
        return self.jaccard_index(set_a, set_b)

    def collaborative_similarity(self, link_type, link_n_type, key, return_string=False):
        degree = self.degree(link_type, key)
        if degree <= 1:
            return 0
        similarity_sum = 0
        for n_key_1 in self.network.get(link_type).get(key).itervalues():
            for n_key_2 in self.network.get(link_type).get(key).itervalues():
                if n_key_1 == n_key_2:
                    continue
                similarity_sum += self.jaccard_similarity(link_n_type, n_key_1, n_key_2)
        if return_string:
            precision = 1e3
            new_similarity_sum = round(similarity_sum * degree * (degree - 1) * precision)
            gcd = fractions.gcd(new_similarity_sum, degree * (degree - 1) * precision)
            new_similarity_sum /= gcd
            return '%d/%d' % (new_similarity_sum,
                              degree * (degree - 1) * round(new_similarity_sum / similarity_sum))
        return similarity_sum / (degree * (degree - 1))

    def average_jaccard_similarity(self, link_type, link_n_type, return_string=False):
        nodes = 0
        similarity_sum = 0
        for key_links in self.network.get(link_type).itervalues():
            for n_key_1 in key_links.itervalues():
                for n_key_2 in key_links.itervalues():
                    if n_key_1 == n_key_2:
                        continue
                    nodes += 1
                    similarity_sum += self.jaccard_similarity(link_n_type, n_key_1, n_key_2)
        if nodes == 0:
            return 0
        if return_string:
            precision = 1e3
            new_similarity_sum = round(similarity_sum * nodes * precision)
            gcd = fractions.gcd(new_similarity_sum, nodes * precision)
            new_similarity_sum /= gcd
            return '%d/%d' % (new_similarity_sum, nodes * round(new_similarity_sum / similarity_sum))
        return similarity_sum / nodes

    def network_collaborative_similarity(self, link_type, link_n_type, return_string=False):
        nodes = 0
        similarity_sum = 0
        for key, key_links in self.network.get(link_type).iteritems():
            if self.degree(link_type, key) <= 1:
                continue
            nodes += 1
            collaborative_similarity = self.collaborative_similarity(link_type, link_n_type, key)
            similarity_sum += collaborative_similarity
        if nodes == 0:
            return 0
        if return_string:
            precision = 1e3
            new_similarity_sum = round(similarity_sum * nodes * precision)
            gcd = fractions.gcd(new_similarity_sum, nodes * precision)
            new_similarity_sum /= gcd
            return '%d/%d' % (new_similarity_sum, nodes * (new_similarity_sum / similarity_sum))
        return similarity_sum / nodes
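A worked example of the Jaccard index the class relies on, J(A, B) = |A ∩ B| / |A ∪ B|, written with the same |A| + |B| − |A ∩ B| denominator the code uses:

a, b = {1, 2, 3}, {2, 3, 4}
n = len(a & b)                            # 2 shared neighbours
print(float(n) / (len(a) + len(b) - n))   # 2 / 4 = 0.5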
42.96875
109
0.615758
548
4,125
4.335766
0.091241
0.164141
0.10101
0.075758
0.717172
0.646465
0.565236
0.506734
0.430976
0.307239
0
0.012903
0.286061
4,125
95
110
43.421053
0.793888
0
0
0.435294
0
0
0.006061
0
0
0
0
0
0
1
0.105882
false
0
0.011765
0.011765
0.305882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c730483de9837a25bc1e629091819a776f0b1ff3
3,055
py
Python
invoke_ansible.py
samvarankashyap/ansible_api_usage
d03c67b4606d2e101ef7341bd31161b4db39cd5b
[ "Apache-2.0" ]
null
null
null
invoke_ansible.py
samvarankashyap/ansible_api_usage
d03c67b4606d2e101ef7341bd31161b4db39cd5b
[ "Apache-2.0" ]
null
null
null
invoke_ansible.py
samvarankashyap/ansible_api_usage
d03c67b4606d2e101ef7341bd31161b4db39cd5b
[ "Apache-2.0" ]
null
null
null
from collections import namedtuple

from ansible import utils
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.executor.playbook_executor import PlaybookExecutor

from callbacks import PlaybookCallback


def invoke_ansible_playbook(module_path, e_vars, playbook_path="site.yml", console=True):
    """Invokes playbook"""

    loader = DataLoader()
    variable_manager = VariableManager()
    variable_manager.extra_vars = e_vars
    inventory = Inventory(loader=loader,
                          variable_manager=variable_manager,
                          host_list=['localhost'])
    passwords = {}
    utils.VERBOSITY = 4

    # the legacy Ansible Python API expects an options object with these fields
    Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts',
                                     'syntax', 'connection', 'module_path',
                                     'forks', 'remote_user', 'private_key_file',
                                     'ssh_common_args', 'ssh_extra_args',
                                     'sftp_extra_args', 'scp_extra_args',
                                     'become', 'become_method', 'become_user',
                                     'verbosity', 'check'])
    options = Options(listtags=False,
                      listtasks=False,
                      listhosts=False,
                      syntax=False,
                      connection='ssh',
                      module_path=module_path,
                      forks=100,
                      remote_user='root',
                      private_key_file=None,
                      ssh_common_args=None,
                      ssh_extra_args=None,
                      sftp_extra_args=None,
                      scp_extra_args=None,
                      become=False,
                      become_method=None,
                      become_user='root',
                      verbosity=utils.VERBOSITY,
                      check=False)

    pbex = PlaybookExecutor(playbooks=[playbook_path],
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            options=options,
                            passwords=passwords)

    if not console:
        # capture results with a callback instead of printing to stdout
        cb = PlaybookCallback()
        pbex._tqm._stdout_callback = cb
        return_code = pbex.run()
        results = cb.results
    else:
        results = pbex.run()

    return results
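A hypothetical call site for the function above (the module path, playbook name and extra vars are placeholders; this targets the same legacy pre-2.4 Ansible Python API the imports assume):

results = invoke_ansible_playbook(
    module_path="library/",
    e_vars={"app_version": "1.2.3"},
    playbook_path="site.yml",
    console=False,   # capture results via PlaybookCallback instead of stdout
)
print(results)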
40.197368
89
0.466776
225
3,055
6.12
0.351111
0.055919
0.028322
0.031954
0.037763
0
0
0
0
0
0
0.003092
0.470704
3,055
75
90
40.733333
0.848485
0.005237
0
0.056338
0
0
0.073243
0
0
0
0
0
0
1
0.014085
false
0.028169
0.169014
0
0.197183
0.014085
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c733c87e85c1c4f5626af759efe7bb3290f415c6
2,336
py
Python
bin/python/csv2es.py
reid-wagner/proteomics-pipelines
2214c2ad4c14fabcb50a3c0800e9d383ce73df3d
[ "MIT" ]
2
2018-09-06T14:05:59.000Z
2022-02-18T10:09:06.000Z
bin/python/csv2es.py
reid-wagner/proteomics-pipelines
2214c2ad4c14fabcb50a3c0800e9d383ce73df3d
[ "MIT" ]
7
2018-09-30T00:49:04.000Z
2022-01-27T07:55:26.000Z
bin/python/csv2es.py
reid-wagner/proteomics-pipelines
2214c2ad4c14fabcb50a3c0800e9d383ce73df3d
[ "MIT" ]
3
2019-10-29T12:20:45.000Z
2021-10-06T14:38:43.000Z
#!/usr/bin/env python3

import sys
import os
import json
from glob import glob

import pandas as pd
from elasticsearch import Elasticsearch, helpers

host = sys.argv[1]
port = int(sys.argv[2])
alias = sys.argv[3]

print(host)
print(port)
print(alias)

es = Elasticsearch([{'host': host, 'port': port}])

# Get all csv files in /root/data
files = [y for x in os.walk('/root/data') for y in glob(os.path.join(x[0], '*.csv'))]

count = 0


def clean_field(val):
    # collapse dots, whitespace and slashes into single underscores
    for sep in ('.', None, '/'):  # str.split(None) splits on whitespace
        val = '_'.join(i for i in val.split(sep) if i != '')
    return val


es.indices.delete(index=alias + '*', ignore=[400, 404])

indices = []
for file in files:
    data = pd.read_csv(file, sep=None, engine='python')
    index = alias + '_'.join(file.split('/'))
    index = clean_field(index).lower().split('_csv')[0]
    indices.append(index)
    es.indices.create(index)
    for col in data.columns:
        if col.startswith('Unnamed'):
            del data[col]
        else:
            data.rename(columns={col: clean_field(col)}, inplace=True)
    data = data.reset_index()  # make sure there is no duplicate indexing
    data.rename(columns={'index': 'row'}, inplace=True)
    data['File'] = file
    data['_id'] = data['File'] + '.{}.'.format(str(count)) + data.reset_index()['index'].apply(str)
    data['_type'] = "document"
    data['_index'] = index
    records = data.to_json(orient='records')
    records = json.loads(records)
    helpers.bulk(es, records, chunk_size=100)
    count += 1
    print(es.count(index=index))

# Create an index table in elasticsearch to locate the files
indices_table = pd.DataFrame()
indices_table['Index'] = pd.Series(indices)
indices_table['File'] = pd.Series(files)
indices_table['Alias'] = alias
indices_table['_id'] = indices_table['Alias'] + '.' + indices_table['File']
indices_table['_type'] = "document"
indices_table['_index'] = alias + '_indices'
es.indices.create(alias + '_indices')
records = indices_table.to_json(orient='records')
records = json.loads(records)
helpers.bulk(es, records, chunk_size=100)
print(es.count(index=alias + '_indices'))
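Tracing clean_field on a typical path-like name (assuming clean_field from the script above is in scope): dots, whitespace and slashes all collapse to single underscores, and empty segments are dropped.

print(clean_field('/root/data/run 1/results.v2.csv'))
# -> 'root_data_run_1_results_v2_csv'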
28.144578
99
0.644264
337
2,336
4.364985
0.311573
0.081577
0.018355
0.028552
0.17811
0.17811
0.17811
0.17811
0.17811
0.17811
0
0.010526
0.186644
2,336
82
100
28.487805
0.763684
0.074914
0
0.15625
0
0
0.08314
0
0
0
0
0
0
1
0.015625
false
0
0.125
0
0.15625
0.078125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c735745b02553eb9e477617ad9c63df5e4730b1c
3,793
py
Python
bos_sarcat_scraper/__main__.py
hysds/bos_sarcat_scraper
1bf3612e7d8fad80c8704a909087be19cc3e1db2
[ "Apache-2.0" ]
1
2020-06-24T00:25:30.000Z
2020-06-24T00:25:30.000Z
bos_sarcat_scraper/__main__.py
aria-jpl/bos_sarcat_scraper
1bf3612e7d8fad80c8704a909087be19cc3e1db2
[ "Apache-2.0" ]
null
null
null
bos_sarcat_scraper/__main__.py
aria-jpl/bos_sarcat_scraper
1bf3612e7d8fad80c8704a909087be19cc3e1db2
[ "Apache-2.0" ]
1
2019-05-08T17:15:00.000Z
2019-05-08T17:15:00.000Z
from __future__ import absolute_import
from builtins import str
from builtins import input
import sys
import argparse
from . import bosart_scrape
import datetime
import json


def valid_date(s):
    try:
        try:
            date = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%fZ")
        except ValueError:
            date = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
        return date
    except ValueError:
        msg = "Not a valid date: '{0}'.".format(s)
        raise argparse.ArgumentTypeError(msg)


def geojson(spatial_extent):
    if type(json.loads(spatial_extent)) is dict:
        return spatial_extent
    raise argparse.ArgumentTypeError("spatialExtent must be a GeoJSON object.")


def sort_field(s_f):
    if s_f in ("start_time", "stop_time", "bos_ingest"):
        return s_f
    raise argparse.ArgumentTypeError(
        "The value for sortBy should be either start_time, stop_time or bos_ingest, not %s." % s_f)


def sort_order(order):
    if order in ("asc", "des"):
        return order
    raise argparse.ArgumentTypeError(
        "The value for sort should be either asc or des, not %s." % order)


def check_inputs(args):
    if not args.fromTime and not args.fromBosIngestTime:
        print("You have NOT specified any start time using --fromTime, -from or --fromBosIngestTime."
              "\nYou are asking to find all acquisitions from the beginning of time!"
              "\nThis query will take a very long time.\nTHIS IS NOT RECOMMENDED.")
        resp = str(input('Are you sure you want to proceed? (y/n):'))
        if resp.lower() == "y":
            print("Okay! Please wait...")
            return True
        elif resp.lower() == "n":
            print("Please try again with the start time specified using --fromTime, -from or --fromBosIngestTime.")
            exit()
        else:
            print("Please specify y/n\n")
            return False
    return True


def main():
    parser = argparse.ArgumentParser(description='Query BOS SarCat for acquisitions.')
    parser.add_argument("-from", "--fromTime", type=valid_date,
                        help='specify the temporal start point, to get acquisitions starting after '
                             'the given timestamp in the format yyyy-mm-ddThh:mm:ss.sssZ')
    parser.add_argument("--fromBosIngestTime", type=valid_date,
                        help='provide date and time, to get acquisitions acquired by BOS after the '
                             'given timestamp in the format yyyy-mm-ddThh:mm:ss.sssZ')
    parser.add_argument("-to", "--toTime", type=valid_date,
                        help='specify the temporal end point, to get acquisitions ending before the '
                             'given timestamp in the format yyyy-mm-ddThh:mm:ss.sssZ')
    parser.add_argument("--spatialExtent", type=geojson,
                        help='specify the area of interest in GeoJSON format')
    parser.add_argument("--sortBy", type=sort_field,
                        help='type "start_time", "stop_time" or "bos_ingest" to sort results by field')
    parser.add_argument("--sort", type=sort_order,
                        help='type "asc" or "des" to get results in ascending or descending order of '
                             'time respectively. If sortBy is specified but sort is not, then defaults '
                             'to ascending')

    args = parser.parse_args()

    checked = False
    while not checked:
        checked = check_inputs(args)

    # construct the parameter list based on user specified restrictions
    params = {}
    if args.fromTime:
        params["fromTime"] = args.fromTime
    if args.fromBosIngestTime:
        params["fromBosIngestTime"] = args.fromBosIngestTime
    if args.toTime:
        params["toTime"] = args.toTime
    if args.spatialExtent:
        params["spatialExtent"] = json.dumps(args.spatialExtent)
    if args.sortBy:
        params["sortBy"] = args.sortBy
    if args.sort:
        params["sort"] = args.sort

    print(bosart_scrape.make_api_call(parameters=params))


if __name__ == '__main__':
    main()
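The two timestamp shapes valid_date accepts, shown directly with the standard-library parser (fractional seconds are optional; the second format is the fallback branch):

from datetime import datetime

print(datetime.strptime("2019-05-08T17:15:00.000Z", "%Y-%m-%dT%H:%M:%S.%fZ"))
print(datetime.strptime("2019-05-08T17:15:00Z", "%Y-%m-%dT%H:%M:%SZ"))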
39.926316
240
0.675718
528
3,793
4.755682
0.314394
0.004779
0.040621
0.015532
0.241338
0.202708
0.178813
0.123855
0.123855
0.123855
0
0.000336
0.215924
3,793
94
241
40.351064
0.843981
0.017137
0
0.093333
0
0.066667
0.397746
0.02496
0
0
0
0
0
1
0.08
false
0
0.106667
0
0.28
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73803a506dad8312572b3d3624ec1ddd2985a19
23,181
py
Python
vgm2electron.py
simondotm/vgm2electron
38e340d2baeaa3e5722ac982c82e58fb9858f9d9
[ "MIT" ]
2
2021-03-08T13:55:02.000Z
2021-05-02T12:50:38.000Z
vgm2electron.py
simondotm/vgm2electron
38e340d2baeaa3e5722ac982c82e58fb9858f9d9
[ "MIT" ]
null
null
null
vgm2electron.py
simondotm/vgm2electron
38e340d2baeaa3e5722ac982c82e58fb9858f9d9
[ "MIT" ]
null
null
null
#!/usr/bin/env python # vgm2electron.py # Tool for converting SN76489-based PSG VGM data to Acorn Electron # By Simon Morris (https://github.com/simondotm/) # See https://github.com/simondotm/vgm-packer # # Copyright (c) 2019 Simon Morris. All rights reserved. # # "MIT License": # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the Software # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A # PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import functools import itertools import struct import sys import time import binascii import math import operator import os from modules.vgmparser import VgmStream class VgmElectron: OUTPUT_RAWDATA = False # output raw dumps of the data that was compressed by LZ4/Huffman VERBOSE = True # 0-3 represents approx the loudest 50% of volumes (=ON), 4-15 are the quietest 50% (=OFF) ATTENTUATION_THRESHOLD1 = 10 ATTENTUATION_THRESHOLD2 = 10 ATTENTUATION_THRESHOLD3 = 10 # define the number of octaves to transpose whole song by, in case too much bass getting lost TRANSPOSE_OCTAVES1 = 0 TRANSPOSE_OCTAVES2 = 0 TRANSPOSE_OCTAVES3 = 0 #-1 ENABLE_CHANNEL1 = True ENABLE_CHANNEL2 = True ENABLE_CHANNEL3 = True USE_TECHNIQUE = 2 def __init__(self): print("init") #---------------------------------------------------------- # Utilities #---------------------------------------------------------- # split the packed raw data into 11 separate streams # returns array of 11 bytearrays def split_raw(self, rawData, stripCommands = True): registers = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] registers_opt = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] latched_channel = -1 output_block = bytearray() output_blocks = [] for o in range(11): output_blocks.append( bytearray() ) if stripCommands: register_mask = 15 else: register_mask = 255 # unpack the raw binary data in 11 arrays of register data without any deltas between them # eg. 
the raw chip writes to all 11 registers every frame n = 0 Packet = True verbose = False while (Packet): packet_size = rawData[n] if verbose: print("packet_size=" + str(packet_size)) n += 1 if packet_size == 255: Packet = False else: for x in range(packet_size): d = rawData[n+x] #if verbose: # print " frame byte number=" +str(x) # print " frame byte=" +str(d) if d & 128: # latch c = (d>>5)&3 latched_channel = c if d & 16: # volume if verbose: print(" volume on channel " + str(c)) registers[c+7] = d & register_mask else: # tone if verbose: print(" tone on channel " + str(c)) registers[c*2+0] = d & register_mask else: if verbose: print(" tone data on latched channel " + str(latched_channel)) registers[latched_channel*2+1] = d # we no longer do any masking here # d & 63 # tone data only contains 6 bits of info anyway, so no need for mask if latched_channel == 3: print("ERROR CHANNEL") # emit current state of each of the 11 registers to 11 different bytearrays for x in range(11): output_blocks[x].append( registers[x] ) # next packet n += packet_size #print(output_blocks[6]) #IGNORE we no longer do this - let the decoder do it instead. if False: # make sure we only emit tone3 when it changes, or 15 for no-change # this prevents the LFSR from being reset lastTone3 = 255 for x in range(len(output_blocks[6])): t = output_blocks[6][x] if t == lastTone3: output_blocks[6][x] = 15 lastTone3 = t # print(output_blocks[6]) # Add EOF marker (0x08) to tone3 byte stream output_blocks[6].append(0x08) # 0x08 is an invalid noise tone. # return the split blocks return output_blocks # given an array of data points, serialize it to a bytearray # size is the number of bytes to be used to represent each element in the source array. def toByteArray(self, array, size = 1): r = bytearray() for v in array: if size < 2: r.append(v & 255) else: r.append(v & 255) r.append(v >> 8) return r #---------------------------------------------------------- # Process(filename) # Convert the given VGM file to an electron VGM file #---------------------------------------------------------- def process(self, src_filename, dst_filename): # load the VGM file, or alternatively interpret as a binary if src_filename.lower()[-4:] != ".vgm": print("ERROR: Not a VGM source") return vgm = VgmStream(src_filename) data_block = vgm.as_binary() data_offset = 0 # parse the header header_size = data_block[0] # header size play_rate = data_block[1] # play rate if header_size == 5 and play_rate == 50: packet_count = data_block[2] + data_block[3]*256 # packet count LO duration_mm = data_block[4] # duration mm duration_ss = data_block[5] # duration ss data_offset = header_size+1 data_offset += data_block[data_offset]+1 data_offset += data_block[data_offset]+1 print("header_size=" +str(header_size)) print("play_rate="+str(play_rate)) print("packet_count="+str(packet_count)) print("duration_mm="+str(duration_mm)) print("duration_ss="+str(duration_ss)) print("data_offset="+str(data_offset)) else: print("No header.") print("") # Trim off the header data. The rest is raw data. 
data_block = data_block[data_offset:] #---------------------------------------------------------- # Unpack the register data into 11 separate data streams #---------------------------------------------------------- registers = self.split_raw(data_block, True) #---------------------------------------------------------- # Begin VGM conversion to Electron #---------------------------------------------------------- # Filter out channels we do not need # Modify all volumes to full or none # Interleave sound to a single channel # output final VGM vgm_stream = bytearray() vgm_time = 0 electron_data = bytearray() # given an SN76489 tone register value, return the equivalent Electron ULA register setting def sn_to_electron(tone_value): # hack to protect against divbyzero if (tone_value == 0): tone_value = 1 hz = float(vgm.vgm_source_clock) / ( 2.0 * float(tone_value) * 16.0) print(" sn_to_electron freq " + str(hz) + "hz") # electron # Sound frequency = 1 MHz / [32 * (S + 1)] # f * 32*(S+1) = 1Mhz # 32*(S+1) = 1Mhz / f # (S+1) = 1Mhz / f*32 #print ("SN freq is " + str(hz)) ula6 = int( 1000000.0 / (hz * 32.0) ) - 1 # check we are within range if ula6 < 0: print(" WARNING: Electron freqency '" + str(ula6) + "' too high (" + str(hz) + ")") ula6 = 0 if ula6 > 255: print(" WARNING: Electron frequency '" + str(ula6) + "' too low (" + str(hz) + ")") ula6 = 255 return ula6 #-------------------------------------------------------------- # conversion settings #-------------------------------------------------------------- # convert the register data to a vgm stream sample_interval = int(44100 / vgm.metadata['rate']) # 882 # 50hz - TODO: use frame rate print("sample_interval=" + str(sample_interval)) USE_TONE3 = VgmElectron.ENABLE_CHANNEL3 # True # TODO: make these all parameters # Add channel filter option # Add mix type options # --attentuation 468 --filter 123 --transpose 00F --mix 123 --arpeggio 2 --rate 50 # Add option to clamp or transpose out of range frequencies # Make the .ula output file filename.electron.ula # Add 0x01 as a terminating byte in the output ULA MIX_RATE = 2 # modulo 2 for interleaving channels # other options # bias for channels # transpose or silence out of range notes channel_mix = 0 #-------------------------------------------------------------- # pre-process music to suit Electron capabilities #-------------------------------------------------------------- for i in range(len(registers[0])): print("Frame " + str(i)) #-------------------------------------------------------------- # step 1- map volumes to 1-bit precision #-------------------------------------------------------------- # 11 registers per frame # Tone 0 HL Tone 1 HL Tone 2 HL Tone 3 Vol 0123 for r in range(11): if r > 6: register_data = registers[r][i] # apply the threshold for each channel threshold = VgmElectron.ATTENTUATION_THRESHOLD1 if r == 8: threshold = VgmElectron.ATTENTUATION_THRESHOLD2 if r == 9: threshold = VgmElectron.ATTENTUATION_THRESHOLD3 # if its a volume, map to loudest volume or no volume (using logarithmic scale) if register_data < threshold: register_data = 0 # full volume else: register_data = 15 # zero volume if r == 7 and VgmElectron.ENABLE_CHANNEL1 == False: register_data = 15 # zero volume if r == 8 and VgmElectron.ENABLE_CHANNEL2 == False: register_data = 15 # zero volume if r == 9 and VgmElectron.ENABLE_CHANNEL3 == False: register_data = 15 # zero volume registers[r][i] = register_data #-------------------------------------------------------------- # step 2 - transpose to fit frequency range 
#-------------------------------------------------------------- # final step - bring tone1 into the frequency range of the electron # if the frequency goes below the range of the ULA capabilities, add an octave def retune(octaves, l,h,v): #if (octaves == 0): # print(" No transpose performed, octaves set to 0") # return print( " tonehi=" + str(registers[h][i]) + ", tonelo=" + str(registers[l][i])) tone_value = (registers[h][i] << 4) + registers[l][i] if tone_value > 0: tone_freq = float(vgm.vgm_source_clock) / ( 2.0 * float(tone_value) * 16.0) print(" Retune, Channel " + str(int(l/2)) + " tone=" + str(tone_value) + ", freq=" + str(tone_freq)) # electron baseline is 122Hz not 244Hz as the AUG states. baseline_freq = 1000000.0 / (32.0*256.0) target_freq = tone_freq retuned = 0 transpose = abs(octaves) while retuned != transpose: # target_freq < baseline_freq: if (octaves < 0): target_freq /= 2.0 else: target_freq *= 2.0 retuned += 1 # if cant reach baseline freq, transpose once, then silence if still too low :( if target_freq < baseline_freq: print(" WARNING: Freq too low - Added " + str(1) + " octave(s) - from " + str(target_freq) + " to " + str(target_freq*2.0) + "Hz") # better to just clamp low frequencies at the bottom, and risk tuning issues rather than transposition jumps target_freq = baseline_freq #*= 2.0 retuned = 1 if target_freq < baseline_freq: registers[v][i] = 15 print(" Tone " + str(i) + " silenced because frequency too low - " + str(target_freq)) #target_freq *= 2.0 #retuned += 1 if retuned: #print(" WARNING: Freq too low - Added " + str(retuned) + " octave(s) - from " + str(tone_freq) + " to " + str(target_freq) + "Hz") tone_value = int( round( float(vgm.vgm_source_clock) / (2.0 * target_freq * 16.0 ) ) ) registers[h][i] = tone_value >> 4 registers[l][i] = tone_value & 15 # transpose #if TRANSPOSE_OCTAVES > 0: print(" Transposing ") retune(VgmElectron.TRANSPOSE_OCTAVES1, 0,1,7) retune(VgmElectron.TRANSPOSE_OCTAVES2, 2,3,8) retune(VgmElectron.TRANSPOSE_OCTAVES3, 4,5,9) #-------------------------------------------------------------- # Step 3 - mix the 2 primary channels down to 1 channel #-------------------------------------------------------------- # map channel 2 to channel 1 # noise channel is completely ignored ENABLE_DOWNMIX = True if ENABLE_DOWNMIX: print(" Downmix channels ") #print("Frame " + str(i)) vol1 = registers[7][i] vol2 = registers[8][i] vol3 = registers[9][i] tone1_active = vol1 != 15 tone2_active = vol2 != 15 tone3_active = vol3 != 15 tone_active = tone1_active or tone2_active or tone3_active if tone_active: print(" Tone active, mixing") output_tone = 1 if self.USE_TECHNIQUE == 2: c1f = (registers[1][i] << 4) + registers[0][i] c2f = (registers[3][i] << 4) + registers[2][i] c3f = (registers[5][i] << 4) + registers[4][i] active_channels = [ False, False, False ] if tone1_active: active_channels[0] = True print("Channel 1 is active volume") if tone2_active: active_channels[1] = True print("Channel 2 is active volume") if tone3_active: active_channels[2] = True print("Channel 3 is active volume") # any channels playing the same frequency are filtered out if tone1_active and tone2_active and c2f == c1f: active_channels[1] = False print("Channel 2 is same freq as Channel 1, filtered") if tone1_active and tone3_active and c3f == c1f: active_channels[2] = False print("Channel 3 is same freq as Channel 1, filtered") if tone2_active and tone3_active and c2f == c3f: active_channels[2] = False print("Channel 3 is same freq as Channel 2, filtered") channel_count = 0 if 
active_channels[0]: channel_count += 1 if active_channels[1]: channel_count += 1 if active_channels[2]: channel_count += 1 print("channel_count=" + str(channel_count)) output_mix = [] if active_channels[0]: output_mix.append(1) if active_channels[1]: output_mix.append(2) if active_channels[2]: output_mix.append(3) mix = (i % channel_count) output_tone = output_mix[mix] if self.USE_TECHNIQUE == 1: # interleaving of channels 1+2 is done on odd/even frames for a consistent effect mix = (i % MIX_RATE) == 0 #(i & 1) == 0 # random is no good, thought it might average out but it sounds , well random #mix = random.random() < 0.5 # test code to see if modulo 3 any good, it wasn't if False: if channel_mix == 0 and vol1 != 0: channel_mix = (channel_mix + 1) % 3 if channel_mix == 1 and vol2 != 0: channel_mix = (channel_mix + 1) % 3 if channel_mix == 1 and vol3 != 0: channel_mix = (channel_mix + 1) % 3 output_tone = (channel_mix % 3) + 1 print("output tone=" + str(output_tone)) channel_mix = (channel_mix + 1) % 3 if True: # detect if channel 1 needs priority this frame # - its volume is on, and the alternative frame mix flag is good c1p = vol1 == 0 and mix # don't give channel 2 priority if tone is the same and channel1 is playing c1f = (registers[1][i] << 4) + registers[0][i] c2f = (registers[3][i] << 4) + registers[2][i] sametone = (c1f == c2f/2) or (c1f == c2f * 2) or (c1f == c2f) sametone = sametone and (vol1 == vol2) and (vol1 == 0) if vol1 == 0 and sametone: #diff < 100: #registers[0][i] == registers[2][i] and registers[1][i] == registers[2][i] and vol1 == 0: c1p = True print(" NOTE: channel 1 & channel 2 have same tone") # replace channel 1 data with channel 2 data # if, channel2 is active, but c1 doesn't have priority this frame if vol2 == 0 and not c1p:# and vol1 != 0: output_tone = 2 # if no volume on tone1, we can look at channel 3 too if USE_TONE3: #if registers[7][i] == 15: if vol1 == 15 and vol2 == 15 and vol3 == 0 and not mix:# and not c1p and output_tone != 2: print("tone3 active") output_tone = 3 # pick which tone to output if output_tone == 1: # do nothing, because tone1 register frequency already setup output_tone = 1 elif output_tone == 2: # replace tone 1 frequency with tone 2 frequency registers[0][i] = registers[2][i] registers[1][i] = registers[3][i] registers[7][i] = registers[8][i] elif output_tone == 3: # replace tone 1 frequency with tone 3 frequency registers[0][i] = registers[4][i] registers[1][i] = registers[5][i] registers[7][i] = registers[9][i] else: print("UNHANDLED CASE - output_tone not set") # output ULA data final_volume = registers[7][i] ula_tone = 0 # zero is highest freq. 
so inaudible, so thats how we handle volume if final_volume == 0: final_tone1 = (registers[1][i] << 4) + registers[0][i] ula_tone = sn_to_electron(final_tone1) electron_data.append( ula_tone ) # write to output ULA file ula_file = open(dst_filename + ".ula.bin", 'wb') ula_file.write(electron_data) ula_file.close() #-------------------------------------------------------------- # Final stage - output to vgm #-------------------------------------------------------------- # Tone1----- Tone2----- Tone3----- Tone4 Vol1 Vol2 Vol3 Vol4 control = [ 0x80, 0x00, 0xa0, 0x00, 0xc0, 0x00, 0xe0, 0x90, 0xb0, 0xd0, 0xf0 ] #filter = [ 0,1,2,3,7,8 ] #filter = [ 2,3,8 ] #filter = [ 0,1,2,3,4,5,6,7,8,9,10 ] filter = [ 0,1,2,3,4,5,7,8,9 ] if ENABLE_DOWNMIX: filter = [ 0,1,7 ] last_tone3 = 255 for i in range(len(registers[0])): # 11 registers per frame # Tone 0 HL Tone 1 HL Tone 2 HL Tone 3 Vol 0123 for r in range(11): register_data = registers[r][i] # dont update noise register unless different update = True if r == 6: if register_data == last_tone3: update = False else: last_tone3 = register_data if not r in filter: update = False if update: register_data |= control[r] vgm_stream.extend( struct.pack('B', 0x50) ) # COMMAND vgm_stream.extend( struct.pack('B', register_data) ) # DATA # next frame if sample_interval == 882: # wait 50 vgm_stream.extend( struct.pack('B', 0x63) ) elif sample_interval == 735: # wait 60 vgm_stream.extend( struct.pack('B', 0x62) ) else: vgm_stream.extend( struct.pack('B', 0x61) ) vgm_stream.extend( struct.pack('B', int(sample_interval % 256)) ) vgm_stream.extend( struct.pack('B', int(sample_interval / 256)) ) # END command vgm_stream.extend( struct.pack('B', 0x66) ) vgm.write_vgm(vgm_stream, dst_filename) #output = bytearray() # write the electron vgm file #open(dst_filename, "wb").write( output ) #------------------------------------------------------------------------ # Main() #------------------------------------------------------------------------ import argparse # Determine if running as a script if __name__ == '__main__': print("Vgm2Electron.py : VGM music converter for Acorn Electron") print("Written in 2019 by Simon Morris, https://github.com/simondotm/vgm-packer") print("") epilog_string = "" parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog_string) parser.add_argument("input", help="VGM source file (must be single SN76489 PSG format) [input]") parser.add_argument("-o", "--output", metavar="<output>", help="write VGC file <output> (default is '[input].vgc')") parser.add_argument("-v", "--verbose", help="Enable verbose mode", action="store_true") parser.add_argument("-a", "--attenuation", default="444", metavar="<nnn>", help="Set attenuation threshold for each channel, 3 character string where each character is 0-F and 0 is loudest, 4 is 50%, F is quietest, default: 444") parser.add_argument("-t", "--transpose", default="000", metavar="<nnn>", help="Set octaves to transpose for each channel, where 1 is +1 octave and F is -1 octave.") parser.add_argument("-c", "--channels", default="123", metavar="[1][2][3]", help="Set which channels will be included in the conversion, default 123, which means all 3 channels") parser.add_argument("-q", "--technique", default=2, metavar="<n>", help="Set which downmix technique to use 1 or 2.") args = parser.parse_args() src = args.input dst = args.output if dst == None: dst = os.path.splitext(src)[0] + ".electron.vgm" # attenuation options attenuation = args.attenuation if (len(attenuation) != 
3): print("ERROR: attenuation must be 3 values eg. '444'") sys.exit() #print("attenuation=" + attenuation) VgmElectron.ATTENTUATION_THRESHOLD1 = int(attenuation[0],16) VgmElectron.ATTENTUATION_THRESHOLD2 = int(attenuation[1],16) VgmElectron.ATTENTUATION_THRESHOLD3 = int(attenuation[2],16) # transpose options transpose = args.transpose if (len(transpose) != 3): print("ERROR: transpose must be 3 values eg. '000'") sys.exit() #print("transpose=" + transpose) # 0 1 2 3 4 5 6 7 8 9 a b c d e f ttable = [0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1] VgmElectron.TRANSPOSE_OCTAVES1 = ttable[ int(transpose[0],16) ] VgmElectron.TRANSPOSE_OCTAVES2 = ttable[ int(transpose[1],16) ] VgmElectron.TRANSPOSE_OCTAVES3 = ttable[ int(transpose[2],16) ] # channel options print(args.channels) VgmElectron.ENABLE_CHANNEL1 = args.channels.find("1") >= 0 VgmElectron.ENABLE_CHANNEL2 = args.channels.find("2") >= 0 VgmElectron.ENABLE_CHANNEL3 = args.channels.find("3") >= 0 print("Channel 1: Enabled=" + str(VgmElectron.ENABLE_CHANNEL1) + ", Transpose=" + str(VgmElectron.TRANSPOSE_OCTAVES1) + ", Attenuation="+str(VgmElectron.ATTENTUATION_THRESHOLD1)) print("Channel 2: Enabled=" + str(VgmElectron.ENABLE_CHANNEL2) + ", Transpose=" + str(VgmElectron.TRANSPOSE_OCTAVES2) + ", Attenuation="+str(VgmElectron.ATTENTUATION_THRESHOLD2)) print("Channel 3: Enabled=" + str(VgmElectron.ENABLE_CHANNEL3) + ", Transpose=" + str(VgmElectron.TRANSPOSE_OCTAVES3) + ", Attenuation="+str(VgmElectron.ATTENTUATION_THRESHOLD3)) # technique VgmElectron.USE_TECHNIQUE = int(args.technique) print("Using technique " + str(VgmElectron.USE_TECHNIQUE)) # check for missing files if not os.path.isfile(src): print("ERROR: File '" + src + "' not found") sys.exit() packer = VgmElectron() packer.VERBOSE = args.verbose packer.process(src, dst)
31.798354
230
0.60981
3,154
23,181
4.383323
0.188015
0.002893
0.003906
0.004629
0.155805
0.120506
0.090127
0.059096
0.047089
0.046438
0
0.044968
0.225832
23,181
728
231
31.842033
0.725398
0.351926
0
0.148352
0
0.005495
0.130094
0
0
0
0.004588
0.001374
0
1
0.016484
false
0
0.03022
0
0.093407
0.134615
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c739f9c426d2980ab50d3acc428d5d636d5dd280
14,198
py
Python
twitter_sent.py
rthorst/TwitterSentiment
b719feffbfed1dfe9028db0900b3158d19322284
[ "MIT" ]
6
2020-02-21T15:50:34.000Z
2021-11-09T19:45:50.000Z
twitter_sent.py
rthorst/TwitterSentiment
b719feffbfed1dfe9028db0900b3158d19322284
[ "MIT" ]
null
null
null
twitter_sent.py
rthorst/TwitterSentiment
b719feffbfed1dfe9028db0900b3158d19322284
[ "MIT" ]
null
null
null
import webapp2
import tweepy
import json
import csv
import os
import statistics
import bokeh
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.embed import components, json_item
from bokeh.resources import INLINE
from bokeh.models.glyphs import Line, Text
import numpy as np
import random
import operator
from collections import Counter
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

"""
---AUTHOR: ---
Robert Thorstad
thorstadrs@gmail.com

---LICENSE: ---
MIT License.

---ABOUT: ---
Application to get the sentiment of recent tweets based on a keyword.
Example:
    keyword -> "taco bell"
    retrieve 300 recent tweets mentioning taco bell.
    get average sentiment.
    plot distribution of tweets and sentiment.
    plot most informative words for this application.

This script runs based on google app server.
    Expects Python 2.7.
    Dependencies need to be included in the lib/ directory (pip install -t lib [PACKAGE_NAME]).

The main work is done by the MainPage class. The get() method runs the main
pipeline of code and returns HTML as a string.

Working online version:
https://twittersentiment-247018.appspot.com/
"""


def get_tweets(keyword, max_tweets=200):
    """
    Given a keyword as a string (e.g. "data science"), get recent tweets
    matching that string, up to max_tweets.
    Return a list of tweets, represented as strings.
    """

    # API keys.
    consumer_key = "kNOG1klRMMUYbsjMuY5TKl4lE"
    consumer_secret = "ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v"
    access_key = "3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz"
    access_secret = "9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn"

    # Initialize tweepy API object and authorize using API key.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    """ Get tweets. """
    alltweets = []
    for status in tweepy.Cursor(
            api.search,
            q=keyword + " -RT",  # the -RT flag excludes retweets.
            count=1000,
            result_type="recent",
            include_entities=True,
            monitor_rate_limit=True,
            wait_on_rate_limit=True,
            lang="en",
    ).items():

        # get text of the tweet, encoding as utf-8.
        text = str(status.text.encode("utf-8"))

        # add to the data structure, alltweets, holding the tweets.
        alltweets.append(text)

        # if we've reached max_tweets, break.
        if len(alltweets) >= max_tweets:
            break

    return alltweets


class VaderSentimentModel:
    """
    Calculate sentiment using a mostly lexicon-based approach that is optimized
    for social media. The approach is social-media aware: for example, emoticons
    are part of the lexicon and tokenization is twitter-sensitive. There are
    also some basic rules, e.g. it is sensitive to negations.
    """

    def __init__(self):
        # Initialize a vader_analyzer object which does the work of sentiment analysis.
        self.vader_analyzer = SentimentIntensityAnalyzer()

    def classify_sentiment(self, tweet):
        # Classify sentiment of a single tweet.
        # Input tweet: as string.
        # Return sentiment score:
        #   range -1 (very negative) to +1 (very positive).
        #   score is calculated as p(positive) - p(negative),
        #   normalized to range from -1 to 1.

        # calculate sentiment in a dictionary. key is polarity ("pos", "neg", "neut") and value is probability.
        sentiment_dict = self.vader_analyzer.polarity_scores(tweet)

        # retrieve the compound sentiment score, which is p(pos) - p(neg), normalized to the range {-1, 1}.
        score = sentiment_dict["compound"]

        return score


def plot_tweets(tweets, sentiment_scores):
    """
    Create a histogram-style barplot of tweets and their sentiment.
    Return a bokeh plot object, expressed as a tuple of (resources, script, div), where:
        resources: CSS, etc. that goes in the head of the webpage for styling the plot.
        script: javascript for the plot to function, expressed as string.
        div: html div container for the plot, expressed as string.
    """

    # Sort tweets from negative to positive.
    # This step is not strictly necessary, but makes it easier to see the overall shape of the data.
    sorted_indices = np.argsort(sentiment_scores)
    sentiment_scores = np.array(sentiment_scores)[sorted_indices]
    tweets = np.array(tweets)[sorted_indices]

    # Express the data as a bokeh data source object.
    source = ColumnDataSource(data={
        "text": tweets,
        "sentiment": sentiment_scores,
        "x": np.arange(len(tweets)),
    })

    """ Create plot. """

    # Create plot object.
    width = 0.9
    p = figure(x_axis_label="Tweet", y_axis_label="Sentiment (0 = Neutral)")
    p.vbar(source=source, x="x", top="sentiment", width=width)

    # Add hover tool, allowing mouseover to view text and sentiment.
    hover = HoverTool(
        tooltips=[
            ("text", "@text"),
            ("sentiment", "@sentiment")
        ],
        formatters={
            "text": "printf",
            "sentiment": "printf"
        },
        mode="vline"
    )
    p.add_tools(hover)

    """ Format plot. """

    # axis font size
    p.xaxis.axis_label_text_font_size = "15pt"
    p.yaxis.axis_label_text_font_size = "15pt"

    # remove tick marks from axes
    p.xaxis.major_tick_line_color = None
    p.xaxis.minor_tick_line_color = None
    p.yaxis.major_tick_line_color = None
    p.yaxis.minor_tick_line_color = None

    # adjust plot width, height
    scale = 1.5
    p.plot_height = int(250 * scale)
    p.plot_width = int(450 * scale)

    # remove toolbar (e.g. move, resize, etc.) from right of plot.
    p.toolbar.logo = None
    p.toolbar_location = None

    # remove gridlines
    p.xgrid.visible = False
    p.ygrid.visible = False

    # remove x axis tick labels (done by setting label fontsize to 0 pt)
    p.xaxis.major_label_text_font_size = '0pt'

    """ Export plot. """

    # Create resources string, which is CSS, etc. that goes in the head of the HTML.
    resources = INLINE.render()

    # Get javascript (script) and HTML div (div) for the plot.
    script, div = components(p)

    return (resources, script, div)


def plot_reason(tweets, sentiment_scores):
    """
    Plot the top words that lead us to the classification as positive or negative.
    Return:
        script : javascript for the plot, expressed as string.
        div : html container for the plot, expressed as string.
    NOTE: requires the shared resources attribute from plot_tweets() in the HTML header.
    """

    """ Calculate the sentiment of each individual token in the tweets. """

    # list tokens, keeping only unique tokens (e.g. remove repeated words).
    all_toks = []
    for tweet in tweets:
        toks = tweet.lower().split()
        all_toks.extend(toks)
    all_toks = [tok for tok in set(all_toks)]  # remove duplicates.

    # calculate sentiment of each token.
    sm = VaderSentimentModel()
    toks_sentiment = [sm.classify_sentiment(tok) for tok in all_toks]

    """
    Sort tokens by sentiment.
    If overall valence is negative, sort negative to positive.
    If overall valence is positive, sort positive to negative.
    Thus, in either case, the earliest elements in the list are the most informative words.
    """
    nwords = 20

    # negative? sort neg -> positive.
    if np.mean(sentiment_scores) < 0:
        sorted_indices = np.argsort(toks_sentiment)
    # else (positive)? sort positive -> negative.
    else:
        sorted_indices = np.argsort(toks_sentiment)[::-1]

    # toks_to_plot: shape (nwords,) list of informative tokens.
    # sentiment_to_plot: shape (nwords,) list of sentiment of these tokens.
    toks_to_plot = np.array(all_toks)[sorted_indices][:nwords]
    sentiment_to_plot = np.array(toks_sentiment)[sorted_indices][:nwords]

    # convert all sentiment scores to positive values.
    # this is for DISPLAY only, to make all plots go from left to right.
    # we still retain the correct tokens and sorting order.
    sentiment_to_plot = np.array([abs(v) for v in sentiment_to_plot])

    """
    Set up plot.
    - create data source object.
    - define formatting variables.
    """
    text_offset = 0.1
    source = ColumnDataSource(data={
        "token": toks_to_plot,
        "sentiment": sentiment_to_plot,
        "x": np.arange(len(toks_to_plot))[::-1],
        "label_x": sentiment_to_plot + text_offset
    })

    """ Make plot. """

    # Create initial plot.
    width = 0.9
    xrange = [0, max(sentiment_to_plot) + 1]
    p2 = figure(x_axis_label="Sentiment", y_axis_label="Word", x_range=xrange)
    p2.hbar(source=source, y="x", right="sentiment", height=width)

    """ Format plot. """

    # Annotate each bar with the word being represented.
    glyph = Text(x="label_x", y="x", text="token")
    p2.add_glyph(source, glyph)

    # Axis labels.
    p2.xaxis.axis_label_text_font_size = "15pt"
    p2.yaxis.axis_label_text_font_size = "15pt"

    # Remove ticks.
    p2.xaxis.major_tick_line_color = None
    p2.xaxis.minor_tick_line_color = None
    p2.yaxis.major_tick_line_color = None
    p2.yaxis.minor_tick_line_color = None

    # Remove y axis tick labels.
    p2.yaxis.major_label_text_font_size = '0pt'

    # Plot width, height.
    scale = 1.5
    p2.plot_height = int(250 * scale)
    p2.plot_width = int(250 * scale)

    # remove toolbar (e.g. move, resize, etc.) from right of plot.
    p2.toolbar.logo = None
    p2.toolbar_location = None

    # remove gridlines
    p2.xgrid.visible = False
    p2.ygrid.visible = False

    # remove x axis tick labels (set font to 0pt)
    p2.xaxis.major_label_text_font_size = '0pt'

    # get bokeh components for plot 2.
    script2, div2 = components(p2)

    return (script2, div2)


class MainPage(webapp2.RequestHandler):
    """
    This class does the work of writing HTML to the google app server.
    Thus, we allow the get() method to incorporate:
        our main pipeline (getting tweets, analyzing sentiment, producing graphs)
        writing html
    """

    def get(self):

        """ Get tweets and sentiment scores. """

        # Retrieve keyword from the HTML form. If no keyword is provided, use a random suggested keyword.
        keyword = self.request.get("keyword")
        if not keyword:
            suggested_keywords = ["alarm clocks", "the future", "miller lite",
                                  "taco bell", "yoga", "netflix", "life",
                                  "traffic", "elon musk", "beards",
                                  "world trade", "pepsi", "amazon"]
            indices = np.arange(len(suggested_keywords))
            random.shuffle(indices)
            keyword = suggested_keywords[indices[0]]

        # Get recent tweets based on the keyword, up to 300 maximum tweets.
        tweets = get_tweets(keyword, max_tweets=300)

        # Compute the sentiment of each tweet.
        v = VaderSentimentModel()
        sentiment_scores = [v.classify_sentiment(tw) for tw in tweets]  # shape (ntweets,)

        # Label sentiment categorically, e.g. "negative" or "positive".
        M_sent = np.mean(sentiment_scores)
        valence_map = {1: "positive", 0: "negative"}
        valence = valence_map[int(M_sent > 0)]

        """ Create plots. """

        #############
        # Plot #1:
        #############
        # Plot the distribution of tweets and sentiment.
        # Resources is CSS code that goes in the header of the HTML. Shared across all bokeh plots.
        # Script1 is javascript for this plot.
        # Div1 is an HTML container for the plot. Goes where you want the plot to appear.
        resources, script1, div1 = plot_tweets(tweets=tweets, sentiment_scores=sentiment_scores)

        #############
        # Plot #2:
        #############
        # Plot the key words that lead us to this classification.
        # Script2 is javascript for this plot.
        # Div2 is an HTML container for this plot. Goes where you want the plot to appear.
        # Requires the HTML to include the shared resources, generated above, in the <HEAD>.
        script2, div2 = plot_reason(tweets=tweets, sentiment_scores=sentiment_scores)

        """ Create HTML output. """

        # Load HTML template.
        # This is a functioning webpage, with some placeholders for the keywords and plots we have created.
        html_p = os.path.join("html", "index.html")
        html = open(html_p, "r").read()

        # Fill in placeholders in the HTML with variables we have created.
        term_to_value = {
            "[[!KEYWORD]]": keyword,
            "[[!VALENCE]]": valence,
            "[[!BOKEH_SCRIPT]]": script1,
            "[[!BOKEH_SCRIPT2]]": script2,
            "[[!BOKEH_DIV]]": div1,
            "[[!BOKEH_RESOURCES]]": resources,
            "[[!BOKEH_DIV2]]": div2
        }
        for term, val in term_to_value.items():
            html = html.replace(term, val)

        """
        Write a response. This essentially returns HTML to the google app engine.
        This will render a webpage visible to the user.
        """
        self.response.headers["Content-Type"] = "text/html"
        self.response.write(html)


# Run application.
routes = [('/', MainPage)]
my_app = webapp2.WSGIApplication(routes, debug=True)
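A quick way to sanity-check the VADER scoring that classify_sentiment() wraps, outside of App Engine: a minimal sketch, not part of the original app, assuming vaderSentiment is installed and a Python 3 interpreter for the print syntax.

# Standalone check of the "compound" score used by classify_sentiment() above.
# Assumes: pip install vaderSentiment (hypothetical local test, not in the repo).
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()
for text in ["I love taco bell!", "Traffic today was awful."]:
    # "compound" is VADER's normalized score in [-1, 1].
    print(text, analyzer.polarity_scores(text)["compound"])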
33.885442
120
0.623257
1,763
14,198
4.913216
0.276234
0.025976
0.012006
0.015701
0.18206
0.124336
0.064188
0.042023
0.019164
0.011083
0
0.016143
0.284477
14,198
419
121
33.885442
0.8365
0.322369
0
0.046784
0
0
0.085939
0.022237
0
0
0
0
0
1
0.035088
false
0.005848
0.105263
0
0.175439
0.011696
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73a657eabaaa5580cd95fd8f430b160b1e8e216
8,956
py
Python
tests/testcgatools.py
ereide/pyga-camcal
fd25748ddb11c5b05ef24a2deca2689e0d899875
[ "MIT" ]
5
2018-05-22T09:11:31.000Z
2022-03-11T02:32:01.000Z
tests/testcgatools.py
ereide/pyga-camcal
fd25748ddb11c5b05ef24a2deca2689e0d899875
[ "MIT" ]
null
null
null
tests/testcgatools.py
ereide/pyga-camcal
fd25748ddb11c5b05ef24a2deca2689e0d899875
[ "MIT" ]
null
null
null
import unittest

import clifford as cl
from clifford import g3c
from numpy import pi, e
import numpy as np
from scipy.sparse.linalg.matfuncs import _sinch as sinch

from clifford import MultiVector

from pygacal.common.cgatools import (Sandwich, Dilator, Translator, Reflector,
                                     inversion, Rotor, Transversor, I3, I5,
                                     VectorEquality, Distance, ga_log, ga_exp,
                                     MVEqual, Meet,
                                     extractBivectorParameters_complicated,
                                     ga_exp_complicated, one)
from pygacal.geometry import createRandomBivector, createRandomVector, createRandomPoints
from pygacal.geometry.lines import createLine
from pygacal.geometry.planes import createPlane

layout = g3c.layout
locals().update(g3c.blades)

ep, en, up, down, homo, E0, ninf, no = (g3c.stuff["ep"], g3c.stuff["en"],
                                        g3c.stuff["up"], g3c.stuff["down"],
                                        g3c.stuff["homo"], g3c.stuff["E0"],
                                        g3c.stuff["einf"], -g3c.stuff["eo"])

np.random.seed(2512)


def AssertMVEqual(actual, expected, rtol=1e-5, atol=1e-6, verbose=False):
    assert(MVEqual(actual, expected, rtol, atol, verbose))


def AssertMVUnEqual(actual, expected, rtol=1e-5, atol=1e-6, verbose=False):
    assert(not MVEqual(actual, expected, rtol, atol, verbose))


class TestCGAOperators(unittest.TestCase):

    def testDilator(self):
        x = 2*e1 + 3*e2 + 4*e3
        X = up(x)
        assert(down(Sandwich(X, Dilator(0.1))) == x * 0.1)

    def testTranslation(self):
        x = 2*e1 + 3*e2 + 4*e3
        X = up(x)
        a = 2*e1 + e3
        assert(down(Sandwich(X, Translator(a))) == x + a)

    def testRotation(self):
        x = 2*e1 + 3*e2 + 4*e3
        X = up(x)
        actual = down(Sandwich(X, Rotor(e12, pi/2)))
        expected = (-3.0)*e1 + 2.0*e2 + 4.0*e3
        assert(actual == expected)

    def testInversion(self):
        x = 2*e1 + 3*e2 + 4*e3
        X = up(x)
        assert(down(inversion(X)) * x == 1)

    def testDistance(self):
        a = e1
        b = e2
        A, B = up(a), up(b)
        assert(Distance(A, B) == np.sqrt(2))

    def testMeet(self):
        A, B, C, D = createRandomPoints(N=4, scale=50)
        L = createLine(A, B)
        L2 = createLine(A, C)
        P1 = createPlane(A, B, C)
        P2 = createPlane(A, B, D)

        L_actual = Meet(P1, P2)
        assert(MVEqual(L, L_actual))

        # Plane to line
        Q = (ninf ^ A).normal()
        P3 = A ^ C ^ D ^ ninf
        Q_actual = Meet(P3, L).normal()  # How do we define order/direction?
        assert(MVEqual(Q, Q_actual))

    def testAssertEqual(self):
        verbose = False
        a = createRandomBivector()
        b = a + 0.01
        a2 = b - 0.01
        c = a + 1
        d = c - a

        AssertMVEqual(a, a2, verbose=verbose)
        AssertMVUnEqual(a, b, verbose=verbose)
        AssertMVEqual(d, 1, verbose=verbose)

    def testLogarithm(self):
        verbose = False
        if verbose:
            print("\nTest Logarithms and exponents")

        phi = 0.5                            # Rotation amount
        P = (e12 + 2*e23 + 3*e13).normal()   # Rotation plane
        P_n = P*I3
        t = 2.73*e1 + 3.14*e2                # Translation vector
        t_nor = (P_n | t) * P_n              # Decomposition into normal component
        t_par = t - t_nor                    # Decomposition into parallel component

        assert(t_par + t_nor == t)

        if verbose:
            print("P     = ", P)
            print("phi   = ", phi)
            print("t     = ", t)
            print("t_nor = ", t_nor)
            print("t_par = ", t_par)
            print("")

        assert(P | t_nor == 0)   # Normal to P
        assert(P ^ t_nor != 0)   # Normal to P
        assert(P | t_par != 0)   # Parallel to P
        assert(P ^ t_par == 0)   # Parallel to P
        assert(P*t != 0)         # Non-zero product

        R_expected = (np.cos(phi) + (np.sin(phi) * P))*(1 + (t_nor*ninf)) + np.sinc(phi/np.pi)*t_par*ninf
        B_expected = phi * P + t*ninf

        R_exponential = np.exp(B_expected)
        R_actual = ga_exp(B_expected, verbose=verbose)
        B_new = ga_log(R_expected, verbose=verbose)
        R_ga = ga_exp(B_new)

        if verbose:
            print("R_old        ", R_expected)
            print("R_expected   ", R_actual)
            print("R_exponential", R_exponential)
            print("R_ga         ", R_ga)
            print("B_new        ", B_new)
            print("B_expected   ", B_expected)

        # Rotor properties
        AssertMVEqual(R_expected * ~R_expected, 1, verbose=verbose)
        AssertMVEqual(R_ga * ~R_ga, 1, verbose=verbose)

        # Equalities
        AssertMVEqual(R_actual, R_expected, verbose=verbose)
        AssertMVEqual(R_exponential, R_expected, verbose=verbose)
        AssertMVEqual(B_new, B_expected, verbose=verbose)
        AssertMVEqual(R_ga, R_expected, verbose=verbose)

        N = 100  # Random bivectors to test this as well
        for i in range(N):
            B = createRandomBivector()
            AssertMVEqual(B, ga_log(ga_exp(B, verbose=verbose), verbose=verbose), verbose=verbose)

    def testComplicatedLogarithm(self):
        verbose = True
        if verbose:
            print("\nTest Complicated Logarithms and exponents")

        phi = 0.2                            # Rotation amount
        P = (e12 + 2*e23 + 3*e13).normal()   # Rotation plane
        P_n = P*I3
        # t = 0
        t = 2.73*e1 + 3.14*e2                # Translation vector
        t_nor = (P_n | t) * P_n              # Decomposition into normal component
        t_par = t - t_nor                    # Decomposition into parallel component
        omega = 0.1

        assert(t_par + t_nor == t)

        if verbose:
            print("P     = ", P)
            print("phi   = ", phi)
            print("t     = ", t)
            print("t_nor = ", t_nor)
            print("t_par = ", t_par)
            print("omega = ", omega)
            print("")

        """
        assert(P|t_nor == 0)       # Normal to P
        assert(P^t_nor != 0)       # Normal to P
        assert(P|t_par != 0)       # Parallel to P
        assert(P^t_par == 0)       # Parallel to P
        assert(P*t != 0)           # Non-zero product
        assert(t_par|t_nor == 0)   # Non-zero product
        """

        B_expected = (phi * P) + (t*ninf) + (omega * E0)
        k = (omega * omega + phi * phi)

        R_expected = (np.cos(phi) + np.sin(phi) * P)*(np.cosh(omega) + np.sinh(omega) * E0 + sinch(omega) * t_nor*ninf)
        if (k > 0):
            R_expected += 1/k * ((-omega * np.sin(phi) * np.cosh(omega) + phi * np.cos(phi) * np.sinh(omega)) * P
                                 + (omega * np.cos(phi) * np.sinh(omega) + phi * np.sin(phi) * np.cosh(omega))) * t_par * ninf
        else:
            R_expected += t_par * ninf

        phi_test, P_test, t_nor_test, t_par_test, omega_test = extractBivectorParameters_complicated(B_expected)
        B_actual = phi_test * P_test + (t_nor_test + t_par_test)*ninf + omega_test * E0

        # Testing some basic properties of the extraction
        AssertMVEqual(phi*(P * ~P), phi*one, verbose=False)
        AssertMVEqual(phi*P, phi*P_test, verbose=False)

        R_exponential = np.exp(B_expected)
        R_actual = ga_exp_complicated(B_expected, verbose=verbose)
        # B_new = ga_log(R_expected, verbose=verbose)
        # R_ga = ga_exp(B_new)

        if verbose:
            print("R_expected    ", R_expected)
            print("R_actual      ", R_actual)
            print("R_exponential ", R_exponential)
            # print("R_ga          ", R_ga)
            # print("B_new         ", B_new)
            print("B_expected    ", B_expected)
            print()

        # Bivector extraction
        AssertMVEqual(B_actual, B_expected, verbose=verbose)
        AssertMVEqual(R_expected * ~R_expected, one, verbose=verbose)

        # Rotor properties
        AssertMVEqual(R_actual * ~R_actual, one, verbose=verbose)  # Only an approximation
        AssertMVEqual(R_exponential * ~R_exponential, one, verbose=verbose)
        # AssertMVEqual(R_expected * ~R_expected, 1, verbose=verbose)
        # AssertMVEqual(R_ga * ~R_ga, 1, verbose=verbose)

        # Equalities
        # AssertMVEqual(R_actual, R_expected, verbose=verbose)
        AssertMVEqual(R_exponential, R_actual, rtol=1e-2, atol=1e-3, verbose=verbose)
        # AssertMVEqual(B_new, B_expected, verbose=verbose)
        # AssertMVEqual(R_ga, R_expected, verbose=verbose)

        # N = 100  # Random bivectors to test this as well
        # for i in range(N):
        #     B = createRandomBivector()
        #     AssertMVEqual(B, ga_log(ga_exp(B, verbose=verbose), verbose=verbose), verbose=verbose)


if __name__ == "__main__":
    unittest.main()
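For readers unfamiliar with the conformal model these tests exercise, here is a minimal sketch (not from the repo) of the up()/down() round trip that most of the assertions rely on; it assumes the same clifford version and g3c.stuff API as the test file above.

# Conformal embedding round trip, assuming clifford's g3c layout as used above.
from clifford import g3c

blades = g3c.blades
e1, e2, e3 = blades['e1'], blades['e2'], blades['e3']
up, down = g3c.stuff['up'], g3c.stuff['down']

x = 2*e1 + 3*e2 + 4*e3
X = up(x)              # embed the Euclidean point as a null conformal point
assert down(X) == x    # the round trip recovers the original point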
33.17037
125
0.546226
1,142
8,956
4.140105
0.16725
0.097716
0.055838
0.01692
0.530457
0.514805
0.473985
0.461506
0.461506
0.450508
0
0.02789
0.335418
8,956
269
126
33.29368
0.766465
0.124721
0
0.280488
0
0
0.043403
0
0
0
0
0
0.219512
1
0.067073
false
0
0.067073
0
0.140244
0.158537
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73c3d02ecdfac6eb2c791e1853c9f4bcf52f552
6,909
py
Python
router/posts.py
DiegoLing33/prestij.xyz-api
69a11a2c93dd98975f9becbc4b8f596e4941a05f
[ "MIT" ]
null
null
null
router/posts.py
DiegoLing33/prestij.xyz-api
69a11a2c93dd98975f9becbc4b8f596e4941a05f
[ "MIT" ]
null
null
null
router/posts.py
DiegoLing33/prestij.xyz-api
69a11a2c93dd98975f9becbc4b8f596e4941a05f
[ "MIT" ]
null
null
null
# ██╗░░░░░██╗███╗░░██╗░██████╗░░░░██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗
# ██║░░░░░██║████╗░██║██╔════╝░░░░██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝
# ██║░░░░░██║██╔██╗██║██║░░██╗░░░░██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░
# ██║░░░░░██║██║╚████║██║░░╚██╗░░░██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░
# ███████╗██║██║░╚███║╚██████╔╝░░░██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗
# ╚══════╝╚═╝╚═╝░░╚══╝░╚═════╝░░░░╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝
#
# Developed by Yakov V. Panov (C) Ling • Black 2020
# @site http://ling.black

from typing import List

from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel

from core.response import RequestLimit
from database import get_db, DatabaseUtils
from database.wow.models import PostModel, PostCommentsModel
from wow.interface.entity import (PostCategory, Post, PostCategoryCreate,
                                  PostCreate, PostLikeCreate, PostCommentCreate)
from wow.utils.posts import PostsUtils
from wow.utils.users import BlizzardUsersUtils

router = APIRouter()


class TokenArgs(BaseModel):
    token: str


class TokenPostIdArgs(BaseModel):
    token: str
    post_id: int


class CommentIdAndToken(TokenArgs):
    comment_id: int


class PostAPIList(BaseModel):
    items: List[Post]
    count: int


class PostAPIListResponse(BaseModel):
    response: PostAPIList
    request: RequestLimit


# -----------------------------------
# CATEGORIES
# -----------------------------------

@router.post(
    "/categories",
    response_model=PostCategory,
    summary='Adds the category'
)
def add_category(body: PostCategoryCreate):
    """
    Adds the category
    :param body:
    :return:
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_category(user_id=blizzard_id, url=body.url, title=body.title)


@router.get(
    "/categories",
    response_model=List[PostCategory],
    summary='Returns the categories'
)
def get_categories():
    """
    Returns the categories list
    :return:
    """
    return PostsUtils.get_categories()


# -----------------------------------
# POSTS
# -----------------------------------

@router.get(
    "/",
    response_model=PostAPIListResponse,
    summary='Returns all the posts'
)
def get_posts_all(limit: int = 100, offset: int = 0):
    return PostsUtils.get_posts_limit(
        limit=limit,
        offset=offset
    )


@router.get(
    "/category/{category_url}",
    response_model=PostAPIListResponse,
    summary='Returns the posts in category'
)
def get_posts_by_category(category_url: int, limit: int = 100, offset: int = 0):
    """
    Returns all the posts by category
    :param category_url:
    :param limit:
    :param offset:
    :return:
    """
    return PostsUtils.get_posts_by_category_limit(
        category_id=category_url,
        limit=limit,
        offset=offset
    )


@router.get(
    "/user/{blizzard_id}",
    response_model=PostAPIListResponse,
    summary='Returns the posts by user'
)
def get_posts_by_user(blizzard_id: int, limit: int = 100, offset: int = 0):
    """
    Returns all the posts by user
    :param blizzard_id:
    :param limit:
    :param offset:
    :return:
    """
    return PostsUtils.get_posts_by_blizzard_id(
        blizzard_id=blizzard_id,
        limit=limit,
        offset=offset
    )


@router.post(
    "/like",
    summary='Likes the post',
    tags=['Likes']
)
def like_post(body: PostLikeCreate):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_like(
        user_id=blizzard_id,
        post_id=body.post_id,
    )


@router.post(
    "/unlike",
    summary='Unlikes the post',
    tags=['Likes']
)
def unlike_post(body: PostLikeCreate):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.remove_like(
        user_id=blizzard_id,
        post_id=body.post_id,
    )


@router.post(
    "/comment",
    summary='Adds the comment',
    tags=['Comments']
)
def add_comment(body: PostCommentCreate):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_comment(
        user_id=blizzard_id,
        post_id=body.post_id,
        reply_id=body.reply_id,
        text=body.text,
    )


@router.delete(
    "/comment",
    summary='Removes the comment',
    tags=['Comments']
)
def remove_comment(body: CommentIdAndToken, db=Depends(get_db)):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    com = db.query(PostCommentsModel).filter(PostCommentsModel.id == body.comment_id).filter(
        PostCommentsModel.user_id == blizzard_id)
    if com.count() > 0:
        com.delete()
        db.commit()
        return True
    return False


@router.post(
    "/",
    response_model=Post,
    summary='Adds the post'
)
def add_post(body: PostCreate):
    """
    Adds the post item
    :param body:
    :return:
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_post(
        user_id=blizzard_id,
        category_id=body.category_id,
        title=body.title,
        content=body.content,
        tags=body.tags,
        image=body.image
    )


@router.delete(
    "/{post_id}",
    summary='Deletes the post'
)
def delete_post(post_id: int, body: TokenArgs, db=Depends(get_db)):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    q = db.query(PostModel).filter(PostModel.id == post_id).filter(PostModel.user_id == blizzard_id)
    if q.count() == 0:
        raise HTTPException(status_code=404, detail='Post is undefined')
    return DatabaseUtils.remove_query(db, q)


@router.post(
    "/{post_id}",
    summary='Edits the post'
)
def edit_post(post_id: int, body: PostCreate, db=Depends(get_db)):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    q = db.query(PostModel).filter(PostModel.id == post_id).filter(PostModel.user_id == blizzard_id)
    if q.count() == 0:
        raise HTTPException(status_code=404, detail='Post is undefined')
    q.update({
        'title': body.title,
        'content': body.content,
        'category_id': body.category_id,
        'image': body.image,
        'tags': body.tags,
    })
    db.commit()
    return True


@router.get(
    "/{post_id}",
    response_model=Post,
    summary='Returns the post'
)
def get_post(post_id: int, db=Depends(get_db)):
    return db.query(PostModel).filter(PostModel.id == post_id).first()
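A minimal sketch (not in the repo) of how this router could be mounted in an application; the "app" object and the "/posts" prefix are assumptions for illustration, while include_router itself is standard FastAPI.

# Hypothetical application entry point mounting the posts router above.
from fastapi import FastAPI
from router import posts

app = FastAPI()
app.include_router(posts.router, prefix="/posts", tags=["posts"])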
25.876404
118
0.568823
725
6,909
6.448276
0.172414
0.047059
0.025668
0.051337
0.577968
0.52877
0.497326
0.474225
0.465882
0.459037
0
0.005161
0.186713
6,909
266
119
25.973684
0.672718
0.22145
0
0.390533
0
0
0.08837
0.004591
0
0
0
0
0
1
0.076923
false
0
0.053254
0.011834
0.289941
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73c5c8e9b60dd28827b865f9cd0c2682cc0cd16
3,216
py
Python
toontown/catalog/CatalogChatBalloon.py
CrankySupertoon01/Toontown-2
60893d104528a8e7eb4aced5d0015f22e203466d
[ "MIT" ]
1
2021-02-13T22:40:50.000Z
2021-02-13T22:40:50.000Z
toontown/catalog/CatalogChatBalloon.py
CrankySupertoonArchive/Toontown-2
60893d104528a8e7eb4aced5d0015f22e203466d
[ "MIT" ]
1
2018-07-28T20:07:04.000Z
2018-07-30T18:28:34.000Z
toontown/catalog/CatalogChatBalloon.py
CrankySupertoonArchive/Toontown-2
60893d104528a8e7eb4aced5d0015f22e203466d
[ "MIT" ]
2
2019-12-02T01:39:10.000Z
2021-02-13T22:41:00.000Z
from pandac.PandaModules import *


class CatalogChatBalloon:
    TEXT_SHIFT = (0.1, -0.05, 1.1)
    TEXT_SHIFT_REVERSED = -0.05
    TEXT_SHIFT_PROP = 0.08
    NATIVE_WIDTH = 10.0
    MIN_WIDTH = 2.5
    MIN_HEIGHT = 1
    BUBBLE_PADDING = 0.3
    BUBBLE_PADDING_PROP = 0.05
    BUTTON_SCALE = 6
    BUTTON_SHIFT = (-0.2, 0, 0.6)
    FRAME_SHIFT = (0.2, 1.4)

    def __init__(self, model):
        self.model = model

    def generate(self, text, font, textColor=(0, 0, 0, 1), balloonColor=(1, 1, 1, 1),
                 wordWrap=10.0, button=None, reversed=False):
        root = NodePath('balloon')

        # Add balloon geometry:
        balloon = self.model.copyTo(root)
        top = balloon.find('**/top')
        middle = balloon.find('**/middle')
        bottom = balloon.find('**/bottom')
        balloon.setColor(balloonColor)
        if balloonColor[3] < 1.0:
            balloon.setTransparency(1)

        # Render the text into a TextNode, using the font:
        t = root.attachNewNode(TextNode('text'))
        t.node().setFont(font)
        t.node().setWordwrap(wordWrap)
        t.node().setText(text)
        t.node().setTextColor(textColor)
        width, height = t.node().getWidth(), t.node().getHeight()

        # Turn off depth write for the text: The place in the depth buffer is
        # held by the chat bubble anyway, and the text renders after the bubble
        # so there's no risk of the bubble overwriting the text's pixels.
        t.setAttrib(DepthWriteAttrib.make(0))
        t.setPos(self.TEXT_SHIFT)
        t.setX(t, self.TEXT_SHIFT_PROP*width)
        t.setZ(t, height)

        if reversed:
            # The nametag code wants the text on the left side of the axis,
            # rather than on the right side. Therefore, we move the text to the
            # opposite side:
            t.setX(self.TEXT_SHIFT_REVERSED - self.TEXT_SHIFT_PROP*width - width)

        # Give the chat bubble a button, if one is requested:
        if button:
            np = button.copyTo(root)
            np.setPos(t, width, 0, -height)
            np.setPos(np, self.BUTTON_SHIFT)
            np.setScale(self.BUTTON_SCALE)

        # Set a minimum width and height for short or empty messages
        if width < self.MIN_WIDTH:
            width = self.MIN_WIDTH
            if reversed:
                t.setX(t, -width/2.0)
            else:
                t.setX(t, width/2.0)
            t.node().setAlign(TextNode.ACenter)
        if height < self.MIN_HEIGHT:
            height = self.MIN_HEIGHT
            t.setX(t, height/2.0)
            t.node().setAlign(TextNode.ACenter)

        # Set the balloon's size:
        width *= 1 + self.BUBBLE_PADDING_PROP
        width += self.BUBBLE_PADDING
        balloon.setSx(width/self.NATIVE_WIDTH)
        if reversed:
            balloon.setSx(-balloon.getSx())
            balloon.setTwoSided(True)  # Render the backface of the balloon
        middle.setSz(height)
        top.setZ(top, height - 1)

        # Calculate the frame occupied by the balloon:
        left, bottom = self.FRAME_SHIFT
        if reversed:
            left = -left - width

        frame = (left, left + width, bottom, bottom + height + 1)
        return root, frame
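A hypothetical usage sketch (not in the original file), assuming a running Panda3D ShowBase so that the loader and aspect2d builtins exist; the model and font paths are placeholders, not real asset names.

# Placeholder asset paths; requires a live Panda3D/Toontown environment.
balloon_maker = CatalogChatBalloon(loader.loadModel('path/to/chatbox_model'))
root, frame = balloon_maker.generate("Hello!", loader.loadFont('path/to/font.ttf'))
root.reparentTo(aspect2d)  # attach the generated balloon to the 2D scene graph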
34.212766
81
0.589552
430
3,216
4.325581
0.318605
0.021505
0.027957
0.01828
0.068817
0.045161
0.032258
0
0
0
0
0.026822
0.304415
3,216
93
82
34.580645
0.804649
0.195896
0
0.090909
0
0
0.013608
0
0
0
0
0
0
1
0.030303
false
0
0.015152
0
0.242424
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73c9cd86a4a585bb09b4cbd3f15cf16c3ddc42d
831
py
Python
TTS/vocoder/tf/utils/io.py
mightmay/Mien-TTS
8a22ff0a79558b3cf4981ce1b63f4d1485ea6338
[ "MIT" ]
null
null
null
TTS/vocoder/tf/utils/io.py
mightmay/Mien-TTS
8a22ff0a79558b3cf4981ce1b63f4d1485ea6338
[ "MIT" ]
null
null
null
TTS/vocoder/tf/utils/io.py
mightmay/Mien-TTS
8a22ff0a79558b3cf4981ce1b63f4d1485ea6338
[ "MIT" ]
1
2021-04-28T17:30:03.000Z
2021-04-28T17:30:03.000Z
import datetime
import pickle

import tensorflow as tf


def save_checkpoint(model, current_step, epoch, output_path, **kwargs):
    """ Save TF Vocoder model """
    state = {
        'model': model.weights,
        'step': current_step,
        'epoch': epoch,
        'date': datetime.date.today().strftime("%B %d, %Y"),
    }
    state.update(kwargs)
    pickle.dump(state, open(output_path, 'wb'))


def load_checkpoint(model, checkpoint_path):
    """ Load TF Vocoder model """
    checkpoint = pickle.load(open(checkpoint_path, 'rb'))
    chkp_var_dict = {var.name: var.numpy() for var in checkpoint['model']}
    tf_vars = model.weights
    for tf_var in tf_vars:
        layer_name = tf_var.name
        chkp_var_value = chkp_var_dict[layer_name]
        tf.keras.backend.set_value(tf_var, chkp_var_value)
    return model
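A minimal sketch (not from the repo) of the name-matched restore idea load_checkpoint() uses: numpy values are copied back into variables keyed by variable name. The stand-in Keras model is an assumption for illustration.

# Name-matched weight restore, mirroring the loop in load_checkpoint() above.
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
snapshot = {var.name: var.numpy() for var in model.weights}  # like chkp_var_dict
for tf_var in model.weights:
    tf.keras.backend.set_value(tf_var, snapshot[tf_var.name])  # restore by name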
29.678571
74
0.65704
113
831
4.619469
0.39823
0.05364
0.061303
0
0
0
0
0
0
0
0
0
0.216607
831
27
75
30.777778
0.801843
0.052948
0
0
0
0
0.046572
0
0
0
0
0
0
1
0.095238
false
0
0.142857
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73dae2399d233b79b4e4ba84ebee8f7d71a6c22
10,463
py
Python
archive/old_plots/plot_supplemental_divergence_correlations.py
garudlab/mother_infant
98a27c83bf5ece9497d5a030c6c9396a8c514781
[ "BSD-2-Clause" ]
2
2020-08-09T06:19:11.000Z
2021-08-18T17:12:23.000Z
archive/old_plots/plot_supplemental_divergence_correlations.py
garudlab/mother_infant
98a27c83bf5ece9497d5a030c6c9396a8c514781
[ "BSD-2-Clause" ]
null
null
null
archive/old_plots/plot_supplemental_divergence_correlations.py
garudlab/mother_infant
98a27c83bf5ece9497d5a030c6c9396a8c514781
[ "BSD-2-Clause" ]
8
2019-02-20T22:21:55.000Z
2021-02-13T00:55:40.000Z
import matplotlib
matplotlib.use('Agg')
import config
import parse_midas_data
import parse_HMP_data
import os.path
import pylab
import sys
import numpy

import diversity_utils
import gene_diversity_utils
import calculate_substitution_rates
import stats_utils
import matplotlib.colors as colors
import matplotlib.cm as cmx
from math import log10, ceil
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from numpy.random import randint
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import cophenet
from scipy.cluster.hierarchy import fcluster
from scipy.stats import gaussian_kde

mpl.rcParams['font.size'] = 6
mpl.rcParams['lines.linewidth'] = 0.5
mpl.rcParams['legend.frameon'] = False
mpl.rcParams['legend.fontsize'] = 'small'

################################################################################
#
# Standard header to read in argument information
#
################################################################################
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="Loads only a subset of SNPs for speed", action="store_true")
parser.add_argument("--chunk-size", type=int, help="max number of records to load", default=1000000000)
args = parser.parse_args()

debug = args.debug
chunk_size = args.chunk_size
################################################################################

good_species_list = ['Bacteroides_vulgatus_57955', 'Bacteroides_uniformis_57318', 'Alistipes_putredinis_61533']

####################################################
#
# Set up Figure (3 panels, arranged in 1x3 grid)
#
####################################################

pylab.figure(1, figsize=(7, 1.5))
fig = pylab.gcf()

# make three panels
outer_grid = gridspec.GridSpec(1, 3, width_ratios=[1, 1, 1], wspace=0.1)

#######
#
# SNP divergence vs Gene divergence in B. vulgatus
#
#######
gene_axis = plt.Subplot(fig, outer_grid[0])
fig.add_subplot(gene_axis)

gene_axis.set_ylabel('SNP divergence\n %s' % (good_species_list[0]))
gene_axis.set_xlabel('Gene divergence\n %s' % (good_species_list[0]))

gene_axis.set_ylim([1e-06, 1e-01])
# gene_axis.set_xlim([1e-02, 1])

gene_axis.spines['top'].set_visible(False)
gene_axis.spines['right'].set_visible(False)
gene_axis.get_xaxis().tick_bottom()
gene_axis.get_yaxis().tick_left()

#######
#
# SNP divergence (B. vulgatus) vs SNP divergence (B. uniformis)
#
#######
species_axis_1 = plt.Subplot(fig, outer_grid[1])
fig.add_subplot(species_axis_1)

species_axis_1.set_xlabel('SNP divergence\n %s' % (good_species_list[1]))

species_axis_1.set_ylim([1e-06, 1e-01])
species_axis_1.set_xlim([1e-06, 1e-01])

species_axis_1.spines['top'].set_visible(False)
species_axis_1.spines['right'].set_visible(False)
species_axis_1.get_xaxis().tick_bottom()
species_axis_1.get_yaxis().tick_left()

#######
#
# SNP divergence (B. vulgatus) vs SNP divergence (A. putredinis)
#
#######
species_axis_2 = plt.Subplot(fig, outer_grid[2])
fig.add_subplot(species_axis_2)

species_axis_2.set_xlabel('SNP divergence\n %s' % (good_species_list[2]))

species_axis_2.set_ylim([1e-06, 1e-01])
species_axis_2.set_xlim([1e-06, 1e-01])

species_axis_2.spines['top'].set_visible(False)
species_axis_2.spines['right'].set_visible(False)
species_axis_2.get_xaxis().tick_bottom()
species_axis_2.get_yaxis().tick_left()

########
#
# Now do calculation and plot figures
#
########

sys.stderr.write("Loading sample metadata...\n")
subject_sample_map = parse_HMP_data.parse_subject_sample_map()
sample_order_map = parse_HMP_data.parse_sample_order_map()
sys.stderr.write("Done!\n")

snp_divergence_map = {species_name: {} for species_name in good_species_list}
gene_divergence_map = {species_name: {} for species_name in good_species_list}

for species_name in good_species_list:

    sys.stderr.write("Loading haploid samples...\n")
    snp_samples = diversity_utils.calculate_haploid_samples(species_name, debug=debug)

    sys.stderr.write("Calculating unique samples...\n")
    # Only consider one sample per person
    snp_samples = snp_samples[parse_midas_data.calculate_unique_samples(subject_sample_map, sample_list=snp_samples)]

    sys.stderr.write("Loading pre-computed substitution rates for %s...\n" % species_name)
    substitution_rate_map = calculate_substitution_rates.load_substitution_rate_map(species_name)

    sys.stderr.write("Calculating snp matrix...\n")
    dummy_samples, snp_difference_matrix, snp_opportunity_matrix = calculate_substitution_rates.calculate_matrices_from_substitution_rate_map(substitution_rate_map, 'core', allowed_samples=snp_samples)
    snp_samples = dummy_samples
    sys.stderr.write("Done!\n")

    sys.stderr.write("Calculating gene matrix...\n")
    gene_samples, gene_difference_matrix, gene_opportunity_matrix = calculate_substitution_rates.calculate_matrices_from_substitution_rate_map(substitution_rate_map, 'genes', allowed_samples=snp_samples)
    snp_samples = gene_samples
    sys.stderr.write("Done!\n")

    # Focus on the subset of samples that have sufficient gene depth and snp depth
    desired_samples = gene_samples

    # Figure out which pairs of indices in desired_samples belong to diff subjects
    desired_same_sample_idxs, desired_same_subject_idxs, desired_diff_subject_idxs = parse_midas_data.calculate_subject_pairs(subject_sample_map, desired_samples)

    # Turn these into indices for snp and gene matrices
    snp_sample_idx_map = parse_midas_data.calculate_sample_idx_map(desired_samples, snp_samples)
    gene_sample_idx_map = parse_midas_data.calculate_sample_idx_map(desired_samples, gene_samples)

    same_subject_snp_idxs = parse_midas_data.apply_sample_index_map_to_indices(snp_sample_idx_map, desired_same_subject_idxs)
    same_subject_gene_idxs = parse_midas_data.apply_sample_index_map_to_indices(gene_sample_idx_map, desired_same_subject_idxs)

    diff_subject_snp_idxs = parse_midas_data.apply_sample_index_map_to_indices(snp_sample_idx_map, desired_diff_subject_idxs)
    diff_subject_gene_idxs = parse_midas_data.apply_sample_index_map_to_indices(gene_sample_idx_map, desired_diff_subject_idxs)

    for sample_pair_idx in xrange(0, len(diff_subject_snp_idxs[0])):

        snp_i = diff_subject_snp_idxs[0][sample_pair_idx]
        snp_j = diff_subject_snp_idxs[1][sample_pair_idx]

        gene_i = diff_subject_gene_idxs[0][sample_pair_idx]
        gene_j = diff_subject_gene_idxs[1][sample_pair_idx]

        sample_i = desired_samples[gene_i]
        sample_j = desired_samples[gene_j]

        # This will serve as a key in snp_divergence_map
        sample_pair = frozenset([sample_i, sample_j])

        # Focus on pairs of samples with sufficient coverage
        if snp_opportunity_matrix[snp_i, snp_j] > 0:
            snp_d = snp_difference_matrix[snp_i, snp_j]*1.0/snp_opportunity_matrix[snp_i, snp_j]
            snp_divergence_map[species_name][sample_pair] = snp_d

        if gene_opportunity_matrix[gene_i, gene_j] > 0:
            gene_d = gene_difference_matrix[gene_i, gene_j]*1.0/gene_opportunity_matrix[gene_i, gene_j]
            gene_divergence_map[species_name][sample_pair] = gene_d

#################
#
# Plot figures!
#
#################

# First calculate SNP vs gene divergence in B. vulgatus
species_name = good_species_list[0]

snp_divergences = []
gene_divergences = []
# Loop over sample pairs that are in both snp_divergence_map and gene_divergence_map
for sample_pair in (set(snp_divergence_map[species_name].keys()) & set(gene_divergence_map[species_name].keys())):
    snp_divergences.append(snp_divergence_map[species_name][sample_pair])
    gene_divergences.append(gene_divergence_map[species_name][sample_pair])

snp_divergences = numpy.array(snp_divergences)
gene_divergences = numpy.array(gene_divergences)

# Null expectation (medians line up)
median_ratio = numpy.median(snp_divergences)/numpy.median(gene_divergences)
gene_axis.loglog([1e-02, 1], [1e-02*median_ratio, 1*median_ratio], 'k-', linewidth=0.25)

gene_axis.loglog(gene_divergences, snp_divergences, 'r.', markersize=2, alpha=0.5, markeredgewidth=0, rasterized=True)

# Then SNP divergence between two species
species_1 = good_species_list[0]
species_2 = good_species_list[1]

snp_divergences_1 = []
snp_divergences_2 = []
# Loop over sample pairs that are in both species' snp_divergence_map
for sample_pair in (set(snp_divergence_map[species_1].keys()) & set(snp_divergence_map[species_2].keys())):
    snp_divergences_1.append(snp_divergence_map[species_1][sample_pair])
    snp_divergences_2.append(snp_divergence_map[species_2][sample_pair])

snp_divergences_1 = numpy.array(snp_divergences_1)
snp_divergences_2 = numpy.array(snp_divergences_2)

# Null expectation (medians line up)
median_ratio = numpy.median(snp_divergences_1)/numpy.median(snp_divergences_2)
species_axis_1.loglog([1e-06, 1e-01], [1e-06*median_ratio, 1e-01*median_ratio], 'k-', linewidth=0.25)

# Observed values
species_axis_1.loglog(snp_divergences_2, snp_divergences_1, 'r.', markersize=2, alpha=0.5, markeredgewidth=0, rasterized=True)

# Then SNP divergence between other two species
species_1 = good_species_list[0]
species_2 = good_species_list[2]

snp_divergences_1 = []
snp_divergences_2 = []
# Loop over sample pairs that are in both species' snp_divergence_map
for sample_pair in (set(snp_divergence_map[species_1].keys()) & set(snp_divergence_map[species_2].keys())):
    snp_divergences_1.append(snp_divergence_map[species_1][sample_pair])
    snp_divergences_2.append(snp_divergence_map[species_2][sample_pair])

snp_divergences_1 = numpy.array(snp_divergences_1)
snp_divergences_2 = numpy.array(snp_divergences_2)

# Null expectation (medians line up)
median_ratio = numpy.median(snp_divergences_1)/numpy.median(snp_divergences_2)
species_axis_2.loglog([1e-06, 1e-01], [1e-06*median_ratio, 1e-01*median_ratio], 'k-', linewidth=0.25)

species_axis_2.loglog(snp_divergences_2, snp_divergences_1, 'r.', markersize=2, alpha=0.5, markeredgewidth=0, rasterized=True)

# Since y-axes are shared, do not duplicate ticklabels
species_axis_1.set_yticklabels([])
species_axis_2.set_yticklabels([])

sys.stderr.write("Saving figure...\t")
fig.savefig('%s/supplemental_divergence_correlations.pdf' % (parse_midas_data.analysis_directory), bbox_inches='tight', dpi=600)
sys.stderr.write("Done!\n")
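A toy sketch (not from the repo) of the per-pair divergence estimate computed in the loop above: divergence is differences divided by opportunities, recorded only where the opportunity count is positive. The matrices below are invented placeholders.

# Hypothetical 2-sample difference/opportunity matrices for illustration.
import numpy as np

diff = np.array([[0, 5], [5, 0]])        # pairwise difference counts (made up)
opp = np.array([[0, 1000], [1000, 0]])   # pairwise opportunity counts (made up)
i, j = 0, 1
if opp[i, j] > 0:
    d = diff[i, j] * 1.0 / opp[i, j]     # divergence for the (i, j) sample pair
    print(d)                             # 0.005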
38.047273
203
0.750454
1,531
10,463
4.772044
0.176355
0.057487
0.03504
0.037777
0.568163
0.487271
0.450041
0.378319
0.364084
0.353408
0
0.023441
0.111154
10,463
275
204
38.047273
0.762151
0.121476
0
0.147651
0
0
0.073887
0.013997
0
0
0
0
0
1
0
false
0
0.161074
0
0.161074
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73e6e9b07e0e5afa67a521f170e1521081ec4b3
34,246
py
Python
multivis/plotFeatures.py
brettChapman/cimcb_vis
b373ed426b24ece1dcc20febd7c8023921b024d6
[ "MIT" ]
1
2021-06-27T23:52:40.000Z
2021-06-27T23:52:40.000Z
multivis/plotFeatures.py
brettChapman/cimcb_vis
b373ed426b24ece1dcc20febd7c8023921b024d6
[ "MIT" ]
null
null
null
multivis/plotFeatures.py
brettChapman/cimcb_vis
b373ed426b24ece1dcc20febd7c8023921b024d6
[ "MIT" ]
2
2021-06-27T23:53:03.000Z
2021-07-12T12:59:23.000Z
import sys import copy import matplotlib import matplotlib.pyplot as plt import seaborn as sns from collections import Counter from .utils import * import numpy as np import pandas as pd class plotFeatures: usage = """Produces different feature plots given a data table and peak table. Initial_Parameters ---------- peaktable : Pandas dataframe containing peak data. Must contain 'Name' and 'Label'. datatable : Pandas dataframe containing matrix of values to plot (N samples x N features). Columns/features must be same as 'Name' from Peak Table. Methods ------- set_params : Set parameters - plot_type: The type of plot. Either "point", "violin", "box", "swarm", "violin-swarm" or "box-swarm" (default: 'point') column_numbers: The number of columns to display in the plots (default: 4) log_data: Perform a log ('natural', base 2 or base 10) on all data (default: (True, 2)) scale_data: Scale the data ('standard' (centers to the mean and scales to unit variance), 'minmax' (scales between 0 and 1), 'maxabs' (scales to the absolute maximum value), 'robust' (centers to the median and scales to between 25th and 75th quantile range) (default: (True, 'minmax')) impute_data: Impute any missing values using KNN impute with a set number of nearest neighbours (default: (True, 3)) style: Set the matplotlib style (see https://matplotlib.org/stable/tutorials/introductory/customizing.html) (default: 'seaborn-white') transparent: Setting to 'True' will make the background transparent (default: False) figSize: The figure size as a tuple (width,height) (default: (15,10)) fontSize: The font size for all text (default: 12) colour_palette: The colour palette to use for the plot (default: None) y_axis_label: The label to customise the y axis (default: None) x_axis_rotation: Rotate the x axis labels this number of degrees (default: 0) group_column_name: The group column name used in the datatable (e.g. 'Class') (default: None) point_estimator: The statistical function to use for the point plot. Either "mean" or "median" (default: 'mean') point_ci: The bootstrapped confidence interval for the point plot. Can also be standard deviation ("sd") (default: 95) violin_distribution_type: The representation of the distribution of data points within the violin plot. Either "quartile", "box", "point", "stick" or None (default: 'box') violin_width_scale: The method used to scale the width of the violin plot. Either "area", "count" or "width" (default: "width") box_iqr: The proportion past the lower and upper quartiles to extend the plot whiskers for the box plot. 
Points outside this range will be identified as outliers (default: 1.5) saveImage: Setting to 'True' will save the image to file (default: True) imageFileName: The image file name to save to (default: [plot_type]_features.png') dpi: The number of Dots Per Inch (DPI) for the image (default: 200) help : Print this help text plot : Generates feature plots """ def __init__(self, peaktable, datatable): peaktable = self.__checkPeakTable(self.__checkData(peaktable)) datatable = self.__checkData(datatable) # Slice the meta-data, and select only peaks from the peaktable for processing, and add the meta-data back meta = datatable.T[~datatable.T.index.isin(peaktable['Name'])].T.reset_index(drop=True) dat = datatable[peaktable['Name']].reset_index() datatable = pd.concat([meta, dat], axis=1).set_index(['index']) datatable.index.name = None self.__peaktable = peaktable # Search for duplicate labels and amend with a suffix, to avoid issues when relabelling the datatable labels = copy.deepcopy(list(peaktable['Label'])) label_counts = {k: v for k, v in Counter(labels).items() if v > 1} for i in reversed(range(len(labels))): item = str(labels[i]) if item in label_counts and label_counts[item]: labels[i] += "_" + str(label_counts[item]) label_counts[item] -= 1 #Label datatable with peak labels instead of names for ease of feature plotting col_label_dict = dict(zip(list(peaktable['Name']), labels)) datatable.rename(columns=col_label_dict, inplace=True) self.__peak_labels = labels self.__datatable = datatable self.set_params() def help(self): print(plotFeatures.usage) def set_params(self, plot_type='point', column_numbers=4, log_data=(True, 2), scale_data=(True, 'minmax'), impute_data=(True, 3), style='seaborn-white', transparent=False, figSize = (15, 10), fontSize = 12, colour_palette=None, y_axis_label=None, x_axis_rotation=0, group_column_name=None, point_estimator='mean', point_ci=95, violin_distribution_type='box', violin_width_scale='width', box_iqr=1.5, saveImage=True, imageFileName='_features.png', dpi = 200): plot_type, column_numbers, log_data, scale_data, impute_data, style, transparent, figSize, fontSize, colour_palette, y_axis_label, x_axis_rotation, group_column_name, point_estimator, point_ci, violin_distribution_type, violin_width_scale, box_iqr, saveImage, imageFileName, dpi = self.__paramCheck(plot_type, column_numbers, log_data, scale_data, impute_data, style, transparent, figSize, fontSize, colour_palette, y_axis_label, x_axis_rotation, group_column_name, point_estimator, point_ci, violin_distribution_type, violin_width_scale, box_iqr, saveImage, imageFileName, dpi) self.__plot_type = plot_type; self.__column_numbers = column_numbers; self.__log_data = log_data; self.__scale_data = scale_data; self.__impute_data = impute_data; self.__style = style; self.__transparent = transparent; self.__figSize = figSize; self.__fontSize = fontSize; self.__colour_palette = colour_palette; self.__y_axis_label = y_axis_label; self.__x_axis_rotation = x_axis_rotation; self.__group_column_name = group_column_name; self.__point_estimator = point_estimator; self.__point_ci = point_ci; self.__violin_distribution_type = violin_distribution_type; self.__violin_width_scale = violin_width_scale; self.__box_iqr = box_iqr; self.__saveImage = saveImage; self.__imageFileName = imageFileName; self.__dpi = dpi; def plot(self): datatable = copy.deepcopy(self.__datatable) labels = self.__peak_labels plot_type = self.__plot_type group_column_name = self.__group_column_name column_numbers = self.__column_numbers 
colour_palette = self.__colour_palette point_ci = self.__point_ci point_estimator = self.__point_estimator log_data = self.__log_data scale_data = self.__scale_data impute_data = self.__impute_data x_axis_rotation = self.__x_axis_rotation y_axis_label = self.__y_axis_label violin_distribution_type = self.__violin_distribution_type violin_width_scale = self.__violin_width_scale box_iqr = self.__box_iqr imageFileName = self.__imageFileName saveImage = self.__saveImage fontSize = self.__fontSize style = self.__style transparent = self.__transparent dpi = self.__dpi figSize = self.__figSize meta = datatable.T[~datatable.T.index.isin(labels)].T.reset_index(drop=True) X = datatable[labels].reset_index(drop=True) (log_bool, log_base) = log_data; if log_bool: if isinstance(log_base, str) and log_base.lower() == 'natural': X = X.applymap(np.log); elif log_base == 2: X = X.applymap(np.log2); elif log_base == 10: X = X.applymap(np.log10); else: print("Error: The chosen log type is invalid.") sys.exit() (scale_bool, scale_type) = scale_data if scale_bool: if isinstance(scale_type, str) and scale_type.lower() == 'standard': X = scaler(X, type=scale_type.lower()).reset_index(drop=True) elif isinstance(scale_type, str) and scale_type.lower() == 'minmax': X = scaler(X, type=scale_type.lower()).reset_index(drop=True) elif isinstance(scale_type, str) and scale_type.lower() == 'maxabs': X = scaler(X, type=scale_type.lower()).reset_index(drop=True) elif isinstance(scale_type, str) and scale_type.lower() == 'robust': X = scaler(X, type=scale_type.lower()).reset_index(drop=True) else: print("Error: The chosen scale type is invalid.") sys.exit() (impute_bool, k) = impute_data; if impute_bool: X = imputeData(X, k=k).reset_index(drop=True) if not isinstance(X, pd.DataFrame): X = pd.DataFrame(X, columns=labels) # Add the meta data back in with the logged, scaled, or imputed data datatable = pd.concat([meta, X], axis=1).reset_index(drop=True) with plt.style.context(style): fig, axes = plt.subplots(nrows=int(np.ceil(float(len(labels) / column_numbers))), ncols=column_numbers, sharey=True, figsize=figSize) if plot_type == 'point': for peak_index, peak in enumerate(labels): if point_estimator.lower() == 'mean': point_estimator = 'Mean' ax = sns.pointplot(data=datatable, x=group_column_name, y=peak, estimator=np.nanmean, capsize=0.1, ci=point_ci, palette=colour_palette, ax=axes.flat[peak_index]) elif point_estimator.lower() == 'median': point_estimator = 'Median' ax = sns.pointplot(data=datatable, x=group_column_name, y=peak, estimator=np.nanmedian, capsize=0.1, ci=point_ci, palette=colour_palette, ax=axes.flat[peak_index]) else: print("Error: Invalid point plot estimator type.") sys.exit() ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) if log_bool: if scale_data: if isinstance(point_ci, str): if point_ci == 'sd': ax.set_title(peak + ' within SD', fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) {} Peak Area within SD'.format(log_base, scale_type, point_estimator), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: ax.set_title(peak + ' with {}% CI'.format(point_ci), fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) {} Peak Area & {}% CI'.format(log_base, scale_type, point_estimator, point_ci), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if isinstance(point_ci, str): if point_ci == 'sd': ax.set_title(peak + ' within SD', fontsize=fontSize) 
ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Log({}) {} Peak Area within SD'.format(log_base, point_estimator), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: ax.set_title(peak + ' with {}% CI'.format(point_ci), fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Log({}) {} Peak Area & {}% CI'.format(log_base, point_estimator, point_ci), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if isinstance(point_ci, str): if point_ci == 'sd': ax.set_title(peak + ' within SD', fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Scaled ({}) {} Peak Area within SD'.format(scale_type, point_estimator), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: ax.set_title(peak + ' with {}% CI'.format(point_ci), fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Scaled ({}) {} Peak Area & {}% CI'.format(scale_type, point_estimator, point_ci), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if isinstance(point_ci, str): if point_ci == 'sd': ax.set_title(peak + ' within SD', fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('{} Peak Area within SD'.format(point_estimator), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: ax.set_title(peak + ' with {}% CI'.format(point_ci), fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('{} Peak Area & {}% CI'.format(point_estimator, point_ci), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'violin': for peak_index, peak in enumerate(labels): ax = sns.violinplot(data=datatable, x=group_column_name, y=peak, linewidth=1, inner=violin_distribution_type, scale=violin_width_scale, palette=colour_palette, ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'box': for peak_index, peak in enumerate(labels): ax = sns.boxplot(data=datatable, x=group_column_name, y=peak, palette=colour_palette, whis=box_iqr, ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, 
fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'swarm': for peak_index, peak in enumerate(labels): ax = sns.swarmplot(data=datatable, x=group_column_name, y=peak, size=10, palette=colour_palette, ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'violin-swarm': for peak_index, peak in enumerate(labels): ax = sns.violinplot(data=datatable, x=group_column_name, y=peak, linewidth=1, inner=None, scale=violin_width_scale, palette=colour_palette, ax=axes.flat[peak_index]) ax = sns.swarmplot(data=datatable, x=group_column_name, y=peak, color="white", edgecolor="gray", ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'box-swarm': for peak_index, peak in enumerate(labels): ax = sns.boxplot(data=datatable, x=group_column_name, y=peak, palette=colour_palette, whis=np.inf, ax=axes.flat[peak_index]) ax = sns.swarmplot(data=datatable, x=group_column_name, y=peak, color="0.2", ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) fig.tight_layout(h_pad=5, w_pad=2) if saveImage: plt.savefig(plot_type + 'Plot' + imageFileName, dpi=dpi, transparent=transparent) plt.show() def __paramCheck(self, 
plot_type, column_numbers, log_data, scale_data, impute_data, style, transparent,
                     figSize, fontSize, colour_palette, y_axis_label, x_axis_rotation,
                     group_column_name, point_estimator, point_ci, violin_distribution_type,
                     violin_width_scale, box_iqr, saveImage, imageFileName, dpi):

        cmap_list = list(matplotlib.cm.cmaps_listed) + list(matplotlib.cm.datad)
        cmap_list_r = [cmap + '_r' for cmap in cmap_list]
        cmap_list = cmap_list + cmap_list_r

        plot_types = ['point', 'violin', 'box', 'swarm', 'violin-swarm', 'box-swarm']
        estimator_types = ['mean', 'median']

        datatable = self.__datatable

        if plot_type.lower() not in plot_types:
            print("Error: Plot type is not valid. Choose one of the following: {}.".format(', '.join(plot_types)))
            sys.exit()

        if not isinstance(column_numbers, int):
            print("Error: Column numbers is not valid. Choose an integer value.")
            sys.exit()

        if not isinstance(log_data, tuple):
            print("Error: Log data type is not a tuple. Please ensure the value is a tuple (e.g. (True, 2)).")
            sys.exit()
        else:
            (log_bool, log_base) = log_data

            if not isinstance(log_bool, bool):
                print("Error: Log data first tuple item is not a boolean value. Choose either \"True\" or \"False\".")
                sys.exit()

            base_types = ['natural', 2, 10]

            if isinstance(log_base, str):
                log_base = log_base.lower()

            if log_base not in base_types:
                # map(str, ...) so join() does not fail on the integer base types
                print("Error: Log data second tuple item is not valid. Choose one of {}.".format(', '.join(map(str, base_types))))
                sys.exit()

        if not isinstance(scale_data, tuple):
            print("Error: Scale data type is not a tuple. Please ensure the value is a tuple (e.g. (True, 'standard')).")
            sys.exit()
        else:
            (scale_bool, scale_type) = scale_data

            if not isinstance(scale_bool, bool):
                print("Error: Scale data first tuple item is not a boolean value. Choose either \"True\" or \"False\".")
                sys.exit()

            scale_types = ['standard', 'minmax', 'maxabs', 'robust']

            if isinstance(scale_type, str):
                scale_type = scale_type.lower()

            if scale_type not in scale_types:
                print("Error: Scale data second tuple item is not valid. Choose one of {}.".format(', '.join(scale_types)))
                sys.exit()

        if not isinstance(impute_data, tuple):
            print("Error: Impute data type is not a tuple. Please ensure the value is a tuple (e.g. (True, 3)).")
            sys.exit()
        else:
            (impute_bool, k) = impute_data

            if not isinstance(impute_bool, bool):
                print("Error: Impute data first tuple item is not a boolean value. Choose either \"True\" or \"False\".")
                sys.exit()

            if not isinstance(k, (float, int)):
                print("Error: Impute data second tuple item, the nearest neighbours k value, is not valid. Choose a float or integer value.")
                sys.exit()

        if not isinstance(style, str):
            print("Error: Seaborn style is not valid. Choose a string value.")
            sys.exit()
        else:
            styleList = list(plt.style.available)

            if style not in styleList:
                print("Error: Chosen style is not valid. Choose one of the following: {}.".format(', '.join(styleList)))
                sys.exit()

        if not isinstance(transparent, bool):
            print("Error: The transparent value is not valid. Choose either \"True\" or \"False\".")
            sys.exit()

        if not isinstance(figSize, tuple):
            print("Error: Figure size is not valid. Choose a tuple of length 2.")
            sys.exit()
        else:
            for length in figSize:
                if not isinstance(length, (float, int)):
                    print("Error: Figure size value is not valid. Choose a float or integer value.")
                    sys.exit()

        if not isinstance(fontSize, (float, int)):
            print("Error: Font size is not valid. Choose a float or integer value.")
            sys.exit()

        if colour_palette is not None:
            if not isinstance(colour_palette, str):
                print("Error: The colour palette is not valid. Choose a string value.")
                sys.exit()
            elif colour_palette not in cmap_list:
                print("Error: The colour palette is not valid. Choose one of the following: {}.".format(', '.join(cmap_list)))
                sys.exit()

        if y_axis_label is not None:
            # The original test was inverted ("if isinstance(...)"), so it rejected every valid string label.
            if not isinstance(y_axis_label, str):
                print("Error: The y axis label is not valid. Choose a string value.")
                sys.exit()

        if not isinstance(x_axis_rotation, (float, int)):
            print("Error: The x axis rotation value is not valid. Choose a float or integer value.")
            sys.exit()

        if (x_axis_rotation < 0) or (x_axis_rotation > 360):
            print("Error: The x axis rotation value is not valid. Choose a value >= 0 and <= 360.")
            sys.exit()

        if group_column_name is not None:
            if not isinstance(group_column_name, str):
                print("Error: Group column name is not valid. Choose a string value.")
                sys.exit()
            elif group_column_name not in list(datatable.columns):
                print("Error: Group column name not valid. Choose one of {}.".format(', '.join(list(datatable.columns))))
                sys.exit()

        if point_estimator.lower() not in estimator_types:
            print("Error: The chosen point plot estimator is invalid. Choose one of \"{}\".".format('\" or \"'.join(estimator_types)))
            sys.exit()

        if isinstance(point_ci, str):
            if point_ci != 'sd':
                print("Error: The string value for point plot ci is invalid. Choose a float, integer or 'sd' value for standard deviation.")
                sys.exit()
        elif not isinstance(point_ci, (float, int)):
            print("Error: The value for point plot ci is invalid. Choose a float, integer or 'sd' value for standard deviation.")
            sys.exit()

        violin_distribution_types = ['quartile', 'box', 'point', 'stick', None]
        violin_width_scale_types = ['area', 'count', 'width']

        if plot_type.lower() == "violin":
            if violin_distribution_type not in violin_distribution_types:
                # map(str, ...) so join() does not fail on the None entry
                print("Error: Violin distribution type not valid. Choose one of the following: {}.".format(', '.join(map(str, violin_distribution_types))))
                sys.exit()

            if violin_width_scale not in violin_width_scale_types:
                print("Error: Violin width scale type not valid. Choose one of the following: {}.".format(', '.join(violin_width_scale_types)))
                sys.exit()

        # The original compared the bound method itself ("plot_type.lower == ..."),
        # which is always False, so the box_iqr check never ran.
        if plot_type.lower() == "box":
            if not isinstance(box_iqr, (float, int)):
                print("Error: The box plot interquartile range extension beyond whiskers is not valid. Choose a float or integer value.")
                sys.exit()

        if not isinstance(saveImage, bool):
            print("Error: Save image is not valid. Choose either \"True\" or \"False\".")
            sys.exit()

        if not isinstance(imageFileName, str):
            print("Error: Image file name is not valid. Choose a string value.")
            sys.exit()

        if not isinstance(dpi, (float, int)):
            print("Error: Dpi is not valid. Choose a float or integer value.")
            sys.exit()

        return (plot_type, column_numbers, log_data, scale_data, impute_data, style, transparent,
                figSize, fontSize, colour_palette, y_axis_label, x_axis_rotation, group_column_name,
                point_estimator, point_ci, violin_distribution_type, violin_width_scale, box_iqr,
                saveImage, imageFileName, dpi)

    def __checkData(self, df):
        if not isinstance(df, pd.DataFrame):
            print("Error: A dataframe was not entered. Please check your data.")
            sys.exit()  # the original only printed, then returned the invalid input
        return df

    def __checkPeakTable(self, PeakTable):
        if "Name" not in PeakTable.columns:
            print("Error: \"Name\" column not in Peak Table. Please check your data.")
            sys.exit()

        if "Label" not in PeakTable.columns:
            print("Error: \"Label\" column not in Peak Table. Please check your data.")
            sys.exit()

        # Do not assume the peaks/nodes have been indexed correctly. Remove any index columns and reindex.
        column_list = [column.lower() for column in PeakTable.columns]

        if 'idx' in column_list:
            index = column_list.index('idx')
            column_name = PeakTable.columns[index]
            PeakTable = PeakTable.drop(columns=[column_name])

        if 'index' in column_list:
            index = column_list.index('index')
            column_name = PeakTable.columns[index]
            PeakTable = PeakTable.drop(columns=[column_name])

        PeakTable = PeakTable.reset_index(drop=True)
        PeakTable.index.name = 'Idx'
        PeakTable = PeakTable.reset_index()

        return PeakTable
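The plotting branches above repeat one identical log/scale y-axis-label cascade for every plot type. A minimal sketch of a helper that could replace the repetition; the name _y_label is hypothetical and not part of the original class, and the parameter names mirror the locals used in the branches:

def _y_label(log_bool, log_base, scale_data, scale_type, y_axis_label):
    """Reproduce the repeated y-axis-label cascade in one place."""
    if y_axis_label is not None:
        return y_axis_label
    if log_bool and scale_data:
        return 'Log({}) scaled ({}) Peak Area'.format(log_base, scale_type)
    if log_bool:
        return 'Log({}) Peak Area'.format(log_base)
    if scale_data:
        return 'Scaled ({}) Peak Area'.format(scale_type)
    return 'Peak Area'

Each branch would then reduce to ax.set_ylabel(_y_label(log_bool, log_base, scale_data, scale_type, y_axis_label), fontsize=fontSize).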
52.605223
586
0.5464
3,903
34,246
4.585191
0.089931
0.02291
0.038556
0.039338
0.577839
0.543753
0.517825
0.495809
0.49363
0.485807
0
0.00362
0.36267
34,246
651
587
52.605223
0.81632
0.013023
0
0.469159
0
0.037383
0.208759
0.001509
0
0
0
0
0
1
0.013084
false
0
0.016822
0
0.039252
0.074766
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73eca01ba5620a706110aaabb7ea66ae754f7f0
1,183
py
Python
core/data/DataWriter.py
berendkleinhaneveld/Registrationshop
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
[ "MIT" ]
25
2015-11-08T16:36:54.000Z
2022-01-20T16:03:28.000Z
core/data/DataWriter.py
berendkleinhaneveld/Registrationshop
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
[ "MIT" ]
2
2016-12-01T23:13:08.000Z
2017-07-25T02:40:49.000Z
core/data/DataWriter.py
berendkleinhaneveld/Registrationshop
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
[ "MIT" ]
10
2016-07-05T14:39:16.000Z
2022-01-01T02:05:55.000Z
""" DataWriter.py """ from DataController import DataController from DataReader import DataReader from vtk import vtkMetaImageWriter from vtk import vtkXMLImageDataWriter class DataWriter(DataController): """ DataWriter writes an image data object to disk using the provided format. """ def __init__(self): super(DataWriter, self).__init__() self.supportedExtensions = [DataReader.TypeMHD, DataReader.TypeVTI, DataReader.TypeMHA] def WriteToFile(self, imageData, exportFileName, fileType): if fileType == DataReader.TypeMHD: if not exportFileName.endswith(".mhd"): exportFileName = exportFileName + ".mhd" writer = vtkMetaImageWriter() writer.SetFileName(exportFileName) writer.SetInputData(imageData) writer.Write() elif fileType == DataReader.TypeVTI: writer = vtkXMLImageDataWriter() writer.SetFileName(exportFileName) writer.SetInputData(imageData) writer.Write() elif fileType == DataReader.TypeMHA: writer = vtkMetaImageWriter() writer.SetFileName(exportFileName) writer.SetInputData(imageData) writer.Write() else: raise NotImplementedError("No writing support for type " + str(fileType))
27.511628
76
0.752325
114
1,183
7.736842
0.447368
0.061224
0.105442
0.12585
0.339002
0.339002
0.339002
0.339002
0.339002
0.339002
0
0
0.155537
1,183
42
77
28.166667
0.882883
0.073542
0
0.366667
0
0
0.033395
0
0
0
0
0
0
1
0.066667
false
0
0.133333
0
0.233333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c73ff4534e3b71c1974b4bf7835f8ec9472d9d62
7,483
py
Python
parkings/models/permit.py
klemmari1/parkkihubi
93218c6046c0910e8a4c723dc7128c6eec085b8c
[ "MIT" ]
12
2016-11-29T15:13:10.000Z
2021-06-12T06:45:38.000Z
parkings/models/permit.py
niuzhipeng123/parkkihubi
93218c6046c0910e8a4c723dc7128c6eec085b8c
[ "MIT" ]
154
2016-11-30T09:07:58.000Z
2022-02-12T08:29:36.000Z
parkings/models/permit.py
niuzhipeng123/parkkihubi
93218c6046c0910e8a4c723dc7128c6eec085b8c
[ "MIT" ]
15
2016-11-29T19:32:48.000Z
2022-01-05T11:31:39.000Z
from itertools import chain from django.conf import settings from django.contrib.gis.db import models as gis_models from django.db import models, router, transaction from django.utils import timezone from django.utils.translation import gettext_lazy as _ from ..fields import CleaningJsonField from ..validators import DictListValidator, TextField, TimestampField from .constants import GK25FIN_SRID from .enforcement_domain import EnforcementDomain from .mixins import TimestampedModelMixin from .parking import Parking class PermitArea(TimestampedModelMixin): name = models.CharField(max_length=40, verbose_name=_('name')) domain = models.ForeignKey( EnforcementDomain, on_delete=models.PROTECT, related_name='permit_areas') identifier = models.CharField(max_length=10, verbose_name=_('identifier')) geom = gis_models.MultiPolygonField( srid=GK25FIN_SRID, verbose_name=_('geometry')) permitted_user = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name=_("permitted_user")) class Meta: unique_together = [('domain', 'identifier')] ordering = ('identifier',) def __str__(self): return '{}/{}: {}'.format(self.domain.code, self.identifier, self.name) class PermitSeriesQuerySet(models.QuerySet): def active(self): return self.filter(active=True) def latest_active(self): return self.active().order_by('-modified_at').first() def prunable(self, time_limit=None): limit = time_limit or ( timezone.now() - settings.PARKKIHUBI_PERMITS_PRUNABLE_AFTER) return self.filter(created_at__lt=limit, active=False) class PermitSeries(TimestampedModelMixin, models.Model): active = models.BooleanField(default=False) owner = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name=_("owner")) objects = PermitSeriesQuerySet.as_manager() class Meta: ordering = ('created_at', 'id') verbose_name = _("permit series") verbose_name_plural = _("permit series") @classmethod def delete_prunable_series(cls, time_limit=None): prunable = cls.objects.prunable(time_limit) Permit.objects.filter(series__in=prunable).delete() prunable.delete() def __str__(self): return str(self.id) class PermitQuerySet(models.QuerySet): def active(self): return self.filter(series__active=True) def by_time(self, timestamp): lookup_items = PermitLookupItem.objects.by_time(timestamp) return self.filter(lookup_items__in=lookup_items).distinct() def by_subject(self, registration_number): lookup_items = PermitLookupItem.objects.by_subject(registration_number) return self.filter(lookup_items__in=lookup_items).distinct() def by_area(self, area): lookup_items = PermitLookupItem.objects.by_area(area) return self.filter(lookup_items__in=lookup_items).distinct() def bulk_create(self, permits, *args, **kwargs): for permit in permits: assert isinstance(permit, Permit) permit.full_clean() with transaction.atomic(using=self.db, savepoint=False): created_permits = super().bulk_create(permits, *args, **kwargs) PermitLookupItem.objects.using(self.db).bulk_create( chain(*(x._make_lookup_items() for x in created_permits))) return created_permits class Permit(TimestampedModelMixin, models.Model): domain = models.ForeignKey( EnforcementDomain, on_delete=models.PROTECT, related_name='permits') series = models.ForeignKey(PermitSeries, on_delete=models.PROTECT) external_id = models.CharField(max_length=50, null=True, blank=True) subjects = CleaningJsonField(blank=True, validators=[DictListValidator({ 'start_time': TimestampField(), 'end_time': TimestampField(), 'registration_number': TextField(max_length=20), })]) areas = 
CleaningJsonField(blank=True, validators=[DictListValidator({ 'start_time': TimestampField(), 'end_time': TimestampField(), 'area': TextField(max_length=10), })]) objects = PermitQuerySet.as_manager() class Meta: unique_together = [('series', 'external_id')] indexes = [ models.Index(fields=['series', 'id']), ] ordering = ('series', 'id') def __str__(self): return 'Permit {id} ({series}{active}/{external_id} {dom})'.format( id=self.id, dom=self.domain.code, series=self.series, active='*' if self.series.active else '', external_id=self.external_id) def save(self, using=None, *args, **kwargs): self.full_clean() using = using or router.db_for_write(type(self), instance=self) with transaction.atomic(using=using, savepoint=False): super(Permit, self).save(using=using, *args, **kwargs) self.lookup_items.all().using(using).delete() new_lookup_items = self._make_lookup_items() PermitLookupItem.objects.using(using).bulk_create(new_lookup_items) def _make_lookup_items(self): for area in self.areas: for subject in self.subjects: max_start_time = max(subject['start_time'], area['start_time']) min_end_time = min(subject['end_time'], area['end_time']) if max_start_time >= min_end_time: continue yield PermitLookupItem( permit=self, registration_number=Parking.normalize_reg_num( subject['registration_number']), area=PermitArea.objects.get(identifier=area['area'], domain=self.domain), start_time=max_start_time, end_time=min_end_time ) class PermitLookupItemQuerySet(models.QuerySet): def active(self): return self.filter(permit__series__active=True) def by_time(self, timestamp): return self.filter(start_time__lte=timestamp, end_time__gte=timestamp) def by_subject(self, registration_number): normalized_reg_num = Parking.normalize_reg_num(registration_number) return self.filter(registration_number=normalized_reg_num) def by_area(self, area): return self.filter(area=area) class PermitLookupItem(models.Model): permit = models.ForeignKey( Permit, related_name="lookup_items", on_delete=models.CASCADE) registration_number = models.CharField(max_length=20) area = models.ForeignKey(PermitArea, on_delete=models.PROTECT, default=None, null=True, blank=True) start_time = models.DateTimeField() end_time = models.DateTimeField() objects = PermitLookupItemQuerySet.as_manager() class Meta: indexes = [ models.Index(fields=[ 'registration_number', 'start_time', 'end_time', 'area', 'permit']), ] ordering = ('registration_number', 'start_time', 'end_time') def __str__(self): return ( '{start_time:%Y-%m-%d %H:%M} -- {end_time:%Y-%m-%d %H:%M} / ' '{registration_number} / {area}' ).format( start_time=self.start_time, end_time=self.end_time, registration_number=self.registration_number, area=self.area.identifier)
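Permit._make_lookup_items above clamps each (subject, area) pair to the intersection of their validity windows with max(start)/min(end) and skips disjoint pairs. The rule in isolation (the function name interval_intersection is hypothetical):

from datetime import datetime

def interval_intersection(subject, area):
    """max-start/min-end clamp, as in Permit._make_lookup_items."""
    start = max(subject['start_time'], area['start_time'])
    end = min(subject['end_time'], area['end_time'])
    return None if start >= end else (start, end)

subject = {'start_time': datetime(2021, 1, 1), 'end_time': datetime(2021, 1, 10)}
area = {'start_time': datetime(2021, 1, 5), 'end_time': datetime(2021, 1, 20)}
print(interval_intersection(subject, area))  # overlap: 2021-01-05 .. 2021-01-10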
37.415
103
0.667379
834
7,483
5.739808
0.197842
0.036766
0.033424
0.026321
0.263631
0.205766
0.17464
0.17464
0.131815
0.131815
0
0.00275
0.222371
7,483
199
104
37.603015
0.8199
0
0
0.201258
0
0.006289
0.071094
0.006949
0
0
0
0
0.006289
1
0.119497
false
0
0.075472
0.062893
0.490566
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c744286930e6918cebec7544521adbaf000c03cc
4,265
py
Python
poi_mining/biz/LSA/logEntropy.py
yummydeli/machine_learning
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
[ "MIT" ]
1
2019-09-29T13:36:29.000Z
2019-09-29T13:36:29.000Z
poi_mining/biz/LSA/logEntropy.py
yummydeli/machine_learning
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
[ "MIT" ]
null
null
null
poi_mining/biz/LSA/logEntropy.py
yummydeli/machine_learning
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# encoding:utf-8
# ##############################################################################
# The MIT License (MIT)
#
# Copyright (c) [2015] [baidu.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ##############################################################################
"""Build the LogEntropy matrix and select suitable keywords from it."""

import glob
import collections
import math

import pandas
from sklearn.feature_extraction.text import CountVectorizer


class LogEntropy(object):
    """Compute log-entropy weights to obtain per-category keywords."""

    def __init__(self):
        self.fnames = glob.glob('data/segs/names.*')

    def extract_segs(self):
        """Collect word-segmentation results from the segmented files."""
        idx = []
        words = []
        for f in self.fnames:
            lines = []
            # Python 2 style: every odd line holds the segmented text, GBK-encoded
            for i, line in enumerate(open(f)):
                if i % 2 == 1:
                    non_int = '\t'.join([e for e in line.decode('GBK').rstrip('\n').split('\t')
                                         if not e.isdigit()])
                    lines.append(non_int)
            words.append('\t'.join(lines))
            idx.append(f.split('.')[1][1:])
        return words, idx

    def mk_document_term_matrix(self):
        """Build the document-term matrix (TDM)."""
        words, idx = self.extract_segs()
        countvec = CountVectorizer()
        dtm = pandas.DataFrame(countvec.fit_transform(words).toarray(),
                               columns=countvec.get_feature_names(),
                               index=idx)
        #       canting  faguo  riben  zhongwen
        # 1001        1      0      0         1
        # 991         1      0      1         0
        # 203         1      1      0         0
        return dtm

    def global_weighting(self, dtm):
        """1 - Entropy(words) / log(N)"""
        # normalized entropy for word
        pdtm = (dtm / dtm.sum(axis=0))
        ndocs = pdtm.shape[0]
        gw = 1 + (pdtm.applymap(lambda x: x * math.log(x) if x != 0 else 0).sum()
                  / math.log(ndocs))
        # canting     2.220446e-16
        # faguo       1.000000e+00
        # riben       1.000000e+00
        # zhongwen    1.000000e+00
        return gw

    def local_weighting(self, dtm):
        """math.log(freq + 1)"""
        lw = dtm.applymap(lambda freq: math.log(freq + 1))
        #       canting     faguo     riben  zhongwen
        # 1001  0.693147  0.000000  0.000000  0.693147
        # 991   0.693147  0.000000  0.693147  0.000000
        # 203   0.693147  0.693147  0.000000  0.000000
        return lw

    def logEntropyWeighting(self):
        """Compute the final log-entropy scores."""
        dtm = self.mk_document_term_matrix()
        #       canting        faguo     riben  zhongwen
        # 1001  1.539096e-16  0.000000  0.000000  0.693147
        # 991   1.539096e-16  0.000000  0.693147  0.000000
        # 203   1.539096e-16  0.693147  0.000000  0.000000
        logEntro = (self.global_weighting(dtm.copy()) *
                    self.local_weighting(dtm)).applymap(
            lambda x: 0 if x < 0.001 else x)
        logEntro.T.to_csv('data/keyWords.cates', sep='\t', encoding='UTF-8')


if __name__ == '__main__':
    lsaEntropy = LogEntropy()
    lsaEntropy.logEntropyWeighting()
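The global weight computed above is the standard log-entropy weight, gw_t = 1 + (sum_d p_td * log p_td) / log N: approximately 0 for a term spread evenly over all N documents and 1 for a term confined to a single document. A worked sketch on the 3x4 matrix from the docstrings (written for Python 3 and current pandas):

import math
import pandas as pd

# Document-term matrix from the docstring example
dtm = pd.DataFrame([[1, 0, 0, 1],
                    [1, 0, 1, 0],
                    [1, 1, 0, 0]],
                   columns=['canting', 'faguo', 'riben', 'zhongwen'],
                   index=['1001', '991', '203'])

p = dtm / dtm.sum(axis=0)                        # per-term distribution over documents
entropy = p.applymap(lambda x: x * math.log(x) if x else 0).sum()
gw = 1 + entropy / math.log(len(dtm))            # ~0 for 'canting', 1.0 for the rest
lw = dtm.applymap(lambda f: math.log(f + 1))     # local weight log(freq + 1)
print((gw * lw).round(6))                        # the uniform 'canting' column vanishes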
35.541667
100
0.557562
516
4,265
4.54845
0.410853
0.03579
0.027269
0.03579
0.11078
0.095441
0.040903
0
0
0
0
0.093126
0.30762
4,265
119
101
35.840336
0.701659
0.298476
0
0
0
0
0.032258
0
0
0
0
0
0
1
0.130435
false
0
0.108696
0
0.347826
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c746ec91b306e818609b2388a6f07e590b53157d
10,961
py
Python
a3/ga.py
mishless/LearningSystems
635d9af9d00ae0360d7ca8571bf47f782fdcdfe9
[ "MIT" ]
1
2021-08-01T03:30:49.000Z
2021-08-01T03:30:49.000Z
a3/ga.py
mishless/LearningSystems
635d9af9d00ae0360d7ca8571bf47f782fdcdfe9
[ "MIT" ]
null
null
null
a3/ga.py
mishless/LearningSystems
635d9af9d00ae0360d7ca8571bf47f782fdcdfe9
[ "MIT" ]
null
null
null
# Genetic Algorithm for solving the Traveling Salesman problem # Authors: Mihaela Stoycheva, Vukan Turkulov # Includes import configparser import math import matplotlib.pyplot as plt import numpy import random import sys from operator import itemgetter #Global variables(yay!) # Configuration variables(read from config.txt) mutation_rate = 0; population_size = 0; elitism_rate = 0; tournament_rate = 0; max_iterations = 0; input_file_name = ""; parent_rate = 0; # General global variables cities = {}; number_of_cities = 0; parent_number = 0; tournament_size = 0; elite_number = 0; crossover_number = 0; def read_config(): global mutation_rate; global elitism_rate; global tournament_rate; global population_size; global input_file_name; global max_iterations; global parent_rate; global parent_number; global tournament_size; global elite_number; global crossover_number; config = configparser.ConfigParser(); config.read("config.txt"); mutation_rate = float(config['general']['mutation_rate']); population_size = int(config['general']['population_size']); elitism_rate = float(config['general']['elitism_rate']); tournament_rate = float(config['general']['tournament_rate']); max_iterations = int(config['general']['max_iterations']); parent_rate = float(config['general']['parent_rate']); input_file_name = config['general']['input_file_name']; parent_number = int(population_size * parent_rate); elite_number = int(population_size * elitism_rate); tournament_size = int(population_size * tournament_rate); crossover_number = population_size - elite_number; def print_config(): print("***** CONFIGURATION *****"); print_var("Population size", population_size); print_var("Elitism rate", elitism_rate); print_var("Tournament rate", tournament_rate); print_var("Mutation rate", mutation_rate); print_var("Parent rate", parent_rate); print_var("Iteration number", max_iterations); print(""); print_var("Tournament size", tournament_size); print_var("Parent number", parent_number); print_var("Elite number", elite_number); print_var("Crossover number", crossover_number); print(""); def read_input_file(): global number_of_cities; file = open(input_file_name, "r"); file_lines = file.readlines(); file.close(); for file_line in file_lines: temp = file_line.split(); cities[int(temp[0])] = {'x' : float(temp[1]), 'y' : float(temp[2])}; number_of_cities = len(cities); def get_distance(city1, city2): return math.sqrt( ((city1['x']-city2['x'])**2) + ((city1['y']-city2['y'])**2)); def print_cities(): print("***** CITIES *****"); for key, city in cities.items(): print("#" + "%2s" % str(key) + ": (" + "%6s" % str(city['x']) + ', ' + "%6s" % str(city['y']) + ')'); print(""); def print_var(name, var): print(name + ":" + " "*(17-len(name)) + str(var)); def init(): read_config(); read_input_file(); print_config(); def create_random_individual(): individual = []; # We must begin at first city individual.append(1); # Create list of city indexes indexes = list(range(2,number_of_cities+1)); while len(indexes) > 0: picked_index = random.choice(indexes); indexes.remove(picked_index); individual.append(picked_index); # We must end at first city individual.append(1); return individual; def print_population(population, name): print("***** POPULATION: " + name + " *****"); print("Population size = " + str(len(population))); i = 0; for individual in population: print("IND #" + str(i) + ": " + str(individual)); i += 1; def print_population_2(population, name): print("***** POPULATION: " + name + " *****"); print("Population size = " + 
str(len(population))); i = 0; for individual in population: print("IND #" + str(i) + " distance = " + str(evaluate_individual(individual))); i += 1; print(""); def print_population_3(population, name): print("***** POPULATION: " + name + " *****"); print("Population size = " + str(len(population))); for individual in population: print(str(individual) + ": distance = " + str(evaluate_individual(individual))); print(""); def create_random_population(population_size): population = []; for i in range(0, population_size): population.append(create_random_individual()); return population; def evaluate_individual(individual): distance_traveled = 0; for i in range(0, len(individual)-1): distance_traveled = (distance_traveled + get_distance(cities[individual[i]], cities[individual[i+1]])); return distance_traveled; def evaluate_population(population): evaluations = []; for individual in population: evaluations.append((evaluate_individual(individual), individual)); return evaluations; def select_tournament_pool(data): tournament_pool = []; indexes = list(range(0, len(data))); for i in range(0, tournament_size): chosen_index = random.choice(indexes); tournament_pool.append(data[chosen_index]); indexes.remove(chosen_index); return tournament_pool; def best_solution(pool): best_individual = {'eval' : sys.float_info.max}; for individual in pool: if individual['eval'] < best_individual['eval']: best_individual = individual; return best_individual; def run_tournament(pool): return best_solution(pool); def merge_popul_and_eval(population, evaluations): data = []; for i in range(0, len(population)): data.append({'ind' : population[i], 'eval' : evaluations[i]}); return data; def select_parent_pool(population, evaluations): parent_pool = []; data = merge_popul_and_eval(population, evaluations); for i in range(0, parent_number): tournament_pool = select_tournament_pool(data); parent = run_tournament(tournament_pool); parent_pool.append(parent['ind']); data.remove(parent); return parent_pool; def is_individual_valid(individual): if(len(individual) != (number_of_cities+1)): print("INVALID " + str(individual)); return False; if(individual[0] != 1): print("INVALID " + str(individual)); return False; if(individual[-1] != 1): print("INVALID " + str(individual)); return False; for city in individual: if city == 1: if individual.count(city) != 2: print("INVALID " + str(individual)); return False; else: if individual.count(city) != 1: print("INVALID " + str(individual)); return False; return True; def is_population_valid(population): for individual in population: if is_individual_valid(individual) == False: return False; return True; def create_child(parent1, parent2): l = len(parent1); x = random.randint(1, l-1); y = random.randint(x, l-1); child = []; extract = parent1[x:y]; """print_var("P1", parent1); print_var("P2", parent2); print_var("x", x); print_var("y", y); print_var("Extract", extract);""" i = 0; for j in range(0, x): while(parent2[i] in extract): i += 1; child.append(parent2[i]); i += 1; child.extend(extract); for j in range(y, l): while(parent2[i] in extract): i += 1; child.append(parent2[i]); i += 1; return child; def generate_children(parent_pool, child_num): children = []; for i in range(0, child_num): parent1 = random.choice(parent_pool); parent_pool.remove(parent1); parent2 = random.choice(parent_pool); parent_pool.append(parent1); new_child = create_child(parent1, parent2); children.append(new_child); return children; def generate_elites(population, evaluations, number): data = 
merge_popul_and_eval(population, evaluations); elites = []; for i in range(0, number): best = best_solution(data); elites.append(best['ind']); data.remove(best); return elites; def mutate_individual(individual): i = random.randint(1, len(individual)-2); j = i; while j == i: j = random.randint(1, len(individual)-2); individual[i], individual[j] = individual[j], individual[i]; def mutate_population(population): for individual in population: if random.random() < mutation_rate: mutate_individual(individual); def test_stuff(): """ p1 = "abcdefg"; p2 = "1234567"; for i in range(0,10): print(create_child(p1,p2)); ind = [1,2,3,4,5,6]; print("Before", ind); mutate_individual(ind); print("After", ind); exit();""" def perform_GA(): best_solutions = []; best_individuals = []; best_solution = None; #print("***** ALGORITHM START *****"); population = create_random_population(population_size); iteration_counter = 1; while True: #print("Running iteration " + str(iteration_counter) + ":"); evaluations = evaluate_population(population); best_solution = min(evaluations, key=lambda evaluation:evaluation[0]) best_solutions.append(best_solution[0]); best_individuals.append(best_solution[1]); evaluations = [evaluation[0] for evaluation in evaluations] if iteration_counter == max_iterations: break; parent_pool = select_parent_pool(population, evaluations); children = generate_children(parent_pool, crossover_number); mutate_population(children); elites = generate_elites(population, evaluations, elite_number); # Prepare population for the next iteration population = children + elites; iteration_counter += 1; if is_population_valid(population) == False: break; return (best_solutions, best_individuals); def do_what_needs_to_be_done(): results = []; bests = []; print("***** ALGORITHM START *****"); sys.stdout.flush() for i in range(0, 10): print("Starting cycle " + str(i+1)); results.append(perform_GA()); bests.append((results[i][0][-1], results[i][1][-1])); best_ind = bests.index(min(bests, key=lambda best:best[0])); print(str(best_ind)); print("***** RESULTS *****"); print("Best result is " + str(bests[best_ind][0])); print("Best result is " + str(bests[best_ind][1])); plt.plot(results[best_ind][0]); plt.show(); #main init(); do_what_needs_to_be_done()
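create_child above is an order-crossover variant: a random slice of parent1 is copied intact and the remaining positions are filled with parent2's cities in their original order, skipping cities already in the slice. A deterministic illustration with the random cut points made explicit (the depot handling, tours that start and end at city 1, is ignored here):

def order_crossover(parent1, parent2, x, y):
    """create_child with explicit cut points instead of random.randint."""
    extract = parent1[x:y]
    child, i = [], 0
    for _ in range(0, x):                 # head: parent2 order, slice cities skipped
        while parent2[i] in extract:
            i += 1
        child.append(parent2[i])
        i += 1
    child.extend(extract)                 # middle: slice copied from parent1
    for _ in range(y, len(parent1)):      # tail: remaining parent2 cities
        while parent2[i] in extract:
            i += 1
        child.append(parent2[i])
        i += 1
    return child

print(order_crossover([1, 2, 3, 4, 5, 6], [6, 5, 4, 3, 2, 1], 2, 4))  # [6, 5, 3, 4, 2, 1]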
26.159905
77
0.624487
1,283
10,961
5.147311
0.145752
0.033919
0.012114
0.014991
0.224258
0.172926
0.110539
0.08086
0.071169
0.05633
0
0.015013
0.234285
10,961
418
78
26.222488
0.771834
0.056564
0
0.183453
0
0
0.06716
0
0
0
0
0
0
1
0.104317
false
0
0.02518
0.007194
0.208633
0.165468
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c74916514901ff1d3dbfb832b264c70329520805
3,063
py
Python
src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/ha_proxy/custom_attributes/haproxy_validator.py
jnpr-pranav/contrail-controller
428eee37c28c31830fd764315794e1a6e52720c1
[ "Apache-2.0" ]
37
2020-09-21T10:42:26.000Z
2022-01-09T10:16:40.000Z
src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/ha_proxy/custom_attributes/haproxy_validator.py
jnpr-pranav/contrail-controller
428eee37c28c31830fd764315794e1a6e52720c1
[ "Apache-2.0" ]
null
null
null
src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/ha_proxy/custom_attributes/haproxy_validator.py
jnpr-pranav/contrail-controller
428eee37c28c31830fd764315794e1a6e52720c1
[ "Apache-2.0" ]
21
2020-08-25T12:48:42.000Z
2022-03-22T04:32:18.000Z
from builtins import str
from builtins import range
from builtins import object
import logging
import inspect
import os


class CustomAttr(object):
    """This type handles non-flat data-types like int, str, bool."""
    def __init__(self, key, value):
        self._value = value
        self._key = key

    def validate(self):
        pass

    def post_validation(self):
        pass


class CustomAttrTlsContainer(CustomAttr):
    def __init__(self, key, value):
        super(CustomAttrTlsContainer, self).__init__(key, value)

    def validate(self):
        return True

    def post_validation(self):
        return self._value


def validate_custom_attributes(custom_attributes_dict, section,
                               custom_attributes):
    section_dict = {}
    if custom_attributes and section in custom_attributes_dict:
        for key, value in list(custom_attributes.items()):
            if key in custom_attributes_dict[section]:
                # Sanitize the value
                try:
                    type_attr = custom_attributes_dict[section][key]['type']
                    limits = custom_attributes_dict[section][key]['limits']
                    if type_attr == 'int':
                        value = int(value)
                        if value in range(limits[0], limits[1]):
                            section_dict.update({key: value})
                        else:
                            # note the space before "validation": the original string
                            # concatenation logged "due tovalidation failure"
                            logging.info("Skipping key: %s, value: %s due to "
                                         "validation failure" % (key, value))
                    elif type_attr == 'str':
                        if len(value) in range(limits[0], limits[1]):
                            section_dict.update({key: value})
                        else:
                            logging.info("Skipping key: %s, value: %s due to "
                                         "validation failure" % (key, value))
                    elif type_attr == 'bool':
                        if value in limits:
                            # haproxy convention: a true flag renders as the bare
                            # keyword, a false flag is prefixed with "no "
                            if value == 'True':
                                value = ''
                            elif value == 'False':
                                value = 'no '
                            section_dict.update({key: value})
                        else:
                            logging.info("Skipping key: %s, value: %s due to "
                                         "validation failure" % (key, value))
                    elif inspect.isclass(eval(type_attr)):
                        new_custom_attr = eval(type_attr)(key, value)
                        if new_custom_attr.validate():
                            value = new_custom_attr.post_validation()
                            section_dict.update({key: value})
                        else:
                            logging.info("Skipping key: %s, value: %s due to "
                                         "validation failure" % (key, value))
                except Exception as e:
                    logging.error(str(e))
                    continue
    return section_dict
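A call sketch for validate_custom_attributes; the schema below (section name, attribute names, limits) is invented for illustration:

custom_attributes_dict = {
    'global': {
        'max_conn': {'type': 'int', 'limits': [1, 65535]},
        'ssl_ciphers': {'type': 'str', 'limits': [1, 100]},
    }
}
custom_attributes = {'max_conn': '500', 'ssl_ciphers': 'AES128-SHA', 'bogus': 'x'}

print(validate_custom_attributes(custom_attributes_dict, 'global', custom_attributes))
# {'max_conn': 500, 'ssl_ciphers': 'AES128-SHA'}  ('bogus' is not in the schema and is ignored)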
39.269231
79
0.479595
291
3,063
4.872852
0.247423
0.073343
0.070522
0.076164
0.373766
0.304654
0.304654
0.304654
0.304654
0.304654
0
0.002307
0.433888
3,063
77
80
39.779221
0.815456
0.025139
0
0.369231
0
0
0.080863
0
0
0
0
0
0
1
0.107692
false
0.030769
0.092308
0.030769
0.276923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c74949362f59fa0673a80dd80fbdd7f5a0af70d8
1,405
py
Python
python/janitor/typecache.py
monkeyman79/janitor
a41187c1b58b736a5de2b0b30eb51d85a65b17c3
[ "MIT" ]
2
2018-11-06T13:02:27.000Z
2021-02-22T19:07:22.000Z
python/janitor/typecache.py
monkeyman79/janitor
a41187c1b58b736a5de2b0b30eb51d85a65b17c3
[ "MIT" ]
1
2016-09-28T12:24:43.000Z
2016-09-28T13:47:35.000Z
python/janitor/typecache.py
monkeyman79/janitor
a41187c1b58b736a5de2b0b30eb51d85a65b17c3
[ "MIT" ]
null
null
null
import gdb


class TypeCache(object):
    """Caches gdb.Type lookups by type name."""

    def __init__(self):
        self.cache = {}
        self.intptr_type = False  # False = not resolved yet; None = unavailable

    def clear(self):
        self.cache = {}
        self.intptr_type = False

    def get_type(self, typename):
        if typename in self.cache:
            return self.cache[typename]
        try:
            gdb_type = gdb.lookup_type(typename)
            self.cache[typename] = gdb_type
            return gdb_type
        except Exception:
            pass
        try:
            # Fall back to evaluating a null-pointer cast and taking its target type
            proto = gdb.parse_and_eval("(%s*)0" % typename)
            gdb_type = proto.type.target()
            self.cache[typename] = gdb_type
            return gdb_type
        except Exception:
            pass
        return None

    def get_intptr_type(self):
        """Return the narrowest unsigned integer type wide enough to hold a pointer."""
        if self.intptr_type is not False:
            return self.intptr_type
        ptr_type = self.get_type("void*")
        if ptr_type is None:
            self.intptr_type = None
            return None
        ulong_type = self.get_type("unsigned long")
        if ulong_type is None:
            self.intptr_type = None
            return None
        if ulong_type.sizeof >= ptr_type.sizeof:
            self.intptr_type = ulong_type
            return ulong_type
        ullong_type = self.get_type("unsigned long long")
        self.intptr_type = ullong_type
        return ullong_type


cache = TypeCache()
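This module only runs inside a GDB session with Python scripting enabled; a sketch of intended use from GDB's Python REPL (how the janitor package is loaded, and therefore the import line, is an assumption):

# (gdb) python
import typecache

t = typecache.cache.get_type("unsigned long")
if t is not None:
    print(t.sizeof)                        # e.g. 8 on LP64 targets
print(typecache.cache.get_intptr_type())   # unsigned type wide enough for a pointer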
26.509434
59
0.540925
160
1,405
4.5125
0.23125
0.124654
0.155125
0.078947
0.401662
0.401662
0.32687
0.32687
0.130194
0.130194
0
0.001148
0.380071
1,405
52
60
27.019231
0.827784
0
0
0.44186
0
0
0.029915
0
0
0
0
0
0
1
0.093023
false
0.046512
0.023256
0
0.348837
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c74a04a139575fe8c546ea452d0215d058b4fa6f
805
py
Python
key_phrase.py
Santara/autoSLR
8c524b8a0023d1434cb7be4e110103605d0d2cab
[ "MIT" ]
1
2020-08-12T23:17:38.000Z
2020-08-12T23:17:38.000Z
key_phrase.py
Santara/autoSLR
8c524b8a0023d1434cb7be4e110103605d0d2cab
[ "MIT" ]
null
null
null
key_phrase.py
Santara/autoSLR
8c524b8a0023d1434cb7be4e110103605d0d2cab
[ "MIT" ]
1
2019-08-29T09:36:46.000Z
2019-08-29T09:36:46.000Z
import os
import sys
import operator
import pprint

import rake  # RAKE-tutorial keyword extractor

directory = sys.argv[1]
outfile = open("key_phrases.csv", "w")
files = {}  # never populated in the original script

for filename in os.listdir(directory):
    with open(os.path.join(directory, filename)) as f:
        text = [l.strip() for l in f if len(l.strip()) > 2]
    data = ''
    for t in text:
        if len(t.split()) > 1:
            data = data + '. ' + t.strip()
    # Keep letters and spaces only. (The original lowercase whitelist stopped
    # at 'y', silently dropping every 'z'.)
    whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    answer = ''.join(filter(whitelist.__contains__, data))
    answer = ' '.join(answer.split())

# Note: as reconstructed, only the `answer` from the last file in the
# directory reaches the extractor below.
rake_object = rake.Rake(
    "/home/ashutosh/Sudeshna/RAKE-tutorial/data/stoplists/SmartStoplist.txt",
    3, 3, 1)
pp = pprint.PrettyPrinter()
keywords = rake_object.run(answer)
for entry in keywords:
    outfile.write("%s, %s\n" % (entry[0], str(entry[1])))
outfile.close()
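The rake module here is the RAKE-tutorial implementation referenced by the stoplist path; my reading of Rake's positional parameters (minimum characters per word, maximum words per phrase, minimum phrase frequency) is an assumption. A toy call:

import rake

r = rake.Rake("SmartStoplist.txt", 3, 3, 1)  # assumed: min chars, max words per phrase, min frequency
for phrase, score in r.run("graph neural networks improve search over graph neural networks"):
    print(phrase, score)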
25.15625
105
0.695652
117
805
4.726496
0.521368
0.028933
0
0
0
0
0
0
0
0
0
0.011494
0.135404
805
31
106
25.967742
0.783046
0
0
0
0
0
0.185093
0.150311
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0.08
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c74ab0b0f80631d9cb06c8040217e1f860dd10c2
1,127
py
Python
tests/test_utils.py
aced-differentiate/dft-input-gen
14bee323517714c433682bad2dcb897b223dd5ec
[ "Apache-2.0" ]
1
2021-04-15T09:54:52.000Z
2021-04-15T09:54:52.000Z
tests/test_utils.py
CitrineInformatics/dft-input-gen
14bee323517714c433682bad2dcb897b223dd5ec
[ "Apache-2.0" ]
1
2021-01-28T22:12:07.000Z
2021-01-28T22:12:07.000Z
tests/test_utils.py
aced-differentiate/dft-input-gen
14bee323517714c433682bad2dcb897b223dd5ec
[ "Apache-2.0" ]
2
2020-12-08T18:14:13.000Z
2020-12-18T19:01:11.000Z
"""Unit tests for helper utilities in :mod:`dftinputgen.utils`.""" import os import pytest from ase import io as ase_io from dftinputgen.utils import get_elem_symbol from dftinputgen.utils import read_crystal_structure from dftinputgen.utils import get_kpoint_grid_from_spacing from dftinputgen.utils import DftInputGeneratorUtilsError test_base_dir = os.path.dirname(__file__) feo_conv_file = os.path.join(test_base_dir, "qe", "files", "feo_conv.vasp") feo_conv = ase_io.read(feo_conv_file) def test_get_elem_symbol(): assert get_elem_symbol("Fe-34") == "Fe" assert get_elem_symbol("3RGe-34") == "Ge" with pytest.raises(DftInputGeneratorUtilsError): get_elem_symbol("G23") def test_read_crystal_structure(): # str with path to crystal structure file is OK cs = read_crystal_structure(feo_conv_file) assert cs == feo_conv # any other type of input should throw an error with pytest.raises(TypeError): read_crystal_structure(feo_conv) def test_kpoint_grid_from_spacing(): assert get_kpoint_grid_from_spacing(feo_conv, 0.2) == pytest.approx( [7, 7, 7] )
28.897436
75
0.754215
168
1,127
4.744048
0.392857
0.070263
0.138018
0.130489
0.196989
0
0
0
0
0
0
0.012618
0.156167
1,127
38
76
29.657895
0.825447
0.135759
0
0
0
0
0.040331
0
0
0
0
0
0.166667
1
0.125
false
0
0.291667
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c74b3631946b737bd9c4684c29b89101e0d8c544
6,044
py
Python
core/models.py
nforesperance/Django-Channels-ChatApp
b244954206214f7dc1b8793291d957a5bf80f0e2
[ "MIT" ]
2
2020-07-18T05:19:36.000Z
2020-07-18T05:19:38.000Z
core/models.py
nforesperance/Django-Channels-ChatApp
b244954206214f7dc1b8793291d957a5bf80f0e2
[ "MIT" ]
4
2021-03-19T02:37:45.000Z
2021-06-04T23:02:41.000Z
core/models.py
nforesperance/Django-Channels-ChatApp
b244954206214f7dc1b8793291d957a5bf80f0e2
[ "MIT" ]
null
null
null
from django.contrib.auth.models import User from django.db.models import (Model, TextField, DateTimeField, ForeignKey, CASCADE) from asgiref.sync import async_to_sync from channels.layers import get_channel_layer from django.db import models import json class MessageModel(Model): """ This class represents a chat message. It has a owner (user), timestamp and the message body. """ user = ForeignKey(User, on_delete=CASCADE, verbose_name='user', related_name='from_user', db_index=True) recipient = ForeignKey(User, on_delete=CASCADE, verbose_name='recipient', related_name='to_user', db_index=True) timestamp = DateTimeField('timestamp', auto_now_add=True, editable=False, db_index=True) body = TextField('body') def __str__(self): return str(self.id) def characters(self): """ Toy function to count body characters. :return: body's char number """ return len(self.body) def notify_ws_clients(self): """ Inform client there is a new message. """ notification = { 'type': 'chat_message', 'message': '{}'.format(self.id) } channel_layer = get_channel_layer() print("user.id {}".format(self.user.id)) print("user.id {}".format(self.recipient.id)) async_to_sync(channel_layer.group_send)("{}".format(self.user.id), notification) async_to_sync(channel_layer.group_send)("{}".format(self.recipient.id), notification) def save(self, *args, **kwargs): """ Trims white spaces, saves the message and notifies the recipient via WS if the message is new. """ new = self.id self.body = self.body.strip() # Trimming whitespaces from the body super(MessageModel, self).save(*args, **kwargs) if new is None: self.notify_ws_clients() # Meta class Meta: app_label = 'core' verbose_name = 'message' verbose_name_plural = 'messages' ordering = ('-timestamp',) class Group(models.Model): name = models.CharField(max_length = 20) members = models.TextField() messages = models.TextField () def set_members(self,user_id_list): self.members = json.dumps(user_id_list) def get_members(self): return json.loads(self.members) def add(self,user_id): current_list = self.get_members() if user_id in current_list: print("user is already in the group") else: new_list = current_list.append(user_id) self.set_members(new_list) def remove(self,user_id): current_list = self.get_members() if user_id in current_list: new_list = current_list.remove(user_id) self.set_members(new_list) else: print("User is not a member of theis group") def has(self,user_id): current_list = self.get_members() return(user_id in current_list) # Set of functions for dealing with group messages def set_messages(self,message_id_list): self.messages = json.dumps(message_id_list) def get_messages(self): return json.loads(self.messages) def add_message(self,message_id): current_list = self.get_messages() new_list = current_list.append(message_id) self.set_messages(new_list) def delete_message(self,message_id): current_list = self.get_messages() if message_id in current_list: new_list = current_list.remove(message_id) self.set_messages(new_list) def save(self, *args, **kwargs): if self.pk is None or self.members is None or self.members == '': self.set_members([]) if self.pk is None or self.messages is None or self.messages == '': self.set_messages([]) super(Group, self).save(*args, **kwargs) def __str__(self): return self.name+" ID: "+str(self.id) # Meta class Meta: app_label = 'core' verbose_name = 'Group' verbose_name_plural = 'Groups' ordering = ('name',) class GroupMessage(Model): """ This class represents a chat message. It has a owner (user), timestamp and the message body. 
""" sender = ForeignKey(User, on_delete=CASCADE, verbose_name='sender', related_name='from_sender', db_index=True) group = ForeignKey(Group, on_delete=CASCADE, verbose_name='group', related_name='to_group', db_index=True) time = DateTimeField('time', auto_now_add=True, editable=False, db_index=True) body = TextField('body') def __str__(self): return str(self.id) def characters(self): """ Toy function to count body characters. :return: body's char number """ return len(self.body) def notify_ws_clients(self): """ Inform client there is a new message. """ notification = { 'type': 'group_message', 'group': '{}'.format(self.id) } channel_layer = get_channel_layer() group_id = "group"+str(self.group.id) print("group.id {}".format(group_id)) async_to_sync(channel_layer.group_send)(group_id, notification) def save(self, *args, **kwargs): """ Trims white spaces, saves the message and notifies the recipient via WS if the message is new. """ new = self.id self.body = self.body.strip() # Trimming whitespaces from the body super(GroupMessage, self).save(*args, **kwargs) if new is None: self.notify_ws_clients() # Meta class Meta: app_label = 'core' verbose_name = 'group message' verbose_name_plural = 'group messags' ordering = ('-time',)
32.67027
93
0.603077
741
6,044
4.723347
0.174089
0.024
0.018857
0.024286
0.635714
0.563429
0.563429
0.502286
0.440286
0.358857
0
0.000467
0.290702
6,044
184
94
32.847826
0.815955
0.119126
0
0.398374
0
0
0.062647
0
0
0
0
0
0
1
0.154472
false
0
0.04878
0.04065
0.398374
0.04065
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c74bed1c84a21dce43450d469d8869b0372e61e0
15,798
py
Python
backup/model.py
jsikyoon/ASNP-RMR
ddd3e586b01ba3a7f8b3721582aca7403649400e
[ "MIT" ]
8
2020-07-21T02:49:54.000Z
2021-09-28T02:22:37.000Z
backup/model.py
jsikyoon/ASNP-RMR
ddd3e586b01ba3a7f8b3721582aca7403649400e
[ "MIT" ]
null
null
null
backup/model.py
jsikyoon/ASNP-RMR
ddd3e586b01ba3a7f8b3721582aca7403649400e
[ "MIT" ]
1
2020-09-02T06:39:49.000Z
2020-09-02T06:39:49.000Z
import tensorflow as tf import numpy as np # utility methods def batch_mlp(input, output_sizes, variable_scope): """Apply MLP to the final axis of a 3D tensor (reusing already defined MLPs). Args: input: input tensor of shape [B,n,d_in]. output_sizes: An iterable containing the output sizes of the MLP as defined in `basic.Linear`. variable_scope: String giving the name of the variable scope. If this is set to be the same as a previously defined MLP, then the weights are reused. Returns: tensor of shape [B,n,d_out] where d_out=output_sizes[-1] """ # Get the shapes of the input and reshape to parallelise across observations batch_size, _, filter_size = input.shape.as_list() output = tf.reshape(input, (-1, filter_size)) output.set_shape((None, filter_size)) # Pass through MLP with tf.variable_scope(variable_scope, reuse=tf.AUTO_REUSE): for i, size in enumerate(output_sizes[:-1]): output = tf.nn.relu( tf.layers.dense(output, size, name="layer_{}".format(i))) # Last layer without a ReLu output = tf.layers.dense( output, output_sizes[-1], name="layer_{}".format(i + 1)) # Bring back into original shape output = tf.reshape(output, (batch_size, -1, output_sizes[-1])) return output class DeterministicEncoder(object): """The Deterministic Encoder.""" def __init__(self, output_sizes, attention): """(A)NP deterministic encoder. Args: output_sizes: An iterable containing the output sizes of the encoding MLP. attention: The attention module. """ self._output_sizes = output_sizes self._attention = attention def __call__(self, context_x, context_y, target_x): """Encodes the inputs into one representation. Args: context_x: Tensor of shape [B,observations,d_x]. For this 1D regression task this corresponds to the x-values. context_y: Tensor of shape [B,observations,d_y]. For this 1D regression task this corresponds to the y-values. target_x: Tensor of shape [B,target_observations,d_x]. For this 1D regression task this corresponds to the x-values. Returns: The encoded representation. Tensor of shape [B,target_observations,d] """ # Concatenate x and y along the filter axes encoder_input = tf.concat([context_x, context_y], axis=-1) # Pass final axis through MLP hidden = batch_mlp(encoder_input, self._output_sizes, "deterministic_encoder") # Apply attention with tf.variable_scope("deterministic_encoder", reuse=tf.AUTO_REUSE): hidden = self._attention(context_x, target_x, hidden) return hidden class LatentEncoder(object): """The Latent Encoder.""" def __init__(self, output_sizes, num_latents): """(A)NP latent encoder. Args: output_sizes: An iterable containing the output sizes of the encoding MLP. num_latents: The latent dimensionality. """ self._output_sizes = output_sizes self._num_latents = num_latents def __call__(self, x, y): """Encodes the inputs into one representation. Args: x: Tensor of shape [B,observations,d_x]. For this 1D regression task this corresponds to the x-values. y: Tensor of shape [B,observations,d_y]. For this 1D regression task this corresponds to the y-values. 
Returns: A normal distribution over tensors of shape [B, num_latents] """ # Concatenate x and y along the filter axes encoder_input = tf.concat([x, y], axis=-1) # Pass final axis through MLP hidden = batch_mlp(encoder_input, self._output_sizes, "latent_encoder") # Aggregator: take the mean over all points hidden = tf.reduce_mean(hidden, axis=1) # Have further MLP layers that map to the parameters of the Gaussian latent with tf.variable_scope("latent_encoder", reuse=tf.AUTO_REUSE): # First apply intermediate relu layer hidden = tf.nn.relu( tf.layers.dense(hidden, (self._output_sizes[-1] + self._num_latents)/2, name="penultimate_layer")) # Then apply further linear layers to output latent mu and log sigma mu = tf.layers.dense(hidden, self._num_latents, name="mean_layer") log_sigma = tf.layers.dense(hidden, self._num_latents, name="std_layer") # Compute sigma sigma = 0.1 + 0.9 * tf.sigmoid(log_sigma) return tf.contrib.distributions.Normal(loc=mu, scale=sigma) class Decoder(object): """The Decoder.""" def __init__(self, output_sizes): """(A)NP decoder. Args: output_sizes: An iterable containing the output sizes of the decoder MLP as defined in `basic.Linear`. """ self._output_sizes = output_sizes def __call__(self, representation, target_x): """Decodes the individual targets. Args: representation: The representation of the context for target predictions. Tensor of shape [B,target_observations,?]. target_x: The x locations for the target query. Tensor of shape [B,target_observations,d_x]. Returns: dist: A multivariate Gaussian over the target points. A distribution over tensors of shape [B,target_observations,d_y]. mu: The mean of the multivariate Gaussian. Tensor of shape [B,target_observations,d_x]. sigma: The standard deviation of the multivariate Gaussian. Tensor of shape [B,target_observations,d_x]. """ # concatenate target_x and representation hidden = tf.concat([representation, target_x], axis=-1) # Pass final axis through MLP hidden = batch_mlp(hidden, self._output_sizes, "decoder") # Get the mean an the variance mu, log_sigma = tf.split(hidden, 2, axis=-1) # Bound the variance sigma = 0.1 + 0.9 * tf.nn.softplus(log_sigma) # Get the distribution dist = tf.contrib.distributions.MultivariateNormalDiag( loc=mu, scale_diag=sigma) return dist, mu, sigma class LatentModel(object): """The (A)NP model.""" def __init__(self, latent_encoder_output_sizes, num_latents, decoder_output_sizes, use_deterministic_path=True, deterministic_encoder_output_sizes=None, attention=None): """Initialises the model. Args: latent_encoder_output_sizes: An iterable containing the sizes of hidden layers of the latent encoder. num_latents: The latent dimensionality. decoder_output_sizes: An iterable containing the sizes of hidden layers of the decoder. The last element should correspond to d_y * 2 (it encodes both mean and variance concatenated) use_deterministic_path: a boolean that indicates whether the deterministic encoder is used or not. deterministic_encoder_output_sizes: An iterable containing the sizes of hidden layers of the deterministic encoder. The last one is the size of the deterministic representation r. attention: The attention module used in the deterministic encoder. Only relevant when use_deterministic_path=True. 
""" self._latent_encoder = LatentEncoder(latent_encoder_output_sizes, num_latents) self._decoder = Decoder(decoder_output_sizes) self._use_deterministic_path = use_deterministic_path if use_deterministic_path: self._deterministic_encoder = DeterministicEncoder( deterministic_encoder_output_sizes, attention) def __call__(self, query, num_targets, target_y=None): """Returns the predicted mean and variance at the target points. Args: query: Array containing ((context_x, context_y), target_x) where: context_x: Tensor of shape [B,num_contexts,d_x]. Contains the x values of the context points. context_y: Tensor of shape [B,num_contexts,d_y]. Contains the y values of the context points. target_x: Tensor of shape [B,num_targets,d_x]. Contains the x values of the target points. num_targets: Number of target points. target_y: The ground truth y values of the target y. Tensor of shape [B,num_targets,d_y]. Returns: log_p: The log_probability of the target_y given the predicted distribution. Tensor of shape [B,num_targets]. mu: The mean of the predicted distribution. Tensor of shape [B,num_targets,d_y]. sigma: The variance of the predicted distribution. Tensor of shape [B,num_targets,d_y]. """ (context_x, context_y), target_x = query # Pass query through the encoder and the decoder prior = self._latent_encoder(context_x, context_y) # For training, when target_y is available, use targets for latent encoder. # Note that targets contain contexts by design. if target_y is None: latent_rep = prior.sample() # For testing, when target_y unavailable, use contexts for latent encoder. else: posterior = self._latent_encoder(target_x, target_y) latent_rep = posterior.sample() latent_rep = tf.tile(tf.expand_dims(latent_rep, axis=1), [1, num_targets, 1]) if self._use_deterministic_path: deterministic_rep = self._deterministic_encoder(context_x, context_y, target_x) representation = tf.concat([deterministic_rep, latent_rep], axis=-1) else: representation = latent_rep dist, mu, sigma = self._decoder(representation, target_x) # If we want to calculate the log_prob for training we will make use of the # target_y. At test time the target_y is not available so we return None. if target_y is not None: log_p = dist.log_prob(target_y) posterior = self._latent_encoder(target_x, target_y) kl = tf.reduce_sum( tf.contrib.distributions.kl_divergence(posterior, prior), axis=-1, keepdims=True) kl = tf.tile(kl, [1, num_targets]) loss = - tf.reduce_mean(log_p - kl / tf.cast(num_targets, tf.float32)) else: log_p = None kl = None loss = None return mu, sigma, log_p, kl, loss def uniform_attention(q, v): """Uniform attention. Equivalent to np. Args: q: queries. tensor of shape [B,m,d_k]. v: values. tensor of shape [B,n,d_v]. Returns: tensor of shape [B,m,d_v]. """ total_points = tf.shape(q)[1] rep = tf.reduce_mean(v, axis=1, keepdims=True) # [B,1,d_v] rep = tf.tile(rep, [1, total_points, 1]) return rep def laplace_attention(q, k, v, scale, normalise): """Computes laplace exponential attention. Args: q: queries. tensor of shape [B,m,d_k]. k: keys. tensor of shape [B,n,d_k]. v: values. tensor of shape [B,n,d_v]. scale: float that scales the L1 distance. normalise: Boolean that determines whether weights sum to 1. Returns: tensor of shape [B,m,d_v]. 
""" k = tf.expand_dims(k, axis=1) # [B,1,n,d_k] q = tf.expand_dims(q, axis=2) # [B,m,1,d_k] unnorm_weights = - tf.abs((k - q) / scale) # [B,m,n,d_k] unnorm_weights = tf.reduce_sum(unnorm_weights, axis=-1) # [B,m,n] if normalise: weight_fn = tf.nn.softmax else: weight_fn = lambda x: 1 + tf.tanh(x) weights = weight_fn(unnorm_weights) # [B,m,n] rep = tf.einsum('bik,bkj->bij', weights, v) # [B,m,d_v] return rep def dot_product_attention(q, k, v, normalise): """Computes dot product attention. Args: q: queries. tensor of shape [B,m,d_k]. k: keys. tensor of shape [B,n,d_k]. v: values. tensor of shape [B,n,d_v]. normalise: Boolean that determines whether weights sum to 1. Returns: tensor of shape [B,m,d_v]. """ d_k = tf.shape(q)[-1] scale = tf.sqrt(tf.cast(d_k, tf.float32)) unnorm_weights = tf.einsum('bjk,bik->bij', k, q) / scale # [B,m,n] if normalise: weight_fn = tf.nn.softmax else: weight_fn = tf.sigmoid weights = weight_fn(unnorm_weights) # [B,m,n] rep = tf.einsum('bik,bkj->bij', weights, v) # [B,m,d_v] return rep def multihead_attention(q, k, v, num_heads=8): """Computes multi-head attention. Args: q: queries. tensor of shape [B,m,d_k]. k: keys. tensor of shape [B,n,d_k]. v: values. tensor of shape [B,n,d_v]. num_heads: number of heads. Should divide d_v. Returns: tensor of shape [B,m,d_v]. """ d_k = q.get_shape().as_list()[-1] d_v = v.get_shape().as_list()[-1] head_size = d_v / num_heads key_initializer = tf.random_normal_initializer(stddev=d_k**-0.5) value_initializer = tf.random_normal_initializer(stddev=d_v**-0.5) rep = tf.constant(0.0) for h in range(num_heads): o = dot_product_attention( tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer, name='wq%d' % h, use_bias=False, padding='VALID')(q), tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer, name='wk%d' % h, use_bias=False, padding='VALID')(k), tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer, name='wv%d' % h, use_bias=False, padding='VALID')(v), normalise=True) rep += tf.layers.Conv1D(d_v, 1, kernel_initializer=value_initializer, name='wo%d' % h, use_bias=False, padding='VALID')(o) return rep class Attention(object): """The Attention module.""" def __init__(self, rep, output_sizes, att_type, scale=1., normalise=True, num_heads=8): """Create attention module. Takes in context inputs, target inputs and representations of each context input/output pair to output an aggregated representation of the context data. Args: rep: transformation to apply to contexts before computing attention. One of: ['identity','mlp']. output_sizes: list of number of hidden units per layer of mlp. Used only if rep == 'mlp'. att_type: type of attention. One of the following: ['uniform','laplace','dot_product','multihead'] scale: scale of attention. normalise: Boolean determining whether to: 1. apply softmax to weights so that they sum to 1 across context pts or 2. apply custom transformation to have weights in [0,1]. num_heads: number of heads for multihead. """ self._rep = rep self._output_sizes = output_sizes self._type = att_type self._scale = scale self._normalise = normalise if self._type == 'multihead': self._num_heads = num_heads def __call__(self, x1, x2, r): """Apply attention to create aggregated representation of r. Args: x1: tensor of shape [B,n1,d_x]. x2: tensor of shape [B,n2,d_x]. r: tensor of shape [B,n1,d]. Returns: tensor of shape [B,n2,d] Raises: NameError: The argument for rep/type was invalid. 
""" if self._rep == 'identity': k, q = (x1, x2) elif self._rep == 'mlp': # Pass through MLP k = batch_mlp(x1, self._output_sizes, "attention") q = batch_mlp(x2, self._output_sizes, "attention") else: raise NameError("'rep' not among ['identity','mlp']") if self._type == 'uniform': rep = uniform_attention(q, r) elif self._type == 'laplace': rep = laplace_attention(q, k, r, self._scale, self._normalise) elif self._type == 'dot_product': rep = dot_product_attention(q, k, r, self._normalise) elif self._type == 'multihead': rep = multihead_attention(q, k, r, self._num_heads) else: raise NameError(("'att_type' not among ['uniform','laplace','dot_product'" ",'multihead']")) return rep
36.068493
81
0.660653
2,261
15,798
4.433879
0.145953
0.044988
0.03192
0.053067
0.421347
0.364888
0.295362
0.257456
0.229127
0.224339
0
0.007579
0.239967
15,798
437
82
36.15103
0.827351
0.466641
0
0.169591
0
0
0.048275
0.009784
0
0
0
0
0
1
0.087719
false
0
0.011696
0
0.187135
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c74e4682a52e8afc4e35ad4f69f1a64dccbd1416
3,520
py
Python
minotaur/_minotaur.py
giannitedesco/minotaur
1a043818775e14054cc3467ba6d1c07cbf128c6b
[ "Apache-2.0" ]
172
2020-08-24T14:34:00.000Z
2021-12-29T21:56:33.000Z
minotaur/_minotaur.py
giannitedesco/minotaur
1a043818775e14054cc3467ba6d1c07cbf128c6b
[ "Apache-2.0" ]
3
2020-08-25T13:46:30.000Z
2021-02-27T01:25:38.000Z
minotaur/_minotaur.py
giannitedesco/minotaur
1a043818775e14054cc3467ba6d1c07cbf128c6b
[ "Apache-2.0" ]
4
2020-08-24T17:21:18.000Z
2021-12-29T21:57:42.000Z
from typing import Dict, Tuple, Optional
from pathlib import Path
import asyncio

from ._mask import Mask
from ._event import Event
from ._base import InotifyBase

__all__ = ('Minotaur',)


class Notification:
    __slots__ = (
        '_path',
        '_type',
        '_isdir',
        '_unmount',
        '_qoverflow',
    )

    def __init__(self,
                 path: Path,
                 type: Mask,
                 isdir: bool,
                 unmount: bool,
                 qoverflow: bool = False):
        self._path = path
        self._type = type
        self._isdir = bool(isdir)
        self._unmount = bool(unmount)
        self._qoverflow = bool(qoverflow)

    @property
    def isdir(self) -> bool:
        return self._isdir

    @property
    def unmount(self) -> bool:
        return self._unmount

    @property
    def qoverflow(self) -> bool:
        return self._qoverflow

    @property
    def path(self) -> Path:
        return self._path

    def __repr__(self) -> str:
        t = 'dir' if self._isdir else 'file'
        return f'{type(self).__name__}({self._type.name} {t} {self._path})'

    @classmethod
    def create(cls, path: Path, mask: Mask) -> 'Notification':
        return cls(path,
                   mask & Mask.EVENT_TYPE,
                   bool(mask & Mask.ISDIR),
                   bool(mask & Mask.UNMOUNT),
                   bool(mask & Mask.Q_OVERFLOW))


class Minotaur(InotifyBase):
    """
    Fancy interface for Inotify which does questionable things like:

    1. Resolve watch-descriptors back to paths (which races with renames of
       original paths and can't be used safely, but other inotify packages
       provide this feature, so here it is for your delectation).
    2. Link rename_from/rename_to events together. This feature would be
       useful but isn't yet actually implemented. Working on it...
    """

    __slots__ = (
        '_wdmap',
        '_cmap',
    )

    _wdmap: Dict[int, Path]
    _cmap: Dict[Tuple[int, int], Event]

    def __init__(self,
                 blocking: bool = True,
                 cloexec: bool = True,
                 loop: Optional[asyncio.AbstractEventLoop] = None,
                 ) -> None:
        super().__init__(blocking, cloexec, loop)
        self._wdmap = {}
        self._cmap = {}

    def add_watch(self, p: Path, mask: Mask) -> int:
        wd = super().add_watch(p, mask)
        self._wdmap[wd] = p.resolve()
        return wd

    def rm_watch(self, wd: int) -> int:
        # Drop the mapping before returning: the original put the cleanup in
        # a try/else clause that a `return` inside the try made unreachable.
        ret = super().rm_watch(wd)
        del self._wdmap[wd]
        return ret

    def _resolve_path(self, wd: int, name: Path) -> Path:
        try:
            base_dir = self._wdmap[wd]
        except KeyError:
            path = name
        else:
            path = base_dir / name
        return path

    def __next__(self) -> Notification:
        evt = super()._next_event()
        if evt is None:
            raise StopIteration
        # TODO: Link rename_from/rename_to together if we have them
        path = self._resolve_path(evt.wd, evt.name)
        return Notification.create(path, evt.mask)

    async def __anext__(self) -> Notification:
        evt = await super()._next_event_async()
        if evt is None:
            # Async iterators must raise StopAsyncIteration, not StopIteration.
            raise StopAsyncIteration
        path = self._resolve_path(evt.wd, evt.name)
        return Notification.create(path, evt.mask)
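A hypothetical usage sketch of the class above (Linux only; the import path, the Mask member names, and the context-manager/async-iterator support inherited from InotifyBase are all assumptions, not confirmed by this file):

import asyncio
from pathlib import Path

from minotaur import Minotaur, Mask  # import path is an assumption


async def watch(path: Path) -> None:
    with Minotaur(blocking=False) as mn:      # context-manager support assumed
        mn.add_watch(path, Mask.CREATE | Mask.DELETE)  # member names assumed
        async for ntfn in mn:                 # drives __anext__ above
            print(ntfn)


asyncio.run(watch(Path('/tmp')))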
26.268657
75
0.559659
398
3,520
4.718593
0.31407
0.025559
0.022364
0.028754
0.124601
0.101171
0.070288
0.070288
0.070288
0.070288
0
0.000865
0.343182
3,520
133
76
26.466165
0.811419
0.130966
0
0.265306
0
0.010204
0.042673
0.012901
0
0
0
0.007519
0
1
0.122449
false
0
0.061224
0.05102
0.357143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c751066d68d4e91afb71f1ee11d13e9bcbb998a8
8,802
py
Python
novelty-detection/train_wood_vgg19.py
matherm/python-data-science
bdb49b18c5ef6044f8a9e6f95c81d5f7bb1d511f
[ "MIT" ]
1
2020-03-24T09:22:04.000Z
2020-03-24T09:22:04.000Z
novelty-detection/train_wood_vgg19.py
matherm/python-data-science
bdb49b18c5ef6044f8a9e6f95c81d5f7bb1d511f
[ "MIT" ]
1
2020-06-16T14:42:29.000Z
2020-06-16T14:42:29.000Z
novelty-detection/train_wood_vgg19.py
matherm/python-data-science
bdb49b18c5ef6044f8a9e6f95c81d5f7bb1d511f
[ "MIT" ]
null
null
null
import argparse
import sys

import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

parser = argparse.ArgumentParser(description='PyTorch Novelty Detection')
# TRAINING PARAMS
parser.add_argument('--epochs', type=int, default=100, metavar='',
                    help='Amount of epochs for training (default: 100)')
parser.add_argument('--batch_size', type=int, default=1000, metavar='',
                    help='Batch size for SGD (default: 1000)')
parser.add_argument('--lrate', type=float, default=0.0001, metavar="",
                    help="Learning rate (default: 0.0001)")
parser.add_argument('--with_cuda', action='store_true', dest='use_cuda',
                    help="Shall cuda be used (default: False)")
parser.add_argument('--model', type=int, default=0,
                    help="Which model to train (0=KLminimizer, 1=Euclidean-Minimizer) (default: 0)")
parser.add_argument('--plots', action='store_true', dest='plots',
                    help="Shall matplotlib be used (default: False)")
parser.add_argument('--grid', action='store_true', dest='grid',
                    help="Grid search (default: False)")

argv = parser.parse_args()
sys.argv = [sys.argv[0]]

from ummon import *
from negvarbound import *
from model import *
from helpers import Evaluator
import helpers

torch.manual_seed(4)

if __name__ == '__main__':

    # WOOD
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225]),
                                    VGG19Features("pool4"),
                                    helpers.flatten_transform])
    wood_data = ImagePatches("/ext/data/Wood-0035.png", mode='rgb', train=True,
                             stride_y=14, stride_x=14, window_size=28,
                             transform=transform)
    wood_data_test = AnomalyImagePatches("/ext/data/Wood-0035.png", mode='rgb',
                                         train=True, stride_y=14, stride_x=14,
                                         window_size=28, transform=transform,
                                         propability=1.0,
                                         anomaly=SquareAnomaly(size=8, color=255))

    wood_data = [wood_data[i][0].data for i in range(len(wood_data))]
    wood_data = torch.stack(wood_data).numpy() / 10
    wood_data_test = [wood_data_test[i][0].data for i in range(len(wood_data_test))]
    wood_data_test = torch.stack(wood_data_test).numpy() / 10

    # Novelty
    data_novelty = wood_data_test
    # Train
    data_train = wood_data
    # Val
    data_val = data_train

    ######################################################
    # NORMAL DISTRIBUTION
    ######################################################

    # Model
    model = ModelNormal(input_features=data_train.shape[1], hidden_layer=20,
                        latent_features=20)
    torch.manual_seed(4)

    # LOSS
    criterion = KLLoss(model=model, size_average=False)

    # INSTANTIATE OPTIMIZER
    optimizer = torch.optim.SGD(model.parameters(), lr=argv.lrate, weight_decay=1)

    # Evaluator
    evaluator = Evaluator(model, data_train, data_val, data_novelty)

    # Activate matplotlib
    argv.plots = True

    with Logger(loglevel=10, log_batch_interval=601) as lg:
        # CREATE A TRAINER
        my_trainer = UnsupervisedTrainer(lg, model, criterion, optimizer,
                                         trainingstate=Trainingstate(),
                                         model_filename="KL_MIN",
                                         use_cuda=argv.use_cuda,
                                         profile=False,
                                         convergence_eps=1e-5)
        # START TRAINING
        my_trainer.fit(dataloader_training=(wood_data, 20), epochs=200)
        evaluator.evaluate_model(argv)

    ######################################################
    # LOGNORMAL
    ######################################################

    # Model
    model = ModelLogNormal(input_features=data_train.shape[1], hidden_layer=20,
                           latent_features=20)
    torch.manual_seed(4)

    # LOSS
    criterion = KLLoss_lognormal(model=model, size_average=False)

    # INSTANTIATE OPTIMIZER
    optimizer = torch.optim.SGD(model.parameters(), lr=argv.lrate, weight_decay=1)

    # Evaluator
    evaluator = Evaluator(model, data_train, data_val, data_novelty)

    # Activate matplotlib
    argv.plots = True

    with Logger(loglevel=10, log_batch_interval=601) as lg:
        # CREATE A TRAINER
        my_trainer = UnsupervisedTrainer(lg, model, criterion, optimizer,
                                         trainingstate=Trainingstate(),
                                         model_filename="KL_MIN",
                                         use_cuda=argv.use_cuda,
                                         profile=False,
                                         convergence_eps=1e-5)
        # START TRAINING
        my_trainer.fit(dataloader_training=(data_train, 20), epochs=argv.epochs)
        evaluator.evaluate_model(argv)

    ######################################################
    # LAPLACE
    ######################################################

    # Model
    model = ModelLaplace(input_features=data_train.shape[1], hidden_layer=20,
                         latent_features=20)
    torch.manual_seed(4)

    # LOSS
    criterion = KLLoss_laplace(model=model, size_average=False, mean=2, scale=0.5)

    # INSTANTIATE OPTIMIZER
    optimizer = torch.optim.SGD(model.parameters(), lr=0.000001, weight_decay=1)

    # Evaluator
    evaluator = Evaluator(model, data_train, data_val, data_novelty)

    # Activate matplotlib
    argv.plots = True

    with Logger(loglevel=10, log_batch_interval=601) as lg:
        # CREATE A TRAINER
        my_trainer = UnsupervisedTrainer(lg, model, criterion, optimizer,
                                         trainingstate=Trainingstate(),
                                         model_filename="KL_MIN",
                                         use_cuda=argv.use_cuda,
                                         profile=False,
                                         convergence_eps=1e-1)
        # START TRAINING
        my_trainer.fit(dataloader_training=(data_train, 20), epochs=300)
        evaluator.evaluate_model(argv)

    # {'AUROC LAT (TRAIN)': 0.8743801652892562,
    #  'AUROC LAT (VAL)': 0.8661157024793389,
    #  'AUROC REC (TRAIN)': 0.86900826446281,
    #  'AUROC REC (VAL)': 0.8528925619834712}

    ######################################################
    # LAPLACE WITH R-SHIFT
    ######################################################

    class CombinedLoss(nn.Module):
        def __init__(self, model, *args, **kwargs):
            super(CombinedLoss, self).__init__()
            self.model = model
            self.r_shift = KLLoss_shift_r(model=model, size_average=False)
            self.kl_loss = KLLoss_laplace(model=model, size_average=False,
                                          mean=10, scale=0.3)

        def forward(self, inpt, outpt):
            self.r_shift()
            return self.kl_loss(inpt, outpt)

    # Model
    model = ModelLaplace(input_features=data_train.shape[1], hidden_layer=20,
                         latent_features=20)
    torch.manual_seed(4)

    # LOSS
    criterion = CombinedLoss(model)

    # INSTANTIATE OPTIMIZER
    optimizer = torch.optim.SGD(model.parameters(), lr=argv.lrate, weight_decay=1)

    # Evaluator
    evaluator = Evaluator(model, data_train, data_val, data_novelty)

    # Activate matplotlib
    argv.plots = True

    with Logger(loglevel=10, log_batch_interval=601) as lg:
        # CREATE A TRAINER
        my_trainer = UnsupervisedTrainer(lg, model, criterion, optimizer,
                                         trainingstate=Trainingstate(),
                                         model_filename="KL_MIN",
                                         use_cuda=argv.use_cuda,
                                         profile=False,
                                         convergence_eps=1e-3)
        # START TRAINING
        my_trainer.fit(dataloader_training=(data_train, 20), epochs=200)
        evaluator.evaluate_model(argv)

    # {'AUROC LAT (TRAIN)': 0.8590909090909091,
    #  'AUROC LAT (VAL)': 0.8752066115702479,
    #  'AUROC REC (TRAIN)': 0.8677685950413224,
    #  'AUROC REC (VAL)': 0.8619834710743801}
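The AUROC figures in the trailing comments come from the project's own Evaluator helper. As a self-contained illustration of the metric itself, a sketch using sklearn (an assumption; the script does not import it) could look like:

import numpy as np
from sklearn.metrics import roc_auc_score

# Pretend per-sample novelty scores; anomalies should score higher.
scores_normal = np.random.rand(100)
scores_anomal = np.random.rand(100) + 0.5

y_true = np.r_[np.zeros(100), np.ones(100)]   # 0 = normal, 1 = anomaly
y_score = np.r_[scores_normal, scores_anomal]
print('AUROC:', roc_auc_score(y_true, y_score))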
34.249027
213
0.546353
893
8,802
5.204927
0.234043
0.025818
0.025602
0.017212
0.594664
0.577453
0.568847
0.545181
0.510757
0.498064
0
0.051189
0.311975
8,802
256
214
34.382813
0.716314
0.095546
0
0.503876
0
0
0.066952
0.009087
0
0
0
0
0
1
0.015504
false
0
0.116279
0
0.147287
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c75685d19bc8be9c76eb30777f9bd2a54b73db11
682
py
Python
tests/conftest.py
junjunjunk/torchgpipe
3db11e1da0fc432eb3f3807ddcb22967973c8b28
[ "Apache-2.0" ]
532
2019-05-27T09:23:04.000Z
2022-03-31T04:07:55.000Z
tests/conftest.py
junjunjunk/torchgpipe
3db11e1da0fc432eb3f3807ddcb22967973c8b28
[ "Apache-2.0" ]
29
2019-07-01T19:49:54.000Z
2021-11-28T00:51:00.000Z
tests/conftest.py
junjunjunk/torchgpipe
3db11e1da0fc432eb3f3807ddcb22967973c8b28
[ "Apache-2.0" ]
68
2019-05-27T09:27:32.000Z
2022-03-27T13:52:18.000Z
import pytest
import torch


@pytest.fixture(autouse=True)
def manual_seed_zero():
    torch.manual_seed(0)


@pytest.fixture(scope='session')
def cuda_sleep():
    # Warm-up CUDA.
    torch.empty(1, device='cuda')

    # From test/test_cuda.py in PyTorch.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    torch.cuda._sleep(1000000)
    end.record()
    end.synchronize()
    cycles_per_ms = 1000000 / start.elapsed_time(end)

    def cuda_sleep(seconds):
        torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))

    return cuda_sleep


def pytest_report_header():
    return f'torch: {torch.__version__}'
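A hypothetical test consuming the cuda_sleep fixture above (the test name, body, and the 90 ms bound are illustrative, not from the repo):

import torch


def test_cuda_sleep_duration(cuda_sleep):
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    cuda_sleep(0.1)  # enqueue roughly 100 ms of GPU busy-work asynchronously
    end.record()
    end.synchronize()
    assert start.elapsed_time(end) >= 90  # elapsed_time returns milliseconds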
22
62
0.696481
95
682
4.768421
0.484211
0.099338
0.05298
0.0883
0.13245
0.13245
0
0
0
0
0
0.035714
0.178886
682
30
63
22.733333
0.773214
0.070381
0
0
0
0
0.058637
0
0
0
0
0
0
1
0.2
false
0
0.1
0.05
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c756e2f724651746fcaf020b50f3e0f2bdeb6442
54,090
py
Python
lib/python/treadmill/scheduler/__init__.py
drienyov/treadmill
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
[ "Apache-2.0" ]
null
null
null
lib/python/treadmill/scheduler/__init__.py
drienyov/treadmill
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
[ "Apache-2.0" ]
null
null
null
lib/python/treadmill/scheduler/__init__.py
drienyov/treadmill
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
[ "Apache-2.0" ]
null
null
null
"""Treadmill hierarchical scheduler. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import abc import collections import datetime import heapq import itertools import logging import operator import sys import time import enum import numpy as np import six _LOGGER = logging.getLogger(__name__) MAX_PRIORITY = 100 DEFAULT_RANK = 100 _UNPLACED_RANK = sys.maxsize DIMENSION_COUNT = None _MAX_UTILIZATION = float('inf') _GLOBAL_ORDER_BASE = time.mktime((2014, 1, 1, 0, 0, 0, 0, 0, 0)) # 21 day DEFAULT_SERVER_UPTIME = 21 * 24 * 60 * 60 # 1 day MIN_SERVER_UPTIME = 1 * 24 * 60 * 60 # 7 days DEFAULT_MAX_APP_LEASE = 7 * 24 * 60 * 60 # Default partition threshold DEFAULT_THRESHOLD = 0.9 # pylint: disable=C0302,too-many-lines def _bit_count(value): """Returns number of bits set. """ count = 0 while value: value &= value - 1 count += 1 return count def zero_capacity(): """Returns zero capacity vector. """ assert DIMENSION_COUNT is not None, 'Dimension count not set.' return np.zeros(DIMENSION_COUNT) def eps_capacity(): """Returns eps capacity vector. """ assert DIMENSION_COUNT is not None, 'Dimension count not set.' return np.array( [np.finfo(float).eps for _x in range(0, DIMENSION_COUNT)] ) def _global_order(): """Use timestamp in nanoseconds, from Jan 1st 2014, to break tie in scheduling conflicts for apps of the same priority, in a FIFO fashion. """ # Take the current EPOCH in nanosec global_order = int(time.time() * 1000000) - _GLOBAL_ORDER_BASE return global_order def utilization(demand, allocated, available): """Calculates utilization score. """ return np.max(np.subtract(demand, allocated) / available) def _all(oper, left, right): """Short circuit all for ndarray. """ return all( oper(ai, bi) for ai, bi in six.moves.zip(left, right) ) def _any(oper, left, right): """Short circuit any for ndarray. """ return any( oper(ai, bi) for ai, bi in six.moves.zip(left, right) ) def _any_eq(left, right): """Short circuit any eq for ndarray. """ return _any(operator.eq, left, right) def _any_isclose(left, right): """Short circuit any isclose for ndarray. """ return _any(np.isclose, left, right) def _any_lt(left, right): """Short circuit any lt for ndarray. """ return _any(operator.lt, left, right) def _any_le(left, right): """Short circuit any le for ndarray. """ return _any(operator.le, left, right) def _any_gt(left, right): """Short circuit any gt for ndarray. """ return _any(operator.gt, left, right) def _any_ge(left, right): """Short circuit any ge for ndarray. """ return _any(operator.ge, left, right) def _all_eq(left, right): """Short circuit all eq for ndarray. """ return _all(operator.eq, left, right) def _all_isclose(left, right): """Short circuit all isclose for ndarray. """ return _all(np.isclose, left, right) def _all_lt(left, right): """Short circuit all lt for ndarray. """ return _all(operator.lt, left, right) def _all_le(left, right): """Short circuit all le for ndarray. """ return _all(operator.le, left, right) def _all_gt(left, right): """Short circuit all gt for ndarray. """ return _all(operator.gt, left, right) def _all_ge(left, right): """Short circuit all ge for ndarray. """ return _all(operator.ge, left, right) class IdentityGroup: """Identity group. """ __slots__ = ( 'available', 'count', ) def __init__(self, count=0): self.count = count self.available = set(range(0, count)) def acquire(self): """Return next available identity or None. 
""" if self.available: return self.available.pop() else: return None def release(self, ident): """Mark identity as available. """ if ident < self.count: self.available.add(ident) def adjust(self, count): """Adjust identities with new count. If count is larger, add additional identities to the set. If count is lower, remove identities that are no longer valid. All apps that have invalid identities will be adjusted in the schedule cycle. """ if count >= self.count: self.available ^= set(six.moves.xrange(self.count, count)) else: self.available -= set(six.moves.xrange(count, self.count)) self.count = count class State(enum.Enum): """Enumeration of node/server states. """ # Ready to accept new applications. # TODO: Fix attribute name up = 'up' # pylint: disable=invalid-name # Applications need to be migrated. down = 'down' # Existing applications can stay, but will not accept new. frozen = 'frozen' class Affinity: """Model affinity and affinity limits. """ __slots__ = ( 'name', 'limits', 'constraints', ) def __init__(self, name, limits=None): self.name = name self.limits = collections.defaultdict(lambda: float('inf')) if limits: self.limits.update(limits) # freeze affinity shape constraints. self.constraints = tuple([self.name] + sorted(self.limits.values())) class Application: """Application object. """ __slots__ = ( 'global_order', 'name', 'demand', 'affinity', 'priority', 'allocation', 'data_retention_timeout', 'server', 'lease', 'identity', 'identity_group', 'identity_group_ref', 'schedule_once', 'evicted', 'placement_expiry', 'renew', 'unschedule', 'final_rank', 'final_util', 'constraints', ) def __init__(self, name, priority, demand, affinity, affinity_limits=None, data_retention_timeout=0, lease=0, identity_group=None, identity=None, schedule_once=False): self.global_order = _global_order() self.allocation = None self.server = None self.name = name self.affinity = Affinity(affinity, affinity_limits) self.priority = priority self.demand = np.array(demand, dtype=float) self.data_retention_timeout = data_retention_timeout self.lease = lease self.identity_group = identity_group self.identity = identity self.identity_group_ref = None self.schedule_once = schedule_once self.evicted = False self.unschedule = False self.placement_expiry = None self.renew = False def shape(self): """Return tuple of application (constraints, demand). Application shape is tuple of constraints that affect application placement. Currently this includes affinity constraints and app lease time. """ constraints = (self.affinity.constraints + (self.lease,)) if self.allocation: constraints += self.allocation.constraints return constraints, self.demand def acquire_identity(self): """Try to acquire identity if belong to the group. Returns True if successfull or if identity group is none. """ if not self.identity_group_ref: return True if self.identity is None: self.identity = self.identity_group_ref.acquire() _LOGGER.info('Acquired identity: %s: %s - %s', self.name, self.identity_group, self.identity) return self.identity is not None def release_identity(self): """Release app identity. """ if self.identity_group_ref and self.identity is not None: self.identity_group_ref.release(self.identity) self.identity = None def force_set_identity(self, identity): """Force identity of the app. """ if identity is not None: assert self.identity_group_ref self.identity = identity self.identity_group_ref.available.discard(identity) def has_identity(self): """Checks if app has identity if identity group is specified. 
""" return self.identity_group_ref is None or self.identity is not None @property def traits(self): """The app traits are derived from allocation. """ if self.allocation is None: return 0 else: return self.allocation.traits @six.add_metaclass(abc.ABCMeta) class Strategy: """Base class for all placement strategies. """ @abc.abstractmethod def suggested_node(self): """Suggested node that should be tried first. """ pass @abc.abstractmethod def next_node(self): """Next node to try, if previous suggestion was rejected. """ pass class SpreadStrategy(Strategy): """Spread strategy will suggest new node for each subsequent placement. """ __slots__ = ( 'current_idx', 'node', ) def __init__(self, node): self.current_idx = 0 self.node = node def suggested_node(self): """Suggest next node from the cycle. """ for _ in six.moves.xrange(0, len(self.node.children)): if self.current_idx == len(self.node.children): self.current_idx = 0 current = self.node.children[self.current_idx] self.current_idx += 1 if current: return current # Not a single non-none node. return None def next_node(self): """Suggest next node from the cycle. """ return self.suggested_node() class PackStrategy(Strategy): """Pack strategy will suggest same node until it is full. """ __slots__ = ( 'current_idx', 'node', ) def __init__(self, node): self.current_idx = 0 self.node = node def suggested_node(self): """Suggest same node as previous placement. """ for _ in six.moves.xrange(0, len(self.node.children)): if self.current_idx == len(self.node.children): self.current_idx = 0 node = self.node.children[self.current_idx] if node: return node return None def next_node(self): """Suggest next node from the cycle. """ self.current_idx += 1 return self.suggested_node() class TraitSet: """Hierarchical set of traits. """ __slots__ = ( 'self_traits', 'children_traits', 'traits', ) def __init__(self, traits=0): if not traits: traits = 0 # Private traits. assert isinstance(traits, six.integer_types) self.self_traits = traits # Union of all children traits. self.children_traits = dict() self._recalculate() def _recalculate(self): """Calculate combined set of all traits. """ self.traits = self.self_traits for trait in six.itervalues(self.children_traits): self.traits |= trait def has(self, traits): """Check if all traits are present. """ return (self.traits & traits) == traits def add(self, child, traits): """Add a child with given traits. """ # Update children traits. self.children_traits[child] = traits self._recalculate() def remove(self, child): """Remove child traits from the list. """ if child in self.children_traits: del self.children_traits[child] self._recalculate() def is_same(self, other): """Compares own traits, ignore child. """ return self.self_traits == other.self_traits class AffinityCounter: """Manages affinity count. """ __slots__ = ( 'affinity_counter', ) def __init__(self): self.affinity_counter = collections.Counter() class Node: """Abstract placement node. 
""" __slots__ = ( 'name', 'level', 'free_capacity', 'parent', 'children', 'children_by_name', 'traits', 'labels', 'affinity_counters', 'valid_until', '_state', '_state_since', ) def __init__(self, name, traits, level, valid_until=0): self.name = name self.level = level self.free_capacity = zero_capacity() self.parent = None self.children = list() self.children_by_name = dict() self.traits = TraitSet(traits) self.labels = set() self.affinity_counters = collections.Counter() self.valid_until = valid_until self._state = State.up self._state_since = time.time() def empty(self): """Return true if there are no children. """ return not bool(self.children_by_name) def children_iter(self): """Iterate over active children. """ for child in self.children: if child: yield child def get_state(self): """Returns tuple of (state, since). """ return self. _state, self._state_since def set_state(self, state, since): """Sets the state and time since. """ if self._state is not state: self._state_since = since self._state = state _LOGGER.debug('state: %s - (%s, %s)', self.name, self._state, self._state_since) @property def state(self): """Return current state. """ return self._state @state.setter def state(self, new_state): """Set node state and records time. """ self.set_state(new_state, time.time()) def add_child_traits(self, node): """Recursively add child traits up. """ self.traits.add(node.name, node.traits.traits) if self.parent: self.parent.remove_child_traits(self.name) self.parent.add_child_traits(self) def adjust_valid_until(self, child_valid_until): """Recursively adjust valid until time. """ if child_valid_until: self.valid_until = max(self.valid_until, child_valid_until) else: if self.empty(): self.valid_until = 0 else: self.valid_until = max([node.valid_until for node in self.children_iter()]) if self.parent: self.parent.adjust_valid_until(child_valid_until) def remove_child_traits(self, node_name): """Recursively remove child traits up. """ self.traits.remove(node_name) if self.parent: self.parent.remove_child_traits(self.name) self.parent.add_child_traits(self) def reset_children(self): """Reset children to empty list. """ for child in self.children_iter(): child.parent = None self.children = list() self.children_by_name = dict() def add_node(self, node): """Add child node, set the traits and propagate traits up. """ assert node.parent is None assert node.name not in self.children_by_name node.parent = self self.children.append(node) self.children_by_name[node.name] = node self.add_child_traits(node) self.increment_affinity(node.affinity_counters) self.add_labels(node.labels) self.adjust_valid_until(node.valid_until) def add_labels(self, labels): """Recursively add labels to self and parents. """ self.labels.update(labels) if self.parent: self.parent.add_labels(self.labels) def remove_node(self, node): """Remove child node and adjust the traits. """ assert node.name in self.children_by_name del self.children_by_name[node.name] for idx in six.moves.xrange(0, len(self.children)): if self.children[idx] == node: self.children[idx] = None self.remove_child_traits(node.name) self.decrement_affinity(node.affinity_counters) self.adjust_valid_until(None) node.parent = None return node def remove_node_by_name(self, nodename): """Removes node by name. """ assert nodename in self.children_by_name return self.remove_node(self.children_by_name[nodename]) def check_app_constraints(self, app): """Find app placement on the node. 
""" if app.allocation is not None: if app.allocation.label not in self.labels: _LOGGER.info('Missing label: %s on %s', app.allocation.label, self.name) return False if app.traits != 0 and not self.traits.has(app.traits): _LOGGER.info('Missing traits: %s on %s', app.traits, self.name) return False if not self.check_app_affinity_limit(app): return False if _any_gt(app.demand, self.free_capacity): _LOGGER.info('Not enough free capacity: %s', self.free_capacity) return False return True def check_app_affinity_limit(self, app): """Check app affinity limits """ count = self.affinity_counters[app.affinity.name] limit = app.affinity.limits[self.level] return count < limit def put(self, _app): """Abstract method, should never be called. """ raise Exception('Not implemented.') def size(self, label): """Returns total capacity of the children. """ if self.empty() or label not in self.labels: return eps_capacity() return np.sum([ n.size(label) for n in self.children_iter()], 0) def members(self): """Return set of all leaf node names. """ names = dict() for node in self.children_iter(): names.update(node.members()) return names def increment_affinity(self, counters): """Increment affinity counters recursively. """ self.affinity_counters.update(counters) if self.parent: self.parent.increment_affinity(counters) def decrement_affinity(self, counters): """Decrement affinity counters recursively. """ self.affinity_counters.subtract(counters) if self.parent: self.parent.decrement_affinity(counters) class Bucket(Node): """Collection of nodes/buckets. """ __slots__ = ( 'affinity_strategies', 'traits', ) _default_strategy_t = SpreadStrategy def __init__(self, name, traits=0, level=None): super(Bucket, self).__init__(name, traits, level) self.affinity_strategies = dict() self.traits = TraitSet(traits) def set_affinity_strategy(self, affinity, strategy_t): """Initilaizes placement strategy for given affinity. """ self.affinity_strategies[affinity] = strategy_t(self) def get_affinity_strategy(self, affinity): """Returns placement strategy for the affinity, defaults to spread. """ if affinity not in self.affinity_strategies: self.set_affinity_strategy(affinity, Bucket._default_strategy_t) return self.affinity_strategies[affinity] def adjust_capacity_up(self, new_capacity): """Node can only increase capacity. """ self.free_capacity = np.maximum(self.free_capacity, new_capacity) if self.parent: self.parent.adjust_capacity_up(self.free_capacity) def adjust_capacity_down(self, prev_capacity=None): """Called when capacity is decreased. """ if self.empty(): self.free_capacity = zero_capacity() if self.parent: self.parent.adjust_capacity_down() else: if prev_capacity is not None and _all_lt(prev_capacity, self.free_capacity): return free_capacity = zero_capacity() for child_node in self.children_iter(): if child_node.state is not State.up: continue free_capacity = np.maximum(free_capacity, child_node.free_capacity) # If resulting free_capacity is less the previous, we need to # adjust the parent, otherwise, nothing needs to be done. prev_capacity = self.free_capacity.copy() if _any_lt(free_capacity, self.free_capacity): self.free_capacity = free_capacity if self.parent: self.parent.adjust_capacity_down(prev_capacity) def add_node(self, node): """Adds node to the bucket. """ super(Bucket, self).add_node(node) self.adjust_capacity_up(node.free_capacity) def remove_node(self, node): """Removes node from the bucket. 
""" super(Bucket, self).remove_node(node) # if _any_isclose(self.free_capacity, node.free_capacity): self.adjust_capacity_down(node.free_capacity) return node def put(self, app): """Try to put app on one of the nodes that belong to the bucket. """ # Check if it is feasible to put app on some node low in the # hierarchy _LOGGER.debug('bucket.put: %s => %s', app.name, self.name) if not self.check_app_constraints(app): return False strategy = self.get_affinity_strategy(app.affinity.name) node = strategy.suggested_node() if node is None: _LOGGER.debug('All nodes in the bucket deleted.') return False nodename0 = node.name first = True while True: # End of iteration. if not first and node.name == nodename0: _LOGGER.debug('Finished iterating on: %s.', self.name) break first = False _LOGGER.debug('Trying node: %s:', node.name) if node.state is not State.up: _LOGGER.debug('Node not up: %s, %s', node.name, node.state) else: if node.put(app): return True node = strategy.next_node() return False class Server(Node): """Server object, final app placement. """ __slots__ = ( 'init_capacity', 'apps', 'up_since', 'presence_id', ) def __init__(self, name, capacity, up_since=0, valid_until=0, traits=0, label=None, presence_id=None): super(Server, self).__init__(name, traits=traits, level='server', valid_until=valid_until) self.labels = set([label]) self.init_capacity = np.array(capacity, dtype=float) self.free_capacity = self.init_capacity.copy() self.apps = dict() self.up_since = up_since self.presence_id = presence_id def __str__(self): return 'server: %s %s' % (self.name, self.init_capacity) def is_same(self, other): """Compares capacity and traits against another server. valid_until is ignored, as server comes up after reboot will have different valid_until value. """ return (self.labels == other.labels and _all_eq(self.init_capacity, other.init_capacity) and self.traits.is_same(other.traits)) def put(self, app): """Tries to put the app on the server. """ assert app.name not in self.apps _LOGGER.debug('server.put: %s => %s', app.name, self.name) if not self.check_app_lifetime(app): return False if not self.check_app_constraints(app): return False prev_capacity = self.free_capacity.copy() self.free_capacity -= app.demand self.apps[app.name] = app self.increment_affinity([app.affinity.name]) app.server = self.name if self.parent: self.parent.adjust_capacity_down(prev_capacity) if app.placement_expiry is None: app.placement_expiry = time.time() + app.lease return True def restore(self, app, placement_expiry=None): """Put app back on the server, ignore app lifetime. """ _LOGGER.debug('server.restore: %s => %s (%s)', app.name, self.name, placement_expiry) lease = app.lease # If not explicit if placement_expiry is None: placement_expiry = app.placement_expiry app.lease = 0 rc = self.put(app) app.lease = lease app.placement_expiry = placement_expiry return rc def renew(self, app): """Try to extend the placement for app lease. """ can_renew = self.check_app_lifetime(app) if can_renew: app.placement_expiry = time.time() + app.lease return can_renew def check_app_lifetime(self, app): """Check if the app lease fits until server is rebooted. """ # app with 0 lease can be placed anywhere (ignore potentially # expired servers) if not app.lease: return True return time.time() + app.lease < self.valid_until def remove(self, app_name): """Removes app from the server. 
""" assert app_name in self.apps app = self.apps[app_name] del self.apps[app_name] app.server = None app.evicted = True app.unschedule = False app.placement_expiry = None self.free_capacity += app.demand self.decrement_affinity([app.affinity.name]) if self.parent: self.parent.adjust_capacity_up(self.free_capacity) def remove_all(self): """Remove all apps. """ # iterate over copy of the keys, as we are removing them in the loop. for appname in list(self.apps): self.remove(appname) def size(self, label): """Return server capacity. """ if label not in self.labels: return eps_capacity() return self.init_capacity def members(self): """Return set of all leaf node names. """ return {self.name: self} def set_state(self, state, since): """Change host state. """ if self.state is state: return super(Server, self).set_state(state, since) if state == State.up: if self.parent: self.parent.adjust_capacity_up(self.free_capacity) elif state in (State.down, State.frozen): if self.parent: self.parent.adjust_capacity_down(self.free_capacity) else: raise Exception('Invalid state: ' % state) class Allocation: """Allocation manages queue of apps sharing same reserved capacity. In reality allocation is tied to grn via application proid. Applications within the allocation are organized by application priority. Allocations are ranked, and the rank is used to globally order applications from different allocations into global queue. Default allocation has rank 100. Defining allocation with lower rank will result in all it's applications to be evaluated first regardless of utilization. This is used to model "system" applications that should be always present regardless of utilization. Allocation queue can be capped with max_utilization parameter. If set, it will specify the max_utilization which will be considered for scheduling. """ __slots__ = ( 'reserved', 'rank', 'rank_adjustment', 'traits', 'label', 'max_utilization', 'apps', 'sub_allocations', 'path', 'constraints', ) def __init__(self, reserved=None, rank=None, traits=None, max_utilization=None, partition=None): self.set_reserved(reserved) self.rank = None self.rank_adjustment = 0 self.traits = 0 self.label = partition self.max_utilization = _MAX_UTILIZATION self.reserved = zero_capacity() self.set_max_utilization(max_utilization) self.set_traits(traits) self.update(reserved, rank, 0) self.apps = dict() self.sub_allocations = dict() self.path = [] # Freeze shape constraintes. self.constraints = (self.label, self.traits,) @property def name(self): """Returns full allocation name. """ return '/'.join(self.path) def set_reserved(self, reserved): """Update reserved capacity. """ if reserved is None: self.reserved = zero_capacity() elif isinstance(reserved, int): assert reserved == 0 self.reserved = zero_capacity() elif isinstance(reserved, float): assert reserved == 0.0 self.reserved = zero_capacity() elif isinstance(reserved, list): assert len(reserved) == DIMENSION_COUNT self.reserved = np.array(reserved, dtype=float) elif isinstance(reserved, np.ndarray): self.reserved = reserved else: assert 'Unsupported type: %r' % type(reserved) def update(self, reserved, rank, rank_adjustment, max_utilization=None): """Updates allocation. """ if rank is not None: self.rank = rank else: self.rank = DEFAULT_RANK if rank_adjustment is not None: self.rank_adjustment = rank_adjustment self.set_reserved(reserved) self.set_max_utilization(max_utilization) def set_max_utilization(self, max_utilization): """Sets max_utilization, accounting for default None value. 
""" if max_utilization is not None: self.max_utilization = max_utilization else: self.max_utilization = _MAX_UTILIZATION def set_traits(self, traits): """Set traits, account for default None value. """ if not traits: self.traits = 0 else: self.traits = traits def add(self, app): """Add application to the allocation queue. Once added, the scheduler will make an attempt to place the app on one of the cell nodes. """ # Check that there are no duplicate app names. if app.name in self.apps: _LOGGER.warning( 'Duplicate app on alllocation queue: %s', app.name ) return app.allocation = self self.apps[app.name] = app def remove(self, name): """Remove application from the allocation queue. """ if name in self.apps: self.apps[name].allocation = None del self.apps[name] def priv_utilization_queue(self): """Returns tuples for sorted by global utilization. Apps in the queue are ordered by priority, insertion order. Adding or removing maintains invariant that apps utilization monotonically increases as well. Returns local prioritization queue in a tuple where first element is utilization ratio, so that this queue is suitable for merging into global priority queue. """ def _app_key(app): """Compares apps by priority, state, global index """ return (-app.priority, 0 if app.server else 1, app.global_order, app.name) prio_queue = sorted(six.viewvalues(self.apps), key=_app_key) acc_demand = zero_capacity() available = self.reserved + np.finfo(float).eps util_before = utilization(acc_demand, self.reserved, available) for app in prio_queue: acc_demand = acc_demand + app.demand util_after = utilization(acc_demand, self.reserved, available) # Priority 0 apps are treated specially - utilization is set to # max float. # # This ensures that they are at the end of the all queues. if app.priority == 0: util_before = _MAX_UTILIZATION util_after = _MAX_UTILIZATION # All things equal, already scheduled applications have priority # over pending. pending = 0 if app.server else 1 if util_after <= self.max_utilization - 1: rank = self.rank if util_before < 0: rank -= self.rank_adjustment else: rank = _UNPLACED_RANK entry = (rank, util_before, util_after, pending, app.global_order, app) util_before = util_after yield entry def utilization_queue(self, free_capacity, visitor=None): """Returns utilization queue including the sub-allocs. All app queues from self and sub-allocs are merged in standard order, and then utilization is recalculated based on total reserved capacity of this alloc and sub-allocs combined. The function maintains invariant that any app (self or inside sub-alloc with utilization < 1 will remain with utilzation < 1. """ total_reserved = self.total_reserved() queues = [ alloc.utilization_queue(free_capacity, visitor) for alloc in six.itervalues(self.sub_allocations) ] queues.append(self.priv_utilization_queue()) acc_demand = zero_capacity() available = total_reserved + free_capacity + np.finfo(float).eps util_before = utilization(acc_demand, total_reserved, available) for item in heapq.merge(*queues): rank, _u_before, _u_after, pending, order, app = item acc_demand = acc_demand + app.demand util_after = utilization(acc_demand, total_reserved, available) if app.priority == 0: util_before = _MAX_UTILIZATION util_after = _MAX_UTILIZATION # - lower rank allocations take precedence. 
# - for same rank, utilization takes precedence # - False < True, so for apps with same utilization we prefer # those that already running (False == not pending) # - Global order entry = (rank, util_before, util_after, pending, order, app) if visitor: visitor(self, entry, acc_demand) util_before = util_after yield entry def total_reserved(self): """Total reserved capacity including sub-allocs. """ return six.moves.reduce( lambda acc, alloc: acc + alloc.total_reserved(), six.itervalues(self.sub_allocations), self.reserved ) def add_sub_alloc(self, name, alloc): """Add child allocation. """ self.sub_allocations[name] = alloc assert not alloc.path alloc.path = self.path + [name] alloc.label = self.label def remove_sub_alloc(self, name): """Remove chlid allocation. """ if name in self.sub_allocations: del self.sub_allocations[name] def get_sub_alloc(self, name): """Return sub allocation, create empty if it does not exist. """ if name not in self.sub_allocations: self.add_sub_alloc(name, Allocation()) return self.sub_allocations[name] def all_apps(self): """Return all apps in allocation and sub-allocations.""" all_apps = list(six.itervalues(self.apps)) for alloc in six.itervalues(self.sub_allocations): all_apps.extend(alloc.all_apps()) return all_apps class Partition: """Cell partition. """ __slots__ = ( 'allocation', 'max_server_uptime', 'max_lease', 'threshold', 'label', '_reboot_buckets', '_reboot_dates', '_reboot_last', ) def __init__(self, max_server_uptime=None, max_lease=None, threshold=None, label=None, reboot_schedule=None, now=None): self.label = label self.allocation = Allocation(partition=label) # Default - if not max_server_uptime: max_server_uptime = DEFAULT_SERVER_UPTIME if not max_lease: max_lease = DEFAULT_MAX_APP_LEASE if not threshold: threshold = DEFAULT_THRESHOLD self.max_server_uptime = max_server_uptime self.max_lease = max_lease self.threshold = threshold if not reboot_schedule: # reboot every day reboot_schedule = {day: (23, 59, 59) for day in range(7)} if not now: now = time.time() self._reboot_dates = reboot_dates( reboot_schedule, start_date=datetime.date.fromtimestamp(now) ) self._reboot_buckets = [] self._reboot_last = now self.tick(now) def _find_bucket(self, timestamp): """Try to find bucket with given timestamp. """ for bucket in self._reboot_buckets: if bucket.timestamp == timestamp: return bucket return None def add(self, server, timestamp=None): """Add server. """ bucket = None if timestamp: bucket = self._find_bucket(timestamp) # servers with larger than max lifetime should be rebooted at # the next opportunity if (self._reboot_buckets[0].timestamp > server.up_since + DEFAULT_SERVER_UPTIME): bucket = self._reboot_buckets[0] if not bucket: bucket = min(reversed(self._reboot_buckets), key=lambda b: b.cost(server)) bucket.add(server) def remove(self, server): """Remove server. """ for bucket in self._reboot_buckets: bucket.remove(server) def tick(self, now): """Do per-tick-bookkeeping. """ while self._reboot_last <= now + DEFAULT_SERVER_UPTIME: bucket = RebootBucket(next(self._reboot_dates)) self._reboot_buckets.append(bucket) self._reboot_last = bucket.timestamp while self._reboot_buckets[0].timestamp < now: self._reboot_buckets.pop(0) class PartitionDict(dict): """Dict that creates partitions on demand. We use this instead of collections.defaultdict so that we can provide the new partition with its label, to be propagated to its allocations. """ def __missing__(self, label): """Create a new partition, passing the label to its constructor. 
""" self[label] = Partition(label=label) return self[label] # pylint: disable=invalid-name def reboot_dates(schedule, start_date=None): """Generate list of valid reboot dates. """ date = datetime.date.today() if start_date: date = start_date while True: weekday = date.weekday() if weekday in schedule: h, m, s = schedule[weekday] yield time.mktime((date.year, date.month, date.day, h, m, s, 0, 0, 0)) date += datetime.timedelta(days=1) class RebootBucket: """Bucket of servers to be rebooted at the same time. """ __slots__ = ( 'timestamp', 'servers', ) def __init__(self, timestamp): self.timestamp = timestamp self.servers = [] def add(self, server): """Add server to this bucket. """ self.servers.append(server) server.valid_until = self.timestamp _LOGGER.info('Setting valid until on server: %s %s', server.name, server.valid_until) def remove(self, server): """Remove server from this bucket. """ try: self.servers.remove(server) except ValueError: pass def cost(self, server): """The cost of adding server to this bucket. """ if self.timestamp > server.up_since + DEFAULT_SERVER_UPTIME: return float('inf') if self.timestamp < server.up_since + MIN_SERVER_UPTIME: return float('inf') return len(self.servers) class PlacementFeasibilityTracker: """Tracks similar apps placement failures.""" def __init__(self): self.recorder = dict() def feasible(self, app): """Checks if it is feasible to satisfy demand.""" constraints, demand = app.shape() if constraints in self.recorder: # If demand is >= than recorded failure, placement is not feasible. if _all_ge(demand, self.recorder[constraints]): return False return True def adjust(self, app): """Adjust info about failed placement.""" constraints, demand = app.shape() if constraints not in self.recorder: self.recorder[constraints] = demand else: if _all_le(demand, self.recorder[constraints]): self.recorder[constraints] = demand class Cell(Bucket): """Top level node. """ __slots__ = ( 'partitions', 'next_event_at', 'apps', 'identity_groups', ) def __init__(self, name): super(Cell, self).__init__(name, traits=0, level='cell') self.partitions = PartitionDict() self.apps = dict() self.identity_groups = collections.defaultdict(IdentityGroup) self.next_event_at = np.inf def add_app(self, allocation, app): """Adds application to the scheduled list. """ assert allocation is not None if app.allocation: app.allocation.remove(app.name) allocation.add(app) self.apps[app.name] = app if app.identity_group: app.identity_group_ref = self.identity_groups[app.identity_group] def remove_app(self, appname): """Remove app from scheduled list. """ if appname not in self.apps: return app = self.apps[appname] servers = self.members() if app.server in servers: servers[app.server].remove(app.name) if app.allocation: app.allocation.remove(app.name) app.release_identity() del self.apps[appname] def configure_identity_group(self, name, count): """Add identity group to the cell. """ if name not in self.identity_groups: self.identity_groups[name] = IdentityGroup(count) else: self.identity_groups[name].adjust(count) def remove_identity_group(self, name): """Remove identity group. """ ident_group = self.identity_groups.get(name) if ident_group: in_use = False for app in six.itervalues(self.apps): if app.identity_group_ref == ident_group: ident_group.adjust(0) in_use = True break if not in_use: del self.identity_groups[name] def _fix_invalid_placements(self, queue, servers): """If app is placed on non-existent server, set server to None. 
""" for app in queue: if app.server and app.server not in servers: app.server = None app.evicted = True app.release_identity() def _record_rank_and_util(self, queue): """Set final rank and utilization for all apps in the queue. """ for item in queue: rank = item[0] util = item[1] app = item[-1] app.final_rank = rank app.final_util = util def _fix_invalid_identities(self, queue, servers): """Check that app identity is valid for given identity group. """ for app in queue: if app.identity is not None and app.identity_group_ref is not None: # Can happen if identity group was adjusted to lower count. if app.identity >= app.identity_group_ref.count: # Can't release identity as it is invalid. _LOGGER.info('Identity exceeds limit: %s - %s, limit %s', app.name, app.identity, app.identity_group_ref.count) app.identity = None # Invalidate any existing placement. if app.server: servers[app.server].remove(app.name) def _handle_inactive_servers(self, servers): """Migrate apps from inactive servers. """ self.next_event_at = np.inf for server in six.itervalues(servers): state, since = server.get_state() to_be_moved = [] if state == State.down: _LOGGER.debug('Server state is down: %s', server.name) for name, app in six.iteritems(server.apps): if app.data_retention_timeout is None: expires_at = 0 else: expires_at = since + app.data_retention_timeout if expires_at <= time.time(): _LOGGER.debug('Expired placement: %s', name) app.release_identity() to_be_moved.append(name) else: _LOGGER.debug('Keep placement: %s until %s', name, expires_at) self.next_event_at = min(expires_at, self.next_event_at) elif state == State.frozen: _LOGGER.debug('Server state is frozen: %s', server.name) to_be_moved = [ name for name, app in six.iteritems(server.apps) if app.unschedule ] for name in to_be_moved: server.remove(name) def _find_placements(self, queue, servers): """Run the queue and find placements. """ # TODO: refactor to get rid of warnings. # # pylint: disable=too-many-branches,too-many-statements # # At this point, if app.server is defined, it points to attached # server. evicted = dict() reversed_queue = queue[::-1] placement_tracker = PlacementFeasibilityTracker() for app in queue: _LOGGER.debug('scheduling %s', app.name) if app.final_rank == _UNPLACED_RANK: if app.server: assert app.server in servers assert app.has_identity() servers[app.server].remove(app.name) app.release_identity() continue restore = {} if app.renew: assert app.server assert app.has_identity() assert app.server in servers server = servers[app.server] if not server.renew(app): # Save information that will be used to restore placement # in case renewal fails. _LOGGER.debug('Cannot renew app %s on server %s', app.name, app.server) restore['server'] = server restore['placement_expiry'] = app.placement_expiry server.remove(app.name) # At this point app was either renewed on the same server, or # temporarily removed from server if renew failed. # # If placement will be found, renew should remain False. If # placement will not be found, renew will be set to True when # placement is restored to the server it was running. app.renew = False if app.server: assert app.server in servers assert app.has_identity() continue assert app.server is None if not app.acquire_identity(): _LOGGER.info('Unable to acquire identity: %s, %s', app.name, app.identity_group) continue # If app was evicted before, try to restore to the same node. 
if app in evicted: assert app.has_identity() evicted_from, app_expiry = evicted[app] del evicted[app] if evicted_from.restore(app, app_expiry): app.evicted = False continue assert app.server is None if app.schedule_once and app.evicted: continue # Check if placement is feasible. if not placement_tracker.feasible(app): _LOGGER.info( 'Placement not feasible: %s %r', app.name, app.shape() ) continue if not self.put(app): # There is not enough capacity, from the end of the queue, # evict apps, freeing capacity. for evicted_app in reversed_queue: # We reached the app we can't place if evicted_app == app: break # The app is not yet placed, skip if not evicted_app.server: continue assert evicted_app.server in servers evicted_app_server = servers[evicted_app.server] # Do not consider servers that are not up. if evicted_app_server.state is not State.up: continue evicted[evicted_app] = (evicted_app_server, evicted_app.placement_expiry) evicted_app_server.remove(evicted_app.name) # TODO: we need to check affinity limit constraints on # each level, all the way to the top. if evicted_app_server.put(app): break # Placement failed. if not app.server: # If renewal attempt failed, restore previous placement and # expiry date. if restore: restore['server'].restore(app, restore['placement_expiry']) app.renew = True else: app.release_identity() placement_tracker.adjust(app) def schedule_alloc(self, allocation, servers): """Run the scheduler for given allocation. """ begin = time.time() size = self.size(allocation.label) util_queue = list(allocation.utilization_queue(size)) self._record_rank_and_util(util_queue) queue = [item[-1] for item in util_queue] self._find_placements(queue, servers) _LOGGER.info('Scheduled %s (%d) apps in %r', allocation.label, len(queue), time.time() - begin) def schedule(self): """Run the scheduler. """ begin = time.time() all_apps = [] for label, partition in six.iteritems(self.partitions): allocation = partition.allocation all_apps.extend(allocation.all_apps()) before = [(app.name, app.server, app.placement_expiry) for app in all_apps] servers = self.members() self._fix_invalid_placements(six.viewvalues(self.apps), servers) self._handle_inactive_servers(servers) self._fix_invalid_identities(six.viewvalues(self.apps), servers) for label, partition in six.iteritems(self.partitions): allocation = partition.allocation allocation.label = label self.schedule_alloc(allocation, servers) after = [(app.server, app.placement_expiry) for app in all_apps] placement = [ tuple(itertools.chain(b, a)) for b, a in six.moves.zip(before, after) ] for appname, s_before, exp_before, s_after, exp_after in placement: if s_before != s_after: _LOGGER.info('New placement: %s - %s => %s', appname, s_before, s_after) else: if exp_before != exp_after: _LOGGER.info('Renewed: %s [%s] - %s => %s', appname, s_before, exp_before, exp_after) _LOGGER.info('Total scheduler time for %s apps: %r (sec)', len(all_apps), time.time() - begin) return placement def resolve_reboot_conflicts(self): """Adjust server exipiration time to avoid conflicts. """ pass def dumps(cell): """Serializes cell to string. """ del cell return '' def loads(data): """Loads scheduler from string. """ del data assert False, 'not implemented.'
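The ordering produced by Allocation.utilization_queue hinges on the utilization function defined near the top of this module; here is a standalone sketch of that ordering (app names and numbers are made up):

import numpy as np


def utilization(demand, allocated, available):
    # Same formula as the scheduler module above.
    return np.max(np.subtract(demand, allocated) / available)


reserved = np.array([10.0, 10.0])
available = reserved + np.finfo(float).eps
acc_demand = np.zeros(2)
for name, demand in [('app1', np.array([2.0, 1.0])),
                     ('app2', np.array([6.0, 2.0]))]:
    acc_demand = acc_demand + demand
    # Utilization grows monotonically as demand accumulates down the queue,
    # which is the invariant priv_utilization_queue relies on.
    print(name, utilization(acc_demand, reserved, available))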
30.016648
79
0.578203
6,203
54,090
4.876028
0.102047
0.013489
0.01058
0.00972
0.268796
0.166303
0.112379
0.090888
0.0772
0.067315
0
0.003823
0.332612
54,090
1,801
80
30.033315
0.83404
0.198743
0
0.269159
0
0
0.039318
0.00052
0
0
0
0.00111
0.026168
1
0.124299
false
0.003738
0.014953
0.000935
0.253271
0.000935
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c758c753c3644ae1a4c381597cfe0cc82c7e378b
1,260
py
Python
banners/bannerRan.py
gothyyy/AIDungeon
c198371c34d914e9d996559ef850c87a76f572c4
[ "MIT" ]
1
2019-12-30T21:45:06.000Z
2019-12-30T21:45:06.000Z
banners/bannerRan.py
gothyyy/AIDungeon
c198371c34d914e9d996559ef850c87a76f572c4
[ "MIT" ]
null
null
null
banners/bannerRan.py
gothyyy/AIDungeon
c198371c34d914e9d996559ef850c87a76f572c4
[ "MIT" ]
null
null
null
import random
import sys
import time
import json
import os
import warnings
import numpy as np
import glob  # os was already imported above; dropped the duplicate

stat_mini = 1
stat_max = 0
listBanners = []

# HOW TO USE IT:
# 1 copy the opening.txt
# 2 remove the graphic (but do keep top logo for consistency)
# 3 add ASCII art that is 78 or less characters in width
# 4 save txt file under a complete new name


class bannerRan:
    def __init__(self):
        banner_number = load_banner()  # insert function to get random
        self.banner_number = banner_number


def load_banner():
    global stat_max
    global stat_mini
    global listBanners
    hey = scanBanners()  # load text and get proper numbers
    choose_between = r(stat_mini, stat_max)
    x = random.choice(listBanners)
    return x


def r(x, y):  # random, picks between X and Y
    return int(str(random.randint(x, y)))


def scanBanners():
    global stat_max
    global listBanners
    dir_path = os.path.dirname(os.path.realpath(__file__))  # directory of banners path
    # os.chdir("")
    i = 0
    for file in glob.glob("banners/*.txt"):
        i += 1
        listBanners.append(file)
        # print(str(i), file)
    stat_max = i
    x = dir_path
    return x
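A condensed sketch of what the module accomplishes, for comparison (same glob pattern; the empty-directory guard is an addition):

import glob
import random


def random_banner(pattern="banners/*.txt"):
    banners = glob.glob(pattern)
    return random.choice(banners) if banners else None


print(random_banner())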
14.823529
86
0.640476
183
1,260
4.278689
0.52459
0.0447
0.040868
0.048531
0
0
0
0
0
0
0
0.011074
0.283333
1,260
84
87
15
0.856035
0.269841
0
0.166667
0
0
0.014301
0
0
0
0
0
0
1
0.111111
false
0
0.222222
0.027778
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c758e049e83a8786ae62f5c9ab2545ec4624de3e
511
py
Python
BondMarket/app/theme_lib.py
Meith0717/BondMarket
83d99bd5930758e73b4fe74a92e706c7bc0eadb6
[ "Apache-2.0" ]
null
null
null
BondMarket/app/theme_lib.py
Meith0717/BondMarket
83d99bd5930758e73b4fe74a92e706c7bc0eadb6
[ "Apache-2.0" ]
null
null
null
BondMarket/app/theme_lib.py
Meith0717/BondMarket
83d99bd5930758e73b4fe74a92e706c7bc0eadb6
[ "Apache-2.0" ]
null
null
null
from dataclasses import dataclass
from typing import Optional


@dataclass
class theme:
    name: str
    bg_color: Optional[str]  # LIGHT passes None, so plain `str` was inaccurate
    fg_color: str
    lb_color: str
    ttk_theme: str


LIGHT = theme(
    name='LIGHT',
    bg_color=None,
    fg_color='black',
    lb_color='#f0f0f0',
    ttk_theme='xpnative'
)

DARK = theme(
    name='DARK',
    bg_color='#424242',
    fg_color='white',
    lb_color='#424242',
    ttk_theme='black'
)
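Hypothetical usage of the palettes above (the widget code is illustrative; the module itself only defines the data):

import tkinter as tk

root = tk.Tk()
chosen = DARK  # or LIGHT, from the module above
if chosen.bg_color:
    root.configure(bg=chosen.bg_color)
label = tk.Label(root, text=chosen.name,
                 fg=chosen.fg_color, bg=chosen.lb_color)
label.pack()
root.mainloop()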
19.653846
35
0.485323
53
511
4.45283
0.396226
0.114407
0
0
0
0
0
0
0
0
0
0.05
0.412916
511
25
36
20.44
0.736667
0
0
0
0
0
0.109054
0
0
0
0
0
0
1
0
false
0
0.045455
0
0.318182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c7592054e40573b08b4d8a7a1efd9326b5695f4f
3,877
py
Python
run.py
rimijoker/CA-MTL
068e25e0860a8ec81462018126eace4c004bacd4
[ "MIT" ]
1
2021-08-03T03:54:02.000Z
2021-08-03T03:54:02.000Z
run.py
rimijoker/CA-MTL
068e25e0860a8ec81462018126eace4c004bacd4
[ "MIT" ]
null
null
null
run.py
rimijoker/CA-MTL
068e25e0860a8ec81462018126eace4c004bacd4
[ "MIT" ]
1
2021-07-31T09:44:00.000Z
2021-07-31T09:44:00.000Z
import os
import sys
import re
import json
import logging

import torch
from transformers import (
    HfArgumentParser,
    set_seed,
    AutoTokenizer,
    AutoConfig,
    EvalPrediction,
)

from src.model.ca_mtl import CaMtl, CaMtlArguments
from src.utils.misc import MultiTaskDataArguments, Split
from src.mtl_trainer import MultiTaskTrainer, MultiTaskTrainingArguments
from src.data.mtl_dataset import MultiTaskDataset
from src.data.task_dataset import TaskDataset

logger = logging.getLogger(__name__)


def setup_logging(training_args):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )


def parse_cmd_args():
    parser = HfArgumentParser(
        (
            CaMtlArguments,
            MultiTaskDataArguments,
            MultiTaskTrainingArguments,
        )
    )
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1])
        )
    else:
        (
            model_args,
            data_args,
            training_args,
        ) = parser.parse_args_into_dataclasses()

    logger.info("Training/evaluation parameters %s", training_args)
    return model_args, data_args, training_args


def create_eval_datasets(mode, data_args, tokenizer):
    eval_datasets = {}
    for task_id, task_name in enumerate(data_args.tasks):
        eval_datasets[task_name] = TaskDataset(
            task_name, task_id, data_args, tokenizer, mode=mode
        )
        if task_name == "mnli":
            # Loop to handle MNLI double evaluation (matched, mis-matched)
            eval_datasets["mnli-mm"] = TaskDataset(
                "mnli-mm", task_id, data_args, tokenizer, mode=mode
            )
    return eval_datasets


def main():
    model_args, data_args, training_args = parse_cmd_args()
    setup_logging(training_args)
    set_seed(training_args.seed)

    config = AutoConfig.from_pretrained(
        CaMtl.get_base_model(model_args.model_name_or_path),
    )
    model = CaMtl.from_pretrained(
        CaMtl.get_base_model(model_args.model_name_or_path),
        model_args,
        data_args,
        config=config)

    model.freeze_encoder_layers(model_args)
    logger.info(model)

    tokenizer = AutoTokenizer.from_pretrained(
        CaMtl.get_base_model(model_args.model_name_or_path),
    )

    logger.info("Training tasks: %s", ", ".join([t for t in data_args.tasks]))

    trainer = MultiTaskTrainer(
        tokenizer,
        data_args,
        model=model,
        args=training_args,
        train_dataset=MultiTaskDataset(data_args, tokenizer, limit_length=50)
        if training_args.do_train
        else None,
        eval_datasets=create_eval_datasets(Split.dev, data_args, tokenizer)
        if training_args.do_eval or training_args.evaluate_during_training
        else None,
        test_datasets=create_eval_datasets(Split.test, data_args, tokenizer)
        if training_args.do_predict
        else None,
    )

    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path
            if os.path.isdir(model_args.model_name_or_path)
            else None
        )

    if training_args.do_eval:
        trainer.evaluate()

    if training_args.do_predict:
        trainer.predict()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
26.923611
98
0.660562
474
3,877
5.105485
0.280591
0.109091
0.040496
0.039669
0.273967
0.221901
0.158264
0.105372
0.072314
0.072314
0
0.004118
0.248388
3,877
143
99
27.111888
0.826356
0.020892
0
0.117117
0
0.009009
0.063802
0
0
0
0
0
0
1
0.045045
false
0
0.108108
0
0.171171
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c75af988694e7b9961b260a9f014fab177797bfa
1,033
py
Python
examples/readWebsocket.py
uadlq/PhyPiDAQ-PiOS11
fc6060551be2cc0143a157081341bf3c338d9fbd
[ "BSD-2-Clause" ]
null
null
null
examples/readWebsocket.py
uadlq/PhyPiDAQ-PiOS11
fc6060551be2cc0143a157081341bf3c338d9fbd
[ "BSD-2-Clause" ]
null
null
null
examples/readWebsocket.py
uadlq/PhyPiDAQ-PiOS11
fc6060551be2cc0143a157081341bf3c338d9fbd
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3
"""Read data in CSV format from a websocket."""

import sys
import asyncio
import websockets

# read url from command line
if len(sys.argv) >= 2:
    uri = sys.argv[1]
else:
    # host url and port
    uri = "ws://localhost:8314"

print("*==* ", sys.argv[0], " reading data from url ", uri)


async def read_ws():
    """Asynchronous read from the websocket."""
    async with websockets.connect(uri, ping_interval=None) as websocket:
        # test connection
        await websocket.send("req_connect")
        answ = await websocket.recv()
        if answ == "ack_connect":
            print("** connected to websocket ", uri)
        # get data
        await websocket.send("getData")
        while True:
            inp = await websocket.recv()
            if inp == '\n':
                # empty record, end
                print("empty input - closing")
                sys.exit(0)
            else:
                print('read: %s ' % inp, end='')


# run web client
asyncio.get_event_loop().run_until_complete(read_ws())
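For local testing, a minimal counterpart server can speak the same tiny protocol. This is a sketch, not part of PhyPiDAQ; it assumes the `websockets` package with the v10+ handler signature (a single websocket argument).

# Sketch of a matching test server for readWebsocket.py (assumption: websockets >= 10).
import asyncio
import websockets

async def handler(websocket):
    async for msg in websocket:
        if msg == "req_connect":
            await websocket.send("ack_connect")
        elif msg == "getData":
            # send one CSV record, then the empty record that ends the client
            await websocket.send("1.0, 2.0, 3.0")
            await websocket.send("\n")

async def main():
    async with websockets.serve(handler, "localhost", 8314):
        await asyncio.Future()  # run forever

asyncio.run(main())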
25.195122
72
0.580833
130
1,033
4.546154
0.576923
0.094755
0.060914
0.067682
0
0
0
0
0
0
0
0.012295
0.291384
1,033
40
73
25.825
0.795082
0.157793
0
0.086957
0
0
0.160934
0
0
0
0
0
0
1
0
false
0
0.130435
0
0.130435
0.173913
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c75b6da97a2671884ced55ad3cbef590baf2e5c6
2,187
py
Python
settings/__init__.py
arcana261/python-grpc-boilerplate
dd20767ad5540a49e1db802ce578c7b8e416ccbb
[ "Unlicense" ]
null
null
null
settings/__init__.py
arcana261/python-grpc-boilerplate
dd20767ad5540a49e1db802ce578c7b8e416ccbb
[ "Unlicense" ]
null
null
null
settings/__init__.py
arcana261/python-grpc-boilerplate
dd20767ad5540a49e1db802ce578c7b8e416ccbb
[ "Unlicense" ]
null
null
null
import os
import sys
import itertools
import json

_NONE = object()


class SettingManager:
    _sentry = object()

    def __init__(self):
        self.env = os.getenv('ENV', 'prd')

        try:
            self._default = __import__('settings.default', fromlist=['*'])
        except ModuleNotFoundError:
            self._default = object()

        try:
            self._env = __import__('settings.{}'.format(self.env), fromlist=['*'])
        except ModuleNotFoundError:
            self._env = object()

        self._loaded = []

    def load(self, filename, fmt='json'):
        filename = os.path.abspath(filename)
        if fmt == 'json':
            with open(filename) as f:
                self._loaded.append((filename, json.load(f)))

    def unload(self, filename):
        filename = os.path.abspath(filename)
        self._loaded = [(f, v) for f, v in self._loaded if f != filename]

    def __getattr__(self, item):
        result = SettingManager._sentry

        for _, values in self._loaded:
            if item in values:
                result = values[item]

        result = os.getenv(item, result)

        if result is SettingManager._sentry:
            result = getattr(self._env, item,
                             getattr(self._default, item, SettingManager._sentry))

        if result is SettingManager._sentry:
            raise AttributeError

        return result

    def __contains__(self, item):
        try:
            self.__getattr__(item)
            return True
        except AttributeError:
            return False

    def get(self, item, default=_NONE):
        try:
            return self.__getattr__(item)
        except AttributeError:
            if default is not _NONE:
                return default
            raise AttributeError

    def __iter__(self):
        chained = itertools.chain(
            getattr(self._default, '__dict__', dict()).keys(),
            getattr(self._env, '__dict__', dict()).keys())

        for _, values in self._loaded:
            chained = itertools.chain(chained, values.keys())

        return iter(filter(lambda x: not x.startswith('_'), set(chained)))


sys.modules[__name__] = SettingManager()
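Because the module replaces itself in `sys.modules` with a `SettingManager` instance, client code simply imports `settings` and reads attributes. A hypothetical sketch (the file name and keys are illustrative only); note the lookup order in `__getattr__`: environment variables override loaded files, which override the `settings/<env>.py` and `settings/default.py` module defaults.

# Hypothetical client code for the settings package.
import settings

settings.load('local.json')                       # illustrative overrides file
db_host = settings.get('DB_HOST', 'localhost')    # env var wins if set
if 'DEBUG' in settings:
    print('debug mode:', settings.DEBUG)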
27.683544
99
0.577503
227
2,187
5.259912
0.281938
0.035176
0.030151
0.061977
0.134003
0
0
0
0
0
0
0
0.313672
2,187
78
100
28.038462
0.79547
0
0
0.280702
0
0
0.027435
0
0
0
0
0
0
1
0.122807
false
0
0.105263
0
0.368421
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c75d41f3ecd90250dc9544657aba89378f5765d0
2,150
py
Python
services/UserService.py
erginbalta/FarmChain
a542d19212f176b7b5d12806078459da105e5afa
[ "Apache-2.0" ]
1
2021-01-16T14:38:21.000Z
2021-01-16T14:38:21.000Z
services/UserService.py
erginbalta/FarmChain
a542d19212f176b7b5d12806078459da105e5afa
[ "Apache-2.0" ]
null
null
null
services/UserService.py
erginbalta/FarmChain
a542d19212f176b7b5d12806078459da105e5afa
[ "Apache-2.0" ]
1
2020-07-23T04:00:07.000Z
2020-07-23T04:00:07.000Z
import mysql.connector
import socket
from contextlib import closing
import json
import random

packetType = ["INF", "TRN", "USR"]

database = mysql.connector.connect(
    host="localhost",
    user="root",
    port="3307",
    passwd="ergin00000",
    database="farmchain"
)


def userIdCreator():
    with open("/datas/userInformation.json", 'r') as f:
        user = json.load(f)
        numericId = len(user) + 1
    return str(packetType[2]) + str(numericId)


def transactionIdCreator():
    idKey = packetType[1]
    numericId = random.randint(10000, 99999)
    return idKey + str(numericId)


def getUserConnectionInfo():
    hst = socket.gethostname()
    usrHost = socket.gethostbyname(hst)
    usrPort = findFreePort()
    return [usrHost, usrPort]


def findFreePort():
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.bind(('', 0))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s.getsockname()[1]


def checkOnlineMiners():
    mycursor = database.cursor()
    sql = "select * from miners where status = 1;"
    mycursor.execute(sql)
    result = mycursor.fetchall()
    return result


def minerInfo():
    # fetchall() returns a list of rows; take the first online miner and read
    # its host and port columns. (The original indexed the row list itself,
    # which yielded whole rows instead of fields.)
    miner = checkOnlineMiners()[0]
    host = miner[1]
    port = miner[2]
    return [host, port]


def userInfoPacket(password, name, surname, company, status):
    info = getUserConnectionInfo()
    userId = userIdCreator()
    name = str(name).lower()
    surname = str(surname).lower()
    company = str(company).lower()
    status = str(status).lower()
    packet = [packetType[0],
              [userId, password, name, surname, company, status],
              info[0], info[1]]
    return packet


def transactionPacketCreator(productId, productName, productNumber,
                             fromPlace, toPlace, date):
    info = getUserConnectionInfo()
    transactionId = transactionIdCreator()
    productName = str(productName).lower()
    fromPlace = str(fromPlace).lower()
    toPlace = str(toPlace).lower()
    packet = [packetType[1],
              [transactionId, productId, productName, productNumber,
               fromPlace, toPlace, date],
              info[0], info[1]]
    return packet
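A small illustrative note on `findFreePort()`: binding port 0 asks the OS to assign any unused port, which the function then reports. A sketch, independent of the database parts of this module:

# Illustrative only: let the OS pick a free TCP port.
port = findFreePort()
print('OS-assigned free port:', port)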
26.875
119
0.676744
239
2,150
6.07113
0.39749
0.02481
0.02481
0.027567
0.1847
0.152998
0.078567
0
0
0
0
0.020713
0.191628
2,150
79
120
27.21519
0.814154
0
0
0.090909
0
0
0.051724
0.012582
0
0
0
0
0
1
0.121212
false
0.045455
0.075758
0
0.318182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c76014b2a087d9f2456ffc8e8847fb9b397481a4
8,148
py
Python
sdcc2elf.py
Vector35/llil_transpiler
6f6f368d34cb872460ad1634ddcbc4207276feb6
[ "MIT" ]
14
2019-08-23T13:49:07.000Z
2021-12-24T20:09:57.000Z
sdcc2elf.py
Vector35/llil_transpiler
6f6f368d34cb872460ad1634ddcbc4207276feb6
[ "MIT" ]
null
null
null
sdcc2elf.py
Vector35/llil_transpiler
6f6f368d34cb872460ad1634ddcbc4207276feb6
[ "MIT" ]
1
2021-12-24T20:10:00.000Z
2021-12-24T20:10:00.000Z
#!/usr/bin/env python
#
# convert SDCC .rel files to 32-bit ELF relocatable
#
# resulting file is simple:
#
# ------------------------
# ELF header
# ------------------------
# .text section
# .shstrtab section
# .strtab section
# .symtab section
# ------------------------
# NULL elf32_shdr
# .text elf32_shdr
# .shstrtab elf32_shdr
# .symtab elf32_shdr
# .strtab elf32_shdr
# ------------------------

import os
import re
import sys
from struct import pack

#------------------------------------------------------------------------------
# ELF helpers
#------------------------------------------------------------------------------

(PF_X, PF_W, PF_R) = (1, 2, 4)
(SHT_NULL, SHT_PROGBITS, SHT_STRTAB) = (0, 1, 3)

sz_ehdr = 0x34
sz_shdr = 0x28

def align(fp, to=4, pad=b'\x00'):
    while fp.tell() % to:
        fp.write(pad)

#------------------------------------------------------------------------------
# read .map file for symbols
#------------------------------------------------------------------------------

fpath_map = sys.argv[2]
assert fpath_map.endswith('.map')

with open(fpath_map) as fp:
    lines = fp.readlines()

(_CODE_ADDR, _CODE_SZ) = (None, None)
(i_code, i_header) = (None, None)

for (i, line) in enumerate(lines):
    if line.startswith('_CODE'):
        m = re.match(r'^_CODE\s+([A-F0-9]{8})\s+([A-F0-9]{8})', line)
        (addr, size) = map(lambda x: int(x, 16), m.group(1, 2))
        if not i_code:
            i_code = i
            _CODE_ADDR = addr
            _CODE_SZ = size
        else:
            if addr != _CODE_ADDR:
                raise Exception('conflicting code segment addresses')
            if size != _CODE_SZ:
                raise Exception('conflicting code segment sizes')
    if line.startswith('_HEADER0'):
        i_header = i
        break

assert i_code and i_header and i_code < i_header

syms = []
for line in lines[i_code:i_header]:
    m = re.search(r'([A-F0-9]{8})\s+(_\w+)', line)
    if m:
        (addr, symname) = m.group(1, 2)
        print('found %s: %s' % (addr, symname))
        syms.append((symname, int(addr, 16)))
assert syms

print('_CODE [%08X, %08X)' % (_CODE_ADDR, _CODE_ADDR + _CODE_SZ))
print('_CODE symbols from')
for (name, addr) in syms:
    print('%08X: %s' % (addr, name))

#------------------------------------------------------------------------------
# read .ihx file
#------------------------------------------------------------------------------

fpath_ihx = sys.argv[1]
assert fpath_ihx.endswith('.ihx')

code_area = [b'\x00'] * (_CODE_ADDR + _CODE_SZ)

with open(fpath_ihx) as fp:
    for line in fp.readlines():
        m = re.match(r'^:(..)(....)00(.*)(..)', line)
        if m:
            (count, addr, data, csum) = m.group(1, 2, 3, 4)
            count = int(count, 16)
            assert count == len(data) / 2
            addr = int(addr, 16)
            if not (addr >= _CODE_ADDR and addr < (_CODE_ADDR + _CODE_SZ)):
                continue
            print('%08X: ' % addr, end='')
            for i in range(count):
                byte_str = data[2*i] + data[2*i+1]
                print('%s ' % byte_str, end='')
                code_area[addr + i] = pack('B', int(byte_str, 16))
            print('')
            continue
        m = re.match(r'^:00000001FF', line)
        if m:
            break
        raise Exception('got unexpected IHX line: %s' % line)

assert code_area
#print(code_area)

#------------------------------------------------------------------------------
# write ELF
#------------------------------------------------------------------------------

# process symbols, build string table
syms = sorted(syms, key=lambda name_addr: name_addr[1])
func2size = {}
func2stroffs = {}
strtab = b'\x00'
for i in range(len(syms)):
    (name, addr) = syms[i]
    if i == len(syms) - 1:
        func2size[name] = len(code_area) - addr
    else:
        func2size[name] = syms[i+1][1] - addr
    func2stroffs[name] = len(strtab)
    strtab = strtab + name.encode('utf-8') + b'\x00'
    print('%04X: %s size %X' % (addr, name, func2size[name]))

fp = open('tests.elf', 'wb')

# elf32_hdr (placeholder, we'll come back to fill in offsets)
print('elf32_hdr @ %X' % fp.tell())
fp.write(b'\x00' * sz_ehdr)

# .text section contents
o_text = fp.tell()
print('placing .text @ %X' % o_text)
for byte in code_area:
    fp.write(byte)
sz_text = fp.tell() - o_text

# .shstrtab section contents
scn_shstrtab = b'\x00.text\x00.shstrtab\x00.symtab\x00.strtab\x00'
align(fp)
o_shstrtab = fp.tell()
print('placing .shstrtab @ %X' % o_shstrtab)
fp.write(scn_shstrtab)
sz_shstrtab = fp.tell() - o_shstrtab

# .symtab section contents
align(fp)
o_symtab = fp.tell()
print('placing .symtab @ %X' % o_symtab)
for (name, addr) in syms:
    st_name = func2stroffs[name]
    st_value = addr
    st_size = func2size[name]
    st_info = 0x12      # bind:1(GLOBAL) type:2(FUNC)
    st_other = 0
    st_shndx = 0x1      # section header index: 0'th: NULL 1'th: .text
    Elf32_Sym = pack('<IIIBBH',
                     st_name, st_value, st_size, st_info, st_other, st_shndx)
    fp.write(Elf32_Sym)
sz_symtab = fp.tell() - o_symtab

# .strtab section contents
align(fp)
o_strtab = fp.tell()
print('placing .strtab @ %X' % o_strtab)
fp.write(strtab)
sz_strtab = fp.tell() - o_strtab

# null section header (index 0)
align(fp)
o_shdr_null = fp.tell()
print('placing shdr NULL @ %X' % o_shdr_null)
fp.write(b'\x00' * sz_shdr)

# .text section header (index 1)
o_shdr_text = fp.tell()
print('placing shdr .text @ %X' % fp.tell())
sh_name = scn_shstrtab.index(b'.text')
sh_type = 1         # SHT_PROGBITS
sh_flags = 6        # ALLOC|EXECINSTR
sh_addr = 0
sh_offset = o_text
sh_size = sz_text
sh_link = 0
sh_info = 0
sh_addralign = 4
sh_entsize = 0
tmp = pack('<IIIIIIIIII',
           sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size,
           sh_link, sh_info, sh_addralign, sh_entsize)
fp.write(tmp)

# .shstrtab section header (index 2)
o_shdr_shstrtab = fp.tell()
print('placing shdr .shstrtab @ %X' % fp.tell())
sh_name = scn_shstrtab.index(b'.shstrtab')
sh_type = 3         # SHT_STRTAB
sh_flags = 0
sh_addr = 0
sh_offset = o_shstrtab
sh_size = sz_shstrtab
sh_link = 0
sh_info = 0
sh_addralign = 1
sh_entsize = 0
tmp = pack('<IIIIIIIIII',
           sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size,
           sh_link, sh_info, sh_addralign, sh_entsize)
fp.write(tmp)

# .symtab section header (index 3)
o_shdr_symtab = fp.tell()
print('placing shdr .symtab @ %X' % fp.tell())
sh_name = scn_shstrtab.index(b'.symtab')
sh_type = 2         # SHT_SYMTAB
sh_flags = 0
sh_addr = 0
sh_offset = o_symtab
sh_size = sz_symtab
sh_link = 4         # link to scn #4 (find strings in .strtab)
sh_info = 0
sh_addralign = 4
sh_entsize = 0
tmp = pack('<IIIIIIIIII',
           sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size,
           sh_link, sh_info, sh_addralign, sh_entsize)
fp.write(tmp)

# .strtab section header (index 4)
o_shdr_strtab = fp.tell()
print('placing shdr .strtab @ %X' % fp.tell())
sh_name = scn_shstrtab.index(b'.strtab')
sh_type = 3         # SHT_STRTAB
sh_flags = 0
sh_addr = 0
sh_offset = o_strtab
sh_size = sz_strtab
sh_link = 0
sh_info = 0
sh_addralign = 1
sh_entsize = 0
tmp = pack('<IIIIIIIIII',
           sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size,
           sh_link, sh_info, sh_addralign, sh_entsize)
fp.write(tmp)

# seek back, write real elf header
hdr = b'\x7FELF'
hdr += b'\x01'              # e_ident[EI_CLASS] 32-bit
hdr += b'\x01'              # e_ident[EI_DATA] LSB (little-end)
hdr += b'\x01\x00\x00'      # version, osabi, abiversion
hdr += b'\x00' * 7
assert len(hdr) == 16
hdr += pack('<H', 1)            # e_type = ET_REL
hdr += pack('<H', 220)          # e_machine = EM_Z80
hdr += pack('<I', 1)            # e_version = EV_CURRENT
hdr += pack('<I', 0)            # e_entry
hdr += pack('<I', 0)            # e_phoff
hdr += pack('<I', o_shdr_null)  # e_shoff
hdr += pack('<I', 0)            # e_flags
hdr += pack('<H', sz_ehdr)      # e_ehsize
hdr += pack('<H', 0)            # e_phentsize
hdr += pack('<H', 0)            # e_phnum
hdr += pack('<H', sz_shdr)      # e_shentsize
hdr += pack('<H', 5)            # e_shnum
hdr += pack('<H', 2)            # e_shstrndx = index of .shstrtab
assert len(hdr) == sz_ehdr

fp.seek(0, os.SEEK_SET)
fp.write(hdr)

# done!
fp.close()
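A small sanity check for the produced file; this is a sketch under the assumption that tests.elf was just written by the script above. The ELF32 header stores e_shnum at offset 0x30 and e_shstrndx at 0x32, so the five section headers and the .shstrtab index can be read back with struct.

# Hypothetical verification snippet (run after sdcc2elf.py completes).
from struct import unpack

with open('tests.elf', 'rb') as f:
    hdr = f.read(0x34)
assert hdr[:4] == b'\x7fELF'
e_shnum, e_shstrndx = unpack('<HH', hdr[0x30:0x34])
print(e_shnum, e_shstrndx)   # expect: 5 2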
27.714286
88
0.567624
1,200
8,148
3.65
0.179167
0.026027
0.022603
0.036986
0.299315
0.18242
0.171918
0.171918
0.17032
0.137443
0
0.030615
0.202258
8,148
293
89
27.808874
0.643231
0.232327
0
0.299517
0
0.004831
0.117742
0.021025
0
0
0.002426
0
0.038647
1
0.004831
false
0
0.019324
0
0.024155
0.086957
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c760d11b6bcb337986c7f02b8372675729e8a684
3,743
py
Python
eval.py
nikinsta/deep-siamese-text-similarity-on-python-3
80fffd86da1d9f6bc0cb154a9415ff767d944777
[ "MIT" ]
null
null
null
eval.py
nikinsta/deep-siamese-text-similarity-on-python-3
80fffd86da1d9f6bc0cb154a9415ff767d944777
[ "MIT" ]
null
null
null
eval.py
nikinsta/deep-siamese-text-similarity-on-python-3
80fffd86da1d9f6bc0cb154a9415ff767d944777
[ "MIT" ]
null
null
null
#! /usr/bin/env python

import tensorflow as tf
import numpy as np
import os
import time
import datetime
from tensorflow.contrib import learn
from input_helpers import InputHelper

# Parameters
# ==================================================

# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_string("eval_filepath", "match_valid.tsv", "Evaluate on this data (Default: None)")
tf.flags.DEFINE_string("vocab_filepath", "runs/1479874609/checkpoints/vocab", "Load training time vocabulary (Default: None)")
tf.flags.DEFINE_string("model", "runs/1479874609/checkpoints/model-32000", "Load trained model checkpoint (Default: None)")

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")

if FLAGS.eval_filepath is None or FLAGS.vocab_filepath is None or FLAGS.model is None:
    print("Eval or Vocab filepaths are empty.")
    exit()

# load data and map id-transform based on training time vocabulary
inpH = InputHelper()
x1_test, x2_test, y_test = inpH.getTestDataSet(FLAGS.eval_filepath, FLAGS.vocab_filepath, 30)

print("\nEvaluating...\n")

# Evaluation
# ==================================================
checkpoint_file = FLAGS.model
print(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, checkpoint_file)

        # Get the placeholders from the graph by name
        input_x1 = graph.get_operation_by_name("input_x1").outputs[0]
        input_x2 = graph.get_operation_by_name("input_x2").outputs[0]
        input_y = graph.get_operation_by_name("input_y").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name("output/distance").outputs[0]
        accuracy = graph.get_operation_by_name("accuracy/accuracy").outputs[0]
        sim = graph.get_operation_by_name("accuracy/temp_sim").outputs[0]

        #emb = graph.get_operation_by_name("embedding/W").outputs[0]
        #embedded_chars = tf.nn.embedding_lookup(emb,input_x)

        # Generate batches for one epoch
        batches = inpH.batch_iter(list(zip(x1_test, x2_test, y_test)),
                                  2 * FLAGS.batch_size, 1, shuffle=False)

        # Collect the predictions here
        all_predictions = []
        all_d = []
        for db in batches:
            x1_dev_b, x2_dev_b, y_dev_b = zip(*db)
            # Fetch into a separate name: the original rebound `sim` to the
            # fetched array, which broke the fetch list on later iterations.
            batch_predictions, batch_acc, batch_sim = sess.run(
                [predictions, accuracy, sim],
                {input_x1: x1_dev_b, input_x2: x2_dev_b, input_y: y_dev_b,
                 dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])
            print(batch_predictions)
            all_d = np.concatenate([all_d, batch_sim])
            print("DEV acc {}".format(batch_acc))

        for ex in all_predictions:
            print(ex)

        correct_predictions = float(np.mean(all_d == y_test))
        print("Accuracy: {:g}".format(correct_predictions))
42.05618
167
0.696767
509
3,743
4.880157
0.3222
0.021739
0.05475
0.061192
0.124396
0.096618
0
0
0
0
0
0.018164
0.161635
3,743
88
168
42.534091
0.773423
0.142399
0
0
0
0
0.195931
0.022535
0
0
0
0
0
1
0
false
0
0.135593
0
0.135593
0.169492
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c76173ed74a504071f1116fc3a7dc17a1c832c39
4,626
py
Python
accounts/views.py
nikhiljohn10/django-auth
01d97e8173436c3446f039cfa6472ece3cd9f96a
[ "MIT" ]
null
null
null
accounts/views.py
nikhiljohn10/django-auth
01d97e8173436c3446f039cfa6472ece3cd9f96a
[ "MIT" ]
null
null
null
accounts/views.py
nikhiljohn10/django-auth
01d97e8173436c3446f039cfa6472ece3cd9f96a
[ "MIT" ]
null
null
null
from django.urls import reverse
from django.conf import settings
from django.contrib import messages
from django.shortcuts import render, redirect
from django.core.mail import send_mail
from django.contrib.auth import login, logout, views, authenticate
from django.views.generic.edit import CreateView
from django.contrib.sessions.models import Session
from django.contrib.auth.decorators import login_required, permission_required

from accounts.tools import activater, mailer
from accounts.forms import SignUpForm, LoginForm
from accounts.models import User


@login_required
@permission_required("is_staff", login_url='/dashboard/')
def gmail(request):
    request.session['oauth_state'] = mailer.auth_state
    return redirect(mailer.auth_uri)


@login_required
@permission_required("is_staff", login_url='/dashboard/')
def gmail_verify(request):
    code = request.GET.get('code', '')
    state = request.GET.get('state', '')
    if code and state == request.session['oauth_state']:
        mailer.verify(code)
    return redirect('dash:gmail')


class UserLogin(views.LoginView):
    template_name = 'auth/login.html'
    authentication_form = LoginForm

    def form_valid(self, form):
        user = form.get_user()
        login(self.request, user)
        if not self.request.POST.get('remember_me', None):
            self.request.session.set_expiry(0)
        messages.info(self.request, f"You are now logged in as {user}")
        return redirect(self.get_success_url())


class SignUpView(CreateView):
    form_class = SignUpForm
    template_name = 'auth/signup.html'

    def form_valid(self, form):
        if mailer.activated:
            user = form.save()
            mailer.send_mail(
                "Django Verification Code",
                "Hi " + str(user) + ",\nClick this link to activate: " +
                reverse('auth:verify_email', args=(
                    user, activater.make_token(user))),
                [user.email])
            login(self.request, user)
        else:
            messages.error(self.request,
                           "Gmail is not activated. Contact the site administrator.")
            return redirect('auth:signup')
        return redirect('core:home')


def user_manage_permission(user, username):
    if not user.is_staff:
        if user.username == username:
            return True
    else:
        if user.username != username:
            return True
    return False


@login_required
@permission_required("is_staff", login_url='/dashboard/')
def user_force_logout(request, username):
    user = User.objects.get(username=username)
    sessions = [s.delete() for s in Session.objects.all()
                if s.get_decoded().get('_auth_user_id') == str(user.id)]
    print(sessions)
    return redirect('dash:users')


def user_verify_email(request, username, token):
    user = User.objects.get(username=username)
    if activater.check_token(user, token):
        print(user, "is verified")
        user.email_verified = True
        user.save()
    return redirect('dash:users')


@login_required
def user_disable(request, username):
    if user_manage_permission(request.user, username):
        user = User.objects.get(username=username)
        user.is_active = False
        user.save()
        messages.error(request, 'Profile successfully disabled.')
    else:
        messages.error(
            request, 'You are not allowed to perform this operation.')
    if request.user.is_staff:
        return redirect('dash:users')
    else:
        return redirect('dash:profile')


@login_required
def user_enable(request, username):
    if user_manage_permission(request.user, username):
        user = User.objects.get(username=username)
        user.is_active = True
        user.save()
        messages.success(request, 'Profile successfully enabled.')
    else:
        messages.error(
            request, 'You are not allowed to perform this operation.')
    if request.user.is_staff:
        return redirect('dash:users')
    else:
        return redirect('dash:profile')


@login_required
def user_delete(request, username):
    if user_manage_permission(request.user, username):
        user = User.objects.get(username=username)
        user.delete()
        messages.error(request, 'Profile successfully deleted.')
    else:
        messages.error(
            request, 'You are not allowed to perform this operation.')
    if request.user.is_staff:
        return redirect('dash:users')
    else:
        return redirect('dash:profile')


user_login = UserLogin.as_view()
user_signup = SignUpView.as_view()
user_logout = views.LogoutView.as_view()
31.684932
78
0.671422
562
4,626
5.402135
0.234875
0.059947
0.05336
0.029644
0.409091
0.350461
0.318182
0.304348
0.304348
0.304348
0
0.000278
0.222655
4,626
145
79
31.903448
0.843993
0
0
0.425
0
0
0.141159
0
0
0
0
0
0
1
0.083333
false
0
0.1
0
0.366667
0.016667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c769abd3fe7f81479f81afe9e3156873d7f5b0e2
17,050
py
Python
utils/manisfestManager.py
ovitrac/pizza3
0f4dc6e362fd8665c72ec13328df05f9119dfbc3
[ "MIT" ]
1
2022-02-07T14:10:10.000Z
2022-02-07T14:10:10.000Z
utils/manisfestManager.py
ovitrac/Pizza3
0f4dc6e362fd8665c72ec13328df05f9119dfbc3
[ "MIT" ]
null
null
null
utils/manisfestManager.py
ovitrac/Pizza3
0f4dc6e362fd8665c72ec13328df05f9119dfbc3
[ "MIT" ]
null
null
null
#!/usr/bin/env python
###############################################################################
#
#    manifestManager.py
#
#    Work with online data manifests (creating / syncing / validating)
#
#    Copyright (C) Michael Imelfort
#
###############################################################################
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################

__author__ = "Michael Imelfort"
__copyright__ = "Copyright 2014"
__credits__ = ["Michael Imelfort"]
__license__ = "GPLv3"
__maintainer__ = "Michael Imelfort"
__email__ = "mike@mikeimelfort.com"
__version__ = "0.35"

###############################################################################

__MANIFEST__ = ".dmanifest"

###############################################################################

# system includes
import os
import hashlib
import urllib.request, urllib.error, urllib.parse
import shutil
import errno

# local includes
from fileEntity import FileEntity as FE

###############################################################################


class ManifestManager(object):
    """Use this interface for storing and managing file and paths"""

    def __init__(self, manType=None, timeout=30):
        self.timeout = timeout
        self.myExtensions = [".py", ".sh"]
        self.files = []
        if manType is not None:
            self.type = manType
        else:
            self.type = "generic"

    def createManifest(self, path, manifestName=None):
        """inventory all files in path and create a manifest file"""
        if manifestName is None:
            manifestName = __MANIFEST__

        # make the root file entity
        root_path = os.path.abspath(path)
        root_fe = FE('root', ".", None, "-", 0)
        self.files.append(root_fe)

        # now make all the ones below
        parents = [root_fe]
        dirs, files = self.listdir(path)[:2]
        self.walk(parents, root_path, '', dirs, files, skipFile=manifestName)

        with open(os.path.join(path, manifestName), 'w') as man_fh:
            # print the header
            man_fh.write("#\t::: %s ::: \tPizza3 manifest version %s\n\n" %
                         (self.type, __version__))
            for f in self.files:
                if f.parent is not None:
                    man_fh.write("%s\n" % f)

    def diffManifests(self, localManifestLocation, sourceManifestLocation,
                      localManifestName=None, sourceManifestName=None,
                      printDiffs=False):
        """check for any differences between two manifests

        if remote is true then sourceManifestLocation is a URL
        returns a list of files that need to be updated
        """
        if localManifestName is None:
            localManifestName = __MANIFEST__
        if sourceManifestName is None:
            sourceManifestName = __MANIFEST__

        # get the "type" of the local manifest
        l_type = "generic"
        with open(os.path.join(localManifestLocation, localManifestName)) as l_man:
            for line in l_man:
                if line[0] == "#":
                    l_type = self.getManType(line)
                break

        # load the source manifest
        s_type = "generic"
        source_man = {}
        source = ""
        # first we assume it is remote
        try:
            s_man = urllib.request.urlopen(
                sourceManifestLocation + "/" + sourceManifestName,
                None, self.timeout)
            source = sourceManifestLocation + "/"
        except ValueError:
            # then it is probably a file
            s_man = open(os.path.join(sourceManifestLocation, sourceManifestName))
            source = os.path.join(sourceManifestLocation) + os.path.sep
        except urllib.error.URLError:
            # problems connecting to server, perhaps user is behind a proxy or firewall
            print("Error: failed to connect to server.")
            return (None, None, None, None, None)

        first_line = True
        for line in s_man:
            if first_line:
                first_line = False
                if line[0] == "#":
                    # get the type of the manifest
                    s_type = self.getManType(line)
                    if s_type != l_type:
                        print("Error: type of source manifest (%s) does not match type of local manifest (%s)" % (s_type, l_type))
                        return (None, None, None, None, None)
                else:
                    # no type specified
                    print("Error: type of source manifest is not specified. Is this a valid manifest file?")
                    return (None, None, None, None, None)
                self.type = l_type
            if line[0] != "#":
                fields = line.rstrip().split("\t")
                # set the dict up as {path => [hash, size, seenLocal]}
                source_man[fields[0]] = [fields[1], fields[2], False]

        # keep lists of modifications
        deleted = []
        addedDirs = []
        addedFiles = []
        modified = []

        with open(os.path.join(localManifestLocation, localManifestName)) as l_man:
            for line in l_man:
                if line[0] != "#":
                    fields = line.rstrip().split("\t")
                    try:
                        if source_man[fields[0]][0] != fields[1]:
                            # hashes don't match
                            modified.append(fields[0])
                        # seen this file
                        source_man[fields[0]][2] = True
                    except KeyError:
                        # this file has been deleted from the source manifest
                        deleted.append(fields[0])

        # check for new files
        for f in list(source_man.keys()):
            if source_man[f][2] == False:
                if source_man[f][0] == '-':
                    addedDirs.append(f)
                else:
                    addedFiles.append(f)

        if printDiffs:
            new_size = 0
            modified_size = 0
            for f in addedFiles:
                new_size += int(source_man[f][1])
            for f in modified:
                modified_size += int(source_man[f][1])

            if len(addedFiles) > 0:
                print("#------------------------------------------------------")
                print("# Source contains %d new file(s) (%s)" %
                      (len(addedFiles), self.formatData(new_size)))
                for f in addedFiles:
                    print("\t".join([self.formatData(int(source_man[f][1])), f]))

            if len(addedDirs) > 0:
                print("#------------------------------------------------------")
                print("# Source contains %d new folders(s)" % (len(addedDirs)))
                for f in addedDirs:
                    print(f)

            if len(modified) > 0:
                print("#------------------------------------------------------")
                print("# Source contains %d modified file(s) (%s)" %
                      (len(modified), self.formatData(modified_size)))
                for f in modified:
                    print(f)

            if len(deleted) > 0:
                print("#------------------------------------------------------")
                print("# %d files have been deleted in the source:" % len(deleted))
                for f in deleted:
                    print(f)
        else:
            return (source,
                    [(a, source_man[a]) for a in addedFiles],
                    [(a, source_man[a]) for a in addedDirs],
                    deleted,
                    [(m, source_man[m]) for m in modified])

    def updateManifest(self, localManifestLocation, sourceManifestLocation,
                       localManifestName=None, sourceManifestName=None,
                       prompt=True):
        """Update local files based on remote changes"""
        # get the diffs
        source, added_files, added_dirs, deleted, modified = \
            self.diffManifests(localManifestLocation, sourceManifestLocation,
                               localManifestName, sourceManifestName)
        # bail if the diff failed
        if source is None:
            return False

        # no changes by default
        do_down = False
        if prompt:
            total_size = 0
            for f in added_files:
                total_size += int(f[1][1])
            for f in modified:
                total_size += int(f[1][1])
            if total_size != 0:
                print("****************************************************************")
                print("%d new file(s) to be downloaded from source" % len(added_files))
                print("%d existing file(s) to be updated" % len(modified))
                print("%s will need to be downloaded" % self.formatData(total_size))
                do_down = self.promptUserDownload()
                if not do_down:
                    print("Download aborted")

        update_manifest = False
        if do_down:
            update_manifest = True
            for add in added_dirs:
                # make the dirs first
                full_path = os.path.abspath(os.path.join(localManifestLocation, add[0]))
                self.makeSurePathExists(full_path)
            for add in added_files:
                full_path = os.path.abspath(os.path.join(localManifestLocation, add[0]))
                urllib.request.urlretrieve(source + add[0], full_path)
            for modify in modified:
                full_path = os.path.abspath(os.path.join(localManifestLocation, modify[0]))
                urllib.request.urlretrieve(source + modify[0], full_path)

        if update_manifest:
            print("(re) creating manifest file (please be patient)")
            self.createManifest(localManifestLocation, manifestName=localManifestName)

        return True

    def getManType(self, line):
        """Work out the manifest type from the first line of the file"""
        return line.rstrip().split("##")[1]

    def formatData(self, amount):
        """Pretty print file sizes"""
        if amount < 1024*1024:
            return "%d B" % amount
        elif amount < 1024*1024*1024:
            return "%0.2f MB" % (float(amount)/(1024.*1024.))
        elif amount < 1024*1024*1024*1024:
            return "%0.2f GB" % (float(amount)/(1024.*1024.*1024.))
        elif amount < 1024*1024*1024*1024*1024:
            return "%0.2f TB" % (float(amount)/(1024.*1024.*1024.*1024.))

    #--------------------------------------------------------------------------
    # FS utilities

    def makeSurePathExists(self, path):
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

    def promptUserDownload(self):
        """Check that the user is OK with making changes"""
        input_not_ok = True
        minimal = False
        valid_responses = {'Y': True, 'N': False}
        vrs = ",".join([x.lower() for x in list(valid_responses.keys())])
        while input_not_ok:
            if minimal:
                option = input("Download? (" + vrs + ") : ").upper()
            else:
                option = input("Confirm you want to download this data\n"
                               "Changes *WILL* be permanent\n"
                               "Continue? (" + vrs + ") : ").upper()
            if option in valid_responses:
                print("****************************************************************")
                return valid_responses[option]
            else:
                print("ERROR: unrecognised choice '" + option + "'")
                minimal = True

    def walk(self, parents, full_path, rel_path, dirs, files, skipFile=__MANIFEST__):
        """recursive walk through directory tree"""
        # first do files here
        for f in files:
            if (f != skipFile) and os.path.splitext(f)[1] in self.myExtensions:
                path = os.path.join(full_path, f)
                self.files.append(FE(f,
                                     rel_path,
                                     parents[-1],
                                     self.hashfile(path),
                                     os.path.getsize(path)))
        for d in dirs:
            # the walk will go into these dirs first
            tmp_fe = FE(d, rel_path, parents[-1], "-", 0)
            self.files.append(tmp_fe)
            parents.append(tmp_fe)
            new_full_path = os.path.join(full_path, d)
            new_rel_path = os.path.join(rel_path, d)
            new_dirs, new_files = self.listdir(new_full_path)[:2]
            self.walk(parents, new_full_path, new_rel_path, new_dirs, new_files)
            parents.pop()

    def listdir(self, path):
        """List dirs, files etc in path (one dir deep)"""
        dirs, files, links = [], [], []
        for name in os.listdir(path):
            path_name = os.path.join(path, name)
            if os.path.isdir(path_name):
                dirs.append(name)
            elif os.path.isfile(path_name):
                files.append(name)
            elif os.path.islink(path_name):
                links.append(name)
        return dirs, files, links

    def hashfile(self, fileName, blocksize=65536):
        """Hash a file and return the digest"""
        hasher = hashlib.sha256()
        with open(fileName, "rb") as fh:
            buf = fh.read(blocksize)
            while len(buf) > 0:
                hasher.update(buf.strip())
                buf = fh.read(blocksize)
        return hasher.hexdigest()


###############################################################################
# %% DEBUG
# ===================================================
# main(): for debugging purposes (code called as a script)
# the code is called from here
# ===================================================

if __name__ == '__main__':
    man = ManifestManager()
    man.createManifest("/home/olivi/billy/python", manifestName="Pizza3.manifest")
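A brief usage sketch of the class; the paths and URL below are placeholders, not values from the original file. With printDiffs=True, diffManifests prints a summary instead of returning the change lists.

# Hypothetical usage of ManifestManager.
mm = ManifestManager(manType="Pizza3")
mm.createManifest("/path/to/local/tree")        # writes .dmanifest in the tree
mm.diffManifests("/path/to/local/tree",
                 "http://example.com/remote/tree",
                 printDiffs=True)               # print adds/changes/deletions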
44.285714
130
0.436716
1,545
17,050
4.709385
0.231068
0.018142
0.016493
0.013194
0.195574
0.150082
0.101842
0.061435
0.04508
0.037933
0
0.014996
0.358592
17,050
385
131
44.285714
0.650329
0.187977
0
0.199219
0
0
0.104642
0.032229
0
0
0
0
0
1
0.042969
false
0
0.027344
0
0.132813
0.097656
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c76c70c2e310ab6dd7d23270c230a7b48cbff5cf
729
py
Python
temperature.py
rhwlr/TEST_PRELIM_SKILLS_EXAM
a776ab7631fac8bed1aea0470918e6250752ce8e
[ "MIT" ]
null
null
null
temperature.py
rhwlr/TEST_PRELIM_SKILLS_EXAM
a776ab7631fac8bed1aea0470918e6250752ce8e
[ "MIT" ]
null
null
null
temperature.py
rhwlr/TEST_PRELIM_SKILLS_EXAM
a776ab7631fac8bed1aea0470918e6250752ce8e
[ "MIT" ]
null
null
null
class Temperature:
    def __init__(self, kelvin=None, celsius=None, fahrenheit=None):
        # Compare against None so that legitimate zero values
        # (e.g. celsius=0) are not discarded as missing arguments.
        values = [x for x in [kelvin, celsius, fahrenheit] if x is not None]
        if len(values) < 1:
            raise ValueError('Need argument')
        if len(values) > 1:
            raise ValueError('Only one argument')

        if celsius is not None:
            self.kelvin = celsius + 273.15
        elif fahrenheit is not None:
            self.kelvin = (fahrenheit - 32) * 5 / 9 + 273.15
        else:
            self.kelvin = kelvin

        if self.kelvin < 0:
            raise ValueError('Temperature in Kelvin cannot be negative')

    def __str__(self):
        return f'Temperature = {self.kelvin} Kelvins'
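A short usage sketch showing the three constructor paths and the validation behaviour; expected results are given as comments.

# Usage sketch for Temperature.
print(Temperature(celsius=25))               # Temperature = 298.15 Kelvins
print(Temperature(fahrenheit=212).kelvin)    # 373.15
print(Temperature(kelvin=0))                 # absolute zero is allowed
# Temperature()                      -> ValueError('Need argument')
# Temperature(kelvin=1, celsius=1)   -> ValueError('Only one argument')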
29.16
72
0.562414
86
729
4.674419
0.453488
0.149254
0.054726
0.059701
0.228856
0.134328
0
0
0
0
0
0.035639
0.345679
729
25
73
29.16
0.807128
0
0
0
0
0
0.143836
0
0
0
0
0
0
1
0.117647
false
0
0
0.058824
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c76ec369645b0f101be129ffedbb1f290be5f94b
510
py
Python
tests/test_ping.py
d-wysocki/flask-resty
2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19
[ "MIT" ]
86
2015-11-25T07:09:10.000Z
2022-02-15T19:40:30.000Z
tests/test_ping.py
d-wysocki/flask-resty
2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19
[ "MIT" ]
180
2015-11-24T23:02:53.000Z
2022-03-31T04:05:38.000Z
tests/test_ping.py
d-wysocki/flask-resty
2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19
[ "MIT" ]
17
2015-12-28T11:05:47.000Z
2022-03-15T12:10:02.000Z
import pytest

from flask_resty import Api
from flask_resty.testing import assert_response

# -----------------------------------------------------------------------------


@pytest.fixture(autouse=True)
def routes(app):
    api = Api(app, "/api")
    api.add_ping("/ping")


# -----------------------------------------------------------------------------


def test_ping(base_client):
    response = base_client.get("/ping")
    assert_response(response, 200)
    assert response.get_data(as_text=True) == ""
23.181818
79
0.490196
50
510
4.8
0.5
0.175
0.116667
0
0
0
0
0
0
0
0
0.006682
0.119608
510
21
80
24.285714
0.52784
0.303922
0
0
0
0
0.039773
0
0
0
0
0
0.272727
1
0.181818
false
0
0.272727
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c76f8dffc967eba49049f65ff4df98887b137c0d
1,476
py
Python
tests/test_vetters.py
pllim/exovetter
75c6ca609331c04a55c0a6b4c858be71a4dfdfea
[ "MIT", "BSD-3-Clause" ]
null
null
null
tests/test_vetters.py
pllim/exovetter
75c6ca609331c04a55c0a6b4c858be71a4dfdfea
[ "MIT", "BSD-3-Clause" ]
null
null
null
tests/test_vetters.py
pllim/exovetter
75c6ca609331c04a55c0a6b4c858be71a4dfdfea
[ "MIT", "BSD-3-Clause" ]
null
null
null
from numpy.testing import assert_allclose
from astropy.io import ascii
from astropy import units as u
import lightkurve as lk

from exovetter import const as exo_const
from exovetter import vetters
from exovetter.tce import Tce
from astropy.utils.data import get_pkg_data_filename


def get_wasp18_tce():
    tce = Tce(period=0.94124 * u.day,
              epoch=58374.669883 * u.day,
              epoch_offset=-2400000.5 * u.day,
              depth=0.00990112 * exo_const.frac_amp,
              duration=0.08932 * u.day,
              event_name='WASP-18 b',
              target_name='WASP-18',
              snr=50)
    return tce


def get_wasp18_lightcurve():
    lc_file = get_pkg_data_filename("data/wasp18b_flat_lightcurve.csv")
    lc_table = ascii.read(lc_file, data_start=1)
    lc = lk.LightCurve(time=lc_table['col2'],
                       flux=lc_table['col3'],
                       flux_err=lc_table['col4'],
                       time_format="btjd")
    return lc


def test_vetters():
    tce = get_wasp18_tce()
    lc = get_wasp18_lightcurve()
    metrics = dict()
    vetter_list = [vetters.Lpp(),
                   vetters.OddEven(),
                   vetters.TransitPhaseCoverage()]

    for v in vetter_list:
        vetter = v
        _ = vetter.run(tce, lc)
        metrics.update(vetter.__dict__)

    assert_allclose(metrics['norm_lpp'], 7.93119, rtol=1e-3)
    assert_allclose(metrics['tp_cover'], 1.0, rtol=1e-5)
    assert_allclose(metrics['odd_depth'][0], 0.99, rtol=1e-1)
25.894737
71
0.638889
205
1,476
4.37561
0.44878
0.06243
0.070234
0.040134
0
0
0
0
0
0
0
0.070652
0.252033
1,476
56
72
26.357143
0.741848
0
0
0
0
0
0.060298
0.02168
0
0
0
0
0.105263
1
0.078947
false
0
0.210526
0
0.342105
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c770f106a56c64793bd9f4e329f2b5bb1fbfddef
4,270
py
Python
pyqtgraph/dockarea/DockDrop.py
hishizuka/pyqtgraph
4820625d93ffb41f324431d0d29b395cf91f339e
[ "MIT" ]
2,762
2015-01-02T14:34:10.000Z
2022-03-30T14:06:07.000Z
pyqtgraph/dockarea/DockDrop.py
hishizuka/pyqtgraph
4820625d93ffb41f324431d0d29b395cf91f339e
[ "MIT" ]
1,901
2015-01-12T03:20:30.000Z
2022-03-31T16:33:36.000Z
pyqtgraph/dockarea/DockDrop.py
hishizuka/pyqtgraph
4820625d93ffb41f324431d0d29b395cf91f339e
[ "MIT" ]
1,038
2015-01-01T04:05:49.000Z
2022-03-31T11:57:51.000Z
# -*- coding: utf-8 -*-
from ..Qt import QtCore, QtGui


class DockDrop(object):
    """Provides dock-dropping methods"""

    def __init__(self, allowedAreas=None):
        object.__init__(self)
        if allowedAreas is None:
            allowedAreas = ['center', 'right', 'left', 'top', 'bottom']
        self.allowedAreas = set(allowedAreas)
        self.setAcceptDrops(True)
        self.dropArea = None
        self.overlay = DropAreaOverlay(self)
        self.overlay.raise_()

    def resizeOverlay(self, size):
        self.overlay.resize(size)

    def raiseOverlay(self):
        self.overlay.raise_()

    def dragEnterEvent(self, ev):
        src = ev.source()
        if hasattr(src, 'implements') and src.implements('dock'):
            #print "drag enter accept"
            ev.accept()
        else:
            #print "drag enter ignore"
            ev.ignore()

    def dragMoveEvent(self, ev):
        #print "drag move"
        # QDragMoveEvent inherits QDropEvent which provides posF()
        # PyQt6 provides only position()
        posF = ev.posF() if hasattr(ev, 'posF') else ev.position()
        ld = posF.x()
        rd = self.width() - ld
        td = posF.y()
        bd = self.height() - td

        mn = min(ld, rd, td, bd)
        if mn > 30:
            self.dropArea = "center"
        elif (ld == mn or td == mn) and mn > self.height()/3.:
            self.dropArea = "center"
        elif (rd == mn or ld == mn) and mn > self.width()/3.:
            self.dropArea = "center"
        elif rd == mn:
            self.dropArea = "right"
        elif ld == mn:
            self.dropArea = "left"
        elif td == mn:
            self.dropArea = "top"
        elif bd == mn:
            self.dropArea = "bottom"

        if ev.source() is self and self.dropArea == 'center':
            #print "  no self-center"
            self.dropArea = None
            ev.ignore()
        elif self.dropArea not in self.allowedAreas:
            #print "  not allowed"
            self.dropArea = None
            ev.ignore()
        else:
            #print "  ok"
            ev.accept()
        self.overlay.setDropArea(self.dropArea)

    def dragLeaveEvent(self, ev):
        self.dropArea = None
        self.overlay.setDropArea(self.dropArea)

    def dropEvent(self, ev):
        area = self.dropArea
        if area is None:
            return
        if area == 'center':
            area = 'above'
        self.area.moveDock(ev.source(), area, self)
        self.dropArea = None
        self.overlay.setDropArea(self.dropArea)


class DropAreaOverlay(QtGui.QWidget):
    """Overlay widget that draws drop areas during a drag-drop operation"""

    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.dropArea = None
        self.hide()
        self.setAttribute(QtCore.Qt.WidgetAttribute.WA_TransparentForMouseEvents)

    def setDropArea(self, area):
        self.dropArea = area
        if area is None:
            self.hide()
        else:
            ## Resize overlay to just the region where drop area should be displayed.
            ## This works around a Qt bug--can't display transparent widgets over QGLWidget
            prgn = self.parent().rect()
            rgn = QtCore.QRect(prgn)
            w = min(30, prgn.width()/3.)
            h = min(30, prgn.height()/3.)

            if self.dropArea == 'left':
                rgn.setWidth(w)
            elif self.dropArea == 'right':
                rgn.setLeft(rgn.left() + prgn.width() - w)
            elif self.dropArea == 'top':
                rgn.setHeight(h)
            elif self.dropArea == 'bottom':
                rgn.setTop(rgn.top() + prgn.height() - h)
            elif self.dropArea == 'center':
                rgn.adjust(w, h, -w, -h)
            self.setGeometry(rgn)
            self.show()
        self.update()

    def paintEvent(self, ev):
        if self.dropArea is None:
            return
        p = QtGui.QPainter(self)
        rgn = self.rect()

        p.setBrush(QtGui.QBrush(QtGui.QColor(100, 100, 255, 50)))
        p.setPen(QtGui.QPen(QtGui.QColor(50, 50, 150), 3))
        p.drawRect(rgn)
32.348485
91
0.525527
472
4,270
4.713983
0.313559
0.140225
0.043146
0.035955
0.141573
0.087191
0.069213
0.044944
0
0
0
0.011269
0.355738
4,270
131
92
32.59542
0.797528
0.112178
0
0.282828
0
0
0.031582
0
0
0
0
0
0
1
0.10101
false
0
0.010101
0
0.151515
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c773836d5d08ecba5ffb7e86e3b25bdc07e2351a
3,927
py
Python
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMP_FRAMEWORK_MIB.py
bopopescu/ACI
dd717bc74739eeed4747b3ea9e36b239580df5e1
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMP_FRAMEWORK_MIB.py
bopopescu/ACI
dd717bc74739eeed4747b3ea9e36b239580df5e1
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMP_FRAMEWORK_MIB.py
bopopescu/ACI
dd717bc74739eeed4747b3ea9e36b239580df5e1
[ "ECL-2.0", "Apache-2.0" ]
1
2020-07-22T04:04:44.000Z
2020-07-22T04:04:44.000Z
""" SNMP_FRAMEWORK_MIB """ from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class SnmpSecurityLevel(Enum): """ SnmpSecurityLevel (Enum Class) .. data:: noAuthNoPriv = 1 .. data:: authNoPriv = 2 .. data:: authPriv = 3 """ noAuthNoPriv = Enum.YLeaf(1, "noAuthNoPriv") authNoPriv = Enum.YLeaf(2, "authNoPriv") authPriv = Enum.YLeaf(3, "authPriv") class SNMPFRAMEWORKMIB(Entity): """ .. attribute:: snmpengine **type**\: :py:class:`Snmpengine <ydk.models.cisco_ios_xr.SNMP_FRAMEWORK_MIB.SNMPFRAMEWORKMIB.Snmpengine>` """ _prefix = 'SNMP_FRAMEWORK_MIB' _revision = '2002-10-14' def __init__(self): super(SNMPFRAMEWORKMIB, self).__init__() self._top_entity = None self.yang_name = "SNMP-FRAMEWORK-MIB" self.yang_parent_name = "SNMP-FRAMEWORK-MIB" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_container_classes = OrderedDict([("snmpEngine", ("snmpengine", SNMPFRAMEWORKMIB.Snmpengine))]) self._child_list_classes = OrderedDict([]) self._leafs = OrderedDict() self.snmpengine = SNMPFRAMEWORKMIB.Snmpengine() self.snmpengine.parent = self self._children_name_map["snmpengine"] = "snmpEngine" self._children_yang_names.add("snmpEngine") self._segment_path = lambda: "SNMP-FRAMEWORK-MIB:SNMP-FRAMEWORK-MIB" class Snmpengine(Entity): """ .. attribute:: snmpengineid **type**\: str **pattern:** (([0\-9a\-fA\-F]){2}(\:([0\-9a\-fA\-F]){2})\*)? .. attribute:: snmpengineboots **type**\: int **range:** 1..2147483647 .. attribute:: snmpenginetime **type**\: int **range:** 0..2147483647 .. attribute:: snmpenginemaxmessagesize **type**\: int **range:** 484..2147483647 """ _prefix = 'SNMP_FRAMEWORK_MIB' _revision = '2002-10-14' def __init__(self): super(SNMPFRAMEWORKMIB.Snmpengine, self).__init__() self.yang_name = "snmpEngine" self.yang_parent_name = "SNMP-FRAMEWORK-MIB" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_container_classes = OrderedDict([]) self._child_list_classes = OrderedDict([]) self._leafs = OrderedDict([ ('snmpengineid', YLeaf(YType.str, 'snmpEngineID')), ('snmpengineboots', YLeaf(YType.int32, 'snmpEngineBoots')), ('snmpenginetime', YLeaf(YType.int32, 'snmpEngineTime')), ('snmpenginemaxmessagesize', YLeaf(YType.int32, 'snmpEngineMaxMessageSize')), ]) self.snmpengineid = None self.snmpengineboots = None self.snmpenginetime = None self.snmpenginemaxmessagesize = None self._segment_path = lambda: "snmpEngine" self._absolute_path = lambda: "SNMP-FRAMEWORK-MIB:SNMP-FRAMEWORK-MIB/%s" % self._segment_path() def __setattr__(self, name, value): self._perform_setattr(SNMPFRAMEWORKMIB.Snmpengine, ['snmpengineid', 'snmpengineboots', 'snmpenginetime', 'snmpenginemaxmessagesize'], name, value) def clone_ptr(self): self._top_entity = SNMPFRAMEWORKMIB() return self._top_entity
28.456522
158
0.595111
363
3,927
6.170799
0.286501
0.063839
0.078571
0.026786
0.276786
0.261607
0.261607
0.261607
0.178571
0.178571
0
0.025285
0.28495
3,927
137
159
28.664234
0.772436
0.180545
0
0.245614
0
0
0.163279
0.050474
0
0
0
0
0
1
0.070175
false
0
0.087719
0
0.315789
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c773cb05d9fdb9aa7ea5543ac5440822be912b9e
2,941
py
Python
handlers/redirects.py
Bainky/Ventify
638486dc5f265a4907a5a193ea2a7c9b44e8e943
[ "MIT" ]
6
2021-03-11T11:43:17.000Z
2021-12-08T05:26:20.000Z
handlers/redirects.py
Bainky/Ventify
638486dc5f265a4907a5a193ea2a7c9b44e8e943
[ "MIT" ]
null
null
null
handlers/redirects.py
Bainky/Ventify
638486dc5f265a4907a5a193ea2a7c9b44e8e943
[ "MIT" ]
2
2021-03-24T05:31:12.000Z
2021-04-13T22:03:11.000Z
from aiogram.utils.markdown import hide_link
from aiogram.types import CallbackQuery

from loader import dp
from utils import (
    get_object,
    get_attributes_of_object
)
from keyboards import (
    anime_choose_safe_category,
    anime_sfw_categories,
    anime_nsfw_categories,
    animals_categories,
    menu_with_categories,
    control_buttons
)


@dp.callback_query_handler(text="menu")
async def call_menu_with_categories(call: CallbackQuery):
    """ Function for sending a menu with a selection of safe content """
    await call.answer()

    # Editing the message
    await call.message.edit_text(
        text=(
            "<b>🔗 Select a category to get a picture.</b>"
        ),
        reply_markup=menu_with_categories()
    )


@dp.callback_query_handler(text="anime")
async def call_anime_categories(call: CallbackQuery):
    """ Redirect to select anime actions """
    await call.answer()

    # Editing the message
    await call.message.edit_text(
        text=(
            "<b>⚜️ Choose what content you want to see.</b>"
        ),
        reply_markup=anime_choose_safe_category()
    )


@dp.callback_query_handler(text=["sfw", "nsfw"])
async def call_nsfw_categories(call: CallbackQuery):
    """ Redirect to anime content """
    data = call.data.upper()
    message = call.message

    # Send answer
    await call.answer()

    if data == "SFW":
        kb = anime_sfw_categories()
    else:
        kb = anime_nsfw_categories()

    # Editing the message
    await message.edit_text(
        text=(
            f"<b>🍿 You are in the {data} category.</b>"
        ),
        reply_markup=kb
    )


# Renamed from the duplicate `call_anime_categories` so the two handlers
# no longer share one name.
@dp.callback_query_handler(text="animals")
async def call_animals_categories(call: CallbackQuery):
    """ Redirect to animals content """
    await call.answer()

    # Editing the message
    await call.message.edit_text(
        text=(
            "<b>🦄 You are in the category with animals.</b>"
        ),
        reply_markup=animals_categories()
    )


@dp.callback_query_handler()
async def call_get_photography(call: CallbackQuery):
    """ Function for sending photos """
    message = call.message
    data = call.data

    # Get json document
    api = get_attributes_of_object()

    if data == "generate_new":
        data = message.text.split("#")[1]

    obj = api[data]["object"]
    atr = api[data]["attribute"]
    mark = api[data]["entity"]

    if mark == "anime":
        mark = api[data]["safe"]
    if mark == "memes":
        mark = "menu"

    # We get a link to the preview photo
    link = await get_object(obj, atr)

    await call.answer()

    # Editing the message
    await message.edit_text(
        text=(
            f"{hide_link(link)} #{data}"
        ),
        reply_markup=control_buttons(mark)
    )
23.717742
61
0.598776
346
2,941
4.919075
0.260116
0.042303
0.044066
0.06463
0.386016
0.237368
0.237368
0.228555
0.228555
0.115746
0
0.000483
0.296158
2,941
124
62
23.717742
0.819324
0.055763
0
0.303797
0
0
0.116736
0
0
0
0
0
0
1
0
false
0
0.063291
0
0.063291
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c77456702d5939c9da605c3d65de2f70c1b95b26
8,695
py
Python
segmentation_test/Scripts/medpy_graphcut_voxel.py
rominashirazi/SpineSegmentation
fb08122ac6d9a598b60aecb4f1a1a2a31fba96ab
[ "MIT" ]
null
null
null
segmentation_test/Scripts/medpy_graphcut_voxel.py
rominashirazi/SpineSegmentation
fb08122ac6d9a598b60aecb4f1a1a2a31fba96ab
[ "MIT" ]
null
null
null
segmentation_test/Scripts/medpy_graphcut_voxel.py
rominashirazi/SpineSegmentation
fb08122ac6d9a598b60aecb4f1a1a2a31fba96ab
[ "MIT" ]
null
null
null
#!c:\users\hooma\documents\github\spinesegmentation\segmentation_test\scripts\python.exe """ Execute a graph cut on a voxel image based on some foreground and background markers. Copyright (C) 2013 Oskar Maier This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ # build-in modules from argparse import RawTextHelpFormatter import argparse import logging import os # third-party modules import scipy # path changes # own modules from medpy.core import ArgumentError, Logger from medpy.io import load, save, header from medpy import graphcut from medpy.graphcut.wrapper import split_marker # information __author__ = "Oskar Maier" __version__ = "r0.3.1, 2012-03-23" __email__ = "oskar.maier@googlemail.com" __status__ = "Release" __description__ = """ Perform a binary graph cut using Boykov's max-flow/min-cut algorithm. This implementation does only compute a boundary term and does not use any regional term. The desired boundary term can be selected via the --boundary argument. Depending on the selected term, an additional image has to be supplied as badditional. In the case of the difference of means, it is the original image. Furthermore the algorithm requires a binary image with foreground markers and a binary image with background markers. Additionally a filename for the created binary mask marking foreground and background has to be supplied. Note that the input images must be of the same dimensionality, otherwise an exception is thrown. Note to take into account the input images orientation. Note that the quality of the resulting segmentations depends also on the quality of the supplied markers. Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see the LICENSE file or <http://www.gnu.org/licenses/> for details. """ # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) # prepare logger logger = Logger.getInstance() if args.debug: logger.setLevel(logging.DEBUG) elif args.verbose: logger.setLevel(logging.INFO) # check if output image exists if not args.force: if os.path.exists(args.output): logger.warning('The output image {} already exists. 
Exiting.'.format(args.output)) exit(-1) # select boundary term ['diff_linear', 'diff_exp', 'diff_div', 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow'] if 'diff_linear' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_linear logger.info('Selected boundary term: linear difference of intensities') elif 'diff_exp' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_exponential logger.info('Selected boundary term: exponential difference of intensities') elif 'diff_div' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_division logger.info('Selected boundary term: divided difference of intensities') elif 'diff_pow' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_power logger.info('Selected boundary term: power based / raised difference of intensities') elif 'max_linear' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_linear logger.info('Selected boundary term: linear maximum of intensities') elif 'max_exp' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_exponential logger.info('Selected boundary term: exponential maximum of intensities') elif 'max_div' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_division logger.info('Selected boundary term: divided maximum of intensities') elif 'max_pow' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_power logger.info('Selected boundary term: power based / raised maximum of intensities') # load input images badditional_image_data, reference_header = load(args.badditional) markers_image_data, _ = load(args.markers) # split marker image into fg and bg images fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data) # check if all images dimensions are the same if not (badditional_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape): logger.critical('Not all of the supplied images are of the same shape.') raise ArgumentError('Not all of the supplied images are of the same shape.') # extract spacing if required if args.spacing: spacing = header.get_pixel_spacing(reference_header) logger.info('Taking spacing of {} into account.'.format(spacing)) else: spacing = False # generate graph logger.info('Preparing BK_MFMC C++ graph...') gcgraph = graphcut.graph_from_voxels(fgmarkers_image_data, bgmarkers_image_data, boundary_term = boundary_term, boundary_term_args = (badditional_image_data, args.sigma, spacing)) # execute min-cut logger.info('Executing min-cut...') maxflow = gcgraph.maxflow() logger.debug('Maxflow is {}'.format(maxflow)) # reshape results to form a valid mask logger.info('Applying results...') result_image_data = scipy.zeros(bgmarkers_image_data.size, dtype=scipy.bool_) for idx in range(len(result_image_data)): result_image_data[idx] = 0 if gcgraph.termtype.SINK == gcgraph.what_segment(idx) else 1 result_image_data = result_image_data.reshape(bgmarkers_image_data.shape) # save resulting mask save(result_image_data.astype(scipy.bool_), args.output, reference_header, args.force) logger.info('Successfully terminated.') def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() def getParser(): "Creates and returns the argparse parser object." 
parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) parser.add_argument('sigma', type=float, help='The sigma required for the boundary terms.') parser.add_argument('badditional', help='The additional image required by the boundary term. See there for details.') parser.add_argument('markers', help='Image containing the foreground (=1) and background (=2) markers.') parser.add_argument('output', help='The output image containing the segmentation.') parser.add_argument('--boundary', default='diff_exp', help='The boundary term to use. Note that the ones prefixed with diff_ require the original image, while the ones prefixed with max_ require the gradient image.', choices=['diff_linear', 'diff_exp', 'diff_div', 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow']) parser.add_argument('-s', dest='spacing', action='store_true', help='Set this flag to take the pixel spacing of the image into account. The spacing data will be extracted from the baddtional image.') parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') return parser if __name__ == "__main__": main()
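The script above is a command-line tool; a minimal sketch of driving it from Python follows. The sigma value and every file name are hypothetical placeholders, and the script is assumed to be reachable on the current path.

import subprocess

# Hypothetical invocation; sigma, the image paths and the output path are placeholders.
subprocess.run([
    "python", "medpy_graphcut_voxel.py",
    "15.0",              # sigma for the boundary term
    "original.nii",      # badditional image (diff_* terms expect the original image)
    "markers.nii",       # binary marker image: foreground == 1, background == 2
    "segmentation.nii",  # output mask
    "--boundary", "diff_exp",
    "-v",
], check=True)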
47.513661
328
0.692237
1,100
8,695
5.322727
0.295455
0.049189
0.026132
0.032792
0.293083
0.242869
0.21076
0.163279
0.138002
0.034159
0
0.003729
0.228867
8,695
183
329
47.513661
0.8695
0.151811
0
0
0
0.017544
0.46363
0.003483
0
0
0
0
0
1
0.026316
false
0
0.078947
0
0.122807
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c774862e87bf8aaea6f4bb5796d15dd56dc9ae0b
2,968
py
Python
_notes/book/conf.py
AstroMatt/astronaut-training-en
6250af8e10358016dcebee54bb9ad5bc40cfe4d1
[ "MIT" ]
1
2020-08-08T00:37:28.000Z
2020-08-08T00:37:28.000Z
_notes/book/conf.py
AstroMatt/astronaut-training-en
6250af8e10358016dcebee54bb9ad5bc40cfe4d1
[ "MIT" ]
null
null
null
_notes/book/conf.py
AstroMatt/astronaut-training-en
6250af8e10358016dcebee54bb9ad5bc40cfe4d1
[ "MIT" ]
null
null
null
author = 'Matt Harasymczuk' email = 'matt@astrotech.io' project = 'Astronaut Training Program' description = 'Astronaut Training Program' extensions = [ 'sphinx.ext.todo', 'sphinx.ext.imgmath', ] todo_emit_warnings = False todo_include_todos = True exclude_patterns = [] # ----------------------------------------------------------------------------- # Standard book config # ----------------------------------------------------------------------------- import os import re import subprocess import sys from datetime import datetime needs_sphinx = '2.2' mathjax_path = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-MML-AM_CHTML' mathjax_config = { 'extensions': ['tex2jax.js'], 'jax': ['input/TeX', 'output/HTML-CSS'], } html_theme = 'sphinx_rtd_theme' exclude_patterns = exclude_patterns + [ '.*', 'venv*', 'virtualenv*', '_extensions', '_img', '_slides', '_static', '_themes', '_tmp', '*/_template.rst', '*/contrib/*', '*/solution/*', '*/solutions/*', '**.ipynb_checkpoints', 'README.rst', 'TODO.rst', ] numfig_format = { 'section': 'Sect. %s.', 'figure': 'Fig. %s.', 'table': 'Tab. %s.', 'code-block': 'Code Listing %s.', } language = 'en' source_directory = '.' master_doc = 'index' highlight_language = 'python3' pygments_style = 'borland' numfig = True templates_path = ['_templates'] source_suffix = ['.rst'] imgmath_image_format = 'svg' today_fmt = '%Y-%m-%d' project_slug = re.sub(r'[\W]+', '', project) sha1 = subprocess.Popen('git log -1 --format="%h"', stdout=subprocess.PIPE, shell=True).stdout.read().decode().replace('\n', '') now = datetime.now() year = now.year today = now.strftime('%Y-%m-%d') version = f'#{sha1}, {today}' release = f'#{sha1}, {today}' copyright = f'{year}, {author} <{email}>' extensions_dir = os.path.join(os.path.dirname(__file__), '', '_extensions') sys.path.append(extensions_dir) htmlhelp_basename = project html_theme_path = ['_themes'] html_static_path = ['_static'] html_favicon = '_static/favicon.png' html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']} html_show_sphinx = False html_context = { 'css_files': [ '_static/theme-overrides.css', ], } latex_documents = [(master_doc, f'{project_slug}.tex', project, author, 'manual')] latex_elements = { 'papersize': 'a4paper', 'pointsize': '10pt', 'figure_align': 'htbp', # Fix for: LaTeX Backend Fails with Citations In Figure Captions 'preamble': r""" \usepackage{etoolbox} \AtBeginEnvironment{figure}{\renewcommand{\phantomsection}{}} """ } epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright epub_exclude_files = ['search.html'] man_pages = [ (master_doc, project_slug, project, [author], 1) ] texinfo_documents = [ (master_doc, project_slug, project, author, project, '', 'Miscellaneous'), ]
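Since this conf.py is only consumed by Sphinx, here is a hedged sketch of building with it through Sphinx's Python entry point (sphinx.cmd.build.build_main, available since Sphinx 1.7); the source and output directories are placeholders, and the theme and extension packages referenced by the config must be installed.

from sphinx.cmd.build import build_main

# Equivalent to: sphinx-build -b html <sourcedir> <outdir>; paths are placeholders.
exit_code = build_main(["-b", "html", "_notes/book", "_build/html"])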
24.130081
128
0.624326
331
2,968
5.377644
0.549849
0.020225
0.026966
0.022472
0.037079
0.037079
0
0
0
0
0
0.005969
0.153302
2,968
122
129
24.327869
0.702348
0.080526
0
0
0
0.010101
0.354258
0.040015
0
0
0
0
0
1
0
false
0
0.050505
0
0.050505
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c775a30ea8b55f2cd0df98a3a7cc00417a074bda
18,286
py
Python
data_structures/trees/tree.py
onyonkaclifford/data-structures-and-algorithms
e0ca4bfa878273d06bf22c303e47762b8ec3870b
[ "MIT" ]
null
null
null
data_structures/trees/tree.py
onyonkaclifford/data-structures-and-algorithms
e0ca4bfa878273d06bf22c303e47762b8ec3870b
[ "MIT" ]
null
null
null
data_structures/trees/tree.py
onyonkaclifford/data-structures-and-algorithms
e0ca4bfa878273d06bf22c303e47762b8ec3870b
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod from typing import Any, Generator, Iterable, List, Union class Empty(Exception): pass class Tree(ABC): """A tree is a hierarchical collection of nodes containing items, with each node having a unique parent and zero, one or many children items. The topmost element in a non-empty tree, the root, has no parent. Tree vocabularies include, but are not limited to: 1. Root - the topmost element in a non-empty tree, it has no parent 2. Leaf - a node with zero children 3. Siblings - nodes that share a parent node 4. Edge - a pair of nodes such the one is the parent of the other 5. Path - a collection of nodes such that any pair of adjacent nodes have a parent/child relationship 6. Height - number of edges between a node and it's furthest leaf 7. Depth - number of edges between a node and the root 8. Level - number of nodes in the path between a node and the root, inclusive of both the node itself and the root 9. Ordered tree - a tree with a meaningful organisation among its nodes such that its nodes can be arranged in a linear manner from first to last """ class _Node: def __init__(self, key, value, parent=None, children: Union[List, None] = None): self.key = key self.value = value self.parent = parent self.children = children if children is not None else [] class _Position: """A representation of the position of a node within a tree""" def __init__(self, belongs_to, node): self.__variables = {"belongs_to": belongs_to} self.__node = node def is_owned_by(self, owner): """Check whether position belongs to the tree, owner. Time complexity: O(1). :param owner: object to check whether it's the owner of this position :returns: True of the position is owned by the object passed, else False """ return owner is self.__variables["belongs_to"] def manipulate_variables(self, owner, method: str, *params): """Manipulate member variables of this position. Methods of the owner list are the only ones that can call this method. Time complexity: O(1). :param owner: tree object that owns this position :param method: method name of tree object that will manipulate the member variables of this position :param params: extra optional parameters to pass to the method :returns: the return value of the tree method whose name is passed """ if not self.is_owned_by(owner): raise ValueError("Position doesn't belong to the passed owner") return getattr(owner, method)(self.__variables, *params) def manipulate_node(self, owner, method: str, *params): """Manipulate the node held by this position. Methods of the owner list are the only ones that can call this method. Time complexity: O(1). :param owner: tree object that owns this position :param method: method name of tree object that will manipulate the node contained in this position :param params: extra optional parameters to pass to the method :returns: the return value of the tree method whose name is passed """ if not self.is_owned_by(owner): raise ValueError("Position doesn't belong to the passed owner") return getattr(owner, method)(self.__node, *params) def get_data(self): """Return the data stored by the node held by this position. Time complexity: O(1). 
:returns: data stored in node contained in this position """ return self.__node.key, self.__node.value def __init__(self): self._root: Union[Tree._Node, None] = None self._length = 0 self.__generator: Union[Generator, None] = None def __len__(self) -> int: """Return total number of items in tree :return: count of items in tree """ return self._length def __repr__(self) -> str: """Return a string representation of the tree :return: the string representation of the tree """ def helper(current_position): children = self.get_children(current_position) num_of_children = len(children) last_child_idx = num_of_children - 1 data_dict["string_data"] += f"{current_position.get_data()[0]}" for i, j in enumerate(children): data_dict["string_data"] += "(" if i == 0 else ", " helper(j) data_dict["string_data"] += ")" if i == last_child_idx else "" if self.is_empty(): return "" data_dict = {"string_data": ""} helper(Tree._Position(self, self._root)) return data_dict["string_data"] def __iter__(self) -> Iterable: """Return a tree iterable :return: tree iterable """ return self def __next__(self) -> _Position: """Return next position of tree iterator, implemented based on level-order traversal :return: next position :raises StopIteration: when the cursor denoting the current position surpasses the last position of the tree """ if self.__generator is None: self.__generator = self.traverse_tree_level_order() try: next_position = next(self.__generator) except StopIteration as e: self.__generator = None raise e return next_position @staticmethod def _validate_node(node): """Helper function to check if the node passed is a tree node. Returns the node passed if the validation passes, else raises a TypeError. Time complexity: O(1). :param node: node to validate :returns: the node passed if it passes validation :raises TypeError: if the validation fails """ if not isinstance(node, Tree._Node): raise TypeError("Not a tree node") return node @staticmethod def _invalidate_position(variables): """Helper function to set the belongs_to key of a dictionary to None. Used to revoke the ownership of a position by this tree. Time complexity: O(1). :returns: the dictionary passed, with the belongs_to key set to None """ variables["belongs_to"] = None return variables def is_empty(self) -> bool: """Return True if tree is empty, else False. Time complexity: O(1). :returns: True if tree is empty, else False """ return self._root is None def is_root(self, position: _Position) -> bool: """Check if the passed position contains the root node. Time complexity: O(1). :returns: True if the passed position holds the root node, else False """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") node = position.manipulate_node(self, "_validate_node") return node.parent is None def is_leaf(self, position: _Position) -> bool: """Check if the passed position contains a leaf. Time complexity: O(1). :returns: True if the passed position holds a leaf node, else False """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") return len(self.get_children(position)) == 0 def get_root(self) -> Union[_Position, None]: """Return the root position. Time complexity: O(1). :returns: the root position """ if self.is_empty(): return None else: return Tree._Position(self, self._root) def get_parent(self, position: _Position) -> Union[_Position, None]: """Return the parent of the given position. Time complexity: O(1). 
:param position: position containing the node whose parent is being sought :returns: the position of parent of the node contained in the passed position. None if the position passed contains the root node. """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") node = position.manipulate_node(self, "_validate_node") if self.is_root(Tree._Position(self, node)): return None else: return Tree._Position(self, node.parent) def get_children(self, position: _Position) -> Union[List[_Position], None]: """Return the children of the given position. Time complexity: O(1). :param position: position containing the node whose children are being sought :returns: the positions of the children of the node contained in the passed position. None if the position has no children. """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") node = position.manipulate_node(self, "_validate_node") children = node.children if children is None: return None else: return [Tree._Position(self, i) for i in children if i is not None] def get_siblings(self, position: _Position) -> Union[List[_Position], None]: """Return the siblings of the given position. Time complexity: O(1). :param position: position containing the node whose children are being sought :returns: the positions of the siblings of the node contained in the passed position """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") node = position.manipulate_node(self, "_validate_node") parent = node.parent if parent is None: return [] return [Tree._Position(self, i) for i in parent.children if i is not node] def get_height_of_node(self, position: _Position) -> int: """Return the number of edges between a node and the farthest leaf among its descendants. Time complexity: O(n). :param position: position containing the node whose height is being sought :returns: the number of edges between a node and the farthest leaf among its descendants """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") if self.is_leaf(position): return 0 return 1 + max(self.get_height_of_node(p) for p in self.get_children(position)) def get_height_of_tree(self) -> int: """Return the number of edges between the root node and the farthest leaf. Time complexity: O(n). :returns: the number of edges between the root node and the farthest leaf """ if self.is_empty(): raise Empty("Tree is empty") return self.get_height_of_node(Tree._Position(self, self._root)) def get_depth_of_node(self, position: _Position) -> int: """Return the number of edges between a node and the root. Time complexity: O(n). :param position: position containing the node whose depth is being sought :returns: the number of edges between a node and the root """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") if self.is_root(position): return 0 return 1 + self.get_depth_of_node(self.get_parent(position)) def get_depth_of_tree(self) -> int: """Return the number of edges between the farthest leaf and the root. Time complexity: O(n). :returns: the number of edges between the farthest leaf and the root """ return self.get_height_of_tree() def get_level_of_node(self, position: _Position) -> int: """Return the number of nodes between a node and the root, inclusive of itself. Time complexity: O(n). 
:param position: position containing the node whose level is being sought :returns: the number of nodes between a node and the root, inclusive of itself """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") return 1 + self.get_depth_of_node(position) def traverse_subtree_pre_order(self, position: _Position) -> Generator: """Pre-order traverse subtree whose root is the passed position and return a generator of the positions it contains :param position: position containing the node that's the root of the subtree to be traversed :returns: a generator of the positions """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") yield position for i in self.get_children(position): for j in self.traverse_subtree_pre_order(i): yield j def traverse_tree_pre_order(self) -> Generator: """Pre-order traverse tree and return a generator of the positions it contains :returns: a generator of the positions """ position = self.get_root() if position is not None: for i in self.traverse_subtree_pre_order(position): yield i def traverse_subtree_post_order(self, position: _Position) -> Generator: """Post-order traverse subtree whose root is the passed position and return a generator of the positions it contains :param position: position containing the node that's the root of the subtree to be traversed :returns: a generator of the positions """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") for i in self.get_children(position): for j in self.traverse_subtree_post_order(i): yield j yield position def traverse_tree_post_order(self) -> Generator: """Post-order traverse tree and return a generator of the positions it contains :returns: a generator of the positions """ position = self.get_root() if position is not None: for i in self.traverse_subtree_post_order(position): yield i def traverse_subtree_level_order(self, position: _Position) -> Generator: """Level-by-level traverse subtree whose root is the passed position and return a generator of the positions it contains :param position: position containing the node that's the root of the subtree to be traversed :returns: a generator of the positions """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") def helper(root_node, level): if root_node is not None: if level == 1: yield Tree._Position(self, root_node) elif level > 1: for child in root_node.children: for k in helper(child, level - 1): yield k node = position.manipulate_node(self, "_validate_node") number_of_levels = self.get_height_of_node(position) + 1 for i in range(1, number_of_levels + 1): for j in helper(node, i): yield j def traverse_tree_level_order(self) -> Generator: """Level-by-level traverse tree and return a generator of the positions it contains :returns: a generator of the positions """ position = self.get_root() if position is not None: for i in self.traverse_subtree_level_order(position): yield i def delete(self, position: _Position) -> None: """Delete a value from the tree :param position: position containing the node to be removed from the tree """ self._length -= 1 if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") def insert_node(node_to_insert, is_node_left_child, parent_node): if node_to_insert is not None: node_to_insert.parent = parent_node if is_node_left_child is not None: if is_node_left_child: parent_node.children[0] = node_to_insert else: parent_node.children[1] = 
node_to_insert def delete_node(node_to_delete, is_root): parent = node_to_delete.parent left = node_to_delete.children[0] right = node_to_delete.children[1] is_left_child = None if parent is None else node_to_delete.key < parent.key if left is None: insert_node(right, is_left_child, parent) if is_root: self._root = right else: current_node = left right_child = current_node.children[1] if right_child is None: current_node.children[1] = right insert_node(current_node, is_left_child, parent) if is_root: self._root = current_node else: new_node = Tree._Node( right_child.key, right_child.value, children=[current_node, right], ) insert_node(new_node, is_left_child, parent) if is_root: self._root = new_node delete_node(right_child, False) node = position.manipulate_node(self, "_validate_node") is_root_node = self.is_root(position) _ = position.manipulate_variables(self, "_invalidate_position") delete_node(node, is_root_node) @abstractmethod def insert(self, key: Any, value: Any) -> None: """Insert a value into the tree :param key: unique identifier of the item to be added to the tree :param value: item to be added to the tree """ self._length += 1
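Tree is abstract (insert is its only abstract method), and its delete() assumes binary-search-tree ordering over children[0]/children[1]. A minimal, hypothetical concrete subclass under those assumptions:

# A hypothetical concrete subclass: a plain binary search tree built on the
# abstract Tree above. Only insert() needs to be provided.
class BinarySearchTree(Tree):
    def insert(self, key, value):
        super().insert(key, value)  # updates self._length
        node = Tree._Node(key, value, children=[None, None])
        if self._root is None:
            self._root = node
            return
        current = self._root
        while True:
            idx = 0 if key < current.key else 1  # left for smaller keys, right otherwise
            if current.children[idx] is None:
                node.parent = current
                current.children[idx] = node
                return
            current = current.children[idx]

# bst = BinarySearchTree(); bst.insert(2, "b"); bst.insert(1, "a"); bst.insert(3, "c")
# print(repr(bst))  # e.g. "2(1, 3)"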
39.240343
119
0.626107
2,429
18,286
4.567723
0.096336
0.015322
0.024335
0.035331
0.607301
0.535016
0.497792
0.452817
0.435511
0.42037
0
0.003606
0.302472
18,286
465
120
39.324731
0.866249
0.371268
0
0.310811
0
0
0.073674
0.003058
0
0
0
0
0
1
0.166667
false
0.013514
0.009009
0
0.324324
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c775ae8fda6ca73f18c286d16c2c597ac2a87d30
6,857
py
Python
nodes/audio.py
sddhrthrt/COVFEFE
bc74ff0b5ee4d675482928110dda81443d4bec63
[ "Apache-2.0" ]
null
null
null
nodes/audio.py
sddhrthrt/COVFEFE
bc74ff0b5ee4d675482928110dda81443d4bec63
[ "Apache-2.0" ]
null
null
null
nodes/audio.py
sddhrthrt/COVFEFE
bc74ff0b5ee4d675482928110dda81443d4bec63
[ "Apache-2.0" ]
null
null
null
from abc import ABC, abstractmethod import os import logging from nodes.helper import FileOutputNode from utils import file_utils from utils import signal_processing as sp from utils.shell_run import shell_run from config import OPENSMILE_HOME class Mp3ToWav(FileOutputNode): def run(self, mp3_file): self.log(logging.INFO, "Starting %s" % (mp3_file)) if not mp3_file.endswith(".mp3"): self.log(logging.ERROR,"Failed %s. Not mp3 file" % (mp3_file)) return wav_file = self.derive_new_file_path(mp3_file, "wav") if file_utils.should_run(mp3_file, wav_file): res = shell_run(["lame", "--decode", mp3_file, wav_file]) if res != 0: self.log(logging.ERROR,"Failed %s -> %s with lame error code %i" % (mp3_file, wav_file, res)) return self.log(logging.INFO, "Done %s -> %s" % (mp3_file, wav_file)) self.emit(wav_file) class ResampleWav(FileOutputNode): def setup(self, new_sr): self.new_sr = new_sr def run(self, wav_file): self.log(logging.INFO, "Starting %s" % (wav_file)) if not wav_file.endswith(".wav"): self.log(logging.ERROR,"Failed %s. Not wav file" % (wav_file)) return new_wav_file = self.derive_new_file_path(wav_file, "wav") if file_utils.should_run(wav_file, new_wav_file): res = shell_run(["sox", wav_file, "--rate", str(self.new_sr), new_wav_file]) if res != 0: self.log(logging.ERROR,"Failed %s -> %s with lame error code %i" % (wav_file, new_wav_file, res)) return self.log(logging.INFO, "Done %s -> %s" % (wav_file, new_wav_file)) self.emit(new_wav_file) class ShellCommand(FileOutputNode): """ Take as input a format string representing a shell command that can accept an in_file and out_file. For example "someCommand -i {in_file} -o {out_file}" ext: Extension of the output file, ex. "wav", "csv" """ def setup(self, command, ext): self.command = command self.ext = ext def run(self, in_file): self.log(logging.INFO, "Starting %s" % (in_file)) out_file = self.derive_new_file_path(in_file, self.ext) if file_utils.should_run(in_file, out_file): cmd = self.command.format(in_file=in_file, out_file=out_file) res = shell_run(cmd.split(" ")) if res != 0: self.log(logging.ERROR,"Failed %s -> %s with error code %i. cmd: %s" % (in_file, out_file, res, cmd)) return self.log(logging.INFO, "Done %s -> %s" % (in_file, out_file)) self.emit(out_file) class OpenSmileRunner(FileOutputNode): """ conf_file: Either absolute path to an opensmile conf file or the name of a config file in opensmile's config folder out_flag: Flag to use for the output file. extra_flags: A string of extra flags to pass to SMILExtract. out_ext: Extension of the output file """ def setup(self, conf_file, out_flag="-csvoutput", extra_flags="-nologfile -noconsoleoutput -appendcsv 0", out_ext="csv"): self.conf_file = file_utils.locate_file(conf_file, [os.path.join(OPENSMILE_HOME, "config")]) self.extra_flags = extra_flags.split(" ") self.out_flag = out_flag self.out_ext = out_ext self.opensmile_exec = file_utils.locate_file("SMILExtract", [OPENSMILE_HOME, os.path.join(OPENSMILE_HOME, "bin")], use_path=True) def run(self, in_file): self.log(logging.INFO, "Starting %s" % (in_file)) out_file = self.derive_new_file_path(in_file, self.out_ext) if file_utils.should_run(in_file, out_file): cmd = [self.opensmile_exec, "-C", self.conf_file, "-I", in_file, self.out_flag, out_file] + self.extra_flags res = shell_run(cmd) if res != 0: self.log(logging.ERROR,"Failed %s -> %s with SmileExtract error code %i. 
cmd: %s" % (in_file, out_file, res, " ".join(cmd))) return self.log(logging.INFO, "Done %s -> %s" % (in_file, out_file)) self.emit([out_file]) class IS10_Paraling(OpenSmileRunner): def get_conf_name(self): return "IS10_paraling.conf" def get_command(self, wav_file, out_file): return [self.os_exec, "-C", self.conf_file, "-I", wav_file, "-csvoutput", out_file, "-nologfile", "-noconsoleoutput", "-appendcsv", "0"] class IS10_Paraling_lld(OpenSmileRunner): def get_conf_name(self): return "IS10_paraling.conf" def get_command(self, wav_file, out_file): return [self.os_exec, "-C", self.conf_file, "-I", wav_file, "-lldcsvoutput", out_file, "-nologfile", "-noconsoleoutput", "-appendcsv", "0"] class SplitSegments(FileOutputNode): """ segment_mapping_fn is a pointer to a function that takes as input a file and sample rate and returns a list of all the segments in that file in the format [(start1, end1, segname1), (start2, end2, segname2), ...] where start and end are in given in samples. Each tuple in the list can also have a 4th item, which can be any string. This string will get saved in segname.txt This is useful for isolating events of interest in audio files. For example, if the segment mapping function returns a list of where all speech occurs in the input audio, this will isolate all occurrences of speech into individual files. The 4th item may contain the annotation of what was said in the segment. """ def setup(self, segment_mapping_fn): self.segment_mapping_fn = segment_mapping_fn def run(self, in_file): self.log(logging.INFO, "Starting %s" % (in_file)) if not in_file.endswith(".wav"): self.log(logging.ERROR, "Failed %s. Not wav file" % (in_file)) return sr, original_data = sp.read_wave(in_file, first_channel=True) segments = self.segment_mapping_fn(in_file, sr) for segment in segments: if len(segment) == 3: start, end, seg_name = segment extra_info = None elif len(segment) == 4: start, end, seg_name, extra_info = segment else: self.log(logging.ERROR, "Failed %s. Segment length must be 3 or 4" % (in_file)) return seg_path = os.path.join(self.out_dir, "%s.wav" % seg_name) sp.write_wav(seg_path, sr, original_data[start:end]) extra_path = None if extra_info: extra_path = os.path.join(self.out_dir, "%s.txt" % seg_name) with open(extra_path, "w") as f: f.write(extra_info) self.emit([seg_path, extra_path])
36.865591
147
0.624763
980
6,857
4.17449
0.193878
0.046199
0.058176
0.039599
0.413835
0.388658
0.35737
0.285749
0.273527
0.273527
0
0.007708
0.262068
6,857
185
148
37.064865
0.800791
0.167858
0
0.259259
0
0
0.119655
0
0
0
0
0
0
1
0.12037
false
0
0.074074
0.037037
0.37037
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c776010ff719981072eef5b7305ecf5eee272758
12,914
py
Python
texar/torch/modules/pretrained/gpt2.py
VegB/VLN-Transformer
da1fa71e419d8d05c96749445230a77338edba09
[ "Apache-2.0" ]
19
2020-07-29T15:25:45.000Z
2022-01-19T17:49:42.000Z
texar/torch/modules/pretrained/gpt2.py
VegB/VLN-Transformer
da1fa71e419d8d05c96749445230a77338edba09
[ "Apache-2.0" ]
3
2021-02-16T10:26:23.000Z
2021-06-08T16:50:40.000Z
texar/torch/modules/pretrained/gpt2.py
VegB/VLN-Transformer
da1fa71e419d8d05c96749445230a77338edba09
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The Texar Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utils of GPT2 Modules. """ import json import os import warnings from abc import ABC from typing import Any, Dict import torch from texar.torch.modules.pretrained.pretrained_base import PretrainedMixin __all__ = [ "PretrainedGPT2Mixin", ] _GPT2_PATH = "https://storage.googleapis.com/gpt-2/models/" _CHECKPOINT_FILES = [ "checkpoint", "encoder.json", "hparams.json", "vocab.bpe", "model.ckpt.data-00000-of-00001", "model.ckpt.index", "model.ckpt.meta"] class PretrainedGPT2Mixin(PretrainedMixin, ABC): r"""A mixin class to support loading pre-trained checkpoints for modules that implement the GPT2 model. The GPT2 model was proposed in `Language Models are Unsupervised Multitask Learners`_ by `Radford et al.` from OpenAI. It is a unidirectional Transformer model pre-trained using the vanilla language modeling objective on a large corpus. The available GPT2 models are as follows: * ``gpt2-small``: Small version of GPT-2, 124M parameters. * ``gpt2-medium``: Medium version of GPT-2, 355M parameters. * ``gpt2-large``: Large version of GPT-2, 774M parameters. We provide the following GPT2 classes: * :class:`~texar.torch.modules.GPT2Encoder` for text encoding. * :class:`~texar.torch.modules.GPT2Decoder` for text generation and decoding. * :class:`~texar.torch.modules.GPT2Classifier` for text classification and sequence tagging. .. 
_`Language Models are Unsupervised Multitask Learners`: https://openai.com/blog/better-language-models/ """ _MODEL_NAME = "GPT2" _MODEL2URL = { 'gpt2-small': [_GPT2_PATH + f"124M/{file}" for file in _CHECKPOINT_FILES], 'gpt2-medium': [_GPT2_PATH + f"355M/{file}" for file in _CHECKPOINT_FILES], 'gpt2-large': [_GPT2_PATH + f"774M/{file}" for file in _CHECKPOINT_FILES], } _IS_DECODE = False # Raise warning for the deprecated pre-trained model names class MyDict(dict): def __contains__(self, key): if key == '117M': warnings.warn("Pre-trained model name '117M' is deprecated, " "use 'gpt2-small' instead.", UserWarning) return True elif key == '345M': warnings.warn("Pre-trained model name '345M' is deprecated, " "use 'gpt2-medium' instead.", UserWarning) return True else: return super().__contains__(key) _DEPRECATED_MODEL2URL = { '117M': [_GPT2_PATH + f"124M/{file}" for file in _CHECKPOINT_FILES], '345M': [_GPT2_PATH + f"355M/{file}" for file in _CHECKPOINT_FILES], } _MODEL2URL.update(_DEPRECATED_MODEL2URL) _MODEL2URL = MyDict(_MODEL2URL) # type: ignore def _transform_config(self, pretrained_model_name: str, # type: ignore cache_dir: str) -> Dict[str, Any]: info = list(os.walk(cache_dir)) root, _, files = info[0] config_path = None for file in files: if file.endswith('hparams.json'): config_path = os.path.join(root, file) if config_path is None: raise ValueError(f"Cannot find the config file in {cache_dir}") with open(config_path) as f: config_gpt = json.loads(f.read()) hidden_dim = config_gpt["n_embd"] configs = { "vocab_size": config_gpt["n_vocab"], "context_size": config_gpt["n_ctx"], "embedding_size": config_gpt["n_embd"], "embed": { "dim": hidden_dim, }, "position_size": config_gpt["n_ctx"], "position_embed": { "dim": hidden_dim } } module_name = 'decoder' if self._IS_DECODE else 'encoder' configs.update({module_name: { "dim": hidden_dim, "num_blocks": config_gpt["n_layer"], "embedding_dropout": 0, "residual_dropout": 0, "multihead_attention": { "use_bias": True, "num_units": hidden_dim, "num_heads": config_gpt["n_head"], "output_dim": hidden_dim, }, "initializer": { "type": "variance_scaling_initializer", "kwargs": { "factor": 1.0, "mode": "FAN_AVG", "uniform": True, }, }, "poswise_feedforward": { "layers": [ { "type": "Linear", "kwargs": { "in_features": hidden_dim, "out_features": hidden_dim * 4, "bias": True, } }, { "type": "GPTGELU", "kwargs": {} }, { "type": "Linear", "kwargs": { "in_features": hidden_dim * 4, "out_features": hidden_dim, "bias": True, } } ], "name": "ffn", }, }}) if self._IS_DECODE: configs[module_name].update({'use_gpt_config': True}) else: configs[module_name].update({'use_bert_config': False}) return configs def _init_from_checkpoint(self, pretrained_model_name: str, cache_dir: str, load_output_layer: bool = True, **kwargs): r"""Initialize model parameters from weights stored in the pre-trained checkpoint. Args: pretrained_model_name (str): Name of the pre-trained model. cache_dir (str): Path to the cache directory. load_output_layer (bool): If `False`, will not load weights of the output layer. Set this argument to `False` when loading weights into a GPT2 encoder. Defaults to `True`. """ try: import numpy as np import tensorflow as tf except ImportError: print("Loading TensorFlow models in PyTorch requires installing " "TensorFlow. 
Please see https://www.tensorflow.org/install/ " "for installation instructions.") raise module_name = 'decoder' if self._IS_DECODE else 'encoder' global_tensor_map = { "model/wte": "word_embedder.embedding", "model/wpe": "position_embedder.embedding", "model/ln_f/b": module_name + ".final_layer_norm.bias", "model/ln_f/g": module_name + ".final_layer_norm.weight", } layer_tensor_map = { "ln_1/b": module_name + ".self_attn_layer_norm.{}.bias", "ln_1/g": module_name + ".self_attn_layer_norm.{}.weight", "ln_2/b": module_name + ".poswise_layer_norm.{}.bias", "ln_2/g": module_name + ".poswise_layer_norm.{}.weight", "mlp/c_fc/b": module_name + ".poswise_networks.{}._layers.0.bias", "mlp/c_proj/b": module_name + ".poswise_networks.{}._layers.2.bias", "attn/c_proj/b": module_name + ".self_attns.{}.O_dense.bias", } layer_transpose_map = { "mlp/c_fc/w": module_name + ".poswise_networks.{}._layers.0.weight", "mlp/c_proj/w": module_name + ".poswise_networks.{}._layers.2." "weight", "attn/c_proj/w": module_name + ".self_attns.{}.O_dense.weight", } tf_path = os.path.abspath(os.path.join(cache_dir, 'model.ckpt')) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, _ in init_vars: array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) tensor_names = [] for name, _ in self.named_parameters(): tensor_names.append(name) for name, array in zip(names, arrays): if name in global_tensor_map: v_name = global_tensor_map[name] if name == "model/wte": pointer = self._name_to_variable(v_name) assert pointer.shape == array.shape pointer.data = torch.from_numpy(array) if load_output_layer: output_pointer = self._name_to_variable( "decoder._output_layer.weight") assert output_pointer.shape == array.shape output_pointer.data = torch.from_numpy(array) elif name == "model/wpe": pointer = self._name_to_variable(v_name) assert pointer.shape == array.shape pointer.data = torch.from_numpy(array) else: pointer = self._name_to_variable(v_name) assert pointer.shape == array.shape pointer.data = torch.from_numpy(array) else: name_tmp = name.split("/") layer_no = name_tmp[1][1:] name = "/".join(name_tmp[2:]) if name in layer_tensor_map: v_name = layer_tensor_map[name].format(layer_no) pointer = self._name_to_variable(v_name) assert pointer.shape == array.shape pointer.data = torch.from_numpy(array) elif name in layer_transpose_map: v_name = layer_transpose_map[name].format(layer_no) pointer = self._name_to_variable(v_name) array_t = np.transpose(array) assert pointer.shape == array_t.shape pointer.data = torch.from_numpy(array_t) elif name == "attn/c_attn/w": index_d = array.shape[-1] // 3 Q_w = np.transpose(array[:, :index_d]) K_w = np.transpose(array[:, index_d: 2 * index_d]) V_w = np.transpose(array[:, 2 * index_d:]) q_weight = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.Q_dense.weight") k_weight = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.K_dense.weight") v_weight = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.V_dense.weight") assert q_weight.shape == Q_w.shape assert k_weight.shape == K_w.shape assert v_weight.shape == V_w.shape q_weight.data = torch.from_numpy(Q_w) k_weight.data = torch.from_numpy(K_w) v_weight.data = torch.from_numpy(V_w) elif name == "attn/c_attn/b": d = array.shape[0] Q_b = array[: d // 3] K_b = array[d // 3: 2 * d // 3] V_b = array[2 * d // 3:] q_bias = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.Q_dense.bias") k_bias = 
self._name_to_variable( f"{module_name}.self_attns.{layer_no}.K_dense.bias") v_bias = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.V_dense.bias") assert q_bias.shape == Q_b.shape assert k_bias.shape == K_b.shape assert v_bias.shape == V_b.shape q_bias.data = torch.from_numpy(Q_b) k_bias.data = torch.from_numpy(K_b) v_bias.data = torch.from_numpy(V_b) else: print("Name error", name) raise Exception
40.73817
80
0.533065
1,440
12,914
4.53125
0.225
0.035249
0.018391
0.033103
0.321839
0.250881
0.17977
0.162452
0.155402
0.142529
0
0.015203
0.363327
12,914
316
81
40.867089
0.778399
0.160369
0
0.144068
0
0
0.190423
0.073658
0
0
0
0
0.050847
1
0.012712
false
0
0.042373
0
0.101695
0.008475
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c77641557884ec300d6f17e14694ed49328569cf
4,930
py
Python
Image classifier/train.py
anirudha-bs/Farm_assist
f824b7594befdb1132da2a5c03500a1885c6f036
[ "MIT" ]
null
null
null
Image classifier/train.py
anirudha-bs/Farm_assist
f824b7594befdb1132da2a5c03500a1885c6f036
[ "MIT" ]
null
null
null
Image classifier/train.py
anirudha-bs/Farm_assist
f824b7594befdb1132da2a5c03500a1885c6f036
[ "MIT" ]
null
null
null
from __future__ import absolute_import, division, print_function, unicode_literals

import tensorflow as tf
from keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
from keras.preprocessing import image
import os
import matplotlib.pyplot as plt

# defining classes
def soil(result):
    soil_type = ""
    if result[0] == 2:
        soil_type = "Red soil"
    elif result[0] == 1:
        soil_type = "Black soil"
    else:
        soil_type = "Alluvial soil"
    return soil_type

# Adding dataset paths
PATH = 'new_datasets'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
test_dir = os.path.join(PATH, 'test')

train_red_dir = os.path.join(train_dir, 'Red_soil')
validation_red_dir = os.path.join(validation_dir, 'Red_soil')
train_black_dir = os.path.join(train_dir, 'Black_soil')
validation_black_dir = os.path.join(validation_dir, 'Black_soil')
train_all_dir = os.path.join(train_dir, 'Alluvial_soil')
validation_all_dir = os.path.join(validation_dir, 'Alluvial_soil')

num_soil_tr = len(os.listdir(train_red_dir)) + len(os.listdir(train_black_dir)) + len(os.listdir(train_all_dir))
num_soil_val = len(os.listdir(validation_red_dir)) + len(os.listdir(validation_black_dir)) + len(os.listdir(validation_all_dir))

print("Total training images = ", num_soil_tr)
print("Total validation images = ", num_soil_val)

# hyperparameters
batch_size = 100
epochs = 15
IMG_HEIGHT = 128
IMG_WIDTH = 128
classes_num = 3

# data generators
train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)

train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                           directory=train_dir,
                                                           shuffle=True,
                                                           target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                           class_mode='categorical')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
                                                              directory=validation_dir,
                                                              target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                              shuffle=True,
                                                              class_mode='categorical')

# defining the model
model = Sequential([
    Conv2D(16, 5, activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(pool_size=(3, 3)),
    Dropout(0.2),
    Conv2D(32, 5, activation='relu'),
    MaxPooling2D(pool_size=(3, 3)),
    Dropout(0.2),
    Conv2D(64, 5, activation='relu'),
    MaxPooling2D(pool_size=(3, 3)),
    Dropout(0.3),
    Flatten(),
    Dense(32, activation='relu'),
    Dense(classes_num, activation='softmax')
])

# categorical cross-entropy matches the 3-class softmax output and the
# class_mode='categorical' generators above
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()

history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=num_soil_tr // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=num_soil_val // batch_size
)

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)

# training and validation graphs
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

model.save('new_soil_classify.h5')

# testing the trained model with one image from each class
image_path = "red.jpg"
img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img)
img = np.expand_dims(img, axis=0)
result = model.predict_classes(img)
plt.title(result[0])
plt.show()

image_path1 = "black.jpg"
img1 = image.load_img(image_path1, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img1)
img1 = np.expand_dims(img1, axis=0)
result = model.predict_classes(img1)
plt.title(result[0])
plt.show()

image_path = "all.jpg"
img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img)
img = np.expand_dims(img, axis=0)
result = model.predict_classes(img)
plt.title(result[0])
plt.show()
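Sequential.predict_classes was removed from tf.keras in later TensorFlow releases; under that assumption, an equivalent of the prediction calls above is a sketch like:

# Equivalent of predict_classes where it is no longer available:
# take the argmax over the softmax outputs.
probs = model.predict(img)           # shape (1, classes_num)
result = np.argmax(probs, axis=1)    # e.g. array([2])
print(soil(result))                  # map the class index to a soil name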
29.878788
130
0.683773
663
4,930
4.846154
0.232278
0.014006
0.02521
0.036415
0.372238
0.303455
0.185496
0.175537
0.164332
0.150638
0
0.019847
0.20284
4,930
164
131
30.060976
0.79771
0.035294
0
0.239316
0
0
0.099221
0
0
0
0
0
0
1
0.008547
false
0
0.111111
0
0.128205
0.025641
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c776c16efce7e570422a5d1752b829a85d1dbe4b
686
py
Python
questions/q118_linked_list_loop_removal/code.py
aadhityasw/Competitive-Programs
901a48d35f024a3a87c32a45b7f4531e8004a203
[ "MIT" ]
null
null
null
questions/q118_linked_list_loop_removal/code.py
aadhityasw/Competitive-Programs
901a48d35f024a3a87c32a45b7f4531e8004a203
[ "MIT" ]
1
2021-05-15T07:56:51.000Z
2021-05-15T07:56:51.000Z
questions/q118_linked_list_loop_removal/code.py
aadhityasw/Competitive-Programs
901a48d35f024a3a87c32a45b7f4531e8004a203
[ "MIT" ]
null
null
null
def removeLoop(head):
    # Floyd's cycle detection: advance a slow and a fast pointer until they meet.
    ptr = head
    ptr2 = head
    while True:
        if ptr is None or ptr2 is None or ptr2.next is None:
            return  # no loop present
        ptr = ptr.next
        ptr2 = ptr2.next.next
        if ptr is ptr2:
            loopNode = ptr
            break
    # Count the number of nodes in the loop.
    ptr = loopNode.next
    count = 1
    while ptr is not loopNode:
        ptr = ptr.next
        count += 1
    # Keep two pointers `count` nodes apart and advance them together; they meet
    # at the first node of the loop. ptr1 trails one node behind so its `next`
    # link can be severed to break the loop.
    ptr = head
    ptr1 = head
    ptr2 = head.next
    while count > 1:
        ptr2 = ptr2.next
        ptr1 = ptr1.next
        count -= 1
    while ptr is not ptr2:
        ptr = ptr.next
        ptr2 = ptr2.next
        ptr1 = ptr1.next
    ptr1.next = None
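The function assumes the caller supplies the node type. A minimal harness follows; the Node class here is a hypothetical stand-in for whatever the judge normally provides.

# Minimal harness; Node is an assumed stand-in for the judge-supplied class.
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

# Build 1 -> 2 -> 3 -> 4 and loop the tail back to node 2.
head = Node(1)
n2, n3, n4 = Node(2), Node(3), Node(4)
head.next, n2.next, n3.next, n4.next = n2, n3, n4, n2

removeLoop(head)

# The list now terminates: prints 1 2 3 4
ptr = head
while ptr is not None:
    print(ptr.data, end=" ")
    ptr = ptr.next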
19.055556
61
0.478134
87
686
3.770115
0.218391
0.060976
0.091463
0.073171
0.371951
0.371951
0.140244
0
0
0
0
0.05914
0.457726
686
35
62
19.6
0.822581
0
0
0.321429
0
0
0
0
0
0
0
0
0
1
0.035714
false
0
0
0
0.071429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c779118332635de2c8ae2f98f07d435f86ed8e76
2,361
py
Python
fastrunner/httprunner3/report/html/gen_report.py
Chankee/AutoTestRunner
5f329b0dfac91ccd3541aabf46cc997cc4f01da3
[ "MIT" ]
1
2020-04-30T08:41:19.000Z
2020-04-30T08:41:19.000Z
httprunner/report/html/gen_report.py
Barronliu/httprunner
463b8c68cbd413fd2bb66852752149bc1609e98d
[ "Apache-2.0" ]
null
null
null
httprunner/report/html/gen_report.py
Barronliu/httprunner
463b8c68cbd413fd2bb66852752149bc1609e98d
[ "Apache-2.0" ]
null
null
null
import io
import os
from datetime import datetime

from jinja2 import Template
from loguru import logger

from httprunner.exceptions import SummaryEmpty


def gen_html_report(summary, report_template=None, report_dir=None, report_file=None):
    """ render html report with specified report name and template

    Args:
        summary (dict): test result summary data
        report_template (str): specify html report template path, template should be in Jinja2 format.
        report_dir (str): specify html report save directory
        report_file (str): specify html report file path, this has higher priority than specifying report dir.

    """
    if not summary["time"] or summary["stat"]["testcases"]["total"] == 0:
        logger.error(f"test result summary is empty ! {summary}")
        raise SummaryEmpty

    if not report_template:
        report_template = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "template.html"
        )
        logger.debug("No html report template specified, use default.")
    else:
        logger.info(f"render with html report template: {report_template}")

    logger.info("Start to render Html report ...")

    start_at_timestamp = summary["time"]["start_at"]
    utc_time_iso_8601_str = datetime.utcfromtimestamp(start_at_timestamp).isoformat()
    summary["time"]["start_datetime"] = utc_time_iso_8601_str

    if report_file:
        report_dir = os.path.dirname(report_file)
        report_file_name = os.path.basename(report_file)
    else:
        report_dir = report_dir or os.path.join(os.getcwd(), "reports")
        # fix #826: Windows does not support file name include ":"
        report_file_name = "{}.html".format(utc_time_iso_8601_str.replace(":", "").replace("-", ""))

    if not os.path.isdir(report_dir):
        os.makedirs(report_dir)

    report_path = os.path.join(report_dir, report_file_name)

    with io.open(report_template, "r", encoding='utf-8') as fp_r:
        template_content = fp_r.read()
        with io.open(report_path, 'w', encoding='utf-8') as fp_w:
            rendered_content = Template(
                template_content, extensions=["jinja2.ext.loopcontrols"]
            ).render(summary)
            fp_w.write(rendered_content)

    logger.info(f"Generated Html report: {report_path}")

    return report_path
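A hedged usage sketch: the summary dict below is a minimal stand-in for HttpRunner's real result structure, and the bundled template may expect additional keys beyond those shown.

import time

# Hypothetical minimal summary; real HttpRunner summaries carry more detail.
summary = {
    "time": {"start_at": time.time(), "duration": 1.23},
    "stat": {"testcases": {"total": 1, "success": 1, "fail": 0}},
    "platform": {},
    "details": [],
}
report_path = gen_html_report(summary, report_dir="reports")
print(report_path)  # e.g. reports/20200430T084119.html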
36.323077
110
0.674714
310
2,361
4.941935
0.33871
0.058747
0.027415
0.039164
0.054178
0
0
0
0
0
0
0.011394
0.219399
2,361
64
111
36.890625
0.819859
0.182126
0
0.04878
0
0
0.169304
0.012131
0
0
0
0
0
1
0.02439
false
0
0.146341
0
0.195122
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c779400f9f454e7ffcd25d7cea5b32ebe4fe996a
730
py
Python
SD/lab1/client.py
matheuscr30/UFU
e947e5a4ccd5c025cb8ef6e00b42ea1160742712
[ "MIT" ]
null
null
null
SD/lab1/client.py
matheuscr30/UFU
e947e5a4ccd5c025cb8ef6e00b42ea1160742712
[ "MIT" ]
11
2020-01-28T22:59:24.000Z
2022-03-11T23:59:04.000Z
SD/lab1/client.py
matheuscr30/UFU
e947e5a4ccd5c025cb8ef6e00b42ea1160742712
[ "MIT" ]
null
null
null
#client.py
#!/usr/bin/python
# This is client.py file

import socket  # Import socket module

s = socket.socket()          # Create a socket object
host = socket.gethostname()  # Get local machine name
port = 12352                 # Reserve a port for your service.
s.connect((host, port))

while True:
    message = input('Enter message: ')
    s.send(bytes(message, encoding='utf8'))
    if message == 'SAIR':  # 'SAIR' ("exit") is the protocol's quit keyword
        break
    print('Message sent.')
    print('Waiting for a reply.')
    answer = s.recv(1024).decode('utf8')
    print('Reply received: ' + answer)

print('Disconnecting.')
s.close()
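The client expects a peer listening on port 12352. A minimal companion server sketch (not part of the original file) that echoes each message back in upper case:

# Hypothetical companion server for the client above.
import socket

srv = socket.socket()
srv.bind((socket.gethostname(), 12352))
srv.listen(1)
conn, addr = srv.accept()
while True:
    data = conn.recv(1024).decode('utf8')
    if not data or data == 'SAIR':  # stop on disconnect or the quit keyword
        break
    conn.send(bytes(data.upper(), encoding='utf8'))
conn.close()
srv.close()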
27.037037
82
0.536986
77
730
5.090909
0.675325
0.040816
0
0
0
0
0
0
0
0
0
0.023158
0.349315
730
26
83
28.076923
0.802105
0.245205
0
0
0
0
0.180147
0
0
0
0
0
0
1
0
false
0
0.0625
0
0.0625
0.25
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c77b3c34564c716c04ed2a2e2297c397f73e511f
1,741
py
Python
homeassistant/components/kaiterra/const.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
homeassistant/components/kaiterra/const.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
24,710
2016-04-13T08:27:26.000Z
2020-03-02T12:59:13.000Z
homeassistant/components/kaiterra/const.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Consts for Kaiterra integration.""" from datetime import timedelta from homeassistant.const import ( CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER, CONCENTRATION_PARTS_PER_BILLION, CONCENTRATION_PARTS_PER_MILLION, PERCENTAGE, Platform, ) DOMAIN = "kaiterra" DISPATCHER_KAITERRA = "kaiterra_update" AQI_SCALE = { "cn": [0, 50, 100, 150, 200, 300, 400, 500], "in": [0, 50, 100, 200, 300, 400, 500], "us": [0, 50, 100, 150, 200, 300, 500], } AQI_LEVEL = { "cn": [ "Good", "Satisfactory", "Moderate", "Unhealthy for sensitive groups", "Unhealthy", "Very unhealthy", "Hazardous", ], "in": [ "Good", "Satisfactory", "Moderately polluted", "Poor", "Very poor", "Severe", ], "us": [ "Good", "Moderate", "Unhealthy for sensitive groups", "Unhealthy", "Very unhealthy", "Hazardous", ], } ATTR_VOC = "volatile_organic_compounds" ATTR_AQI_LEVEL = "air_quality_index_level" ATTR_AQI_POLLUTANT = "air_quality_index_pollutant" AVAILABLE_AQI_STANDARDS = ["us", "cn", "in"] AVAILABLE_UNITS = [ "x", PERCENTAGE, "C", "F", CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER, CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, CONCENTRATION_PARTS_PER_MILLION, CONCENTRATION_PARTS_PER_BILLION, ] AVAILABLE_DEVICE_TYPES = ["laseregg", "sensedge"] CONF_AQI_STANDARD = "aqi_standard" CONF_PREFERRED_UNITS = "preferred_units" DEFAULT_AQI_STANDARD = "us" DEFAULT_PREFERRED_UNIT: list[str] = [] DEFAULT_SCAN_INTERVAL = timedelta(seconds=30) PLATFORMS = [Platform.SENSOR, Platform.AIR_QUALITY]
22.907895
51
0.649627
184
1,741
5.804348
0.445652
0.029963
0.048689
0.097378
0.325843
0.325843
0.123596
0.123596
0.123596
0
0
0.043997
0.229753
1,741
75
52
23.213333
0.752424
0.01838
0
0.4375
0
0
0.222548
0.044627
0
0
0
0
0
1
0
false
0
0.03125
0
0.03125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c77bfffe662ca6c238ec477ceec482de486d7271
2,931
py
Python
timeline/models.py
KolibriSolutions/BepMarketplace
c47d252fd744cde6b927e37c34d7a103c6162be5
[ "BSD-3-Clause" ]
1
2019-06-29T15:24:24.000Z
2019-06-29T15:24:24.000Z
timeline/models.py
KolibriSolutions/BepMarketplace
c47d252fd744cde6b927e37c34d7a103c6162be5
[ "BSD-3-Clause" ]
2
2020-01-12T17:47:33.000Z
2020-01-12T17:47:45.000Z
timeline/models.py
KolibriSolutions/BepMarketplace
c47d252fd744cde6b927e37c34d7a103c6162be5
[ "BSD-3-Clause" ]
2
2019-06-29T15:24:26.000Z
2020-01-08T15:15:03.000Z
# Bep Marketplace ELE
# Copyright (c) 2016-2021 Kolibri Solutions
# License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE
#
from datetime import datetime

from django.core.exceptions import ValidationError
from django.db import models


class TimeSlot(models.Model):
    """A timeslot is a year in which the current BEP runs. It consists of multiple timephases."""

    Name = models.CharField(max_length=250)
    Begin = models.DateField()
    End = models.DateField()

    def __str__(self):
        return self.Name

    def clean(self):
        if not self.Begin or not self.End:
            raise ValidationError('Please fill in all required fields.')
        if self.Begin > self.End:
            raise ValidationError("End date should be larger than begin date")

    class Meta:
        ordering = ["Begin"]

    def is_finished(self):
        return self.End < datetime.now().date()


class TimePhase(models.Model):
    """A time phase is a phase the system is in. Each phase has its own pages and permissions."""

    Types = (
        (1, "Generating project proposals"),
        (2, "Projects quality check"),
        (3, "Students choosing projects"),
        (4, "Distribution of projects"),
        (5, "Gather and process objections"),
        (6, "Execution of the projects"),
        (7, "Presentation of results"),
    )

    Description = models.IntegerField(choices=Types)
    Begin = models.DateField()
    End = models.DateField()
    CountdownEnd = models.DateField(null=True, blank=True,
                                    help_text='Fake end date, to set the homepage clock to an earlier date. '
                                              'A trick to motivate people.')
    TimeSlot = models.ForeignKey(TimeSlot, on_delete=models.PROTECT, related_name="timephases")

    def __str__(self):
        return self.Types[self.Description - 1][1] + " in " + str(self.TimeSlot)

    def clean(self):
        if not self.Begin or not self.End or not hasattr(self, 'TimeSlot'):
            raise ValidationError('Please fill in all required fields.')
        if self.Begin > self.End:
            raise ValidationError("End date should be larger than begin date")
        if not (self.TimeSlot.Begin <= self.Begin <= self.TimeSlot.End):
            raise ValidationError("Begin date should be in time slot {}".format(self.TimeSlot))
        if not (self.TimeSlot.Begin <= self.End <= self.TimeSlot.End):
            raise ValidationError("End date should be in time slot {}".format(self.TimeSlot))
        if self.TimeSlot.timephases.filter(Description=self.Description).exists():
            if self.TimeSlot.timephases.get(Description=self.Description) != self:
                raise ValidationError("Time slot {} already has time phase {}".format(self.TimeSlot, self.Description))

    class Meta:
        ordering = ['TimeSlot', 'Begin']
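A hypothetical sketch of the clean() validation above in use; it assumes a configured Django project with this app installed and migrated.

from datetime import date
from django.core.exceptions import ValidationError

slot = TimeSlot(Name="2020-2021", Begin=date(2020, 9, 1), End=date(2021, 7, 1))
slot.full_clean()  # passes: End is after Begin
slot.save()

# Begin falls outside the slot, so validation rejects this phase.
phase = TimePhase(Description=1, Begin=date(2021, 8, 1), End=date(2021, 9, 1), TimeSlot=slot)
try:
    phase.full_clean()
except ValidationError as e:
    print(e)  # "Begin date should be in time slot 2020-2021"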
38.565789
119
0.643125
357
2,931
5.243697
0.392157
0.070513
0.061432
0.043269
0.342415
0.29594
0.228632
0.214744
0.214744
0.214744
0
0.009107
0.250768
2,931
75
120
39.08
0.843352
0.116343
0
0.313725
0
0
0.221049
0
0
0
0
0
0
1
0.098039
false
0
0.058824
0.058824
0.470588
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c77e4ddc9f8fe255a8511d43e707cc1ce8c44d20
19,717
py
Python
timeflux/nodes/ml.py
OpenMindInnovation/timeflux
fd27ea6706df80fa52fb73ea3dba65e14ccd088c
[ "MIT" ]
null
null
null
timeflux/nodes/ml.py
OpenMindInnovation/timeflux
fd27ea6706df80fa52fb73ea3dba65e14ccd088c
[ "MIT" ]
null
null
null
timeflux/nodes/ml.py
OpenMindInnovation/timeflux
fd27ea6706df80fa52fb73ea3dba65e14ccd088c
[ "MIT" ]
null
null
null
"""Machine Learning""" import importlib import numpy as np import pandas as pd import json from jsonschema import validate from sklearn.pipeline import make_pipeline from timeflux.core.node import Node from timeflux.core.exceptions import ValidationError, WorkerInterrupt from timeflux.helpers.background import Task from timeflux.helpers.port import make_event, match_events, get_meta from timeflux.helpers.clock import now, min_time, max_time # Statuses IDLE = 0 ACCUMULATING = 1 FITTING = 2 READY = 3 class Pipeline(Node): """Fit, transform and predict. Training on continuous data is always unsupervised. Training on epoched data can either be supervised or unsupervised. If fit is `False`, input events are ignored, and initital training is not performed. Automatically set to False if mode is either 'fit_predict' or fit_transform'. Automatically set to True if mode is either 'predict', 'predict_proba' or 'predict_log_proba'. Attributes: i (Port): Continuous data input, expects DataFrame. i_* (Port): Epoched data input, expects DataFrame. i_training (Port): Continuous training data input, expects DataFrame. i_training_* (Port): Epoched training data input, expects DataFrame. i_events (Port): Event input, expects DataFrame. o (Port): Continuous data output, provides DataFrame. o_* (Port): Epoched data output, provides DataFrame. o_events (Port): Event output, provides DataFrame. Args: steps (dict): Pipeline steps and settings fit (bool): mode ('predict'|'predict_proba'|'predict_log_proba'|'transform'|'fit_predict'|'fit_transform'): meta_label (str|tuple|None): event_start_accumulation (str): event_stop_accumulation (str): event_start_training (str): event_reset (str): buffer_size (str): passthrough (bool): resample (bool): resample_direction ('right'|'left'|'both'): resample_rate (None|float): model: Load a pickle model - NOT IMPLEMENTED cv: Cross-validation - NOT IMPLEMENTED """ def __init__( self, steps, fit=True, mode="predict", meta_label=("epoch", "context", "target"), event_start_accumulation="accumulation_starts", event_stop_accumulation="accumulation_stops", event_start_training="training_starts", event_reset=None, buffer_size="5s", passthrough=False, resample=False, resample_direction="right", resample_rate=None, model=None, cv=None, use_task = True, ): # TODO: validation # TODO: model loading from file # TODO: cross-validation # TODO: provide more context for errors self.fit = fit self.mode = mode self.meta_label = meta_label self.event_start_accumulation = event_start_accumulation self.event_stop_accumulation = event_stop_accumulation self.event_start_training = event_start_training self.event_reset = event_reset self.passthrough = passthrough self.resample = resample self.resample_direction = resample_direction self.resample_rate = resample_rate self.use_task = use_task self._buffer_size = pd.Timedelta(buffer_size) self._make_pipeline(steps) self._reset() def update(self): # Let's get ready self._clear() # Reset if self.event_reset: matches = match_events(self.i_events, self.event_reset) if matches is not None: self.logger.debug("Reset") if self._task is not None: if self._status == FITTING: self._task.stop() self._reset() # Are we dealing with continuous data or epochs? 
if self._dimensions is None: port_name = "i_training" if self.fit else "i" if getattr(self, port_name).ready(): self._dimensions = 2 elif len(list(self.iterate(port_name + "_*"))) > 0: self._dimensions = 3 # Set the accumulation boundaries if self._accumulation_start is None: matches = match_events(self.i_events, self.event_start_accumulation) if matches is not None: self._accumulation_start = matches.index.values[0] self._status = ACCUMULATING self.logger.debug("Start accumulation") if self._accumulation_stop is None: matches = match_events(self.i_events, self.event_stop_accumulation) if matches is not None: self._accumulation_stop = matches.index.values[0] self.logger.debug("Stop accumulation") # Always buffer a few seconds, in case the start event is coming late if self._status == IDLE: start = (now() - self._buffer_size).to_datetime64() stop = max_time() self._accumulate(start, stop) # Accumulate between boundaries if self._status == ACCUMULATING: start = self._accumulation_start stop = self._accumulation_stop if self._accumulation_stop else max_time() self._accumulate(start, stop) # Should we start fitting the model? if self._status < FITTING: if match_events(self.i_events, self.event_start_training) is not None: self._status = FITTING self.logger.debug("Start training") if self.use_task: self._task = Task( self._pipeline, "fit", self._X_train, self._y_train ).start() else: try: self._pipeline = self._pipeline.fit(self._X_train, self._y_train) self._fitted_success = True except Exception as error: self._fitted_success = False # Is the model ready? if self._status == FITTING: ready_to_proceed = False if self.use_task: status = self._task.status() if status: ready_to_proceed = True else: ready_to_proceed = True if ready_to_proceed: if self.use_task: success = status["success"] else: success = self._fitted_success if success: if self.use_task: self._pipeline = status["instance"] self.logger.debug(f"Model fitted in {status['time']} seconds") else: self.logger.debug(f"Model fitted") self._status = READY # TODO: this can potentially be overwritten in _send() self.o_events.data = make_event("ready") else: if self.use_task: self.logger.error( f"An error occured while fitting: {status['exception'].args[0]}" ) self.logger.debug( "\nTraceback (most recent call last):\n" + "".join(status["traceback"]) ) else: self.logger.error( f"An error occured while fitting" ) raise WorkerInterrupt() # Run the pipeline if self._status == READY: self._receive() if self._X is not None: args = [self._X] if self.mode.startswith("fit"): args.append(self._y) # TODO: optionally loop through epochs instead of sending them all at once self._out = getattr(self._pipeline, self.mode)(*args) # Set output streams self._send() def terminate(self): # Kill the fit subprocess if self._task is not None: self._task.stop() def _reset(self): self._X_train = None self._y_train = None self._X_train_indices = np.array([], dtype=np.datetime64) self._accumulation_start = None self._accumulation_stop = None self._dimensions = None self._shape = () self._task = None if self.mode.startswith("fit"): self.fit = False elif self.mode.startswith("predict"): self.fit = True if self.fit: self._status = IDLE else: self._status = READY def _clear(self): self._X = None self._y = None self._X_indices = [] self._X_columns = [] self._X_meta = None self._out = None def _make_pipeline(self, steps): schema = { "type": "array", "minItems": 1, "items": { "type": "object", "properties": { "module": {"type": "string"}, "class": {"type": "string"}, "args": 
{"type": "object"}, }, "required": ["module", "class"], }, } try: validate(instance=steps, schema=schema) except Exception as error: raise ValidationError("steps", error.message) pipeline = [] for step in steps: try: args = step["args"] if "args" in step else {} m = importlib.import_module(step["module"]) c = getattr(m, step["class"]) i = c(**args) pipeline.append(i) except ImportError as error: raise ValidationError("steps", f"could not import '{step['module']}'") except AttributeError as error: raise ValidationError( "steps", f"could not find class '{step['class']}'" ) except TypeError as error: raise ValidationError( "steps", f"could not instantiate class '{step['class']}' with the given params", ) # TODO: memory and verbose args self._pipeline = make_pipeline(*pipeline, memory=None, verbose=False) def _accumulate(self, start, stop): # Do nothing if no fitting required if not self.fit: return # Set defaults indices = np.array([], dtype=np.datetime64) # Accumulate continuous data if self._dimensions == 2: if self.i_training.ready(): data = self.i_training.data mask = (data.index >= start) & (data.index < stop) data = data[mask] if not data.empty: if self._X_train is None: self._X_train = data.values self._shape = self._X_train.shape[1] indices = data.index.values else: if data.shape[1] == self._shape: self._X_train = np.vstack((self._X_train, data.values)) indices = data.index.values else: self.logger.warning("Invalid shape") # Accumulate epoched data if self._dimensions == 3: for _, _, port in self.iterate("i_training_*"): if port.ready(): index = port.data.index.values[0] if index >= start and index < stop: data = port.data.values label = get_meta(port, self.meta_label) if self._shape and (data.shape != self._shape): self.logger.warning("Invalid shape") continue if self.meta_label is not None and label is None: self.logger.warning("Invalid label") continue if self._X_train is None: self._X_train = np.array([data]) self._shape = self._X_train.shape[1:] else: self._X_train = np.vstack((self._X_train, [data])) indices = np.append(indices, index) if label is not None: if self._y_train is None: self._y_train = np.array([label]) else: self._y_train = np.append(self._y_train, [label]) # Store indices if indices.size != 0: self._X_train_indices = np.append(self._X_train_indices, indices) # Trim if self._X_train is not None: mask = (self._X_train_indices >= start) & (self._X_train_indices < stop) self._X_train = self._X_train[mask] self._X_train_indices = self._X_train_indices[mask] if self._y_train is not None: self._y_train = self._y_train[mask] def _receive(self): # Continuous data if self._dimensions == 2: if self.i.ready(): if not self._X_columns: self._X_columns = list(self.i.data.columns) if self._shape and (self.i.data.shape[1] != self._shape): self.logger.warning("Invalid shape") else: self._X = self.i.data.values self._X_indices = self.i.data.index.values self._X_meta = self.i.meta # Epochs if self._dimensions == 3: for name, _, port in self.iterate("i_*"): if port.ready() and "training" not in name and "events" not in name: data = port.data.values meta = port.meta indices = port.data.index.values label = get_meta(port, self.meta_label) if not self._X_columns: self._X_columns = list(port.data.columns) if self._shape and (data.shape != self._shape): self.logger.warning("Invalid shape") continue if not self.fit and self.meta_label is not None and label is None: self.logger.warning("Invalid label") continue if self._X is None: self._X = [] if self._y is None and label is not None: 
self._y = [] if self._X_meta is None: self._X_meta = [] self._X.append(data) self._X_indices.append(indices) self._X_meta.append(meta) if label is not None: self._y.append(label) def _send(self): # Passthrough if self._status < READY and self.passthrough: inputs = [] for _, suffix, port in self.iterate("i*"): if not suffix.startswith("_training") and not suffix.startswith( "_events" ): inputs.append((suffix, port)) for suffix, src_port in inputs: dst_port = getattr(self, "o" + suffix) dst_port.data = src_port.data dst_port.meta = src_port.meta # Model if self._out is not None: if "predict" in self.mode: # Send events if len(self._X_indices) == len(self._out): # TODO: skip JSON serialization? data = [ [self.mode, json.dumps({"result": self._np_to_native(result)})] for result in self._out ] times = ( self._X_indices if self._dimensions == 2 else np.asarray(self._X_indices)[:, 0] ) # Keep the first timestamp of each epoch names = ["label", "data"] meta = ( self._X_meta if self._dimensions == 2 else {"epochs": self._X_meta} ) # port.meta should always be an object self.o_events.set(data, times, names, meta) else: self.logger.warning( "Number of predictions inconsistent with input length" ) else: # Send data if self._dimensions == 2: try: self.o.data = self._reindex( self._out, self._X_indices, self._X_columns ) self.o.meta = self._X_meta except Exception as e: self.logger.warning(getattr(e, "message", repr(e))) if self._dimensions == 3: if len(self._X_indices) == len(self._out): for i, (data, times) in enumerate( zip(self._out, self._X_indices) ): try: getattr(self, "o_" + str(i)).data = self._reindex( data, times, self._X_columns ) getattr(self, "o_" + str(i)).meta = self._X_meta[i] except Exception as e: self.logger.warning(getattr(e, "message", repr(e))) else: self.logger.warning( "Number of transforms inconsistent with number of epochs" ) def _np_to_native(self, data): """Convert numpy scalars and objects to native types.""" return getattr(data, "tolist", lambda: data)() def _reindex(self, data, times, columns): if len(data) != len(times): if self.resample: # Resample at a specific frequency kwargs = {"periods": len(data)} if self.resample_rate is None: kwargs["freq"] = pd.infer_freq(times) kwargs["freq"] = pd.tseries.frequencies.to_offset(kwargs["freq"]) else: kwargs["freq"] = pd.DateOffset(seconds=1 / self.resample_rate) if self.resample_direction == "right": kwargs["start"] = times[0] elif self.resample_direction == "left": kwargs["end"] = times[-1] else: def middle(a): return int(np.ceil(len(a) / 2)) - 1 kwargs["start"] = times[middle(times)] - ( middle(data) * kwargs["freq"] ) times = pd.date_range(**kwargs) else: # Linearly arange between first and last times = pd.date_range(start=times[0], end=times[-1], periods=len(data)) return pd.DataFrame(data, times, columns)
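A configuration sketch for the `steps` argument, matching the JSON schema enforced in `_make_pipeline` (each step requires "module" and "class", with an optional "args" object); the scikit-learn estimator choices are illustrative:

steps = [
    {"module": "sklearn.preprocessing", "class": "StandardScaler"},
    {
        "module": "sklearn.linear_model",
        "class": "LogisticRegression",
        "args": {"C": 1.0},
    },
]
# Each step is resolved with importlib and chained via sklearn's make_pipeline,
# so this is equivalent to make_pipeline(StandardScaler(), LogisticRegression(C=1.0)).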
39.121032
103
0.502054
2,041
19,717
4.656541
0.149927
0.028935
0.0242
0.010943
0.284617
0.213173
0.162774
0.139625
0.103009
0.052609
0
0.003437
0.409799
19,717
503
104
39.198807
0.813268
0.125932
0
0.251969
0
0
0.058392
0.0017
0
0
0
0.001988
0
1
0.031496
false
0.007874
0.036745
0.002625
0.081365
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c781463cac684dcc8d5bd7e224347018ce45563c
3,641
py
Python
1-lab-lambdaDynamoDB/source/cdk/app.py
donnieprakoso/workshop-buildingRESTAPIwithAWS
b3287d5749b65648710dde4e736ba55b73371c6b
[ "Apache-2.0" ]
23
2021-04-24T06:32:58.000Z
2022-03-27T11:04:57.000Z
1-lab-lambdaDynamoDB/source/cdk/app.py
ivandi1980/workshop-restAPI
b3287d5749b65648710dde4e736ba55b73371c6b
[ "Apache-2.0" ]
null
null
null
1-lab-lambdaDynamoDB/source/cdk/app.py
ivandi1980/workshop-restAPI
b3287d5749b65648710dde4e736ba55b73371c6b
[ "Apache-2.0" ]
5
2021-04-24T12:10:02.000Z
2021-11-18T13:34:33.000Z
#!/usr/bin/env python3

from aws_cdk import aws_iam as _iam
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_dynamodb as _ddb
from aws_cdk import core


class CdkStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, stack_prefix: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Model all required resources
        ddb_table = _ddb.Table(
            self,
            id='{}-data'.format(stack_prefix),
            table_name='{}-data'.format(stack_prefix),
            partition_key=_ddb.Attribute(name='ID', type=_ddb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY,  # THIS IS NOT RECOMMENDED FOR PRODUCTION USE
            read_capacity=1,
            write_capacity=1)

        ## IAM Roles
        lambda_role = _iam.Role(
            self,
            id='{}-lambda-role'.format(stack_prefix),
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))

        cw_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
        cw_policy_statement.add_actions("logs:CreateLogGroup")
        cw_policy_statement.add_actions("logs:CreateLogStream")
        cw_policy_statement.add_actions("logs:PutLogEvents")
        cw_policy_statement.add_actions("logs:DescribeLogStreams")
        cw_policy_statement.add_resources("*")
        lambda_role.add_to_policy(cw_policy_statement)

        # Add role for DynamoDB
        dynamodb_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        dynamodb_policy_statement.add_actions("dynamodb:PutItem")
        dynamodb_policy_statement.add_actions("dynamodb:GetItem")
        dynamodb_policy_statement.add_actions("dynamodb:Scan")
        dynamodb_policy_statement.add_actions("dynamodb:Query")
        dynamodb_policy_statement.add_actions("dynamodb:ConditionCheckItem")
        dynamodb_policy_statement.add_resources(ddb_table.table_arn)
        lambda_role.add_to_policy(dynamodb_policy_statement)

        ## AWS Lambda Functions
        fnLambda_storeData = _lambda.Function(
            self,
            "{}-function-storeData".format(stack_prefix),
            code=_lambda.AssetCode("../lambda-functions/store-data"),
            handler="app.handler",
            timeout=core.Duration.seconds(60),
            role=lambda_role,
            runtime=_lambda.Runtime.PYTHON_3_8)
        fnLambda_storeData.add_environment("TABLE_NAME", ddb_table.table_name)

        fnLambda_listData = _lambda.Function(
            self,
            "{}-function-getData".format(stack_prefix),
            code=_lambda.AssetCode("../lambda-functions/list-data"),
            handler="app.handler",
            role=lambda_role,
            timeout=core.Duration.seconds(60),
            runtime=_lambda.Runtime.PYTHON_3_8)
        fnLambda_listData.add_environment("TABLE_NAME", ddb_table.table_name)

        core.CfnOutput(self, "{}-output-dynamodbTable".format(stack_prefix),
                       value=ddb_table.table_name,
                       export_name="{}-ddbTable".format(stack_prefix))
        core.CfnOutput(self, "{}-output-lambdaStoreData".format(stack_prefix),
                       value=fnLambda_storeData.function_name,
                       export_name="{}-lambdaStoreDataName".format(stack_prefix))
        core.CfnOutput(self, "{}-output-lambdaListData".format(stack_prefix),
                       value=fnLambda_listData.function_name,
                       export_name="{}-lambdaListDataName".format(stack_prefix))


stack_prefix = 'restAPI-lab1-lambdaDynamoDB'
app = core.App()
stack = CdkStack(app, stack_prefix, stack_prefix=stack_prefix)
core.Tags.of(stack).add('Name', stack_prefix)
app.synth()
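A minimal sketch (an assumption, not the workshop's actual handler code) of the `app.handler` each Function points at, showing how the TABLE_NAME environment variable wired in above would be consumed:

import json
import os
import uuid

import boto3

# The stack injects TABLE_NAME via add_environment; boto3 resolves the table by name.
TABLE = boto3.resource('dynamodb').Table(os.environ['TABLE_NAME'])

def handler(event, context):
    # Persist the request body under a generated partition key 'ID'.
    item = json.loads(event.get('body') or '{}')
    item['ID'] = str(uuid.uuid4())
    TABLE.put_item(Item=item)
    return {'statusCode': 200, 'body': json.dumps({'ID': item['ID']})}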
44.950617
177
0.682505
413
3,641
5.692494
0.290557
0.079541
0.091876
0.095704
0.418545
0.327095
0.187154
0.122501
0
0
0
0.004159
0.207635
3,641
80
178
45.5125
0.810745
0.040099
0
0.193548
0
0
0.147405
0.078004
0
0
0
0
0
1
0.016129
false
0
0.064516
0
0.096774
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c7821ff30782af7bc27dc24920e0c07f5856c1a5
326
py
Python
module_6_lets_make_a_web_app/webapp/yield.py
JCarlos831/python_getting_started_-pluralsight-
5059a1019c46eb8174fc86989fab7cc0c4caffd4
[ "MIT" ]
null
null
null
module_6_lets_make_a_web_app/webapp/yield.py
JCarlos831/python_getting_started_-pluralsight-
5059a1019c46eb8174fc86989fab7cc0c4caffd4
[ "MIT" ]
null
null
null
module_6_lets_make_a_web_app/webapp/yield.py
JCarlos831/python_getting_started_-pluralsight-
5059a1019c46eb8174fc86989fab7cc0c4caffd4
[ "MIT" ]
null
null
null
students = []


def read_file():
    try:
        f = open("students.txt", "r")
        for student in read_students(f):
            students.append(student)
        f.close()
    except Exception:
        print("Could not read file")


def read_students(f):
    for line in f:
        yield line


read_file()
print(students)
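A short sketch (hypothetical data, not part of the tutorial file) of why read_students being a generator matters: lines are produced one at a time on demand instead of being materialized as a list up front.

import io

def read_students(f):
    for line in f:
        yield line

fake_file = io.StringIO("Mark\nKatarina\nJessica\n")
gen = read_students(fake_file)
print(next(gen))  # 'Mark\n' -- only the first line has been consumed so far
print(list(gen))  # remaining lines: ['Katarina\n', 'Jessica\n']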
16.3
40
0.57362
42
326
4.357143
0.5
0.131148
0.142077
0
0
0
0
0
0
0
0
0
0.309816
326
20
41
16.3
0.813333
0
0
0
0
0
0.097859
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.142857
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c78545f3c73bfddebce8e778857a5662b6cdc383
610
py
Python
pug/dj/miner/model_mixin.py
hobson/pug-dj
55678b08755a55366ce18e7d3b8ea8fa4491ab04
[ "MIT" ]
null
null
null
pug/dj/miner/model_mixin.py
hobson/pug-dj
55678b08755a55366ce18e7d3b8ea8fa4491ab04
[ "MIT" ]
5
2021-09-07T23:53:24.000Z
2022-03-11T23:22:04.000Z
pug/dj/miner/model_mixin.py
hobson/pug-dj
55678b08755a55366ce18e7d3b8ea8fa4491ab04
[ "MIT" ]
1
2015-04-23T14:45:04.000Z
2015-04-23T14:45:04.000Z
from pug.nlp.db import representation

from django.db import models


class RepresentationMixin(models.Model):
    """Produce a meaningful string representation of a model with `str(model.objects.all[0])`."""
    __unicode__ = representation

    class Meta:
        abstract = True


class DateMixin(models.Model):
    """Add updated and created fields that auto-populate to create an ORM-level transaction log
    for auditing (though not a full log, just 2 events)."""
    updated = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        abstract = True
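A usage sketch (hypothetical model, assuming a configured Django project) showing how the abstract mixins combine: the subclass inherits the string representation plus the two audit timestamps.

from django.db import models

class Document(RepresentationMixin, DateMixin):
    # Inherits __unicode__ from RepresentationMixin and the
    # created/updated audit fields from DateMixin; only title is declared here.
    title = models.CharField(max_length=100)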
32.105263
150
0.727869
81
610
5.395062
0.617284
0.036613
0.077803
0.09611
0
0
0
0
0
0
0
0.004024
0.185246
610
18
151
33.888889
0.875252
0.37377
0
0.363636
0
0
0
0
0
0
0
0
0
1
0
false
0
0.181818
0
0.818182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c785e70d66977d68cd692ad4e28b80dae9e1f5c0
4,255
py
Python
custom_components/kodi_media_sensors/config_flow.py
JurajNyiri/kodi-media-sensors
055065e52b34555df95a905fc556d3086626deee
[ "MIT" ]
5
2021-03-20T23:32:58.000Z
2022-03-12T02:01:39.000Z
custom_components/kodi_media_sensors/config_flow.py
JurajNyiri/kodi-media-sensors
055065e52b34555df95a905fc556d3086626deee
[ "MIT" ]
11
2021-02-09T16:40:34.000Z
2022-03-20T11:43:06.000Z
custom_components/kodi_media_sensors/config_flow.py
JurajNyiri/kodi-media-sensors
055065e52b34555df95a905fc556d3086626deee
[ "MIT" ]
3
2021-02-09T17:01:25.000Z
2022-02-23T22:21:16.000Z
import logging
from typing import Any, Dict, Optional

from homeassistant import config_entries
from homeassistant.components.kodi.const import DOMAIN as KODI_DOMAIN
from homeassistant.core import callback
import voluptuous as vol

from .const import (
    OPTION_HIDE_WATCHED,
    OPTION_USE_AUTH_URL,
    OPTION_SEARCH_LIMIT,
    OPTION_SEARCH_LIMIT_DEFAULT_VALUE,
    CONF_KODI_INSTANCE,
    DOMAIN,
    CONF_SENSOR_RECENTLY_ADDED_TVSHOW,
    CONF_SENSOR_RECENTLY_ADDED_MOVIE,
    CONF_SENSOR_PLAYLIST,
    CONF_SENSOR_SEARCH,
)

_LOGGER = logging.getLogger(__name__)


class KodiMediaSensorsConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Kodi Media Sensors config flow."""

    async def async_step_user(self, user_input: Optional[Dict[str, Any]]):
        """Handle a flow initialized via the user interface."""
        # Find all configured kodi instances to allow the user to select one.
        kodi_instances: Dict[str, str] = {
            entry.entry_id: entry.title
            for entry in self.hass.config_entries.async_entries(KODI_DOMAIN)
            if entry.source != "ignore"
        }

        data_schema = vol.Schema(
            {
                vol.Required(CONF_KODI_INSTANCE): vol.In(list(kodi_instances.values())),
                vol.Optional(CONF_SENSOR_RECENTLY_ADDED_TVSHOW, default=False): bool,
                vol.Optional(CONF_SENSOR_RECENTLY_ADDED_MOVIE, default=False): bool,
                vol.Optional(CONF_SENSOR_PLAYLIST, default=False): bool,
                vol.Optional(CONF_SENSOR_SEARCH, default=False): bool,
            }
        )

        errors = {}
        if not kodi_instances:
            errors["base"] = "kodi_not_configured"

        if user_input is not None:
            config_entry_id: Optional[str] = None
            for entry_id, title in kodi_instances.items():
                if title == user_input[CONF_KODI_INSTANCE]:
                    config_entry_id = entry_id
                    break
            if config_entry_id is None:
                errors["base"] = "kodi_not_configured"

            if not errors:
                return self.async_create_entry(
                    title="Kodi Media Sensors",
                    data={
                        CONF_KODI_INSTANCE: config_entry_id,
                        CONF_SENSOR_RECENTLY_ADDED_TVSHOW: user_input[
                            CONF_SENSOR_RECENTLY_ADDED_TVSHOW
                        ],
                        CONF_SENSOR_RECENTLY_ADDED_MOVIE: user_input[
                            CONF_SENSOR_RECENTLY_ADDED_MOVIE
                        ],
                        CONF_SENSOR_PLAYLIST: user_input[CONF_SENSOR_PLAYLIST],
                        CONF_SENSOR_SEARCH: user_input[CONF_SENSOR_SEARCH],
                    },
                )

        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
            errors=errors,
        )

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)


class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handles options flow for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        hide_watched = self.config_entry.options.get(OPTION_HIDE_WATCHED, False)
        use_auth_url = self.config_entry.options.get(OPTION_USE_AUTH_URL, False)
        search_limit = self.config_entry.options.get(
            OPTION_SEARCH_LIMIT, OPTION_SEARCH_LIMIT_DEFAULT_VALUE
        )

        options_schema = vol.Schema(
            {
                vol.Optional(OPTION_HIDE_WATCHED, default=hide_watched): bool,
                vol.Optional(OPTION_USE_AUTH_URL, default=use_auth_url): bool,
                vol.Optional(OPTION_SEARCH_LIMIT, default=search_limit): int,
            }
        )

        return self.async_show_form(
            step_id="init",
            data_schema=options_schema,
        )
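A standalone sketch (hypothetical keys and instance names) of how the voluptuous schemas above behave: optional keys fall back to their declared defaults, and values outside vol.In raise vol.Invalid.

import voluptuous as vol

schema = vol.Schema({
    vol.Required("kodi_instance"): vol.In(["Living room", "Bedroom"]),
    vol.Optional("sensor_playlist", default=False): bool,
})

print(schema({"kodi_instance": "Bedroom"}))
# {'kodi_instance': 'Bedroom', 'sensor_playlist': False}
try:
    schema({"kodi_instance": "Garage"})
except vol.Invalid as err:
    print(err)  # reports that the value is not in the allowed list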
36.681034
88
0.624207
477
4,255
5.21174
0.224319
0.06436
0.057924
0.074014
0.360821
0.34473
0.176589
0.108608
0.045857
0.045857
0
0
0.302703
4,255
115
89
37
0.837883
0.042068
0
0.086957
0
0
0.019598
0
0
0
0
0
0
1
0.021739
false
0
0.076087
0
0.173913
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c7879b591e4a17bc5cbafd6cd291d2d73183569a
23,794
py
Python
apps/project/views/issue.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
349
2020-08-04T10:21:01.000Z
2022-03-23T08:31:29.000Z
apps/project/views/issue.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
2
2021-01-07T06:17:05.000Z
2021-04-01T06:01:30.000Z
apps/project/views/issue.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
70
2020-08-24T06:46:14.000Z
2022-03-25T13:23:27.000Z
from flask import request

from apps.auth.auth_require import required
from apps.project.business.issue import IssueBusiness, IssueRecordBusiness, IssueDashBoardBusiness
from apps.project.extentions import parse_json_form, validation, parse_list_args2
from library.api.render import json_detail_render, json_list_render2
from library.api.tBlueprint import tblueprint

bpname = 'issue'
view_permission = f'{bpname}_view'
modify_permission = f'{bpname}_modify'
issue = tblueprint(bpname, __name__)


# Create a new issue
@issue.route('/', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_create')
def issue_add_handler():
    """
    @api {post} /v1/issue Create a defect
    @apiName CreateIssue
    @apiGroup Project
    @apiDescription Create a defect
    @apiParam {int} module_id module ID
    @apiParam {int} handler handler ID
    @apiParam {int} issue_type type
    @apiParam {int} chance occurrence probability
    @apiParam {int} level level
    @apiParam {int} priority priority
    @apiParam {int} system system
    @apiParam {string} title title
    @apiParam {string} attach attachment
    @apiParam {string} description description
    @apiParam {int} detection_chance user detection chance
    @apiParam {int} project_id project ID
    @apiParam {int} version version
    @apiParam {int} creator creator ID
    @apiParam {int} modifier modifier ID
    @apiParam {int} [requirement_id] associated requirement ID
    @apiParam {string} [tag] tag
    @apiParamExample {json} Request-Example:
    {
        "module_id": 340,
        "handler": 93,
        "issue_type": 0,
        "chance": 0,
        "level": 0,
        "priority": 0,
        "system": 4,
        "title": "123",
        "attach": "{\"images\":[],\"files\":[],\"videos\":[]}",
        "description": "<p>test</p>",
        "detection_chance": 0,
        "project_id": 4,
        "version": 168,
        "creator": 93,
        "modifier": 93,
        "requirement_id": 123,
        "tag": 13,14
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    (system, version, project_id, module_id, creator, modifier, handler, issue_type, chance, level, priority,
     stage, title, attach, handle_status, description, comment, detection_chance, requirement_id, case_covered,
     tag) = parse_json_form('issue_create')
    ret = IssueBusiness.create(system, version, project_id, module_id, creator, modifier, handler, issue_type,
                               chance, level, priority, stage, title, attach, handle_status, description, comment,
                               detection_chance, requirement_id, case_covered, tag)

    return json_detail_render(ret)


# Modify or delete an issue by id
@issue.route('/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_modify')
def issue_modify_handler(issue_id):
    """
    @api {post} /v1/issue/{int:id} Modify a defect
    @apiName ModifyIssue
    @apiGroup Project
    @apiDescription Modify a defect
    @apiParam {int} module_id module ID
    @apiParam {int} handler handler ID
    @apiParam {int} issue_type type
    @apiParam {int} chance occurrence probability
    @apiParam {int} level level
    @apiParam {int} priority priority
    @apiParam {int} system system
    @apiParam {string} title title
    @apiParam {string} attach attachment
    @apiParam {string} description description
    @apiParam {int} detection_chance user detection chance
    @apiParam {int} project_id project ID
    @apiParam {int} version version
    @apiParam {int} creator creator ID
    @apiParam {int} modifier modifier ID
    @apiParam {int} [requirement_id] associated requirement ID
    @apiParam {string} [tag] tag
    @apiParamExample {json} Request-Example:
    {
        "module_id": 340,
        "handler": 93,
        "issue_type": 0,
        "chance": 0,
        "level": 0,
        "priority": 0,
        "system": 4,
        "title": "123",
        "attach": "{\"images\":[],\"files\":[],\"videos\":[]}",
        "description": "<p>test</p>",
        "detection_chance": 0,
        "project_id": 4,
        "version": 168,
        "creator": 93,
        "modifier": 93,
        "requirement_id": 1,
        "tag": 13,14
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    (system, version, project_id, module_id, modifier, handler, issue_type, chance, level, priority, stage,
     title, attach, handle_status, description, comment, detection_chance, requirement_id, case_covered,
     tag) = parse_json_form('issue_modify')
    ret = IssueBusiness.modify(issue_id, system, version, project_id, module_id, modifier, handler, issue_type,
                               chance, level, priority, stage, title, attach, handle_status, description, comment,
                               detection_chance, requirement_id, case_covered, tag)

    return json_detail_render(ret)


# Modify or delete an issue by id
@issue.route('/<int:issue_id>', methods=['DELETE'])
def issue_delete_handler(issue_id):
    """
    @api {delete} /v1/issue/{int:id} Delete a defect
    @apiName DeleteIssue
    @apiGroup Project
    @apiDescription Delete a defect
    @apiParamExample {json} Request-Example:
    -
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    ret = IssueBusiness.delete(issue_id)

    return json_detail_render(ret)


# Switch issue status
@issue.route('/handlestatus/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:handle_status')
def issue_board_status_handler(issue_id):
    """
    @api {post} /v1/issue/handlestatus/{int:id} Switch defect status
    @apiName ModifyIssueStatus
    @apiGroup Project
    @apiDescription Switch the defect status
    @apiParamExample {json} Request-Example:
    {
        "handle_status": 1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    handle_status = parse_json_form('handle_status')[0]
    ret = IssueBusiness.status_switch(issue_id, handle_status)

    return json_detail_render(ret)


# Switch issue handler
@issue.route('/handler/<int:issue_id>', methods=['POST'])
@validation('POST:handler_switch')
@required(modify_permission)
def issue_handler_switch_handler(issue_id):
    """
    @api {post} /v1/issue/handler/{int:id} Switch defect handler
    @apiName ModifyIssueSwitch
    @apiGroup Project
    @apiDescription Switch the defect handler
    @apiParamExample {json} Request-Example:
    {
        "handler": 11
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    handler = parse_json_form('handler_switch')
    ret = IssueBusiness.handler_switch(issue_id, handler)

    return json_detail_render(ret)


# Switch issue level
@issue.route('/level/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:level_switch')
def issue_level_switch_handler(issue_id):
    """
    @api {post} /v1/issue/level/{int:id} Switch defect level
    @apiName ModifyIssueLevel
    @apiGroup Project
    @apiDescription Switch the defect level
    @apiParamExample {json} Request-Example:
    {
        "level": 3
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    level = parse_json_form('level_switch')
    ret = IssueBusiness.level_switch(issue_id, level)

    return json_detail_render(ret)


# Switch issue priority
@issue.route('/priority/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:priority_switch')
def issue_priority_switch_handler(issue_id):
    """
    @api {post} /v1/issue/priority/{int:id} Switch defect priority
    @apiName ModifyIssuePriority
    @apiGroup Project
    @apiDescription Switch the defect priority
    @apiParamExample {json} Request-Example:
    {
        "priority": 3
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    priority = parse_json_form('priority_switch')
    ret = IssueBusiness.priority_switch(issue_id, priority)

    return json_detail_render(ret)


# Update the issue's comment
@issue.route('/comment/<int:issue_id>', methods=['POST'])
@validation('POST:add_comment')
@required(modify_permission)
def issue_add_comment_handler(issue_id):
    """
    @api {post} /v1/issue/comment/{int:id} Update defect comment
    @apiName ModifyIssueComment
    @apiGroup Project
    @apiDescription Update the defect comment
    @apiParamExample {json} Request-Example:
    {
        "comment": 3
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    comment = parse_json_form('add_comment')
    ret = IssueBusiness.add_comment(issue_id, comment)

    return json_detail_render(ret)


# Query issues by projectid and versionid
@issue.route('/', methods=['GET'])
def issue_query_all_handler():
    """
    @api {get} /v1/issue/ Query the issue list
    @apiName SearchIssue
    @apiGroup Project
    @apiDescription Query the issue list
    @apiParam {int} [projectid] project ID
    @apiParam {int} [versionid] version ID
    @apiParam {string} [creator_id] creator IDs, separated by ','
    @apiParam {string} [handler_id] handler IDs, separated by ','
    @apiParam {int} [title] title
    @apiParam {string} [handle_status] handle status IDs, separated by ','
    @apiParam {string} [module_id] module IDs, separated by ','
    @apiParam {string} [priority] priority IDs, separated by ','
    @apiParam {int} [page_size] page size
    @apiParam {int} [page_index] page index
    @apiParamExample {json} Request-Example:
    {
        "projectid": 4,
        "versionid": 173,
        "creator_id": "1,2,3,4",
        "page_size": 1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "attach": "{"images":[],"files":[],"videos":[]}",
                "chance": 2,
                "comment": "",
                "creation_time": "2019-08-08 20:58:49",
                "creator": [
                    {
                        "id": 96,
                        "name": "张宇"
                    }
                ],
                "description": "",
                "detection_chance": "",
                "handle_status": 2,
                "handler": [
                    {
                        "id": 96,
                        "name": "张宇"
                    }
                ],
                "issue_number": "T398",
                "issue_type": 1,
                "issueid": 398,
                "level": 1,
                "modified_time": "2019-08-08 20:58:49",
                "modifier": [],
                "module": [
                    {
                        "id": 329,
                        "name": "用例二级2222"
                    }
                ],
                "priority": 1,
                "project_id": 4,
                "rank": 12,
                "reopen": 0,
                "repair_time": "",
                "requirement_id": "",
                "requirement_title": "",
                "stage": "",
                "status": 0,
                "system": "",
                "test_time": "",
                "title": "1.2.7issuse55555",
                "version": [
                    {
                        "id": 173,
                        "name": "1.2.7"
                    }
                ],
                "weight": ""
            }
        ],
        "message": "ok",
        "page_index": 1,
        "page_size": 1,
        "total": 8
    }
    """
    requirement_id = request.args.get('requirement_id')
    if requirement_id:
        page_size, page_index = parse_list_args2()
        data, count = IssueBusiness.paginate_data_by_rid(page_size, page_index, requirement_id)

        return json_list_render2(0, data, page_size, page_index, count)
    else:
        page_size, page_index = parse_list_args2()
        data, count = IssueBusiness.paginate_data(page_size, page_index)

        return json_list_render2(0, data, page_size, page_index, count)


# Query issue history records
@issue.route('/record', methods=['GET'])
def issue_record_query_all_handler():
    """
    @api {get} /v1/issue/record Query the defect history record list
    @apiName GetIssueRecordList
    @apiGroup Project
    @apiDescription Query the defect history record list
    @apiParam {int} projectid project ID
    @apiParam {int} versionid version ID
    @apiParamExample {json} Request-Example:
    ?projectid=1
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "attach": "{"images":[],"files":[],"videos":[]}",
                "chance": 0,
                "comment": "",
                "creation_time": "2019-05-10 16:23:28",
                "creator": [
                    {
                        "id": 12,
                        "name": "刘焕焕"
                    }
                ],
                "description": "<p>分享微信不成功.</p>",
                "detection_chance": 0,
                "handle_status": 1,
                "handler": [
                    {
                        "id": 12,
                        "name": "刘焕焕"
                    }
                ],
                "issue_number": "T309",
                "issue_type": 0,
                "issueid": 309,
                "level": 1,
                "modified_time": "2019-05-13 13:02:45",
                "modifier": [],
                "module": [
                    {
                        "id": 291,
                        "name": "V2.4.9版本用例飞科"
                    }
                ],
                "priority": 1,
                "project_id": 1,
                "rank": 20,
                "reopen": 0,
                "repair_time": "",
                "requirement_id": "",
                "requirement_title": "",
                "stage": "",
                "status": 0,
                "system": 1,
                "test_time": "",
                "title": "分享微信不成功",
                "version": [
                    {
                        "id": 128,
                        "name": "V2.4.9"
                    }
                ],
                "weight": ""
            }
        ],
        "message": "ok"
    }
    """
    data = IssueRecordBusiness.query_all_json()

    return json_detail_render(0, data)


# Query issue history record detail
@issue.route('/record/detail/<int:issue_id>', methods=['GET'])
def issue_record_detail_handler(issue_id):
    """
    @api {get} /v1/issue/record/detail/{int:issue_id} Query defect history record detail
    @apiName GetIssueRecordDetailById
    @apiGroup Project
    @apiDescription Query defect history record detail
    @apiParamExample {json} Request-Example:
    -
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "modified_time": "2018-12-19 14:59:34",
                "modifier_id": 1,
                "modifier_name": "王金龙",
                "operation": "修改了处理状态 待办 为 处理中"
            },
            {
                "modified_time": "2018-12-18 20:28:39",
                "modifier_id": 1,
                "modifier_name": "王金龙",
                "operation": "创建了BUG title"
            }
        ],
        "message": "ok"
    }
    """
    data = IssueRecordBusiness.query_record_detail(issue_id)

    return json_detail_render(0, data)


# Query an issue by id
@issue.route('/<int:issue_id>', methods=['GET'])
def issue_query_handler(issue_id):
    """
    @api {get} /v1/issue/{int:issue_id} Query defect detail (by id)
    @apiName GetIssueById
    @apiGroup Project
    @apiDescription Query defect detail by ID
    @apiParamExample {json} Request-Example:
    -
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code":0,
        "data":[
            {
                "attach":"attach",
                "chance":1,
                "comment":"",
                "creation_time":"2018-12-18 20:28:39",
                "creator":[
                    {
                        "id":1,
                        "name":"王金龙"
                    }
                ],
                "description":"description",
                "handle_status":3,
                "handler":[
                    {
                        "id":1,
                        "name":"王金龙"
                    }
                ],
                "issue_number":"T1",
                "issue_type":1,
                "issueid":1,
                "level":1,
                "modified_time":"2019-03-01 16:46:10",
                "modifier":[
                    {
                        "id":1,
                        "name":"王金龙"
                    }
                ],
                "module":[
                    {
                        "id":1,
                        "name":"音频"
                    }
                ],
                "priority":1,
                "project_id":1,
                "reopen":0,
                "repair_time":"0:00:05",
                "requirement_id":"",
                "requirement_title":"",
                "stage":1,
                "status":0,
                "system":0,
                "test_time":"2 days, 20:21:05",
                "title":"title",
                "version":[
                    {
                        "id":1,
                        "name":"str"
                    }
                ],
                "weight":""
            }
        ],
        "message":"ok"
    }
    """
    data = IssueBusiness.query_by_id(issue_id)

    return json_detail_render(0, data)


# Dashboard of closed and opened issues
@issue.route('/dashboard', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_dashboard_work_handler():
    start_date, end_date = parse_json_form('issue_dashboard')
    data = IssueDashBoardBusiness.issue_dashboard(start_date, end_date)

    return json_detail_render(0, data)


# Query the number of issues created per day by each tester
@issue.route('/dashboard/tester', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def tester_issue_work_handler():
    start_date, end_date = parse_json_form('issue_dashboard')
    data = IssueDashBoardBusiness.issue_all_tester_dashboard(start_date, end_date)

    return json_detail_render(0, data)


# Issue status distribution and priority distribution
@issue.route('/dashboard/project', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_project_dashboard_handler():
    """
    @api {POST} /v1/issue/dashboard/project Query defect status and priority distribution
    @apiName GetIssueByStatusAndPriority
    @apiGroup Project
    @apiDescription Query the defect status distribution and priority distribution
    @apiParamExample {json} Request-Example:
    {
        "start_date": "2019-01-02 10:10:11",
        "end_date": "2019-01-03 10:10:12",
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "modified_time": "2018-12-19 14:59:34",
                "modifier_id": 1,
                "modifier_name": "王金龙",
                "operation": "修改了处理状态 待办 为 处理中"
            },
            {
                "modified_time": "2018-12-18 20:28:39",
                "modifier_id": 1,
                "modifier_name": "王金龙",
                "operation": "创建了BUG title"
            }
        ],
        "message": "ok"
    }
    """
    start_date, end_date = parse_json_form('issue_dashboard')
    data = IssueDashBoardBusiness.issue_project_dashboard(start_date, end_date)

    return json_detail_render(0, data)


# Board: query the number of issues in each status by pro_id
@issue.route('/dashboard/project/<int:pro_id>', methods=['GET'])
def issue_query_pro_handler(pro_id):
    """
    @api {post} /v1/issue/dashboard/project/{int:project_id} Query board defects by project ID
    @apiName GetBoardIssueByProjectId
    @apiGroup Project
    @apiDescription Query board defects by project ID
    @apiParamExample {json} Request-Example:
    -
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code":0,
        "data":[
            {
                "info":[
                    {
                        "detail":[
                            {
                                "count":1,
                                "handle_status":1
                            },
                            {
                                "count":1,
                                "handle_status":2
                            },
                            {
                                "count":1,
                                "handle_status":3
                            }
                        ],
                        "total":3,
                        "version":1
                    },
                    {
                        "detail":[
                            {
                                "count":1,
                                "handle_status":4
                            }
                        ],
                        "total":1,
                        "version":2
                    },
                    {
                        "detail":[
                            {
                                "count":1,
                                "handle_status":1
                            }
                        ],
                        "total":1,
                        "version":3
                    },
                    {
                        "detail":[
                            {
                                "count":3,
                                "handle_status":4
                            }
                        ],
                        "total":3,
                        "version":4
                    },
                    {
                        "detail":[
                            {
                                "count":1,
                                "handle_status":1
                            },
                            {
                                "count":1,
                                "handle_status":4
                            }
                        ],
                        "total":2,
                        "version":128
                    }
                ],
                "project_id":1
            }
        ],
        "message":"ok"
    }
    """
    data = IssueDashBoardBusiness.issue_project_id_dashboard(pro_id)

    return json_detail_render(0, data)


# Bind an issue to a requirement
@issue.route('/bind/requirement', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_bind_requirement')
def issue_bind_requirement():
    """
    @api {post} /v1/issue/bind/requirement Bind a defect issue to a requirement
    @apiName IssueBindRequirement
    @apiGroup Project
    @apiDescription Bind a defect to a requirement
    @apiParam {int} issue_id defect ID
    @apiParam {int} requirement_id requirement ID
    @apiParamExample {json} Request-Example:
    {
        "issue": 11,
        "requirement_id": 22
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    requirement_id, issue_id = parse_json_form('issue_bind_requirement')
    ret, msg = IssueBusiness.issue_bind_requirement(issue_id, requirement_id)

    return json_detail_render(ret, [], msg)


# Export the issue list
@issue.route('/export', methods=['GET'])
def issue_export():
    """
    @api {get} /v1/issue/ Export issues to xls
    @apiName IssueExportToXls
    @apiGroup Project
    @apiDescription Export issues to xls
    @apiParam {int} [projectid] project ID
    @apiParam {int} [versionid] version ID
    @apiParam {int} [creator_id] creator ID
    @apiParam {int} [title] title
    @apiParam {int} [handle_status] handle status ID
    @apiParam {int} [module_id] module ID
    @apiParam {int} [priority] priority ID
    @apiParam {int} [page_size] page size
    @apiParam {int} [page_index] page index
    @apiParamExample {json} Request-Example:
    {
        "projectid": 4,
        "versionid": 173,
        "page_size": 1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": "http://tcloud-static.oss-cn-beijing.aliyuncs.com/issue_export/0/Issues-20190809.164431.xls",
        "message": "ok"
    }
    """
    issue_url = IssueBusiness.export()

    return json_detail_render(code=0, data=issue_url)
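A request sketch (hypothetical host and port; the field list mirrors the POST:issue_create schema above) for exercising the issue-creation endpoint with the requests library:

import requests

payload = {
    "module_id": 340, "handler": 93, "issue_type": 0, "chance": 0,
    "level": 0, "priority": 0, "system": 4, "title": "login fails",
    "attach": "{\"images\":[],\"files\":[],\"videos\":[]}",
    "description": "<p>test</p>", "detection_chance": 0,
    "project_id": 4, "version": 168, "creator": 93, "modifier": 93,
}
# Host, port, and auth are deployment-specific assumptions.
resp = requests.post("http://localhost:9000/v1/issue/", json=payload)
print(resp.json())  # per the apidoc example: {"code": 0, "data": [], "message": "ok"}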
29.159314
114
0.504329
2,295
23,794
5.061438
0.125054
0.041667
0.021264
0.032197
0.592975
0.528065
0.507317
0.472624
0.445334
0.445334
0
0.035347
0.366269
23,794
815
115
29.195092
0.734996
0.617256
0
0.307692
0
0
0.118233
0.034384
0
0
0
0
0
1
0.138462
false
0
0.046154
0
0.330769
0.015385
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c788076445fbf7d0da81cc5cf12ab9482e59b110
357
py
Python
translator.py
liuprestin/pyninjaTUT-translator
903642ff56f573ed9c58b6f7db4e6fbb4e722c8d
[ "MIT" ]
null
null
null
translator.py
liuprestin/pyninjaTUT-translator
903642ff56f573ed9c58b6f7db4e6fbb4e722c8d
[ "MIT" ]
null
null
null
translator.py
liuprestin/pyninjaTUT-translator
903642ff56f573ed9c58b6f7db4e6fbb4e722c8d
[ "MIT" ]
null
null
null
from translate import Translator

translator = Translator(to_lang="zh")

try:
    with open('./example.md', mode='r') as in_file:
        text = in_file.read()

    with open('./example-translated.md', mode='w') as trans_file:
        trans_file.write(translator.translate(text))
except FileNotFoundError as e:
    print('check your file path')
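A small variation (same translate library, hypothetical target language) showing that the target is fixed at construction time, so translating into another language means building another Translator:

from translate import Translator

to_french = Translator(to_lang="fr")
print(to_french.translate("Hello world"))  # e.g. 'Bonjour le monde'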
27.461538
64
0.661064
48
357
4.8125
0.625
0.17316
0.12987
0
0
0
0
0
0
0
0
0
0.207283
357
13
65
27.461538
0.816254
0
0
0
0
0
0.162011
0.061453
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c78915846f029ced4be55e06e50f81dcf24cc440
21,941
py
Python
xcbgen/xtypes.py
tizenorg/framework.uifw.xorg.xcb.xcb-proto
d5ce7205c9bdd3e28d96d162617e32de1c126f8b
[ "X11" ]
1
2022-03-21T15:39:01.000Z
2022-03-21T15:39:01.000Z
targetfs/XSGX/lib/python2.6/site-packages/xcbgen/xtypes.py
jhofstee/Graphics_SDK
805bd44f347ed40699a84979bc9f3e8eb085fd9e
[ "Fair", "Unlicense" ]
null
null
null
targetfs/XSGX/lib/python2.6/site-packages/xcbgen/xtypes.py
jhofstee/Graphics_SDK
805bd44f347ed40699a84979bc9f3e8eb085fd9e
[ "Fair", "Unlicense" ]
null
null
null
''' This module contains the classes which represent XCB data types. ''' from xcbgen.expr import Field, Expression import __main__ class Type(object): ''' Abstract base class for all XCB data types. Contains default fields, and some abstract methods. ''' def __init__(self, name): ''' Default structure initializer. Sets up default fields. Public fields: name is a tuple of strings specifying the full type name. size is the size of the datatype in bytes, or None if variable-sized. nmemb is 1 for non-list types, None for variable-sized lists, otherwise number of elts. booleans for identifying subclasses, because I can't figure out isinstance(). ''' self.name = name self.size = None self.nmemb = None self.resolved = False # Screw isinstance(). self.is_simple = False self.is_list = False self.is_expr = False self.is_container = False self.is_reply = False self.is_union = False self.is_pad = False self.is_switch = False self.is_bitcase = False def resolve(self, module): ''' Abstract method for resolving a type. This should make sure any referenced types are already declared. ''' raise Exception('abstract resolve method not overridden!') def out(self, name): ''' Abstract method for outputting code. These are declared in the language-specific modules, and there must be a dictionary containing them declared when this module is imported! ''' raise Exception('abstract out method not overridden!') def fixed_size(self): ''' Abstract method for determining if the data type is fixed-size. ''' raise Exception('abstract fixed_size method not overridden!') def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto): ''' Default method for making a data type a member of a structure. Extend this if the data type needs to add an additional length field or something. module is the global module object. complex_type is the structure object. see Field for the meaning of the other parameters. ''' new_field = Field(self, field_type, field_name, visible, wire, auto) # We dump the _placeholder_byte if any fields are added. for (idx, field) in enumerate(complex_type.fields): if field == _placeholder_byte: complex_type.fields[idx] = new_field return complex_type.fields.append(new_field) class SimpleType(Type): ''' Derived class which represents a cardinal type like CARD32 or char. Any type which is typedef'ed to cardinal will be one of these. Public fields added: none ''' def __init__(self, name, size): Type.__init__(self, name) self.is_simple = True self.size = size self.nmemb = 1 def resolve(self, module): self.resolved = True def fixed_size(self): return True out = __main__.output['simple'] # Cardinal datatype globals. See module __init__ method. tcard8 = SimpleType(('uint8_t',), 1) tcard16 = SimpleType(('uint16_t',), 2) tcard32 = SimpleType(('uint32_t',), 4) tint8 = SimpleType(('int8_t',), 1) tint16 = SimpleType(('int16_t',), 2) tint32 = SimpleType(('int32_t',), 4) tchar = SimpleType(('char',), 1) tfloat = SimpleType(('float',), 4) tdouble = SimpleType(('double',), 8) class Enum(SimpleType): ''' Derived class which represents an enum. Fixed-size. Public fields added: values contains a list of (name, value) tuples. value is empty, or a number. bits contains a list of (name, bitnum) tuples. items only appear if specified as a bit. bitnum is a number. 
''' def __init__(self, name, elt): SimpleType.__init__(self, name, 4) self.values = [] self.bits = [] for item in list(elt): # First check if we're using a default value if len(list(item)) == 0: self.values.append((item.get('name'), '')) continue # An explicit value or bit was specified. value = list(item)[0] if value.tag == 'value': self.values.append((item.get('name'), value.text)) elif value.tag == 'bit': self.values.append((item.get('name'), '%u' % (1 << int(value.text, 0)))) self.bits.append((item.get('name'), value.text)) def resolve(self, module): self.resolved = True def fixed_size(self): return True out = __main__.output['enum'] class ListType(Type): ''' Derived class which represents a list of some other datatype. Fixed- or variable-sized. Public fields added: member is the datatype of the list elements. parent is the structure type containing the list. expr is an Expression object containing the length information, for variable-sized lists. ''' def __init__(self, elt, member, *parent): Type.__init__(self, member.name) self.is_list = True self.member = member self.parents = list(parent) if elt.tag == 'list': elts = list(elt) self.expr = Expression(elts[0] if len(elts) else elt, self) elif elt.tag == 'valueparam': self.expr = Expression(elt, self) self.size = member.size if member.fixed_size() else None self.nmemb = self.expr.nmemb if self.expr.fixed_size() else None def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto): if not self.fixed_size(): # We need a length field. # Ask our Expression object for it's name, type, and whether it's on the wire. lenfid = self.expr.lenfield_type lenfield_name = self.expr.lenfield_name lenwire = self.expr.lenwire needlen = True # See if the length field is already in the structure. for parent in self.parents: for field in parent.fields: if field.field_name == lenfield_name: needlen = False # It isn't, so we need to add it to the structure ourself. if needlen: type = module.get_type(lenfid) lenfield_type = module.get_type_name(lenfid) type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False) # Add ourself to the structure by calling our original method. Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto) def resolve(self, module): if self.resolved: return self.member.resolve(module) self.expr.resolve(module, self.parents) # Find my length field again. We need the actual Field object in the expr. # This is needed because we might have added it ourself above. if not self.fixed_size(): for parent in self.parents: for field in parent.fields: if field.field_name == self.expr.lenfield_name and field.wire: self.expr.lenfield = field break self.resolved = True def fixed_size(self): return self.member.fixed_size() and self.expr.fixed_size() class ExprType(Type): ''' Derived class which represents an exprfield. Fixed size. Public fields added: expr is an Expression object containing the value of the field. ''' def __init__(self, elt, member, *parents): Type.__init__(self, member.name) self.is_expr = True self.member = member self.parents = parents self.expr = Expression(list(elt)[0], self) self.size = member.size self.nmemb = 1 def resolve(self, module): if self.resolved: return self.member.resolve(module) self.resolved = True def fixed_size(self): return True class PadType(Type): ''' Derived class which represents a padding field. 
    '''
    def __init__(self, elt):
        Type.__init__(self, tcard8.name)
        self.is_pad = True
        self.size = 1
        self.nmemb = 1 if (elt == None) else int(elt.get('bytes'), 0)

    def resolve(self, module):
        self.resolved = True

    def fixed_size(self):
        return True


class ComplexType(Type):
    '''
    Derived class which represents a structure.  Base type for all structure types.

    Public fields added:
    fields is an array of Field objects describing the structure fields.
    '''
    def __init__(self, name, elt):
        Type.__init__(self, name)
        self.is_container = True
        self.elt = elt
        self.fields = []
        self.nmemb = 1
        self.size = 0
        self.lenfield_parent = [self]

    def resolve(self, module):
        if self.resolved:
            return
        pads = 0

        # Resolve all of our field datatypes.
        for child in list(self.elt):
            if child.tag == 'pad':
                field_name = 'pad' + str(pads)
                fkey = 'CARD8'
                type = PadType(child)
                pads = pads + 1
                visible = False
            elif child.tag == 'field':
                field_name = child.get('name')
                fkey = child.get('type')
                type = module.get_type(fkey)
                visible = True
            elif child.tag == 'exprfield':
                field_name = child.get('name')
                fkey = child.get('type')
                type = ExprType(child, module.get_type(fkey), *self.lenfield_parent)
                visible = False
            elif child.tag == 'list':
                field_name = child.get('name')
                fkey = child.get('type')
                type = ListType(child, module.get_type(fkey), *self.lenfield_parent)
                visible = True
            elif child.tag == 'valueparam':
                field_name = child.get('value-list-name')
                fkey = 'CARD32'
                type = ListType(child, module.get_type(fkey), *self.lenfield_parent)
                visible = True
            elif child.tag == 'switch':
                field_name = child.get('name')
                # construct the switch type name from the parent type and the field name
                field_type = self.name + (field_name,)
                type = SwitchType(field_type, child, *self.lenfield_parent)
                visible = True
                type.make_member_of(module, self, field_type, field_name, visible, True, False)
                type.resolve(module)
                continue
            else:
                # Hit this on Reply
                continue

            # Get the full type name for the field
            field_type = module.get_type_name(fkey)
            # Add the field to ourself
            type.make_member_of(module, self, field_type, field_name, visible, True, False)
            # Recursively resolve the type (could be another structure, list)
            type.resolve(module)

        self.calc_size()  # Figure out how big we are
        self.resolved = True

    def calc_size(self):
        self.size = 0
        for m in self.fields:
            if not m.wire:
                continue
            if m.type.fixed_size():
                self.size = self.size + (m.type.size * m.type.nmemb)
            else:
                self.size = None
                break

    def fixed_size(self):
        for m in self.fields:
            if not m.type.fixed_size():
                return False
        return True


class SwitchType(ComplexType):
    '''
    Derived class which represents a List of Items.

    Public fields added:
    bitcases is an array of Bitcase objects describing the list items
    '''

    def __init__(self, name, elt, *parents):
        ComplexType.__init__(self, name, elt)
        self.parents = parents
        # FIXME: switch cannot store lenfields, so it should just delegate the parents
        self.lenfield_parent = list(parents) + [self]
        # self.fields contains all possible fields collected from the Bitcase objects,
        # whereas self.items contains the Bitcase objects themselves
        self.bitcases = []

        self.is_switch = True
        elts = list(elt)
        self.expr = Expression(elts[0] if len(elts) else elt, self)

    def resolve(self, module):
        if self.resolved:
            return
#        pads = 0

        parents = list(self.parents) + [self]

        # Resolve all of our field datatypes.
        for index, child in enumerate(list(self.elt)):
            if child.tag == 'bitcase':
                field_name = child.get('name')
                if field_name is None:
                    field_type = self.name + ('bitcase%d' % index,)
                else:
                    field_type = self.name + (field_name,)

                # use self.parent to indicate ancestor,
                # as switch does not contain named fields itself
                type = BitcaseType(index, field_type, child, *parents)
                # construct the switch type name from the parent type and the field name
                if field_name is None:
                    type.has_name = False
                # Get the full type name for the field
                field_type = type.name
                visible = True

                # add the field to ourself
                type.make_member_of(module, self, field_type, field_name, visible, True, False)

                # recursively resolve the type (could be another structure, list)
                type.resolve(module)
                inserted = False
                for new_field in type.fields:
                    # We dump the _placeholder_byte if any fields are added.
                    for (idx, field) in enumerate(self.fields):
                        if field == _placeholder_byte:
                            self.fields[idx] = new_field
                            inserted = True
                            break
                    if False == inserted:
                        self.fields.append(new_field)

        self.calc_size()  # Figure out how big we are
        self.resolved = True

    def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto):
        if not self.fixed_size():
            # We need a length field.
            # Ask our Expression object for its name, type, and whether it's on the wire.
            lenfid = self.expr.lenfield_type
            lenfield_name = self.expr.lenfield_name
            lenwire = self.expr.lenwire
            needlen = True

            # See if the length field is already in the structure.
            for parent in self.parents:
                for field in parent.fields:
                    if field.field_name == lenfield_name:
                        needlen = False

            # It isn't, so we need to add it to the structure ourself.
            if needlen:
                type = module.get_type(lenfid)
                lenfield_type = module.get_type_name(lenfid)
                type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False)

        # Add ourself to the structure by calling our original method.
        Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto)

    # size for switch can only be calculated at runtime
    def calc_size(self):
        pass

    # note: switch is _always_ of variable size, but we indicate here whether
    # it contains elements that are variable-sized themselves
    def fixed_size(self):
        return False
#        for m in self.fields:
#            if not m.type.fixed_size():
#                return False
#        return True


class Struct(ComplexType):
    '''
    Derived class representing a struct data type.
    '''
    out = __main__.output['struct']


class Union(ComplexType):
    '''
    Derived class representing a union data type.
    '''
    def __init__(self, name, elt):
        ComplexType.__init__(self, name, elt)
        self.is_union = True

    out = __main__.output['union']


class BitcaseType(ComplexType):
    '''
    Derived class representing a struct data type.
    '''
    def __init__(self, index, name, elt, *parent):
        elts = list(elt)
        self.expr = Expression(elts[0] if len(elts) else elt, self)
        ComplexType.__init__(self, name, elts[1:])
        self.has_name = True
        self.index = 1
        self.lenfield_parent = list(parent) + [self]
        self.parents = list(parent)
        self.is_bitcase = True

    def make_member_of(self, module, switch_type, field_type, field_name, visible, wire, auto):
        '''
        register BitcaseType with the corresponding SwitchType

        module is the global module object.
        complex_type is the structure object.
        see Field for the meaning of the other parameters.
        '''
        new_field = Field(self, field_type, field_name, visible, wire, auto)

        # We dump the _placeholder_byte if any bitcases are added.
        for (idx, field) in enumerate(switch_type.bitcases):
            if field == _placeholder_byte:
                switch_type.bitcases[idx] = new_field
                return

        switch_type.bitcases.append(new_field)

    def resolve(self, module):
        if self.resolved:
            return

        self.expr.resolve(module, self.parents + [self])

        # Resolve the bitcase expression
        ComplexType.resolve(self, module)


class Reply(ComplexType):
    '''
    Derived class representing a reply.  Only found as a field of Request.
    '''
    def __init__(self, name, elt):
        ComplexType.__init__(self, name, elt)
        self.is_reply = True

    def resolve(self, module):
        if self.resolved:
            return
        # Add the automatic protocol fields
        self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
        self.fields.append(_placeholder_byte)
        self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
        self.fields.append(Field(tcard32, tcard32.name, 'length', False, True, True))
        ComplexType.resolve(self, module)


class Request(ComplexType):
    '''
    Derived class representing a request.

    Public fields added:
    reply contains the reply datatype or None for void requests.
    opcode contains the request number.
    '''
    def __init__(self, name, elt):
        ComplexType.__init__(self, name, elt)
        self.reply = None
        self.opcode = elt.get('opcode')
        for child in list(elt):
            if child.tag == 'reply':
                self.reply = Reply(name, child)

    def resolve(self, module):
        if self.resolved:
            return
        # Add the automatic protocol fields
        if module.namespace.is_ext:
            self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True))
            self.fields.append(Field(tcard8, tcard8.name, 'minor_opcode', False, True, True))
            self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True))
            ComplexType.resolve(self, module)
        else:
            self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True))
            self.fields.append(_placeholder_byte)
            self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True))
            ComplexType.resolve(self, module)

        if self.reply:
            self.reply.resolve(module)

    out = __main__.output['request']


class Event(ComplexType):
    '''
    Derived class representing an event data type.

    Public fields added:
    opcodes is a dictionary of name -> opcode number, for eventcopies.
    '''
    def __init__(self, name, elt):
        ComplexType.__init__(self, name, elt)
        self.opcodes = {}

        tmp = elt.get('no-sequence-number')
        self.has_seq = (tmp == None or tmp.lower() == 'false' or tmp == '0')

    def add_opcode(self, opcode, name, main):
        self.opcodes[name] = opcode
        if main:
            self.name = name

    def resolve(self, module):
        if self.resolved:
            return

        # Add the automatic protocol fields
        self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
        if self.has_seq:
            self.fields.append(_placeholder_byte)
            self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
        ComplexType.resolve(self, module)

    out = __main__.output['event']


class Error(ComplexType):
    '''
    Derived class representing an error data type.

    Public fields added:
    opcodes is a dictionary of name -> opcode number, for errorcopies.
    '''
    def __init__(self, name, elt):
        ComplexType.__init__(self, name, elt)
        self.opcodes = {}

    def add_opcode(self, opcode, name, main):
        self.opcodes[name] = opcode
        if main:
            self.name = name

    def resolve(self, module):
        if self.resolved:
            return

        # Add the automatic protocol fields
        self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
        self.fields.append(Field(tcard8, tcard8.name, 'error_code', False, True, True))
        self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
        ComplexType.resolve(self, module)

    out = __main__.output['error']

_placeholder_byte = Field(PadType(None), tcard8.name, 'pad0', False, True, False)
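The _placeholder_byte swap used in SwitchType.resolve and BitcaseType.make_member_of above is a small list-editing idiom worth seeing in isolation; this sketch uses made-up field names, not the module's real Field objects:

PLACEHOLDER = object()
fields = ['response_type', PLACEHOLDER, 'sequence']

def add_field(fields, new_field):
    # Reuse the placeholder slot if one is still free, otherwise append.
    for idx, field in enumerate(fields):
        if field is PLACEHOLDER:
            fields[idx] = new_field
            return
    fields.append(new_field)

add_field(fields, 'error_code')  # fills the placeholder slot
add_field(fields, 'extra')       # no placeholder left, so appended
print(fields)  # ['response_type', 'error_code', 'sequence', 'extra']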
34.661927
112
0.593182
2,677
21,941
4.730669
0.121031
0.017056
0.018951
0.016582
0.589782
0.526532
0.481522
0.454517
0.436276
0.425063
0
0.006605
0.31685
21,941
632
113
34.716772
0.838281
0.250581
0
0.562842
0
0
0.033972
0
0
0
0
0.001582
0
1
0.120219
false
0.002732
0.005464
0.016393
0.237705
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c78a85d9115e200586e2ed2d790dc6b616c4151d
3,769
py
Python
BioKlustering-Website/mlmodel/parser/kmeans.py
solislemuslab/mycovirus-website
bc8d3d5f9a9472c35e40334f19e90bbf782f7a1b
[ "MIT" ]
1
2021-11-23T15:07:58.000Z
2021-11-23T15:07:58.000Z
BioKlustering-Website/mlmodel/parser/kmeans.py
solislemuslab/mycovirus-website
bc8d3d5f9a9472c35e40334f19e90bbf782f7a1b
[ "MIT" ]
2
2020-10-23T15:40:49.000Z
2020-10-28T13:21:16.000Z
BioKlustering-Website/mlmodel/parser/kmeans.py
solislemuslab/bioklustering
bc8d3d5f9a9472c35e40334f19e90bbf782f7a1b
[ "MIT" ]
2
2021-11-04T20:01:36.000Z
2021-11-23T15:13:34.000Z
# Copyright 2020 by Luke Selberg, Solis-Lemus Lab, WID.
# All rights reserved.
# This file is part of the BioKlustering Website.

import pandas as pd
from Bio import SeqIO
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.cluster import MeanShift
from sklearn import preprocessing
import numpy as np
import os
from .helpers import plotly_dash_show_plot

def parseFasta(data):
    d = {fasta.id: str(fasta.seq) for fasta in SeqIO.parse(data, "fasta")}
    pd.DataFrame([d])
    s = pd.Series(d, name='Sequence')
    s.index.name = 'ID'
    s.reset_index()
    return pd.DataFrame(s)

def kmerXTable(s, a, b):
    tfid_vector = TfidfVectorizer(analyzer='char', ngram_range=(a, b))
    s_hat = tfid_vector.fit_transform(s.Sequence)
    kmerNames = tfid_vector.get_feature_names()
    kmers = s_hat.toarray()
    return pd.DataFrame(kmers, columns=kmerNames, index=s.index)

# credit to chunrong
def read_fasta_sequences(sequence_paths):
    all_sequences = pd.DataFrame()
    for path in sequence_paths:
        path = os.path.join("media", path)
        sequence = parseFasta(path)
        all_sequences = pd.concat([all_sequences, sequence])
    return all_sequences

def kmeans(userId, fasta, klength_min, klength_max, rNum, cNum, method):
    inputData = read_fasta_sequences(fasta)
    inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))

    kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)

    km = KMeans(random_state=rNum, n_clusters=cNum)
    km.fit(kmerXTableInput)
    y_hat = km.predict(kmerXTableInput)

    plotly_kmertable = kmerXTableInput
    if method == "PCA":
        plotly_kmertable = preprocessing.normalize(kmerXTableInput)
    plot_div = plotly_dash_show_plot(userId, plotly_kmertable, y_hat, "Unsupervised Kmeans", method)
    inputData.insert(0, "Labels", y_hat)

    return [[inputData], [plot_div]]

def kmeans_semiSupervised(userId, fasta, klength_min, klength_max, rNum, y_hat, method):
    inputData = read_fasta_sequences(fasta)
    inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))

    kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)

    PCAembedding = PCA(n_components=10)
    NkmerXTableInput = preprocessing.normalize(kmerXTableInput)
    PCAembedding_low = PCAembedding.fit_transform(NkmerXTableInput)

    ms = MeanShift()
    ms.fit(PCAembedding_low)
    cluster_centers = ms.cluster_centers_

    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        kmms = KMeans(init=cluster_centers, n_clusters=len(cluster_centers))
        kmms_labels = kmms.fit_predict(PCAembedding_low)

    # convert all clusters into two clusters
    kmerXTableInput["pLabels"] = kmms_labels
    kmerXTableInput["aLabels"] = y_hat.tolist()
    newLabels_clusters_1 = kmerXTableInput[kmerXTableInput["aLabels"] == 1]["pLabels"].tolist()
    newLabels_clusters_0 = kmerXTableInput[kmerXTableInput["aLabels"] == 0]["pLabels"].tolist()
    newLabels = []
    for label in kmms_labels:
        if newLabels_clusters_1.count(label) > newLabels_clusters_0.count(label):
            newLabels.append(1)
        else:
            newLabels.append(0)

    kmerTable = kmerXTableInput.drop(columns=["pLabels", "aLabels"])
    plotly_kmertable = kmerTable
    plotly_labels = np.array(newLabels)
    if method == "PCA":
        plotly_kmertable = preprocessing.normalize(kmerTable)
    plotly_div = plotly_dash_show_plot(userId, plotly_kmertable, plotly_labels, "Semi-supervised Kmeans", method)
    inputData.insert(0, "Labels", newLabels)

    return [[inputData], [plotly_div]]
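A minimal sketch of the k-mer featurization that kmerXTable performs, run on an in-memory frame instead of FASTA input; the two toy sequences are invented for illustration:

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer

# Toy stand-in for parseFasta() output: two short made-up sequences.
s = pd.DataFrame({"Sequence": ["ACGTACGT", "TTGGAACC"]}, index=["seq1", "seq2"])

# Same TF-IDF character n-gram featurization as kmerXTable(s, 2, 3).
vec = TfidfVectorizer(analyzer="char", ngram_range=(2, 3))
kmers = vec.fit_transform(s.Sequence)
print(kmers.shape)  # (2, number of distinct 2-mers and 3-mers)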
37.316832
113
0.717166
453
3,769
5.781457
0.337748
0.034364
0.025964
0.030546
0.232913
0.232913
0.206949
0.143566
0.111493
0.111493
0
0.005156
0.176705
3,769
100
114
37.69
0.838866
0.047758
0
0.105263
0
0
0.049958
0
0
0
0
0
0
1
0.065789
false
0
0.144737
0
0.276316
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c78a9dbe76748ffc4b552241c18002c06e087035
1,920
py
Python
workflow/src/routing.py
mibexsoftware/alfred-stash-workflow
5cdba4d14c8998b937c1aa6af8e3417251fac540
[ "MIT" ]
13
2016-03-31T16:19:59.000Z
2019-09-26T20:47:57.000Z
workflow/src/routing.py
mibexsoftware/alfred-stash-workflow
5cdba4d14c8998b937c1aa6af8e3417251fac540
[ "MIT" ]
6
2015-09-18T15:24:43.000Z
2019-10-23T16:51:39.000Z
workflow/src/routing.py
mibexsoftware/alfred-stash-workflow
5cdba4d14c8998b937c1aa6af8e3417251fac540
[ "MIT" ]
3
2015-09-16T18:05:32.000Z
2020-01-04T19:41:21.000Z
# -*- coding: utf-8 -*-

from src import icons, __version__
from src.actions import HOST_URL
from src.actions.configure import ConfigureWorkflowAction
from src.actions.help import HelpWorkflowAction
from src.actions.index import IndexWorkflowAction
from src.actions.projects import ProjectWorkflowAction
from src.actions.pull_requests import PullRequestWorkflowAction
from src.actions.repositories import RepositoryWorkflowAction
from src.util import workflow, call_alfred

WORKFLOW_ACTIONS = {
    ':config': ConfigureWorkflowAction,
    ':projects': ProjectWorkflowAction,
    ':repos': RepositoryWorkflowAction,
    ':pullrequests': PullRequestWorkflowAction,
    ':help': HelpWorkflowAction
}

def route(args):  # e.g., args = ":config sethost http://localhost,--exec"
    command_string = args[0]  # :config sethost http://localhost
    command = command_string.split(' ')

    if not workflow().settings.get(HOST_URL, None) and 'sethost' not in command:
        call_alfred('stash:config sethost ')
        return

    handler = IndexWorkflowAction
    action = next(iter(command), None)
    if action:
        handler = WORKFLOW_ACTIONS.get(action, IndexWorkflowAction)

    if '--exec' in args:
        handler().execute(command, cmd_pressed='--cmd' in args, shift_pressed='--shift' in args)
    else:  # show menu
        handler().menu(command)
        _notify_if_upgrade_available()
        workflow().send_feedback()

def _notify_if_upgrade_available():
    if workflow().update_available:
        new_version = workflow().cached_data('__workflow_update_status', max_age=0)['version']
        workflow().add_item('An update is available!',
                            'Update the workflow from version {} to {}'.format(__version__, new_version),
                            arg=':config update',
                            valid=True,
                            icon=icons.UPDATE)
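The dictionary dispatch at the heart of route() can be exercised without Alfred; the action classes below are hypothetical stand-ins, not the workflow's real ones:

class IndexAction:
    def menu(self, command):
        print("index menu for", command)

class ConfigAction:
    def menu(self, command):
        print("config menu for", command)

ACTIONS = {':config': ConfigAction}

def dispatch(command_string):
    command = command_string.split(' ')
    handler = ACTIONS.get(command[0], IndexAction)  # fall back to the index action
    handler().menu(command)

dispatch(':config sethost http://localhost')  # -> config menu
dispatch('some query')                        # -> index menu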
39.183673
105
0.678125
203
1,920
6.226601
0.428571
0.049842
0.077532
0.041139
0
0
0
0
0
0
0
0.002001
0.219271
1,920
48
106
40
0.841227
0.061979
0
0
0
0
0.109131
0.013363
0
0
0
0
0
1
0.05
false
0
0.225
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c78d0f81c7f3ce50a968bb140ed1caaa45e4bf4b
547
py
Python
PE032.py
CaptainSora/Python-Project-Euler
056400f434eec837ece5ef06653b310ebfcc3d4e
[ "MIT" ]
null
null
null
PE032.py
CaptainSora/Python-Project-Euler
056400f434eec837ece5ef06653b310ebfcc3d4e
[ "MIT" ]
null
null
null
PE032.py
CaptainSora/Python-Project-Euler
056400f434eec837ece5ef06653b310ebfcc3d4e
[ "MIT" ]
null
null
null
from itertools import count

from _pandigital_tools import is_pandigital


def pand_products():
    """
    Returns the sum of all numbers n which have a factorization a * b = n
    such that a, b, n are (cumulatively) 1 through 9 pandigital.
    """
    total = set()
    for a in range(2, 100):
        for b in count(a):
            if len(str(a) + str(b) + str(a * b)) > 9:
                break
            elif is_pandigital(a, b, a * b):
                total.add(a * b)
    return sum(total)


def solve(vol=0):
    return pand_products()
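The imported is_pandigital helper lives in _pandigital_tools, which is not part of this record; a plausible stand-in with one spot check (39 * 186 = 7254 uses each digit 1-9 exactly once):

def is_pandigital(*nums):
    # True when the digits of all arguments together are exactly 1..9.
    digits = "".join(str(n) for n in nums)
    return sorted(digits) == list("123456789")

print(is_pandigital(39, 186, 39 * 186))  # True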
23.782609
78
0.570384
83
547
3.686747
0.554217
0.039216
0.019608
0
0
0
0
0
0
0
0
0.02168
0.325411
547
22
79
24.863636
0.807588
0.23766
0
0
0
0
0
0
0
0
0
0
0
1
0.153846
false
0
0.153846
0.076923
0.461538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c78e2f38914cd69e3bd290dd0efeba4071626991
14,594
py
Python
corehq/apps/accounting/utils.py
satyaakam/commcare-hq
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
[ "BSD-3-Clause" ]
null
null
null
corehq/apps/accounting/utils.py
satyaakam/commcare-hq
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
[ "BSD-3-Clause" ]
1
2021-06-02T04:45:16.000Z
2021-06-02T04:45:16.000Z
corehq/apps/accounting/utils.py
satyaakam/commcare-hq
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
[ "BSD-3-Clause" ]
null
null
null
import datetime
import logging
from collections import defaultdict, namedtuple

from django.conf import settings
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _

from django_prbac.models import Grant, Role, UserRole

from corehq.const import USER_DATE_FORMAT
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.dates import add_months

from corehq import privileges
from corehq.apps.accounting.exceptions import (
    AccountingError,
    ProductPlanNotFoundError,
)
from corehq.apps.domain.models import Domain
from corehq.util.quickcache import quickcache
from corehq.util.view_utils import absolute_reverse

logger = logging.getLogger('accounting')

EXCHANGE_RATE_DECIMAL_PLACES = 9


def log_accounting_error(message, show_stack_trace=False):
    logger.error("[BILLING] %s" % message, exc_info=show_stack_trace)


def log_accounting_info(message):
    logger.info("[BILLING] %s" % message)


def months_from_date(reference_date, months_from_date):
    year, month = add_months(reference_date.year, reference_date.month, months_from_date)
    return datetime.date(year, month, 1)


def ensure_domain_instance(domain):
    if not isinstance(domain, Domain):
        domain = Domain.get_by_name(domain)
    return domain


def fmt_feature_rate_dict(feature, feature_rate=None):
    """
    This will be turned into a JSON representation of this Feature and its FeatureRate
    """
    if feature_rate is None:
        feature_rate = feature.get_rate()
    return {
        'name': feature.name,
        'feature_type': feature.feature_type,
        'feature_id': feature.id,
        'rate_id': feature_rate.id,
        'monthly_fee': str(feature_rate.monthly_fee),
        'monthly_limit': feature_rate.monthly_limit,
        'per_excess_fee': str(feature_rate.per_excess_fee),
    }


def fmt_product_rate_dict(product_name, product_rate=None):
    """
    This will be turned into a JSON representation of this SoftwareProductRate
    """
    from corehq.apps.accounting.models import SoftwareProductRate
    if product_rate is None:
        try:
            product_rate = SoftwareProductRate.objects.filter(
                is_active=True,
                name=product_name,
            ).latest('date_created')
        except SoftwareProductRate.DoesNotExist:
            product_rate = SoftwareProductRate.objects.create(name=product_name, is_active=True)
    return {
        'name': product_rate.name,
        'rate_id': product_rate.id,
        'monthly_fee': str(product_rate.monthly_fee),
    }


def get_privileges(plan_version):
    role = plan_version.role.get_cached_role()
    return set([grant.to_role.slug for grant in role.memberships_granted.all()])


ChangeStatusResult = namedtuple('ChangeStatusResult', ['adjustment_reason', 'downgraded_privs', 'upgraded_privs'])


def get_change_status(from_plan_version, to_plan_version):
    from_privs = (
        get_privileges(from_plan_version)
        if from_plan_version is not None
        else set(privileges.MAX_PRIVILEGES)
    )
    to_privs = get_privileges(to_plan_version) if to_plan_version is not None else set()

    downgraded_privs = from_privs.difference(to_privs)
    upgraded_privs = to_privs

    from corehq.apps.accounting.models import SubscriptionAdjustmentReason as Reason
    if from_plan_version is None:
        adjustment_reason = Reason.CREATE
    else:
        adjustment_reason = Reason.SWITCH
        if len(downgraded_privs) == 0 and len(upgraded_privs) > 0:
            adjustment_reason = Reason.UPGRADE
        elif len(upgraded_privs) == 0 and len(downgraded_privs) > 0:
            adjustment_reason = Reason.DOWNGRADE
    return ChangeStatusResult(adjustment_reason, downgraded_privs, upgraded_privs)


def domain_has_privilege_cache_args(domain, privilege_slug, **assignment):
    return [
        domain.name if isinstance(domain, Domain) else domain,
        privilege_slug
    ]


@quickcache(domain_has_privilege_cache_args, timeout=10)
def domain_has_privilege(domain, privilege_slug, **assignment):
    from corehq.apps.accounting.models import Subscription
    try:
        plan_version = Subscription.get_subscribed_plan_by_domain(domain)
        privilege = Role.get_privilege(privilege_slug, assignment)
        if privilege is None:
            return False
        if plan_version.role.has_privilege(privilege):
            return True
    except ProductPlanNotFoundError:
        return False
    except AccountingError:
        pass
    return False


@quickcache(['domain_name'], timeout=15 * 60)
def domain_is_on_trial(domain_name):
    from corehq.apps.accounting.models import Subscription
    subscription = Subscription.get_active_subscription_by_domain(domain_name)
    return subscription and subscription.is_trial


def is_active_subscription(date_start, date_end, today=None):
    today = today or datetime.date.today()
    return ((date_start is None or date_start <= today)
            and (date_end is None or today < date_end))


def has_subscription_already_ended(subscription):
    return (subscription.date_end is not None
            and subscription.date_end <= datetime.date.today())


def get_money_str(amount):
    if amount is not None:
        if amount < 0:
            fmt = "-$%0.2f"
            amount = abs(amount)
        else:
            fmt = "$%0.2f"
        return fmt % amount
    return ""


def get_address_from_invoice(invoice):
    from corehq.apps.accounting.invoice_pdf import Address
    from corehq.apps.accounting.models import BillingContactInfo
    try:
        contact_info = BillingContactInfo.objects.get(
            account=invoice.account,
        )
        return Address(
            name=(
                "%s %s" %
                (contact_info.first_name if contact_info.first_name is not None else "",
                 contact_info.last_name if contact_info.last_name is not None else "")
            ),
            company_name=contact_info.company_name,
            first_line=contact_info.first_line,
            second_line=contact_info.second_line,
            city=contact_info.city,
            region=contact_info.state_province_region,
            postal_code=contact_info.postal_code,
            country=contact_info.country,
        )
    except BillingContactInfo.DoesNotExist:
        return Address()


def get_dimagi_from_email():
    return ("Dimagi CommCare Accounts <%(email)s>" % {
        'email': settings.INVOICING_CONTACT_EMAIL,
    })


def quantize_accounting_decimal(decimal_value):
    return "%0.2f" % decimal_value


def fmt_dollar_amount(decimal_value):
    return _("USD %s") % quantize_accounting_decimal(decimal_value)


def get_customer_cards(username, domain):
    from corehq.apps.accounting.models import (
        StripePaymentMethod, PaymentMethodType,
    )
    import stripe
    try:
        payment_method = StripePaymentMethod.objects.get(
            web_user=username,
            method_type=PaymentMethodType.STRIPE
        )
        stripe_customer = payment_method.customer
        return dict(stripe_customer.cards)
    except StripePaymentMethod.DoesNotExist:
        pass
    except stripe.error.AuthenticationError:
        if not settings.STRIPE_PRIVATE_KEY:
            log_accounting_info("Private key is not defined in settings")
        else:
            raise
    return None


def is_accounting_admin(user):
    accounting_privilege = Role.get_privilege(privileges.ACCOUNTING_ADMIN)
    if accounting_privilege is None:
        return False
    try:
        return user.prbac_role.has_privilege(accounting_privilege)
    except (AttributeError, UserRole.DoesNotExist):
        return False


def make_anchor_tag(href, name, attrs=None):
    context = {
        'href': href,
        'name': name,
        'attrs': attrs or {},
    }
    return render_to_string('accounting/partials/anchor_tag.html', context)


def get_default_domain_url(domain):
    from corehq.apps.domain.views.settings import DefaultProjectSettingsView
    return absolute_reverse(
        DefaultProjectSettingsView.urlname,
        args=[domain],
    )


def ensure_grants(grants_to_privs, dry_run=False, verbose=False, roles_by_slug=None):
    """
    Adds a parameterless grant between grantee and priv, looked up by slug.

    :param grants_to_privs: An iterable of two-tuples:
                            `(grantee_slug, priv_slugs)`. Will only be iterated once.
    """
    dry_run_tag = "[DRY RUN] " if dry_run else ""
    if roles_by_slug is None:
        roles_by_slug = {role.slug: role for role in Role.objects.all()}

    granted = defaultdict(set)
    for grant in Grant.objects.select_related('from_role', 'to_role').all():
        granted[grant.from_role.slug].add(grant.to_role.slug)

    grants_to_create = []
    for grantee_slug, priv_slugs in grants_to_privs:
        if grantee_slug not in roles_by_slug:
            logger.info('grantee %s does not exist.', grantee_slug)
            continue
        for priv_slug in priv_slugs:
            if priv_slug not in roles_by_slug:
                logger.info('privilege %s does not exist.', priv_slug)
                continue
            if priv_slug in granted[grantee_slug]:
                if verbose or dry_run:
                    logger.info('%sPrivilege already granted: %s => %s',
                                dry_run_tag, grantee_slug, priv_slug)
            else:
                granted[grantee_slug].add(priv_slug)
                if verbose or dry_run:
                    logger.info('%sGranting privilege: %s => %s',
                                dry_run_tag, grantee_slug, priv_slug)
                if not dry_run:
                    grants_to_create.append(Grant(
                        from_role=roles_by_slug[grantee_slug],
                        to_role=roles_by_slug[priv_slug]
                    ))
    if grants_to_create:
        Role.get_cache().clear()
        Grant.objects.bulk_create(grants_to_create)


def log_removed_grants(priv_slugs, dry_run=False):
    grants = Grant.objects.filter(to_role__slug__in=list(priv_slugs))
    if grants:
        logger.info("%sRemoving privileges: %s",
                    ("[DRY RUN] " if dry_run else ""),
                    ", ".join(g.to_role.slug for g in grants),
                    )


def get_account_name_from_default_name(default_name):
    from corehq.apps.accounting.models import BillingAccount
    if not BillingAccount.objects.filter(name=default_name).exists():
        return default_name
    else:
        matching_regex_count = BillingAccount.objects.filter(
            name__iregex=r'^%s \(\d+\)$' % default_name,
        ).count()
        return '%s (%d)' % (
            default_name,
            matching_regex_count + 1
        )


def cancel_future_subscriptions(domain_name, from_date, web_user):
    from corehq.apps.accounting.models import (
        Subscription,
        SubscriptionAdjustment,
        SubscriptionAdjustmentReason,
    )
    for later_subscription in Subscription.visible_objects.filter(
        subscriber__domain=domain_name,
        date_start__gt=from_date,
    ).order_by('date_start').all():
        later_subscription.date_end = later_subscription.date_start
        later_subscription.save()
        SubscriptionAdjustment.record_adjustment(
            later_subscription,
            reason=SubscriptionAdjustmentReason.CANCEL,
            web_user=web_user,
            note="Cancelled due to changing subscription",
        )


def pause_current_subscription(domain_name, web_user, current_subscription):
    from corehq.apps.accounting.models import (
        Subscription,
        DefaultProductPlan,
        SoftwarePlanEdition,
        SubscriptionAdjustmentMethod,
        SubscriptionType,
        ProBonoStatus,
        FundingSource,
    )
    cancel_future_subscriptions(domain_name, datetime.date.today(), web_user)
    paused_plan_version = DefaultProductPlan.get_default_plan_version(
        SoftwarePlanEdition.PAUSED
    )
    if current_subscription.is_below_minimum_subscription:
        current_subscription.update_subscription(
            date_start=current_subscription.date_start,
            date_end=current_subscription.date_start + datetime.timedelta(days=30)
        )
        return Subscription.new_domain_subscription(
            account=current_subscription.account,
            domain=domain_name,
            plan_version=paused_plan_version,
            date_start=current_subscription.date_start + datetime.timedelta(days=30),
            web_user=web_user,
            adjustment_method=SubscriptionAdjustmentMethod.USER,
            service_type=SubscriptionType.PRODUCT,
            pro_bono_status=ProBonoStatus.NO,
            funding_source=FundingSource.CLIENT,
            do_not_invoice=True,
            no_invoice_reason='Paused plan',
        )
    else:
        return current_subscription.change_plan(
            paused_plan_version,
            web_user=web_user,
            adjustment_method=SubscriptionAdjustmentMethod.USER,
            service_type=SubscriptionType.PRODUCT,
            pro_bono_status=ProBonoStatus.NO,
            do_not_invoice=True,
            no_invoice_reason='Paused plan',
        )


def is_downgrade(current_edition, next_edition):
    from corehq.apps.accounting.models import SoftwarePlanEdition
    plans = SoftwarePlanEdition.SELF_SERVICE_ORDER + [SoftwarePlanEdition.ENTERPRISE]
    return plans.index(current_edition) > plans.index(next_edition)


def clear_plan_version_cache():
    from corehq.apps.accounting.models import SoftwarePlan
    for software_plan in SoftwarePlan.objects.all():
        SoftwarePlan.get_version.clear(software_plan)


def get_paused_plan_context(request, domain):
    from corehq.apps.accounting.models import Subscription
    from corehq.apps.domain.views import SelectPlanView

    current_sub = Subscription.get_active_subscription_by_domain(domain)
    if (not current_sub
            or not current_sub.plan_version.is_paused
            or not current_sub.previous_subscription):
        return {}

    previous_edition = (current_sub.previous_subscription.plan_version.plan.edition
                        if current_sub.previous_subscription else "")
    return {
        'is_paused': True,
        'previous_edition': previous_edition,
        'paused_date': current_sub.date_start.strftime(USER_DATE_FORMAT),
        'change_plan_url': reverse(SelectPlanView.urlname, args=[domain]),
        'can_edit_billing_info': request.couch_user.is_domain_admin(domain),
    }
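is_active_subscription above is a pure function, so its half-open date window is easy to check in isolation; the helper is restated here and the sample dates are made up:

import datetime

def is_active_subscription(date_start, date_end, today=None):
    today = today or datetime.date.today()
    return ((date_start is None or date_start <= today)
            and (date_end is None or today < date_end))

today = datetime.date(2021, 6, 15)
print(is_active_subscription(datetime.date(2021, 1, 1), datetime.date(2021, 7, 1), today))  # True
print(is_active_subscription(None, datetime.date(2021, 6, 15), today))  # False: the end date is exclusive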
34.419811
114
0.688434
1,702
14,594
5.619859
0.190952
0.021955
0.024882
0.035128
0.217459
0.16644
0.139049
0.088866
0.051856
0.03701
0
0.002146
0.233658
14,594
423
115
34.501182
0.853094
0.023229
0
0.143713
0
0
0.050419
0.003943
0
0
0
0
0
1
0.08982
false
0.005988
0.095808
0.01497
0.290419
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c78ed3281b65fd17334bed8b20f794b80892e233
802
py
Python
RSA/Algorithm/EEA.py
Pumpkin-NN/Cryptography
968e3f55fcc6a02d0badeec157776ca8f07607b8
[ "MIT" ]
null
null
null
RSA/Algorithm/EEA.py
Pumpkin-NN/Cryptography
968e3f55fcc6a02d0badeec157776ca8f07607b8
[ "MIT" ]
null
null
null
RSA/Algorithm/EEA.py
Pumpkin-NN/Cryptography
968e3f55fcc6a02d0badeec157776ca8f07607b8
[ "MIT" ]
null
null
null
def extended_euclidean_algorithm(a, b):
    # Initial s = 1
    s = 1
    list_s = []
    list_t = []
    # Algorithm
    while b > 0:
        # Find the remainder of a, b
        r = a % b
        if r > 0:
            # The t expression
            t = (r - (a * s)) // b
            list_t.append(t)
            list_s.append(s)
        # Use b to be the new a
        a = b
        if r > 0:
            # Use the remainder to be the new b
            b = r
        else:
            break
    # Find the coefficients s and t
    for i in range(len(list_t)):
        if i+1 < len(list_t):
            # Find the coefficient t
            t = list_t[0] + (list_t[(len(list_t)-1)] * s)
            # Find the coefficient s
            s = list_s[i] + list_t[i] * list_t[i+1]
    return t
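For comparison, the textbook iterative extended Euclidean algorithm, which returns gcd(a, b) together with Bezout coefficients satisfying a*s + b*t == gcd(a, b); this is a reference sketch, not the formulation used in the file above:

def egcd(a, b):
    # Track (gcd, s, t) through the standard iteration.
    old_r, r = a, b
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    return old_r, old_s, old_t

g, s, t = egcd(240, 46)
print(g, s, t)           # 2 -9 47
print(240 * s + 46 * t)  # 2, the gcd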
23.588235
57
0.438903
122
802
2.770492
0.295082
0.133136
0.071006
0.029586
0.035503
0
0
0
0
0
0
0.020737
0.458853
802
33
58
24.30303
0.758065
0.24813
0
0.1
0
0
0
0
0
0
0
0
0
1
0.05
false
0
0
0
0.1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c790fdff7571a6a4a1222a967671954a3b60828b
1,468
py
Python
source/documentModel/representations/DocumentNGramSymWinGraph.py
Vyvy-vi/Ngram-Graphs
3b990e5fd92543f7152b4a2c8e689e771578c047
[ "Apache-2.0" ]
178
2016-09-21T19:51:28.000Z
2021-09-07T17:37:06.000Z
source/documentModel/representations/DocumentNGramSymWinGraph.py
Vyvy-vi/Ngram-Graphs
3b990e5fd92543f7152b4a2c8e689e771578c047
[ "Apache-2.0" ]
null
null
null
source/documentModel/representations/DocumentNGramSymWinGraph.py
Vyvy-vi/Ngram-Graphs
3b990e5fd92543f7152b4a2c8e689e771578c047
[ "Apache-2.0" ]
17
2016-10-21T02:11:13.000Z
2020-10-07T19:11:54.000Z
""" DocumentNGramSymWinGraph.py Created on May 23, 2017, 4:56 PM """ import networkx as nx import pygraphviz as pgv import matplotlib.pyplot as plt from networkx.drawing.nx_agraph import graphviz_layout from DocumentNGramGraph import DocumentNGramGraph class DocumentNGramSymWinGraph(DocumentNGramGraph): # an extension of DocumentNGramGraph # for symmetric windowing def buildGraph(self,verbose = False, d=[]): # set Data @class_variable self.setData(d) Data = self._Data # build ngram ng = self.build_ngram() s = len(ng) # calculate window win = self._Dwin//2 # initialize graph self._Graph = nx.Graph() if(s>=2 and win>=1): # max possible window size (bounded by win) o = min(win,s)+1 window = ng[1:o] i = o # first build the full window for gram in ng[0:s-1]: for w in window: self.addEdgeInc(gram,w) window.pop(0) # if window's edge has reached # it's the limit of ng stop # appending if i<s: window.append(ng[i][:]) i+=1 # print Graph (optional) if verbose: self.GraphDraw(self._GPrintVerbose) return self._Graph
26.690909
55
0.52248
164
1,468
4.621951
0.530488
0.026385
0
0
0
0
0
0
0
0
0
0.020455
0.400545
1,468
54
56
27.185185
0.840909
0.241144
0
0
0
0
0
0
0
0
0
0
0
1
0.037037
false
0
0.185185
0
0.296296
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c791642581cbd1a8e05d99ab1f306e65029dc666
2,212
py
Python
examples/EC2Conditions.py
DrLuke/troposphere
05672a2b0cf87215dbd6a2a656669e0d3c92d0e5
[ "BSD-2-Clause" ]
1
2021-04-03T22:24:36.000Z
2021-04-03T22:24:36.000Z
examples/EC2Conditions.py
cartermeyers/troposphere
4b42fa0d65f73cec28184b5349aa198fb8ee5b2e
[ "BSD-2-Clause" ]
1
2021-06-25T15:20:46.000Z
2021-06-25T15:20:46.000Z
examples/EC2Conditions.py
cartermeyers/troposphere
4b42fa0d65f73cec28184b5349aa198fb8ee5b2e
[ "BSD-2-Clause" ]
5
2020-05-10T13:50:32.000Z
2021-09-09T09:06:54.000Z
from __future__ import print_function

from troposphere import (
    Template, Parameter, Ref, Condition, Equals, And, Or, Not, If
)
from troposphere import ec2

parameters = {
    "One": Parameter(
        "One",
        Type="String",
    ),
    "Two": Parameter(
        "Two",
        Type="String",
    ),
    "Three": Parameter(
        "Three",
        Type="String",
    ),
    "Four": Parameter(
        "Four",
        Type="String",
    ),
    "SshKeyName": Parameter(
        "SshKeyName",
        Type="String",
    )
}

conditions = {
    "OneEqualsFoo": Equals(
        Ref("One"),
        "Foo"
    ),
    "NotOneEqualsFoo": Not(
        Condition("OneEqualsFoo")
    ),
    "BarEqualsTwo": Equals(
        "Bar",
        Ref("Two")
    ),
    "ThreeEqualsFour": Equals(
        Ref("Three"),
        Ref("Four")
    ),
    "OneEqualsFooOrBarEqualsTwo": Or(
        Condition("OneEqualsFoo"),
        Condition("BarEqualsTwo")
    ),
    "OneEqualsFooAndNotBarEqualsTwo": And(
        Condition("OneEqualsFoo"),
        Not(Condition("BarEqualsTwo"))
    ),
    "OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft": And(
        Condition("OneEqualsFoo"),
        Condition("BarEqualsTwo"),
        Equals(Ref("Three"), "Pft")
    ),
    "OneIsQuzAndThreeEqualsFour": And(
        Equals(Ref("One"), "Quz"),
        Condition("ThreeEqualsFour")
    ),
    "LaunchInstance": And(
        Condition("OneEqualsFoo"),
        Condition("NotOneEqualsFoo"),
        Condition("BarEqualsTwo"),
        Condition("OneEqualsFooAndNotBarEqualsTwo"),
        Condition("OneIsQuzAndThreeEqualsFour")
    ),
    "LaunchWithGusto": And(
        Condition("LaunchInstance"),
        Equals(Ref("One"), "Gusto")
    )
}

resources = {
    "Ec2Instance": ec2.Instance(
        "Ec2Instance",
        Condition="LaunchInstance",
        ImageId=If("ConditionNameEqualsFoo", "ami-12345678", "ami-87654321"),
        InstanceType="t1.micro",
        KeyName=Ref("SshKeyName"),
        SecurityGroups=["default"],
    )
}

t = Template()

for p in parameters.values():
    t.add_parameter(p)

for k in conditions:
    t.add_condition(k, conditions[k])

for r in resources.values():
    t.add_resource(r)

print(t.to_json())
22.343434
77
0.573689
175
2,212
7.2
0.371429
0.039683
0.028571
0.066667
0
0
0
0
0
0
0
0.013109
0.275769
2,212
98
78
22.571429
0.773408
0
0
0.266667
0
0
0.292495
0.092224
0
0
0
0
0
1
0
false
0
0.033333
0
0.033333
0.022222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c79252ab386af5d00249bc02769ec35279e30201
768
py
Python
fist_phase/08_objects.py
kapuni/exercise_py
b60ba8462d2545cae57483bcb0b3428b03c5d522
[ "MIT" ]
null
null
null
fist_phase/08_objects.py
kapuni/exercise_py
b60ba8462d2545cae57483bcb0b3428b03c5d522
[ "MIT" ]
null
null
null
fist_phase/08_objects.py
kapuni/exercise_py
b60ba8462d2545cae57483bcb0b3428b03c5d522
[ "MIT" ]
null
null
null
class Student(object):

    # __init__ is a special method used to initialize an object when it is created.
    # Through this method we can bind two attributes, name and age, to a student object.
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def study(self, course_name):
        print('%s is studying %s.' % (self.name, course_name))

    # PEP 8 requires identifiers to be all lowercase with words joined by underscores,
    # but some programmers and companies prefer camelCase naming instead.
    def watch_movie(self):
        if self.age < 18:
            print('%s can only watch "Boonie Bears".' % self.name)
        else:
            print('%s is watching a grown-up movie.' % self.name)


def main():
    # Create a student object with the given name and age
    stu1 = Student('骆昊', 38)
    # Send a study message to the object
    stu1.study('Python Programming')
    # Send a watch_movie message to the object
    stu1.watch_movie()
    stu2 = Student('王大锤', 15)
    stu2.study('Moral Education')
    stu2.watch_movie()


if __name__ == '__main__':
    main()
23.272727
53
0.605469
81
768
5.469136
0.493827
0.090293
0
0
0
0
0
0
0
0
0
0.023009
0.264323
768
33
54
23.272727
0.761062
0.205729
0
0
0
0
0.102819
0
0
0
0
0
0
1
0.2
false
0
0
0
0.25
0.15
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c79307bf6012742aa0a7a562893d0160e400a873
1,108
py
Python
lrtc_lib/data/load_dataset.py
MovestaDev/low-resource-text-classification-framework
4380755a65b35265e84ecbf4b87e872d79e8f079
[ "Apache-2.0" ]
57
2020-11-18T15:13:06.000Z
2022-03-28T22:33:26.000Z
lrtc_lib/data/load_dataset.py
MovestaDev/low-resource-text-classification-framework
4380755a65b35265e84ecbf4b87e872d79e8f079
[ "Apache-2.0" ]
5
2021-02-23T22:11:07.000Z
2021-12-13T00:13:48.000Z
lrtc_lib/data/load_dataset.py
MovestaDev/low-resource-text-classification-framework
4380755a65b35265e84ecbf4b87e872d79e8f079
[ "Apache-2.0" ]
14
2021-02-10T08:55:27.000Z
2022-02-23T22:37:54.000Z
# (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0

import logging

from lrtc_lib.data_access import single_dataset_loader
from lrtc_lib.data_access.processors.dataset_part import DatasetPart
from lrtc_lib.oracle_data_access import gold_labels_loader

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')


def load(dataset: str, force_new: bool = False):
    for part in DatasetPart:
        dataset_name = dataset + '_' + part.name.lower()

        # load dataset (generate Documents and TextElements)
        if force_new:
            single_dataset_loader.clear_all_saved_files(dataset_name)
        single_dataset_loader.load_dataset(dataset_name, force_new)

        # load gold labels
        if force_new:
            gold_labels_loader.clear_gold_labels_file(dataset_name)
        gold_labels_loader.load_gold_labels(dataset_name, force_new)
        logging.info('-' * 60)


if __name__ == '__main__':
    dataset_name = 'polarity'
    load(dataset=dataset_name)
35.741935
115
0.730144
151
1,108
5.02649
0.430464
0.101449
0.043478
0.039526
0.055336
0
0
0
0
0
0
0.014161
0.17148
1,108
31
116
35.741935
0.812636
0.168773
0
0.111111
0
0.055556
0.090611
0.027293
0
0
0
0
0
1
0.055556
false
0
0.222222
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c79467938af160abb2d49f1add583ea15a8cc080
8,019
py
Python
graphql_compiler/compiler/emit_match.py
BarracudaPff/code-golf-data-pythpn
42e8858c2ebc6a061012bcadb167d29cebb85c5e
[ "MIT" ]
null
null
null
graphql_compiler/compiler/emit_match.py
BarracudaPff/code-golf-data-pythpn
42e8858c2ebc6a061012bcadb167d29cebb85c5e
[ "MIT" ]
null
null
null
graphql_compiler/compiler/emit_match.py
BarracudaPff/code-golf-data-pythpn
42e8858c2ebc6a061012bcadb167d29cebb85c5e
[ "MIT" ]
null
null
null
"""Convert lowered IR basic blocks to MATCH query strings.""" from collections import deque import six from .blocks import Filter, MarkLocation, QueryRoot, Recurse, Traverse from .expressions import TrueLiteral from .helpers import get_only_element_from_collection, validate_safe_string def _get_vertex_location_name(location): """Get the location name from a location that is expected to point to a vertex.""" mark_name, field_name = location.get_location_name() if field_name is not None: raise AssertionError(u"Location unexpectedly pointed to a field: {}".format(location)) return mark_name def _first_step_to_match(match_step): """Transform the very first MATCH step into a MATCH query string.""" parts = [] if match_step.root_block is not None: if not isinstance(match_step.root_block, QueryRoot): raise AssertionError(u"Expected None or QueryRoot root block, received: " u"{} {}".format(match_step.root_block, match_step)) match_step.root_block.validate() start_class = get_only_element_from_collection(match_step.root_block.start_class) parts.append(u"class: %s" % (start_class,)) if match_step.coerce_type_block is not None: raise AssertionError(u"Invalid MATCH step: {}".format(match_step)) if match_step.where_block: match_step.where_block.validate() parts.append(u"where: (%s)" % (match_step.where_block.predicate.to_match(),)) if match_step.as_block is None: raise AssertionError(u"Found a MATCH step without a corresponding Location. " u"This should never happen: {}".format(match_step)) else: match_step.as_block.validate() parts.append(u"as: %s" % (_get_vertex_location_name(match_step.as_block.location),)) return u"{{ %s }}" % (u", ".join(parts),) def _subsequent_step_to_match(match_step): """Transform any subsequent (non-first) MATCH step into a MATCH query string.""" if not isinstance(match_step.root_block, (Traverse, Recurse)): raise AssertionError(u"Expected Traverse root block, received: " u"{} {}".format(match_step.root_block, match_step)) is_recursing = isinstance(match_step.root_block, Recurse) match_step.root_block.validate() traversal_command = u".%s('%s')" % (match_step.root_block.direction, match_step.root_block.edge_name) parts = [] if match_step.coerce_type_block: coerce_type_set = match_step.coerce_type_block.target_class if len(coerce_type_set) != 1: raise AssertionError(u"Found MATCH type coercion block with more than one target class:" u" {} {}".format(coerce_type_set, match_step)) coerce_type_target = list(coerce_type_set)[0] parts.append(u"class: %s" % (coerce_type_target,)) if is_recursing: parts.append(u"while: ($depth < %d)" % (match_step.root_block.depth,)) if match_step.where_block: match_step.where_block.validate() parts.append(u"where: (%s)" % (match_step.where_block.predicate.to_match(),)) if not is_recursing and match_step.root_block.optional: parts.append(u"optional: true") if match_step.as_block: match_step.as_block.validate() parts.append(u"as: %s" % (_get_vertex_location_name(match_step.as_block.location),)) return u"%s {{ %s }}" % (traversal_command, u", ".join(parts)) def _represent_match_traversal(match_traversal): """Emit MATCH query code for an entire MATCH traversal sequence.""" output = [] output.append(_first_step_to_match(match_traversal[0])) for step in match_traversal[1:]: output.append(_subsequent_step_to_match(step)) return u"".join(output) def _represent_fold(fold_location, fold_ir_blocks): """Emit a LET clause corresponding to the IR blocks for a @fold scope.""" start_let_template = u"$%(mark_name)s = %(base_location)s" traverse_edge_template = 
u'.%(direction)s("%(edge_name)s")' base_template = start_let_template + traverse_edge_template edge_direction, edge_name = fold_location.get_first_folded_edge() mark_name, _ = fold_location.get_location_name() base_location_name, _ = fold_location.base_location.get_location_name() validate_safe_string(mark_name) validate_safe_string(base_location_name) validate_safe_string(edge_direction) validate_safe_string(edge_name) template_data = {"mark_name": mark_name, "base_location": base_location_name, "direction": edge_direction, "edge_name": edge_name} final_string = base_template % template_data for block in fold_ir_blocks: if isinstance(block, Filter): final_string += u"[" + block.predicate.to_match() + u"]" elif isinstance(block, Traverse): template_data = {"direction": block.direction, "edge_name": block.edge_name} final_string += traverse_edge_template % template_data elif isinstance(block, MarkLocation): pass else: raise AssertionError(u"Found an unexpected IR block in the folded IR blocks: " u"{} {} {}".format(type(block), block, fold_ir_blocks)) final_string += ".asList()" return final_string def _construct_output_to_match(output_block): """Transform a ConstructResult block into a MATCH query string.""" output_block.validate() selections = (u"%s AS `%s`" % (output_block.fields[key].to_match(), key) for key in sorted(output_block.fields.keys())) return u"SELECT %s FROM" % (u", ".join(selections),) def _construct_where_to_match(where_block): """Transform a Filter block into a MATCH query string.""" if where_block.predicate == TrueLiteral: raise AssertionError(u"Received WHERE block with TrueLiteral predicate: {}".format(where_block)) return u"WHERE " + where_block.predicate.to_match() def emit_code_from_single_match_query(match_query): """Return a MATCH query string from a list of IR blocks.""" query_data = deque([u"MATCH "]) if not match_query.match_traversals: raise AssertionError(u"Unexpected falsy value for match_query.match_traversals received: " u"{} {}".format(match_query.match_traversals, match_query)) match_traversal_data = [_represent_match_traversal(x) for x in match_query.match_traversals] query_data.append(match_traversal_data[0]) for traversal_data in match_traversal_data[1:]: query_data.append(u", ") query_data.append(traversal_data) query_data.appendleft(u" (") query_data.append(u"RETURN $matches)") fold_data = sorted([_represent_fold(fold_location, fold_ir_blocks) for fold_location, fold_ir_blocks in six.iteritems(match_query.folds)]) if fold_data: query_data.append(u" LET ") query_data.append(fold_data[0]) for fold_clause in fold_data[1:]: query_data.append(u", ") query_data.append(fold_clause) query_data.appendleft(_construct_output_to_match(match_query.output_block)) if match_query.where_block is not None: query_data.append(_construct_where_to_match(match_query.where_block)) return u" ".join(query_data) def emit_code_from_multiple_match_queries(match_queries): """Return a MATCH query string from a list of MatchQuery namedtuples.""" optional_variable_base_name = "$optional__" union_variable_name = "$result" query_data = deque([u"SELECT EXPAND(", union_variable_name, u")", u" LET "]) optional_variables = [] sub_queries = [emit_code_from_single_match_query(match_query) for match_query in match_queries] for (i, sub_query) in enumerate(sub_queries): variable_name = optional_variable_base_name + str(i) variable_assignment = variable_name + u" = (" sub_query_end = u")," query_data.append(variable_assignment) query_data.append(sub_query) query_data.append(sub_query_end) 
optional_variables.append(variable_name) query_data.append(union_variable_name) query_data.append(u" = UNIONALL(") query_data.append(u", ".join(optional_variables)) query_data.append(u")") return u" ".join(query_data) def emit_code_from_ir(schema_info, compound_match_query): """Return a MATCH query string from a CompoundMatchQuery.""" match_queries = compound_match_query.match_queries if len(match_queries) == 1: query_string = emit_code_from_single_match_query(match_queries[0]) elif len(match_queries) > 1: query_string = emit_code_from_multiple_match_queries(match_queries) else: raise AssertionError(u"Received CompoundMatchQuery with an empty list of MatchQueries: " u"{}".format(match_queries)) return query_string
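The deque assembly in emit_code_from_single_match_query (append on the right, appendleft for the SELECT prefix) in miniature; the query fragments below are made up:

from collections import deque

query_data = deque([u"MATCH "])
query_data.append(u"{{ class: Animal, as: Animal___1 }}")        # made-up traversal fragment
query_data.appendleft(u" (")                                     # open the subquery
query_data.append(u"RETURN $matches)")
query_data.appendleft(u"SELECT Animal___1.name AS `name` FROM")  # made-up SELECT clause
print(u" ".join(query_data))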
52.411765
152
0.775907
1,190
8,019
4.907563
0.141176
0.061644
0.041096
0.040068
0.335616
0.252568
0.214555
0.176027
0.141781
0.084247
0
0.001538
0.107994
8,019
153
153
52.411765
0.814903
0.085921
0
0.147887
0
0
0.129759
0.00811
0
0
0
0
0.070423
1
0.070423
false
0.007042
0.035211
0
0.176056
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c7964aa0abe4f31ae2f01cae5205b2c444d9f154
8,436
py
Python
geocircles/backend/gamestate.py
tmick0/geocircles
12845d006eeb0a4032679209a953c1cb072d06d7
[ "MIT" ]
null
null
null
geocircles/backend/gamestate.py
tmick0/geocircles
12845d006eeb0a4032679209a953c1cb072d06d7
[ "MIT" ]
null
null
null
geocircles/backend/gamestate.py
tmick0/geocircles
12845d006eeb0a4032679209a953c1cb072d06d7
[ "MIT" ]
null
null
null
import sqlite3
from enum import Enum
import random

__all__ = ['state_mgr', 'game_state', 'next_state']


class game_state (Enum):
    NEW_GAME = 0
    WAITING_FOR_HOST = 1
    HOST_CHOOSING = 2
    GUEST_GUESSING = 3
    GUEST_CHOOSING = 4
    HOST_GUESSING = 5


def next_state(s):
    if s == game_state.WAITING_FOR_HOST.value:
        return game_state.GUEST_CHOOSING.value
    elif s == game_state.GUEST_CHOOSING.value:
        return game_state.HOST_GUESSING.value
    elif s == game_state.HOST_CHOOSING.value:
        return game_state.GUEST_GUESSING.value
    elif s == game_state.GUEST_GUESSING.value:
        return game_state.GUEST_CHOOSING.value
    elif s == game_state.HOST_GUESSING.value:
        return game_state.HOST_CHOOSING.value


class state_mgr (object):
    def __init__(self, path):
        self.db = sqlite3.connect(path)
        cur = self.db.cursor()
        cur.execute('''
            create table if not exists game (
                game_id integer primary key,
                state integer default {:d}
            )
        '''.format(game_state.NEW_GAME.value))
        cur.execute('''
            create table if not exists session (
                session_id integer primary key,
                game_id integer not null references game (game_id),
                position integer not null,
                display_name text not null
            )
        ''')
        cur.execute('''
            create table if not exists challenge (
                challenge_id integer primary key autoincrement,
                game_id integer not null references game (game_id),
                lat real not null,
                lon real not null,
                pano text not null,
                heading real not null,
                pitch real not null,
                zoom real not null,
                guesses int not null,
                radius int not null
            )
        ''')
        cur.execute('''
            create table if not exists guess (
                guess_id integer primary key autoincrement,
                challenge_id integer not null references challenge (challenge_id),
                lat real not null,
                lon real not null,
                radius real not null,
                good integer not null
            )
        ''')
        cur.execute('''
            create table if not exists rules (
                game_id integer primary key not null references game (game_id),
                max_circle integer not null,
                min_circle integer not null,
                num_circles integer not null,
                num_guesses integer not null,
                difficulty text not null
            )
        ''')
        self.db.commit()

    def create_game(self, display_name):
        game = random.getrandbits(16)
        session = random.getrandbits(32)
        cur = self.db.cursor()
        cur.execute('insert into game (game_id) values (?)', [game])
        cur.execute('insert into session (session_id, game_id, position, display_name) values (?, ?, ?, ?)', [
                    session, game, 0, display_name])
        self.db.commit()
        return game, session

    def join_game(self, game, display_name):
        session = random.getrandbits(32)
        cur = self.db.cursor()
        cur.execute('insert into session (session_id, game_id, position, display_name) values (?, ?, ?, ?)', [
                    session, game, 1, display_name])
        cur.execute('update game set state = ? where game_id = ?',
                    [game_state.WAITING_FOR_HOST.value, game])
        self.db.commit()
        return session

    def set_rules(self, game, rules):
        cur = self.db.cursor()
        cur.execute('''
            insert into rules (game_id, max_circle, min_circle, num_circles, num_guesses, difficulty)
            values (?, ?, ?, ?, ?, ?)
        ''', [game, rules['start_size'], rules['end_size'], rules['num_circles'], rules['num_guesses'], rules['difficulty']])
        self.db.commit()

    def get_rules(self, game):
        cur = self.db.cursor()
        cur.execute('''
            select max_circle, min_circle, num_circles, num_guesses, difficulty
            from rules where game_id = ?
        ''', [game])
        start_size, end_size, num_circles, num_guesses, difficulty = cur.fetchone()
        return {
            'start_size': start_size,
            'end_size': end_size,
            'num_circles': num_circles,
            'num_guesses': num_guesses,
            'difficulty': difficulty
        }

    def resume_session(self, session):
        cur = self.db.cursor()
        cur.execute(
            'select game.game_id, state, position, display_name from session left join game on session.game_id = game.game_id where session_id = ?', [session])
        return cur.fetchone()

    def get_host_session(self, session):
        cur = self.db.cursor()
        cur.execute('''
            select game.game_id, host.session_id from session as guest
            left join game on guest.game_id = game.game_id
            left join session as host on host.game_id = game.game_id
            where guest.session_id = ? and host.position = 0
        ''', [session])
        return cur.fetchone()

    def get_guest_session(self, session):
        cur = self.db.cursor()
        cur.execute('''
            select game.game_id, guest.session_id from session as host
            left join game on host.game_id = game.game_id
            left join session as guest on guest.game_id = game.game_id
            where host.session_id = ? and guest.position = 1
        ''', [session])
        return cur.fetchone()

    def get_session_info(self, session):
        cur = self.db.cursor()
        cur.execute(
            'select game.game_id, game.state, session.position from session left join game on session.game_id = game.game_id where session_id = ?', [session])
        return cur.fetchone()

    def get_game_sessions(self, game):
        cur = self.db.cursor()
        cur.execute(
            'select session_id from session where game_id = ? order by position asc', [game])
        return [sid for (sid,) in cur.fetchall()]

    def set_state(self, game, state):
        cur = self.db.cursor()
        cur.execute('update game set state = ? where game_id = ?', [state, game])
        self.db.commit()

    def set_challenge(self, game, lat, lon, pano, heading, pitch, zoom, guesses, radius):
        cur = self.db.cursor()
        cur.execute('insert into challenge (game_id, lat, lon, pano, heading, pitch, zoom, guesses, radius) values (?, ?, ?, ?, ?, ?, ?, ?, ?)', [
                    game, lat, lon, pano, heading, pitch, zoom, guesses, radius])
        self.db.commit()

    def update_challenge(self, game, guesses, radius):
        cur = self.db.cursor()
        cur.execute(
            'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
        challenge, = cur.fetchone()
        cur.execute('update challenge set guesses = ?, radius = ? where challenge_id = ?', [
                    guesses, radius, challenge])
        self.db.commit()

    def get_challenge(self, game):
        cur = self.db.cursor()
        cur.execute(
            'select lat, lon, pano, heading, pitch, zoom, guesses, radius from challenge where game_id = ? order by challenge_id desc', [game])
        return cur.fetchone()

    def set_guess(self, game, lat, lon, radius, good):
        cur = self.db.cursor()
        cur.execute(
            'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
        challenge, = cur.fetchone()
        cur.execute('insert into guess (challenge_id, lat, lon, radius, good) values (?, ?, ?, ?, ?)', [
                    challenge, lat, lon, radius, good])
        self.db.commit()

    def get_guesses(self, game):
        cur = self.db.cursor()
        cur.execute(
            'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
        challenge, = cur.fetchone()
        cur.execute(
            'select lat, lon, radius, good from guess where challenge_id = ? order by guess_id asc', [challenge])
        res = []
        for lat, lon, radius, good in cur.fetchall():
            res.append({
                'lat': lat,
                'lon': lon,
                'radius': radius,
                'good': good
            })
        return res

    def close(self):
        self.db.close()
        self.db = None
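A quick smoke test of the manager against an in-memory SQLite database, assuming the state_mgr class above is in scope; the display names are illustrative:

mgr = state_mgr(':memory:')  # throwaway database, nothing written to disk
game, host_session = mgr.create_game('host player')
guest_session = mgr.join_game(game, 'guest player')
print(mgr.get_game_sessions(game))       # [host_session, guest_session], ordered by position
print(mgr.resume_session(host_session))  # (game_id, WAITING_FOR_HOST state, 0, 'host player')
mgr.close()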
36.838428
159
0.580844
1,026
8,436
4.616959
0.110136
0.045598
0.030399
0.050665
0.607346
0.523116
0.460207
0.434452
0.400464
0.251425
0
0.003111
0.31413
8,436
228
160
37
0.815589
0
0
0.363636
0
0.020202
0.455785
0
0
0
0
0
0
1
0.090909
false
0
0.015152
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c79c07c8078e5f1d72628e2e7fc0c80e75f6489c
12,955
py
Python
addon_common/common/decorators.py
Unnoen/retopoflow
73c7cfc10a0b58937198d60e308ba5248b446490
[ "OML" ]
1
2022-01-10T23:40:21.000Z
2022-01-10T23:40:21.000Z
addon_common/common/decorators.py
Unnoen/retopoflow
73c7cfc10a0b58937198d60e308ba5248b446490
[ "OML" ]
null
null
null
addon_common/common/decorators.py
Unnoen/retopoflow
73c7cfc10a0b58937198d60e308ba5248b446490
[ "OML" ]
null
null
null
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
hello@cgcookie.com

Created by Jonathan Denning, Jonathan Williamson

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

import os
import re
import json
import time
import inspect
from functools import wraps

import bpy

debug_run_test_calls = False

def debug_test_call(*args, **kwargs):
    def wrapper(fn):
        if debug_run_test_calls:
            ret = str(fn(*args, *kwargs))
            print('TEST: %s()' % fn.__name__)
            if args:
                print('  arg:', args)
            if kwargs:
                print('  kwa:', kwargs)
            print('  ret:', ret)
        return fn
    return wrapper

def stats_wrapper(fn):
    return fn

    if not hasattr(stats_report, 'stats'):
        stats_report.stats = dict()
    frame = inspect.currentframe().f_back
    f_locals = frame.f_locals

    filename = os.path.basename(frame.f_code.co_filename)
    clsname = f_locals['__qualname__'] if '__qualname__' in f_locals else ''
    linenum = frame.f_lineno
    fnname = fn.__name__
    key = '%s%s (%s:%d)' % (
        clsname + ('.' if clsname else ''),
        fnname, filename, linenum
    )
    stats = stats_report.stats
    stats[key] = {
        'filename': filename,
        'clsname': clsname,
        'linenum': linenum,
        'fileline': '%s:%d' % (filename, linenum),
        'fnname': fnname,
        'count': 0,
        'total time': 0,
        'average time': 0,
    }

    def wrapped(*args, **kwargs):
        time_beg = time.time()
        ret = fn(*args, **kwargs)
        time_end = time.time()
        time_delta = time_end - time_beg
        d = stats[key]
        d['count'] += 1
        d['total time'] += time_delta
        d['average time'] = d['total time'] / d['count']
        return ret
    return wrapped

def stats_report():
    return

    stats = stats_report.stats if hasattr(stats_report, 'stats') else dict()
    l = max(len(k) for k in stats)

    def fmt(s):
        return s + ' ' * (l - len(s))

    print()
    print('Call Statistics Report')

    cols = [
        ('class', 'clsname', '%s'),
        ('func', 'fnname', '%s'),
        ('location', 'fileline', '%s'),
        # ('line','linenum','% 10d'),
        ('count', 'count', '% 8d'),
        ('total (sec)', 'total time', '% 10.4f'),
        ('avg (sec)', 'average time', '% 10.6f'),
    ]

    data = [stats[k] for k in sorted(stats)]
    data = [[h] + [f % row[c] for row in data] for (h, c, f) in cols]
    colwidths = [max(len(d) for d in col) for col in data]
    totwidth = sum(colwidths) + len(colwidths) - 1

    def rpad(s, l):
        return '%s%s' % (s, ' ' * (l - len(s)))

    def printrow(i_row):
        row = [col[i_row] for col in data]
        print(' '.join(rpad(d, w) for (d, w) in zip(row, colwidths)))

    printrow(0)
    print('-' * totwidth)
    for i in range(1, len(data[0])):
        printrow(i)

def add_cache(attr, default):
    def wrapper(fn):
        setattr(fn, attr, default)
        return fn
    return wrapper

class LimitRecursion:
    def __init__(self, count, def_ret):
        self.count = count
        self.def_ret = def_ret
        self.calls = 0

    def __call__(self, fn):
        def wrapped(*args, **kwargs):
            ret = self.def_ret
            if self.calls < self.count:
                try:
                    self.calls += 1
                    ret = fn(*args, **kwargs)
                finally:
                    self.calls -= 1
            return ret
        return wrapped

@add_cache('data', {'nested': 0, 'last': None})
def timed_call(label):
    def wrapper(fn):
        def wrapped(*args, **kwargs):
            data = timed_call.data
            if data['last']:
                print(data['last'])
            data['last'] = f'''{" " * data['nested']}Timing {label}'''
            data['nested'] += 1
            time_beg = time.time()
            ret = fn(*args, **kwargs)
            time_end = time.time()
            time_delta = time_end - time_beg
            if data['last']:
                print(f'''{data['last']}: {time_delta:0.4f}s''')
                data['last'] = None
            else:
                print(f'''{" " * data['nested']}{time_delta:0.4f}s''')
            data['nested'] -= 1
            return ret
        return wrapped
    return wrapper

# corrected bug in previous version of blender_version fn wrapper
# https://github.com/CGCookie/retopoflow/commit/135746c7b4ee0052ad0c1842084b9ab983726b33#diff-d4260a97dcac93f76328dfaeb5c87688
def blender_version_wrapper(op, ver):
    self = blender_version_wrapper
    if not hasattr(self, 'fns'):
        major, minor, rev = bpy.app.version
        self.blenderver = '%d.%02d' % (major, minor)
        self.fns = fns = {}
        self.ops = {
            '<':  lambda v: self.blenderver < v,
            '>':  lambda v: self.blenderver > v,
            '<=': lambda v: self.blenderver <= v,
            '==': lambda v: self.blenderver == v,
            '>=': lambda v: self.blenderver >= v,
            '!=': lambda v: self.blenderver != v,
        }
    update_fn = self.ops[op](ver)

    def wrapit(fn):
        nonlocal self, update_fn
        fn_name = fn.__name__
        fns = self.fns
        error_msg = "Could not find appropriate function named %s for version Blender %s" % (fn_name, self.blenderver)

        if update_fn:
            fns[fn_name] = fn

        def callit(*args, **kwargs):
            nonlocal fns, fn_name, error_msg
            fn = fns.get(fn_name, None)
            assert fn, error_msg
            ret = fn(*args, **kwargs)
            return ret

        return callit
    return wrapit

def only_in_blender_version(*args, ignore_others=False, ignore_return=None):
    self = only_in_blender_version
    if not hasattr(self, 'fns'):
        major, minor, rev = bpy.app.version
        self.blenderver = '%d.%02d' % (major, minor)
        self.fns = {}
        self.ignores = {}
        self.ops = {
            '<':  lambda v: self.blenderver < v,
            '>':  lambda v: self.blenderver > v,
            '<=': lambda v: self.blenderver <= v,
            '==': lambda v: self.blenderver == v,
            '>=': lambda v: self.blenderver >= v,
            '!=': lambda v: self.blenderver != v,
        }
        self.re_blender_version = re.compile(r'^(?P<comparison><|<=|==|!=|>=|>) *(?P<version>\d\.\d\d)$')

    matches = [self.re_blender_version.match(arg) for arg in args]
    assert all(match is not None for match in matches), f'At least one arg did not match version comparison: {args}'
    results = [self.ops[match.group('comparison')](match.group('version')) for match in matches]
    version_matches = all(results)

    def wrapit(fn):
        fn_name = fn.__name__
        if version_matches:
            assert fn_name not in self.fns, f'Multiple functions {fn_name} match the Blender version {self.blenderver}'
            self.fns[fn_name] = fn
        if ignore_others and fn_name not in self.ignores:
            self.ignores[fn_name] = ignore_return

        @wraps(fn)
        def callit(*args, **kwargs):
            fn = self.fns.get(fn_name, None)
            if fn_name not in self.ignores:
                assert fn, f'Could not find appropriate function named {fn_name} for version Blender version {self.blenderver}'
            elif fn is None:
                return self.ignores[fn_name]
            return fn(*args, **kwargs)

        return callit
    return wrapit

def warn_once(warning):
    def wrapper(fn):
        nonlocal warning
        @wraps(fn)
        def wrapped(*args, **kwargs):
            nonlocal warning
            if warning:
                print(warning)
                warning = None
            return fn(*args, **kwargs)
        return wrapped
    return wrapper

class PersistentOptions:
    class WrappedDict:
        def __init__(self, cls, filename, version, defaults, update_external):
            self._dirty = False
            self._last_save = time.time()
            self._write_delay = 2.0
            self._defaults = defaults
            self._update_external = update_external
            self._defaults['persistent options version'] = version
            self._dict = {}
            if filename:
                src = inspect.getsourcefile(cls)
                path = os.path.split(os.path.abspath(src))[0]
                self._fndb = os.path.join(path, filename)
            else:
                self._fndb = None
            self.read()
            if self._dict.get('persistent options version', None) != version:
                self.reset()
            self.update_external()

        def update_external(self):
            upd = self._update_external
            if upd:
                upd()

        def dirty(self):
            self._dirty = True
            self.update_external()

        def clean(self, force=False):
            if not force:
                if not self._dirty:
                    return
                if time.time() < self._last_save + self._write_delay:
                    return
            if self._fndb:
                json.dump(self._dict, open(self._fndb, 'wt'), indent=2, sort_keys=True)
            self._dirty = False
            self._last_save = time.time()

        def read(self):
            self._dict = {}
            if self._fndb and os.path.exists(self._fndb):
                try:
                    self._dict = json.load(open(self._fndb, 'rt'))
                except Exception as e:
                    print('Exception caught while trying to read options from "%s"' % self._fndb)
                    print(str(e))
                for k in set(self._dict.keys()) - set(self._defaults.keys()):
                    print('Deleting extraneous key "%s" from options' % k)
                    del self._dict[k]
            self.update_external()
            self._dirty = False

        def keys(self):
            return self._defaults.keys()

        def reset(self):
            keys = list(self._dict.keys())
            for k in keys:
                del self._dict[k]
            self._dict['persistent options version'] = self['persistent options version']
            self.dirty()
            self.clean()

        def __getitem__(self, key):
            return self._dict[key] if key in self._dict else self._defaults[key]

        def __setitem__(self, key, val):
            assert key in self._defaults, 'Attempting to write "%s":"%s" to options, but key does not exist in defaults' % (str(key), str(val))
            if self[key] == val:
                return
            self._dict[key] = val
            self.dirty()
            self.clean()

        def gettersetter(self, key, fn_get_wrap=None, fn_set_wrap=None):
            if not fn_get_wrap:
                fn_get_wrap = lambda v: v
            if not fn_set_wrap:
                fn_set_wrap = lambda v: v
            oself = self

            class GetSet:
                def get(self):
                    return fn_get_wrap(oself[key])

                def set(self, v):
                    v = fn_set_wrap(v)
                    if oself[key] != v:
                        oself[key] = v
            return GetSet()

    def __init__(self, filename=None, version=None):
        self._filename = filename
        self._version = version
        self._db = None

    def __call__(self, cls):
        upd = getattr(cls, 'update', None)
        if upd:
            u = upd

            def wrap():
                def upd_wrap(*args, **kwargs):
                    u(None)
                return upd_wrap
            upd = wrap()
        self._db = PersistentOptions.WrappedDict(cls, self._filename, self._version, cls.defaults, upd)
        db = self._db

        class WrappedClass:
            def __init__(self, *args, **kwargs):
                self._db = db
                self._def = cls.defaults

            def __getitem__(self, key):
                return self._db[key]

            def __setitem__(self, key, val):
                self._db[key] = val

            def keys(self):
                return self._db.keys()

            def reset(self):
                self._db.reset()

            def clean(self):
                self._db.clean()

            def gettersetter(self, key, fn_get_wrap=None, fn_set_wrap=None):
                return self._db.gettersetter(key, fn_get_wrap=fn_get_wrap, fn_set_wrap=fn_set_wrap)
        return WrappedClass
33.475452
143
0.542802
1,573
12,955
4.310871
0.192626
0.016812
0.019466
0.037163
0.239493
0.166052
0.110308
0.110308
0.10028
0.10028
0
0.010255
0.33763
12,955
386
144
33.562176
0.779979
0.076187
0
0.329114
0
0
0.09862
0.009118
0
0
0
0
0.015823
1
0.151899
false
0
0.022152
0.025316
0.303797
0.056962
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
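The decorators in the record above dispatch each call to whichever implementation was registered for the running Blender version. A minimal self-contained sketch of that registry-plus-dispatch idea, with the version string hard-coded instead of read from bpy.app.version (all names below are hypothetical, not part of the file):

_fns = {}
_version = '2.93'
_ops = {'<': lambda v: _version < v, '>=': lambda v: _version >= v}

def version_wrapper(op, ver):
    matches = _ops[op](ver)
    def wrapit(fn):
        if matches:
            _fns[fn.__name__] = fn  # register only the matching variant
        def callit(*args, **kwargs):
            return _fns[fn.__name__](*args, **kwargs)
        return callit
    return wrapit

@version_wrapper('<', '2.80')
def setup():
    return 'legacy path'

@version_wrapper('>=', '2.80')
def setup():
    return 'modern path'

print(setup())  # modern path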
c79d02fd3237e472a6910ab89fe822c176242e9f
11,414
py
Python
venv/Lib/site-packages/pandas/tests/window/moments/test_moments_consistency_ewm.py
ajayiagbebaku/NFL-Model
afcc67a85ca7138c58c3334d45988ada2da158ed
[ "MIT" ]
28,899
2016-10-13T03:32:12.000Z
2022-03-31T21:39:05.000Z
venv/Lib/site-packages/pandas/tests/window/moments/test_moments_consistency_ewm.py
ajayiagbebaku/NFL-Model
afcc67a85ca7138c58c3334d45988ada2da158ed
[ "MIT" ]
31,004
2016-10-12T23:22:27.000Z
2022-03-31T23:17:38.000Z
venv/Lib/site-packages/pandas/tests/window/moments/test_moments_consistency_ewm.py
ajayiagbebaku/NFL-Model
afcc67a85ca7138c58c3334d45988ada2da158ed
[ "MIT" ]
15,149
2016-10-13T03:21:31.000Z
2022-03-31T18:46:47.000Z
import numpy as np
import pytest

from pandas import (
    DataFrame,
    Series,
    concat,
)
import pandas._testing as tm


@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
    result = getattr(frame.ewm(span=10, min_periods=5), func)()
    result = result.loc[(slice(None), 1), 5]
    result.index = result.index.droplevel(1)
    expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
    tm.assert_series_equal(result, expected, check_names=False)


@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov(name):
    A = Series(np.random.randn(50), index=np.arange(50))
    B = A[2:] + np.random.randn(48)

    A[:10] = np.NaN
    B[-10:] = np.NaN

    result = getattr(A.ewm(com=20, min_periods=5), name)(B)
    assert np.isnan(result.values[:14]).all()
    assert not np.isnan(result.values[14:]).any()


@pytest.mark.parametrize("min_periods", [0, 1, 2])
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov_min_periods(name, min_periods):
    # GH 7898
    A = Series(np.random.randn(50), index=np.arange(50))
    B = A[2:] + np.random.randn(48)

    A[:10] = np.NaN
    B[-10:] = np.NaN

    result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
    # binary functions (ewmcov, ewmcorr) with bias=False require at
    # least two values
    assert np.isnan(result.values[:11]).all()
    assert not np.isnan(result.values[11:]).any()

    # check series of length 0
    empty = Series([], dtype=np.float64)
    result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
    tm.assert_series_equal(result, empty)

    # check series of length 1
    result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
        Series([1.0])
    )
    tm.assert_series_equal(result, Series([np.NaN]))


@pytest.mark.parametrize("name", ["cov", "corr"])
def test_different_input_array_raise_exception(name):
    A = Series(np.random.randn(50), index=np.arange(50))
    A[:10] = np.NaN

    msg = "other must be a DataFrame or Series"
    # exception raised is Exception
    with pytest.raises(ValueError, match=msg):
        getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))


def create_mock_weights(obj, com, adjust, ignore_na):
    if isinstance(obj, DataFrame):
        if not len(obj.columns):
            return DataFrame(index=obj.index, columns=obj.columns)
        w = concat(
            [
                create_mock_series_weights(
                    obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na
                )
                for i, _ in enumerate(obj.columns)
            ],
            axis=1,
        )
        w.index = obj.index
        w.columns = obj.columns
        return w
    else:
        return create_mock_series_weights(obj, com, adjust, ignore_na)


def create_mock_series_weights(s, com, adjust, ignore_na):
    w = Series(np.nan, index=s.index)
    alpha = 1.0 / (1.0 + com)
    if adjust:
        count = 0
        for i in range(len(s)):
            if s.iat[i] == s.iat[i]:
                w.iat[i] = pow(1.0 / (1.0 - alpha), count)
                count += 1
            elif not ignore_na:
                count += 1
    else:
        sum_wts = 0.0
        prev_i = -1
        count = 0
        for i in range(len(s)):
            if s.iat[i] == s.iat[i]:
                if prev_i == -1:
                    w.iat[i] = 1.0
                else:
                    w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, count - prev_i)
                sum_wts += w.iat[i]
                prev_i = count
                count += 1
            elif not ignore_na:
                count += 1
    return w


@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods):
    x, is_constant, no_nans = consistency_data
    com = 3.0

    result = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).mean()
    weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
    expected = (
        x.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method="ffill")
    )
    expected[
        x.expanding().count() < (max(min_periods, 1) if min_periods else 1)
    ] = np.nan
    tm.assert_equal(result, expected.astype("float64"))


@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_periods):
    x, is_constant, no_nans = consistency_data
    com = 3.0

    if is_constant:
        count_x = x.expanding().count()
        mean_x = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).mean()
        # check that correlation of a series with itself is either 1 or NaN
        corr_x_x = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).corr(x)
        exp = x.max() if isinstance(x, Series) else x.max().max()

        # check mean of constant series
        expected = x * np.nan
        expected[count_x >= max(min_periods, 1)] = exp
        tm.assert_equal(mean_x, expected)

        # check correlation of constant series with itself is NaN
        expected[:] = np.nan
        tm.assert_equal(corr_x_x, expected)


@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_var_debiasing_factors(
    consistency_data, adjust, ignore_na, min_periods
):
    x, is_constant, no_nans = consistency_data
    com = 3.0

    # check variance debiasing factors
    var_unbiased_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).var(bias=False)
    var_biased_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).var(bias=True)

    weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
    cum_sum = weights.cumsum().fillna(method="ffill")
    cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill")
    numerator = cum_sum * cum_sum
    denominator = numerator - cum_sum_sq
    denominator[denominator <= 0.0] = np.nan
    var_debiasing_factors_x = numerator / denominator

    tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)


@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var(
    consistency_data, adjust, ignore_na, min_periods, bias
):
    x, is_constant, no_nans = consistency_data
    com = 3.0

    mean_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).mean()
    var_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).var(bias=bias)
    assert not (var_x < 0).any().any()

    if bias:
        # check that biased var(x) == mean(x^2) - mean(x)^2
        mean_x2 = (
            (x * x)
            .ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
            .mean()
        )
        tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))


@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var_constant(
    consistency_data, adjust, ignore_na, min_periods, bias
):
    x, is_constant, no_nans = consistency_data
    com = 3.0

    if is_constant:
        count_x = x.expanding(min_periods=min_periods).count()
        var_x = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).var(bias=bias)

        # check that variance of constant series is identically 0
        assert not (var_x > 0).any().any()
        expected = x * np.nan
        expected[count_x >= max(min_periods, 1)] = 0.0
        if not bias:
            expected[count_x < 2] = np.nan
        tm.assert_equal(var_x, expected)


@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, bias):
    x, is_constant, no_nans = consistency_data
    com = 3.0

    var_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).var(bias=bias)
    std_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).std(bias=bias)
    assert not (var_x < 0).any().any()
    assert not (std_x < 0).any().any()

    # check that var(x) == std(x)^2
    tm.assert_equal(var_x, std_x * std_x)


@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, bias):
    x, is_constant, no_nans = consistency_data
    com = 3.0

    var_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).var(bias=bias)
    assert not (var_x < 0).any().any()

    cov_x_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).cov(x, bias=bias)
    assert not (cov_x_x < 0).any().any()

    # check that var(x) == cov(x, x)
    tm.assert_equal(var_x, cov_x_x)


@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_series_cov_corr(
    consistency_data, adjust, ignore_na, min_periods, bias
):
    x, is_constant, no_nans = consistency_data
    com = 3.0

    if isinstance(x, Series):
        var_x_plus_y = (
            (x + x)
            .ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
            .var(bias=bias)
        )
        var_x = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).var(bias=bias)
        var_y = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).var(bias=bias)
        cov_x_y = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).cov(x, bias=bias)
        # check that cov(x, y) == (var(x+y) - var(x) -
        # var(y)) / 2
        tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))

        # check that corr(x, y) == cov(x, y) / (std(x) *
        # std(y))
        corr_x_y = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).corr(x, bias=bias)
        std_x = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).std(bias=bias)
        std_y = x.ewm(
            com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
        ).std(bias=bias)
        tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))

        if bias:
            # check that biased cov(x, y) == mean(x*y) -
            # mean(x)*mean(y)
            mean_x = x.ewm(
                com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
            ).mean()
            mean_y = x.ewm(
                com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
            ).mean()
            mean_x_times_y = (
                (x * x)
                .ewm(
                    com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
                )
                .mean()
            )
            tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
34.173653
88
0.615297
1,694
11,414
3.932113
0.097403
0.121603
0.077766
0.081069
0.709203
0.631887
0.620327
0.600811
0.568834
0.550818
0
0.021289
0.251008
11,414
333
89
34.276276
0.757866
0.060364
0
0.498084
0
0
0.020553
0
0
0
0
0
0.091954
1
0.05364
false
0
0.015326
0
0.084291
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
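The consistency tests above rest on the identity that the biased EWM variance equals the EWM mean of x² minus the squared EWM mean. A small sketch of that check on a toy Series (a minimal example, assuming pandas and numpy are installed; the data is arbitrary):

import numpy as np
import pandas as pd

s = pd.Series(np.random.default_rng(0).normal(size=20))
com = 3.0
var_biased = s.ewm(com=com).var(bias=True)
identity = (s * s).ewm(com=com).mean() - s.ewm(com=com).mean() ** 2
# biased var(x) == mean(x^2) - mean(x)^2, as asserted in test_moments_consistency_var
assert np.allclose(var_biased.fillna(0), identity.fillna(0))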
c79e030266cfddaf92e93230023130a13241d6c0
6,895
py
Python
brainex/query.py
ebuntel/BrainExTemp
991038155a6e9289af90da3d800210841ef23ff1
[ "MIT" ]
1
2020-09-04T16:15:26.000Z
2020-09-04T16:15:26.000Z
brainex/query.py
ebuntel/Brainextemp
991038155a6e9289af90da3d800210841ef23ff1
[ "MIT" ]
null
null
null
brainex/query.py
ebuntel/Brainextemp
991038155a6e9289af90da3d800210841ef23ff1
[ "MIT" ]
null
null
null
# TODO finish implementing query import math from pyspark import SparkContext # from genex.cluster import sim_between_seq from brainex.op.query_op import sim_between_seq from brainex.parse import strip_function, remove_trailing_zeros from .classes import Sequence from brainex.database import genexengine def query(q: Sequence, gc: genexengine, loi: list, sc: SparkContext, k:int=1, ex_sameID: bool=False, overlap: float= 1.0, mode:str='genex'): """ :param q: query sequence :param gc: Gcluster in which to query :param loi: list of two integer values, specifying the query range, if set to None, is going to query all length :param sc: spark context on which to run the query operation :param k: integer, specifying to return top k matches :param ex_sameID: boolean, whether to include sequences from the time series with the same id as the query sequence :param overlap: float, how much overlapping between queries lookups :param mode: query mode, supported modes are 'genex' and 'bf' (bf = brute force) """ if mode == 'genex': gquery() elif mode == 'bf': bfquery() else: raise Exception('Unsupported query mode: ' + mode) def get_query_from_dict(): pass def get_query_sequence_from_file(file: str): resList = [] with open(file, 'r') as f: for i, line in enumerate(f): if not i: features = list(map(lambda x: strip_function(x), line.strip()[:-1].split(','))) if line != "" and line != "\n": data = remove_trailing_zeros(line.split(",")[:-1]) series_data = data[len(features):] resList.append(series_data) if len(resList[0]) == 0: return resList[1:] else: return resList def gquery(query_list: list, gc_data: dict, loi: list, input_list: list, k:int=1, ex_sameID: bool=False, overlap: float= 1.0, ): """ Because Gcluster object doesn't have map property, we have to use dict as input :param file: :param gc_data: :param loi: :param input_list: :param k: :param ex_sameID: :param overlap: :return: """ # get query from id, start, end point # get query from csv file # # query_list = [] # query_set = get_query_from_csv_with_id(file) # print(query_set) # for cur_query in query_set: # query_list.append(get_query_from_sequence(cur_query[0], int(cur_query[1]), int(cur_query[2]), input_list)) # print(query_list) return custom_query(query_list, loi, gc_data, k, input_list) def bfquery(): print() # # def custom_query_operation(q: Sequence, gc: Gcluster, loi: list, sc: SparkContext, # k:int=1, ex_sameID: bool=False, overlap: float= 1.0): # # query_result = filter_rdd_back.repartition(16).map( # lambda clusters: custom_query(q, loi, gc, k, # global_time_series_dict.value, )) # # changed here # # plot_query_result(query_sequence, query_result, global_time_series_dict.value) # return query_result def get_query_from_sequence(id: tuple, start: int, end: int, input_list: list): """ :param id: :param start: :param end: :param input_list: :return: a list """ try: input_dict = dict(input_list) # validate by converting input_list into a dict except (TypeError, ValueError): raise Exception('sequence: fetch_data: input_list is not key-value pair.') return input_dict[id][start: end] def custom_query(query_sequences: list, loi: list, Gcluster_data:dict, k : int, input_list:list): # """ # # :param query_sequences: list of list: the list of sequences to be queried # :param cluster: dict[key = representative, value = list of timeSeriesObj] -> representative is timeSeriesObj # the sequences in the cluster are all of the SAME length # :param k: int # :return list of time series objects: best k matches. 
Again note they are all of the SAME length # """ """ :param query_sequences: :param query_range: :param Gcluster_data: :param k: :param input_list: :return: """ # get query from csv file which contains lists of list of query actual clusters # get query from csv file which contains lists of tuple of id, start, endpoint query_result = dict() if not isinstance(query_sequences, list) or len(query_sequences) == 0: raise ValueError("query sequence must be a list and not empty") cur_query_number = 0 if isinstance(query_sequences[0], list): print("length of query is [" + str(len(query_sequences)) + "]" + "[" + str(len(query_sequences[0])) + "]") print("query is a list of list") for cur_query in query_sequences: if isinstance(cur_query, list): query_result[cur_query_number] = get_most_k_sim(cur_query, loi, Gcluster_data, k, input_list) cur_query_number += 1 return query_result else: return get_most_k_sim(query_sequences, loi, Gcluster_data, k, input_list) def get_most_k_sim(query_sequence: list, loi: list, Gcluster_data : dict, k, input_list:list): """ :param query_sequence: :param query_range: :param Gcluster_data: :param k: :param input_list: :return: """ min_rprs = None # the representative that is closest to the query distance min_dist = math.inf target_cluster = [] print("length of gcluster clusters is " + str(len(Gcluster_data[1]))) for cur_rprs_seq in Gcluster_data[1].keys(): # TODO do we want to get raw clusters here, or set the raw in timeSeriesObj before calling query (no parsing) if (cur_rprs_seq.end - cur_rprs_seq.start + 1) in range(loi[0], loi[1] + 1): # modify here, not use get clusters from objects, use values cur_dist = sim_between_seq(query_sequence, cur_rprs_seq.fetch_data(input_list)) if cur_dist < min_dist: min_rprs = cur_rprs_seq min_dist = cur_dist else: break if min_rprs: print('min representative is ' + min_rprs.__str__()) print('min dist' + str(min_dist)) # print("Querying Cluster of length: " + str(len(get_data_for_timeSeriesObj(min_rprs, time_series_dict)))) target_cluster = Gcluster_data[1].get(min_rprs) print('len of cluster is ' + str(len(target_cluster))) # print("sorting") # target_cluster.sort(key=lambda cluster_sequence: sim_between_seq(query_sequence, cluster_sequence.data)) k = int(k) return target_cluster[0:k] # return the k most similar sequences else: return None
33.634146
119
0.639014
950
6,895
4.449474
0.215789
0.034067
0.022711
0.014194
0.198959
0.132955
0.106932
0.081382
0.081382
0.062929
0
0.006302
0.263524
6,895
204
120
33.79902
0.826113
0.407107
0
0.061728
0
0
0.068643
0
0
0
0
0.009804
0
1
0.098765
false
0.012346
0.074074
0
0.271605
0.08642
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
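get_most_k_sim in the record above first scans cluster representatives for the one closest to the query, then searches only inside that cluster. A stripped-down sketch of the representative scan, with a plain Euclidean distance standing in for brainex's sim_between_seq (the data is made up):

import math

def euclidean(a, b):
    # hypothetical stand-in for brainex's sim_between_seq
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

def best_representative(query, clusters):
    # clusters: dict mapping representative tuple -> list of member sequences
    best, best_dist = None, math.inf
    for rep in clusters:
        d = euclidean(query, rep)
        if d < best_dist:
            best, best_dist = rep, d
    return best, clusters.get(best, [])

reps = {(1.0, 2.0, 3.0): [[1.1, 2.0, 2.9]], (5.0, 5.0, 5.0): [[5.2, 4.9, 5.1]]}
print(best_representative([0.9, 2.1, 3.0], reps))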
c79e23eb5e67f7342ba09df2a42c01c2772ded3a
4,161
py
Python
main.py
orgr/arbitrage_bot
39365dce0dcae0f6bb4baf1d7c32392e28b6c623
[ "MIT" ]
null
null
null
main.py
orgr/arbitrage_bot
39365dce0dcae0f6bb4baf1d7c32392e28b6c623
[ "MIT" ]
1
2021-12-13T03:48:08.000Z
2021-12-13T04:58:36.000Z
main.py
orgr/arbitrage_bot
39365dce0dcae0f6bb4baf1d7c32392e28b6c623
[ "MIT" ]
null
null
null
import sys
import time
from typing import List
import asyncio

import ccxt.async_support as ccxt
# import ccxt
import itertools
from enum import Enum


class Color(Enum):
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    RESET = '\033[0m'


def colorize(s, color: Color):
    # return color.value + s + Color.RESET.value
    return "{}{}{}".format(color.value, s, Color.RESET.value)


def green(s):
    return colorize(s, Color.GREEN)


def yellow(s):
    return colorize(s, Color.YELLOW)


def red(s):
    return colorize(s, Color.RED)


class ArbitrageOpportunity(Enum):
    NONE = 0
    BUY = 1
    SELL = 2

    def __str__(self):
        return self.name


def get_complementary_trade(t: ArbitrageOpportunity):
    assert (t != ArbitrageOpportunity.NONE)
    return ArbitrageOpportunity.BUY if t == ArbitrageOpportunity.SELL else ArbitrageOpportunity.SELL


class Price:
    def __init__(self, exchange, symbol, bid, ask):
        self.exchange = exchange
        self.symbol = symbol
        self.bid = bid
        self.ask = ask

    def is_opportunity(self, other):
        if self.bid > other.ask:
            return ArbitrageOpportunity.BUY  # buy from other exchange
        if self.ask < other.bid:
            return ArbitrageOpportunity.SELL  # buy from this exchange
        return ArbitrageOpportunity.NONE


def compare_prices(p1: Price, p2: Price):
    return p1.is_opportunity(p2)


async def get_price(symbol, exchange) -> Price:
    orderbook = await exchange.fetch_order_book(symbol, 10)
    bid = orderbook['bids'][0][0] if len(orderbook['bids']) > 0 else None
    ask = orderbook['asks'][0][0] if len(orderbook['asks']) > 0 else None
    # spread = (ask - bid) if (bid and ask) else None
    # print(ex.id, 'market price', {'bid': bid, 'ask': ask, 'spread': spread})
    if bid is None or ask is None:
        return None
    return Price(exchange, symbol, float(bid), float(ask))


async def main():
    if len(sys.argv) < 3:
        print("Usage: python {} <exchange id 1> <exchange id 2> ...".format(sys.argv[0]))
        return
    exchanges = []
    try:
        # initialize exchanges
        tasks = []
        for ex_id in sys.argv[1:]:
            try:
                ex = getattr(ccxt, ex_id)({'enableRateLimit': True})  # type: ccxt.Exchange
                # ex.set_sandbox_mode(enabled=True)
            except AttributeError:
                print("{} is not supported".format(ex_id))
                return
            except ccxt.NotSupported:
                print("{} paper trading is not supported".format(ex_id))
                return
            tasks.append(asyncio.create_task(ex.load_markets()))
            exchanges.append(ex)
        [await t for t in tasks]

        all_symbols = [symbol for ex in exchanges for symbol in ex.symbols]
        unique_arbitrable_symbols = set([symbol for symbol in all_symbols if all_symbols.count(symbol) > 1])
        for symbol in unique_arbitrable_symbols:
            tasks = []
            for ex in exchanges:
                tasks.append(asyncio.create_task(get_price(symbol, ex)))
            [await t for t in tasks]
            prices = [t.result() for t in tasks]
            if len(prices) > 1:
                arbitrage_pairs = itertools.combinations(prices, r=2)
                for p in arbitrage_pairs:
                    opportunity = compare_prices(p[0], p[1])
                    if opportunity != ArbitrageOpportunity.NONE:
                        print(green("{}: {} from {}, {} from {}".format(
                            symbol, opportunity, p[1].exchange.id,
                            get_complementary_trade(opportunity), p[0].exchange.id)))
                    else:
                        print(yellow(symbol))
    # close all connections on KeyboardInterrupts and errors
    finally:
        [await ex.close() for ex in exchanges]


if __name__ == '__main__':
    asyncio.run(main())
30.152174
111
0.564768
483
4,161
4.768116
0.26294
0.015632
0.024316
0.020842
0.130699
0.065132
0.042553
0
0
0
0
0.016541
0.331651
4,161
137
112
30.372263
0.811579
0.084595
0
0.098901
0
0
0.056269
0
0
0
0
0
0.010989
1
0.098901
false
0
0.076923
0.065934
0.450549
0.054945
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
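Price.is_opportunity above flags an arbitrage whenever one venue's bid crosses another venue's ask. The same comparison on plain (bid, ask) tuples, without ccxt or asyncio (the quotes are invented):

from itertools import combinations

def is_opportunity(bid_a, ask_a, bid_b, ask_b):
    # buy on the venue with the lower ask, sell where the bid is higher
    if bid_a > ask_b:
        return "buy on B, sell on A"
    if bid_b > ask_a:
        return "buy on A, sell on B"
    return None

quotes = {"ex1": (101.0, 101.5), "ex2": (102.0, 102.4)}  # (bid, ask), hypothetical
for (n1, (b1, a1)), (n2, (b2, a2)) in combinations(quotes.items(), 2):
    print(n1, n2, is_opportunity(b1, a1, b2, a2))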
c79ee6a1b6ebeba170b33fbfe523726f9f206dbb
1,497
py
Python
examples/click-ninja/clickninja-final.py
predicatemike/predigame
096e8379beb1d40ccb3f19ed2bb3ad82b405bb7f
[ "Apache-2.0" ]
null
null
null
examples/click-ninja/clickninja-final.py
predicatemike/predigame
096e8379beb1d40ccb3f19ed2bb3ad82b405bb7f
[ "Apache-2.0" ]
null
null
null
examples/click-ninja/clickninja-final.py
predicatemike/predigame
096e8379beb1d40ccb3f19ed2bb3ad82b405bb7f
[ "Apache-2.0" ]
null
null
null
WIDTH = 20
HEIGHT = 14
TITLE = 'Click Ninja'
BACKGROUND = 'board'

def destroy(s):
    sound('swoosh')
    if s.name == 'taco':
        score(50)
    else:
        score(5)
    # draw a splatting image at the center position of the image
    image('redsplat', center=s.event_pos, size=2).fade(1.0)
    s.fade(0.25)

def failure(s):
    score(-20)
    if s.name == 'bomb':
        s.destroy()
        image('explode', center=s.center, size=10).pulse(0.05)
    if s.name == 'bomb' or score() < 0:
        sound('scream')
        text('You Survived %s seconds' % time(), MAROON)
        callback(gameover, 0.01)

def spawn():
    speed = randint(2, 10)
    size = randint(1, 4)
    target = choice(['bananas', 'cherries', 'olives', 'ham', 'hotdog', 'fries', 'icee', 'pizza'])
    if randint(1, 4) == 2:
        target = 'bomb'
    if randint(1, 10) == 5:
        target = 'taco'

    sound('launch')
    arc = rand_arc()
    s = image(target, arc[0], size=size)

    if target == 'bomb':
        s.speed(speed).spin(1).clicked(failure)
        s.move_to(arc[1], arc[2], callback=s.destroy)
    elif target == 'taco':
        s.speed(5).spin().clicked(destroy)
        s.move_to((-10, -2), (-5, HEIGHT/2), (WIDTH+1, HEIGHT/2), callback=s.destroy)
    else:
        s.speed(speed).clicked(destroy)
        s.move_to(arc[1], arc[2], callback=lambda: failure(s))

    callback(spawn, rand(0.1, 3))

score(color=PURPLE)
callback(spawn, 1)
keydown('r', reset)
24.145161
86
0.549766
212
1,497
3.858491
0.400943
0.02934
0.025672
0.026895
0.099022
0.056235
0.056235
0.056235
0
0
0
0.049405
0.269873
1,497
61
87
24.540984
0.698994
0.038744
0
0.042553
0
0
0.100905
0
0
0
0
0
0
1
0.06383
false
0
0
0
0.06383
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
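spawn() above layers two extra randint rolls on top of a uniform fruit choice, so roughly a quarter of spawns become bombs and a tenth become tacos. The selection logic in isolation, runnable without the predigame runtime (a sketch mirroring the record's odds):

import random

def pick_target():
    # mirrors spawn(): ~1/4 of picks become 'bomb', ~1/10 become 'taco'
    fruit = ['bananas', 'cherries', 'olives', 'ham', 'hotdog', 'fries', 'icee', 'pizza']
    target = random.choice(fruit)
    if random.randint(1, 4) == 2:
        target = 'bomb'
    if random.randint(1, 10) == 5:
        target = 'taco'
    return target

print([pick_target() for _ in range(10)])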
c79f981e96642b4e8be1f381e054bf741fdc029f
7,166
py
Python
nni/retiarii/hub/pytorch/nasbench201.py
nbl97/nni
1530339d3e964a5ea95a0afde1775ec9167cdcc0
[ "MIT" ]
2,305
2018-09-07T12:42:26.000Z
2019-05-06T20:14:24.000Z
nni/retiarii/hub/pytorch/nasbench201.py
nbl97/nni
1530339d3e964a5ea95a0afde1775ec9167cdcc0
[ "MIT" ]
379
2018-09-10T10:19:50.000Z
2019-05-06T18:04:46.000Z
nni/retiarii/hub/pytorch/nasbench201.py
nbl97/nni
1530339d3e964a5ea95a0afde1775ec9167cdcc0
[ "MIT" ]
314
2018-09-08T05:36:08.000Z
2019-05-06T08:48:51.000Z
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from typing import Callable, Dict import torch import torch.nn as nn from nni.retiarii import model_wrapper from nni.retiarii.nn.pytorch import NasBench201Cell __all__ = ['NasBench201'] OPS_WITH_STRIDE = { 'none': lambda C_in, C_out, stride: Zero(C_in, C_out, stride), 'avg_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'avg'), 'max_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'max'), 'conv_3x3': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (3, 3), (stride, stride), (1, 1), (1, 1)), 'conv_1x1': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (1, 1), (stride, stride), (0, 0), (1, 1)), 'skip_connect': lambda C_in, C_out, stride: nn.Identity() if stride == 1 and C_in == C_out else FactorizedReduce(C_in, C_out, stride), } PRIMITIVES = ['none', 'skip_connect', 'conv_1x1', 'conv_3x3', 'avg_pool_3x3'] class ReLUConvBN(nn.Module): def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation): super(ReLUConvBN, self).__init__() self.op = nn.Sequential( nn.ReLU(inplace=False), nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False), nn.BatchNorm2d(C_out) ) def forward(self, x): return self.op(x) class SepConv(nn.Module): def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation): super(SepConv, self).__init__() self.op = nn.Sequential( nn.ReLU(inplace=False), nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False), nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False), nn.BatchNorm2d(C_out), ) def forward(self, x): return self.op(x) class Pooling(nn.Module): def __init__(self, C_in, C_out, stride, mode): super(Pooling, self).__init__() if C_in == C_out: self.preprocess = None else: self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1) if mode == 'avg': self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False) elif mode == 'max': self.op = nn.MaxPool2d(3, stride=stride, padding=1) else: raise ValueError('Invalid mode={:} in Pooling'.format(mode)) def forward(self, x): if self.preprocess: x = self.preprocess(x) return self.op(x) class Zero(nn.Module): def __init__(self, C_in, C_out, stride): super(Zero, self).__init__() self.C_in = C_in self.C_out = C_out self.stride = stride self.is_zero = True def forward(self, x): if self.C_in == self.C_out: if self.stride == 1: return x.mul(0.) else: return x[:, :, ::self.stride, ::self.stride].mul(0.) 
else: shape = list(x.shape) shape[1] = self.C_out zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device) return zeros class FactorizedReduce(nn.Module): def __init__(self, C_in, C_out, stride): super(FactorizedReduce, self).__init__() self.stride = stride self.C_in = C_in self.C_out = C_out self.relu = nn.ReLU(inplace=False) if stride == 2: C_outs = [C_out // 2, C_out - C_out // 2] self.convs = nn.ModuleList() for i in range(2): self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False)) self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0) else: raise ValueError('Invalid stride : {:}'.format(stride)) self.bn = nn.BatchNorm2d(C_out) def forward(self, x): x = self.relu(x) y = self.pad(x) out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1) out = self.bn(out) return out class ResNetBasicblock(nn.Module): def __init__(self, inplanes, planes, stride): super(ResNetBasicblock, self).__init__() assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride) self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1) self.conv_b = ReLUConvBN(planes, planes, 3, 1, 1, 1) if stride == 2: self.downsample = nn.Sequential( nn.AvgPool2d(kernel_size=2, stride=2, padding=0), nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)) elif inplanes != planes: self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1) else: self.downsample = None self.in_dim = inplanes self.out_dim = planes self.stride = stride self.num_conv = 2 def forward(self, inputs): basicblock = self.conv_a(inputs) basicblock = self.conv_b(basicblock) if self.downsample is not None: inputs = self.downsample(inputs) # residual return inputs + basicblock @model_wrapper class NasBench201(nn.Module): """The full search space proposed by `NAS-Bench-201 <https://arxiv.org/abs/2001.00326>`__. It's a stack of :class:`NasBench201Cell`. """ def __init__(self, stem_out_channels: int = 16, num_modules_per_stack: int = 5, num_labels: int = 10): super().__init__() self.channels = C = stem_out_channels self.num_modules = N = num_modules_per_stack self.num_labels = num_labels self.stem = nn.Sequential( nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C) ) layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N C_prev = C self.cells = nn.ModuleList() for C_curr, reduction in zip(layer_channels, layer_reductions): if reduction: cell = ResNetBasicblock(C_prev, C_curr, 2) else: ops: Dict[str, Callable[[int, int], nn.Module]] = { prim: lambda C_in, C_out: OPS_WITH_STRIDE[prim](C_in, C_out, 1) for prim in PRIMITIVES } cell = NasBench201Cell(ops, C_prev, C_curr, label='cell') self.cells.append(cell) C_prev = C_curr self.lastact = nn.Sequential( nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True) ) self.global_pooling = nn.AdaptiveAvgPool2d(1) self.classifier = nn.Linear(C_prev, self.num_labels) def forward(self, inputs): feature = self.stem(inputs) for cell in self.cells: feature = cell(feature) out = self.lastact(feature) out = self.global_pooling(out) out = out.view(out.size(0), -1) logits = self.classifier(out) return logits
34.786408
110
0.579542
971
7,166
4.069001
0.175077
0.036446
0.028347
0.042521
0.314604
0.24981
0.219944
0.21387
0.181979
0.171096
0
0.027219
0.292492
7,166
205
111
34.956098
0.752071
0.029166
0
0.208589
0
0
0.027666
0
0
0
0
0
0.006135
1
0.08589
false
0
0.030675
0.01227
0.214724
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
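Every convolutional candidate op in the record above is built from the same ReLU → Conv → BatchNorm recipe. A standalone sketch of one such block and a forward pass (assumes torch is installed; the channel count and input size are arbitrary):

import torch
import torch.nn as nn

# same layer order as the record's ReLUConvBN, on fixed 16-channel tensors
block = nn.Sequential(
    nn.ReLU(inplace=False),
    nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, dilation=1, bias=False),
    nn.BatchNorm2d(16),
)
x = torch.randn(2, 16, 8, 8)
print(block(x).shape)  # torch.Size([2, 16, 8, 8]); padding=1 preserves spatial size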
c7a32b4c1d013fec417f68425b02fe13d88c171e
9,292
py
Python
authalligator_client/entities.py
closeio/authalligator-client
fe93c9d2333d2949e44c48a2dd0a9a266734e026
[ "MIT" ]
null
null
null
authalligator_client/entities.py
closeio/authalligator-client
fe93c9d2333d2949e44c48a2dd0a9a266734e026
[ "MIT" ]
null
null
null
authalligator_client/entities.py
closeio/authalligator-client
fe93c9d2333d2949e44c48a2dd0a9a266734e026
[ "MIT" ]
1
2021-01-31T13:08:48.000Z
2021-01-31T13:08:48.000Z
import datetime from enum import Enum from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union, cast import attr import ciso8601 import structlog from attr import converters from . import enums from .utils import as_json_dict, to_snake_case logger = structlog.get_logger() class Omitted(Enum): """Singleton written in a way mypy can parse. See https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions for more details. """ token = 0 OMITTED = Omitted.token """A singleton to differentiate between omitted vs explicit :obj:`None`.""" # helper type for entity_converter U = TypeVar("U", bound="BaseAAEntity") def entity_converter( entity_cls, # type: Union[List[Type[U]], Type[U]] ): # type: (...) -> Callable[[Union[Omitted, U, Dict]], Union[U, Omitted]] """ Convert a dictionary response into instances of the entity class. Usage: # disambiguates between type_a and type_b based on ``__typename`` converter = entity_converter([TypeA, TypeB]) my_instance = converter({'__typename': 'TypeB'}) XXX: mypy isn't expressive enough to annotate that the return type will be one of the _specific_ arg types and not the most generic bound base. We'll unfortunately have to ``# type: ignore`` on lines that call this. Args: entity_cls: the class (or classes) the value should be converted into. If multiple classes are provided as options, ``__typename`` must be included in the reponse to support disambiguation. Returns: A callable that will convert a dictionary to the right entity type. If more than one entity type is possible, that dictionary must have a ``__typename`` field present, which must match the ``TYPENAME`` on a provided entity. If none of the provided types match of if the fields don't align with the provided entity, a ``TypeError`` is raised. """ entity_classes = [] # type: List[Type[U]] if isinstance(entity_cls, (list, tuple)): entity_classes = entity_cls else: entity_classes = [entity_cls] def _entity_converter(val): # type: (Union[Dict[str, Any], U, Omitted]) -> Union[U, Omitted] # check if it's explitly been omitted (don't try to convert those) if val is OMITTED: return val # check if it's already an entity if any([isinstance(val, e_cls) for e_cls in entity_classes]): return cast(U, val) # definitely a dict now, since we check what it was earlier. (present # for type checking) val = cast(Dict[str, Any], val) # if there's more than one possibility for entity classes, pick the # right one based on ``__typename`` if len(entity_classes) == 1: # only one option, we don't need an explicit type selected_cls = entity_classes[0] # type: Type[U] else: # a few different return types are expected typename = val.pop("__typename", None) if typename is None: type_options = ", ".join([e.TYPENAME for e in entity_classes]) raise TypeError( 'No "__typename" present to disambiguate between possible ' "types: [{}]".format(type_options) ) matching_typename = next( (e for e in entity_classes if e.TYPENAME == typename), None ) # type: Optional[Type[U]] if matching_typename is None: raise TypeError('No entity found for type "{}"'.format(typename)) selected_cls = matching_typename return selected_cls.from_api_response(val) return _entity_converter @attr.attrs(frozen=True) class BaseAAEntity(object): TYPENAME = "" # type: str """The name of the graphql type in the schema. Used for disambiguation when there's more than one possible type being returned. 
""" as_dict = as_json_dict @classmethod def from_api_response(cls, data): # type: (Type[U], Dict[str, Any]) -> U # If __typename is present, this asserts that it matches this class's # expected typename typename = data.pop("__typename", None) if typename and typename != cls.TYPENAME: raise TypeError( ( "Given type \"{}\" doesn't match this entity's type: " '"{}". Is {} the right entity for ' "this data?" ).format(typename, cls.TYPENAME, cls.__name__) ) # convert top-level kwargs from camelCase to snake_case kwargs = {to_snake_case(k): v for k, v in data.items()} # mypy doesn't like that we're providing kwargs to a type whose init # doesn't accept any kwargs (even though subclasses do have attributes) return cls(**kwargs) # type: ignore @attr.attrs(frozen=True) class AccountError(BaseAAEntity): TYPENAME = "AccountError" code = attr.attrib(converter=enums.AccountErrorCode) # type: enums.AccountErrorCode message = attr.attrib() # type: Optional[str] retry_in = attr.attrib() # type: Optional[int] @attr.attrs(frozen=True) class Account(BaseAAEntity): TYPENAME = "Account" provider = attr.attrib(converter=enums.ProviderType) # type: enums.ProviderType username = attr.attrib() # type: str access_token = attr.attrib() # type: Optional[str] access_token_expires_at = attr.attrib( converter=converters.optional(ciso8601.parse_datetime), ) # type: Optional[datetime.datetime] @attr.attrs(frozen=True) class DeleteOperation(BaseAAEntity): """Base class for delete operation payloads. These payloads don't actually have any field information in them. While there's technically a "_" field in the schema, it's only a placeholder to work around the language not supporting empty responses. It has no meaning and will never have a meaningful value. This class has no specific equivalent type, it's just a convenience type for these entities. 
""" pass @attr.attrs(frozen=True) class DeleteOtherAccountKeysPayload(DeleteOperation): TYPENAME = "DeleteOtherAccountKeysPayload" @attr.attrs(frozen=True) class DeleteAccountKeyPayload(DeleteOperation): TYPENAME = "DeleteAccountKeyPayload" @attr.attrs(frozen=True) class DeleteAccountPayload(DeleteOperation): TYPENAME = "DeleteAccountPayload" @attr.attrs(frozen=True) class AuthorizeAccountPayload(BaseAAEntity): TYPENAME = "AuthorizeAccountPayload" account = attr.attrib( converter=entity_converter(Account), # type: ignore[misc] ) # type: Account account_key = attr.attrib() # type: str number_of_account_keys = attr.attrib() # type: int @attr.attrs(frozen=True) class VerifyAccountPayload(BaseAAEntity): TYPENAME = "VerifyAccountPayload" account = attr.attrib( converter=entity_converter(Account), # type: ignore[misc] ) # type: Account @attr.attrs(frozen=True) class Query(BaseAAEntity): account = attr.attrib( default=OMITTED, converter=entity_converter([Account, AccountError]), # type: ignore[misc] ) # type: Union[Omitted, Account, AccountError] @attr.attrs(frozen=True) class Mutation(BaseAAEntity): # mypy and the attrs plugin doens't like the `Omitted` default + converter # stuff authorize_account = attr.attrib( # type: ignore default=OMITTED, # ignore unsupport converter warning converter=cast( # type: ignore[misc] Union[Omitted, AuthorizeAccountPayload, AccountError], entity_converter([AuthorizeAccountPayload, AccountError]), ), ) # type: Union[Omitted, AuthorizeAccountPayload, AccountError] verify_account = attr.attrib( # type: ignore default=OMITTED, converter=cast( # type: ignore[misc] Union[Omitted, VerifyAccountPayload, AccountError], entity_converter([VerifyAccountPayload, AccountError]), ), ) # type: Union[Omitted, VerifyAccountPayload, AccountError] delete_account = attr.attrib( # type: ignore default=OMITTED, converter=cast( # type: ignore[misc] Union[Omitted, DeleteAccountPayload, AccountError], entity_converter([DeleteAccountPayload, AccountError]), ), ) # type: Union[Omitted, DeleteAccountPayload, AccountError] delete_account_key = attr.attrib( # type: ignore default=OMITTED, converter=cast( # type: ignore[misc] Union[Omitted, DeleteAccountKeyPayload, AccountError], entity_converter([DeleteAccountKeyPayload, AccountError]), ), ) # type: Union[Omitted, DeleteAccountKeyPayload, AccountError] delete_other_account_keys = attr.attrib( # type: ignore default=OMITTED, # ignore unsupport converter warning converter=cast( # type: ignore[misc] Union[Omitted, DeleteOtherAccountKeysPayload, AccountError], entity_converter([DeleteOtherAccountKeysPayload, AccountError]), ), ) # type: Union[Omitted, DeleteOtherAccountKeysPayload, AccountError]
35.19697
88
0.660461
1,075
9,292
5.616744
0.260465
0.028155
0.027327
0.034614
0.183173
0.10732
0.098377
0.097218
0.097218
0.097218
0
0.002141
0.246126
9,292
263
89
35.330798
0.859814
0.374839
0
0.244898
0
0
0.06595
0.013894
0
0
0
0
0
1
0.020408
false
0.006803
0.061224
0
0.380952
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
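entity_converter in the record above disambiguates union payloads by matching the payload's __typename against each candidate class's TYPENAME. The dispatch in miniature (TypeA is a made-up class for illustration, not part of the library):

def convert(data, entity_classes):
    # pick the class whose TYPENAME matches the payload's __typename field
    typename = data.pop("__typename", None)
    for cls in entity_classes:
        if cls.TYPENAME == typename:
            return cls(**data)
    raise TypeError(f'No entity found for type "{typename}"')

class TypeA:
    TYPENAME = "TypeA"
    def __init__(self, value):
        self.value = value

print(convert({"__typename": "TypeA", "value": 1}, [TypeA]).value)  # 1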
c7a3e79d5fcb0530f653c35813c95268647570c7
9,739
py
Python
library/device.py
lompal/USBIPManager
b03d8d9c0befcd70b7f67cfe61c0664f48d2939d
[ "MIT" ]
24
2019-01-25T20:40:07.000Z
2020-11-20T08:12:14.000Z
library/device.py
lompal/USBIPManager
b03d8d9c0befcd70b7f67cfe61c0664f48d2939d
[ "MIT" ]
3
2018-11-28T14:04:57.000Z
2020-09-14T08:35:09.000Z
library/device.py
lompal/USBIPManager
b03d8d9c0befcd70b7f67cfe61c0664f48d2939d
[ "MIT" ]
6
2019-08-23T05:30:26.000Z
2020-11-20T08:12:03.000Z
from library import config, ini, lang, log, performance, periphery, queue from asyncio import get_event_loop from threading import Thread, Event from PyQt5.QtCore import QObject, pyqtSignal from PyQt5.QtWidgets import QTreeWidgetItem # noinspection PyPep8Naming class Signal(QObject): """ PyQt signals for correct daemon device tree calls from a different thread """ addTopLevelItem_ = pyqtSignal(object) setText_ = pyqtSignal(str, int, str) setToolTip_ = pyqtSignal(str, int, object) setIcon_ = pyqtSignal(str, int, object) def addTopLevelItem(self, daemon): """ Load daemon as a top-level item - emit the signal """ self.addTopLevelItem_.emit(daemon) def setText(self, bid, col, baud): """ Set incoming/outgoing bandwidth - emit the signal """ self.setText_.emit(bid, col, baud) def setToolTip(self, bid, col, html): """ Set tooltip for a daemon during capturing operation - emit the signal """ self.setToolTip_.emit(bid, col, html) def setIcon(self, bid, col, icon): """ Set status icon for a daemon during capturing operation - emit the signal """ self.setIcon_.emit(bid, col, icon) # noinspection PyPep8Naming class Tree(metaclass=config.Singleton): """ Daemon device bandwidth tree """ def __init__(self, base, ip_addr): self._base = base self._ip_addr = ip_addr self._sw_config = ini.SWConfig(self._base) self._lang = lang.Tree self._signal = Signal() self._signal.addTopLevelItem_.connect(lambda __daemon: self._addTopLevelItem(__daemon)) self._signal.setText_.connect(lambda __bid, __col, __baud: self._setText(__bid, __col, __baud)) self._signal.setToolTip_.connect(lambda __bid, __col, __html: self._setToolTip(__bid, __col, __html)) self._signal.setIcon_.connect(lambda __bid, __col, __icon: self._setIcon(__bid, __col, __icon)) def _getDaemon(self): """ """ _root = self._base.dev_tree.invisibleRootItem() for idx in range(_root.childCount()): _daemon = _root.child(idx) if _daemon.text(0) == self._ip_addr: return _daemon, idx return None, None def _takeDaemon(self, idx): """ """ return self._base.dev_tree.takeTopLevelItem(idx) def _loadDaemon(self): """ """ _daemon = QTreeWidgetItem([self._ip_addr]) self.addTopLevelItem(_daemon) return _daemon, 0 def _getDevice(self, bid): """ """ _daemon, _idx = self._getDaemon() if not _daemon: return None, None for idx in range(_daemon.childCount()): _dev = _daemon.child(idx) if _dev.text(0) == bid: return _daemon, _dev return _daemon, None def _addTopLevelItem(self, daemon): """ Load daemon as a top-level item - inner function """ self._base.dev_tree.addTopLevelItem(daemon) self._base.dev_tree.expandAll() def _setText(self, bid, col, baud): """ Set incoming/outgoing bandwidth - inner function """ _daemon, _dev = self._getDevice(bid) if _dev: _baud = _dev.child(0) _baud.setText(col, baud) def _setToolTip(self, bid, col, html): """ Set tooltip for a daemon during capturing operation - inner function """ _daemon, _dev = self._getDevice(bid) if _dev: _dev.setToolTip(col, html) def _setIcon(self, bid, col, icon): """ Set status icon for a daemon during capturing operation - inner function """ _daemon, _dev = self._getDevice(bid) if _dev: _dev.setIcon(col, icon) def addTopLevelItem(self, daemon): """ Load daemon as a top-level item from a different thread """ self._signal.addTopLevelItem(daemon) def setText(self, bid, col, baud): """ Set incoming/outgoing bandwidth from a different thread """ self._signal.setText(bid, col, baud) def setToolTip(self, bid, col, html): """ Set status tooltip for a daemon during capturing operation from a different thread """ 
self._signal.setToolTip(bid, col, html) def setIcon(self, bid, col, icon): """ Set status icon for a daemon during capturing operation from a different thread """ self._signal.setIcon(bid, col, icon) def loadDevice(self, bid): """ """ _device = QTreeWidgetItem([bid]) _daemon, _idx = self._getDaemon() if not _daemon: _daemon, _idx = self._loadDaemon() _daemon, _dev = self._getDevice(bid) if _dev: return _daemon = self._takeDaemon(_idx) if self._sw_config.dev_perf: _baud = QTreeWidgetItem([self._lang.ParamBaud, self._lang.ParamNA, self._lang.ParamNA]) _device.addChild(_baud) _daemon.addChild(_device) self.addTopLevelItem(_daemon) def unloadDevice(self, bid): """ """ _daemon, _dev = self._getDevice(bid) if _dev: _daemon.removeChild(_dev) def setIncoming(self, bid, baud): """ Set incoming bandwidth """ self.setText(bid, 1, baud) def setOutgoing(self, bid, baud): """ Set outgoing bandwidth """ self.setText(bid, 2, baud) # noinspection PyPep8Naming class USBTop(metaclass=config.Singleton): """ Daemon device bandwidth processing """ def __init__(self, base, ip_addr): self._base = base self._ip_addr = ip_addr self._loop = get_event_loop() self._sw_config = ini.SWConfig(self._base) self._manager = queue.Manager(self._base) self._name_running = f'USBTOP processing running : {self._ip_addr}' self._name_cancelling = f'USBTOP processing cancelling : {self._ip_addr}' self._ssh = periphery.SSH(self._base, self._ip_addr) self._log = log.Manager(self._base) self._lang = lang.USBTop self._tree = Tree(self._base, self._ip_addr) self._dmn_perf = performance.Device(self._base) self._thread = Thread() self._event = Event() self._pid = None # noinspection PyMethodMayBeStatic def _idx(self, row): """ """ return [param for param in row.split() if param.isdigit()].pop() def _processing(self, buf): """ """ _bid = None for row in buf: if 'Bus ID' in row: _bid = self._idx(row) continue if 'Device ID' in row: _did = self._idx(row) _value = row.split() self._dmn_perf.setProcessing(self._ip_addr, _bid, _did, (_value[4], _value[6])) def _exec(self): """ Run the USBTOP processing - daemon thread """ _query = 'sudo usbtop' _echo = self._ssh.exec(_query) if not all(_echo): return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RunQuery} : {_query}') self._pid, _stdin, _stdout, _stderr = _echo _buf = list() while not self._event.is_set(): _line = _stdout.readline(2048) if not _line: return self._event.set() if '\x1b[2J\x1b[1;1H' in _line: self._processing(_buf) _buf = list() _buf.append(_line.strip().replace('\x1b[2J\x1b[1;1H', '')) continue _buf.append(_line.strip()) def __run(self): """ Run the USBTOP processing - inner function """ self._event = Event() self._thread = Thread(target=self._exec, name=self._name_running) self._thread.start() self._log.setSuccess(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RunSuccess}') self._event.wait() return self._event.is_set() async def _run(self): """ Run the USBTOP processing - coroutine """ if not self._sw_config.dev_perf: return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.EnableRequired}') if self.isRunning(): return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.AforeRun}') if not await self._ssh.establish(self._lang.LogSeparator): return self._log.setInformation(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelSuccess}') await self._loop.run_in_executor(None, self.__run) if self.isRunning(): self.cancel() async def _cancel(self): """ Cancel the USBTOP processing 
- coroutine """ if not self._sw_config.dev_perf: return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.EnableRequired}') if not self.isRunning(): return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.AforeCancel}') self._event.set() self._thread.join() if not self.isRunning(): self._ssh.kill(self._pid) return self._log.setWarning(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelSuccess}') return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelError}') def run(self): """ Run the USBTOP processing - calling coroutine """ self._manager.exec(self._run, self._name_running) def cancel(self): """ Cancel the USBTOP processing - calling coroutine """ self._manager.exec(self._cancel, self._name_cancelling) def isRunning(self): """ Check if the USBTOP processing is running """ return self._thread.is_alive()
36.339552
118
0.618133
1,148
9,739
4.947735
0.158537
0.033803
0.03169
0.034507
0.446831
0.423063
0.388908
0.360211
0.335915
0.302465
0
0.003774
0.265325
9,739
267
119
36.475655
0.790077
0.133279
0
0.271186
0
0.00565
0.09487
0.054492
0
0
0
0
0
1
0.163842
false
0
0.028249
0
0.344633
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
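_processing in the record above pulls bus and device numbers out of usbtop's text rows via _idx, which keeps only the purely numeric tokens. The same extraction in isolation (the sample row is invented, not captured output):

def idx(row):
    # same trick as the record's _idx(): keep digit-only tokens, take the last
    return [p for p in row.split() if p.isdigit()].pop()

print(idx('Bus ID 2 (usb2)'))  # '2' -- '(usb2)' is skipped, it is not all digits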
c7a3f3c709f3111aed4b0e26101a434835f55c66
3,959
py
Python
agent/minimax/submission.py
youkeyao/SJTU-CS410-Snakes-3V3-Group06
180ab3714686cdd879454cf103affc6bb03b7fcd
[ "MIT" ]
1
2022-01-09T13:59:34.000Z
2022-01-09T13:59:34.000Z
agent/minimax/submission.py
youkeyao/SJTU-CS410-Snakes-3V3-Group06
180ab3714686cdd879454cf103affc6bb03b7fcd
[ "MIT" ]
null
null
null
agent/minimax/submission.py
youkeyao/SJTU-CS410-Snakes-3V3-Group06
180ab3714686cdd879454cf103affc6bb03b7fcd
[ "MIT" ]
null
null
null
DEPTH = 3

# Action
class Action:
    top = [1, 0, 0, 0]
    bottom = [0, 1, 0, 0]
    left = [0, 0, 1, 0]
    right = [0, 0, 0, 1]
    actlist = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    mapAct = {
        actlist[0]: top,
        actlist[1]: bottom,
        actlist[2]: left,
        actlist[3]: right
    }

    def go(state, action, board_height, board_width):
        if action == (-1, 0):
            return ((state[0]+board_height-1) % board_height, state[1])
        elif action == (1, 0):
            return ((state[0]+1) % board_height, state[1])
        elif action == (0, 1):
            return (state[0], (state[1]+1) % board_width)
        elif action == (0, -1):
            return (state[0], (state[1]+board_width-1) % board_width)

class GameState:
    obs = {}
    is_end = False

    def __init__(self, observation):
        self.obs = {
            1: observation[1].copy(),
            2: observation[2].copy(),
            3: observation[3].copy(),
            4: observation[4].copy(),
            5: observation[5].copy(),
            6: observation[6].copy(),
            7: observation[7].copy(),
            'board_width': observation['board_width'],
            'board_height': observation['board_height'],
        }

    def generateSuccessor(self, index, action):
        successor = GameState(self.obs)
        index += 2
        head = tuple(successor.obs[index][0])
        tar = list(Action.go(head, action, self.obs['board_height'], self.obs['board_width']))
        for i in range(1, 8):
            for cor in successor.obs[i]:
                if cor == tar:
                    successor.is_end = True
                    if i == 1:
                        successor.obs[index].append(successor.obs[index][-1])
                    else:
                        successor.obs[index].clear()
        successor.obs[index].insert(0, tar)
        successor.obs[index].pop()
        return successor

    def evaluationFunction(self):
        ans = 0
        for i in range(2, 8):
            if i < 5:
                ans += len(self.obs[i])
            else:
                ans -= len(self.obs[i])
        return ans

class MinimaxAgent:
    def __init__(self, obs):
        self.obs = obs

    def value(self, gameState, index, depth, a, b):
        index %= 6
        if index == 0:
            return self.maxValue(gameState, index, depth + 1, a, b)[0]
        elif index < 3:
            return self.maxValue(gameState, index, depth, a, b)[0]
        else:
            return self.minValue(gameState, index, depth, a, b)[0]

    def maxValue(self, gameState, index, depth, a, b):
        if gameState.is_end or depth >= DEPTH:
            return [gameState.evaluationFunction(), None]
        v = -10000
        ac = Action.actlist[0]
        for action in Action.actlist:
            next = gameState.generateSuccessor(index, action)
            value = self.value(next, index+1, depth, a, b)
            if value > v:
                v = value
                ac = action
            if v >= b:
                return [v, ac]
            a = max(a, v)
        return [v, ac]

    def minValue(self, gameState, index, depth, a, b):
        if gameState.is_end:
            return [gameState.evaluationFunction(), None]
        v = 10000
        ac = Action.actlist[0]
        for action in Action.actlist:
            next = gameState.generateSuccessor(index, action)
            value = self.value(next, index+1, depth, a, b)
            if value < v:
                v = value
                ac = action
            if v <= a:
                return [v, ac]
            b = min(b, v)
        return [v, ac]

    def get_action(self, index):
        return self.maxValue(GameState(self.obs), index-2, 0, -10000, 10000)[1]

def my_controller(observation, action_space, is_act_continuous=False):
    ac = Action.mapAct[MinimaxAgent(observation).get_action(observation['controlled_snake_index'])]
    return [ac]
32.186992
99
0.51023
481
3,959
4.128898
0.164241
0.009063
0.024673
0.050352
0.404834
0.355488
0.275932
0.252769
0.252769
0.222558
0
0.042303
0.35514
3,959
123
100
32.186992
0.735605
0.001516
0
0.196262
0
0
0.023026
0.005567
0
0
0
0
0
1
0.093458
false
0
0
0.009346
0.35514
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
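maxValue/minValue in the record above implement alpha-beta pruning: a branch is abandoned as soon as its value can no longer affect the ancestor's choice. The same rule on a toy two-level tree, with nested lists as internal nodes and numbers as leaves (a self-contained sketch, not the snake environment):

import math

def alphabeta(node, maximizing, a=-math.inf, b=math.inf):
    # same cutoff rule as the record's maxValue/minValue
    if isinstance(node, (int, float)):
        return node
    best = -math.inf if maximizing else math.inf
    for child in node:
        v = alphabeta(child, not maximizing, a, b)
        if maximizing:
            best = max(best, v)
            a = max(a, best)
        else:
            best = min(best, v)
            b = min(b, best)
        if b <= a:
            break  # prune: the ancestor will never pick this branch
    return best

print(alphabeta([[3, 5], [2, 9]], True))  # 3; the 9 leaf is never visited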
c7a95d54d497e531abccb6e65c1f8ff7b1fbb2e5
7,202
py
Python
semester3/oop/lab3/parser/client/MasterService/client.py
no1sebomb/University-Labs
1da5e7486f0b8a6119c077945aba8c89cdfc2e50
[ "WTFPL" ]
null
null
null
semester3/oop/lab3/parser/client/MasterService/client.py
no1sebomb/University-Labs
1da5e7486f0b8a6119c077945aba8c89cdfc2e50
[ "WTFPL" ]
null
null
null
semester3/oop/lab3/parser/client/MasterService/client.py
no1sebomb/University-Labs
1da5e7486f0b8a6119c077945aba8c89cdfc2e50
[ "WTFPL" ]
1
2020-11-01T23:54:52.000Z
2020-11-01T23:54:52.000Z
# coding=utf-8
import json
from pathlib import Path
from time import time

from lxml import html  # json/Path/time/html may also arrive via the wildcard import below; made explicit here

from parser.client import *
from parser.client.ResponseItem import *

# Credentials for steering.com.ua live in a config.json next to this module.
with (Path(__file__).resolve().parent / "config.json").open("rt") as siteConfigFile:
    SITE_CONFIG = json.load(siteConfigFile)


class MasterService(Client):
    class Link:
        main = "https://steering.com.ua/"
        login = "https://steering.com.ua/login"
        search = "https://steering.com.ua/catalog?oe={}"

    name = "masterservice"

    def __init__(self):
        super().__init__()
        self.username = SITE_CONFIG["username"]
        self.password = SITE_CONFIG["password"]
        start_time = time()
        self.connected, self.logged = self.sign_in()
        self.login_time = "%.3f s" % (time() - start_time)

    def get_info(self, article, brand):
        if self.connected and self.logged:
            search_request = self.session.get(self.Link.search.format(article))
            if search_request.status_code != 200:
                return self.response_brand(2, "Помилка з'єднання")  # "Connection error"
            html_tree = html.fromstring(search_request.text)
            items = html_tree.xpath('//table[@class="vi-table vi-responsive"]/tr')
            if not items:
                return self.response_brand(1, "Артикул не знайдено")  # "Article not found"
            # Pick the first search-result row whose brand matches the request.
            for item in items:
                brand_expr = './/td[@data-label=""]/a[@rel="nofollow"]/text()'
                try:
                    item_brand = self.clear(item.xpath(brand_expr)[0])
                except IndexError:
                    try:
                        item_brand = self.clear(item.xpath('.//td[@data-label=""]/text()')[1])
                    except IndexError:
                        item_brand = ""
                if self.compare(brand, item_brand):
                    break
            else:
                return self.response_brand(1, "Бренд не знайдено")  # "Brand not found"
            item_link = item.xpath('.//td/div/a/@href')[0]
            item_info_request = self.session.get(item_link)
            if item_info_request.status_code != 200:
                return self.response_brand(2, "Помилка з'єднання")  # "Connection error"
            item_info_tree = html.fromstring(item_info_request.text)
            # Item-table cells are read by fixed position: [1] article, [9] description.
            item_info = item_info_tree.xpath('//table[@class="vi-item-table"]//td/text()')
            item = ResponseItem(
                article=self.clear(item_info[1]),
                brand=item_brand,
                name="".join(item_info_tree.xpath('//h1[@class="vi-item-name"]/span/text()')[:-1]),
                desc=self.clear(item_info[9])
            )
            try:
                item_price = self.clear(
                    item_info_tree.xpath('//span[@class="value"]/span/text()')[0]
                ).replace(" ", "")
            except IndexError:
                item_price = "0"
            item["price"] = "%.2f UAH" % float(item_price)
            # Stock cells alternate: warehouse name, then quantity.
            try:
                item_stocks = item_info_tree.xpath(
                    '//td[@class="product-nalichie-table"]/table/tr/td/text()'
                )[1:]
            except IndexError:
                pass
            else:
                name = None
                for number, value in enumerate(item_stocks):
                    if number % 2:
                        item["stocks"].append(
                            {"name": name, "quantity": self.clear(value), "term": None}
                        )
                    else:
                        name = self.clear(value)
            try:
                image_link = self.Link.main[:-1] + item_info_tree.xpath('//div[@class="fotorama"]/img/@src')[0]
            except IndexError:
                pass
            else:
                item["image"] = self.get_image(image_link, image_id=image_link.split("/")[-1].split(".")[0])
            # Vehicle applicability: make -> list of "model variant" strings.
            car_using = item_info_tree.xpath('//div[@class="row vi-prim-auto"]//ul[@class="prim-car"]/li')
            for car in car_using:
                car_name = self.clear(car.xpath('./span/text()')[0])
                car_models = car.xpath('./ul/li')
                for car_model in car_models:
                    model_name = self.clear(car_model.xpath('./span/text()')[0])
                    model_vars = car_model.xpath('./ul/li/text()')
                    for model_var in model_vars:
                        try:
                            item["using"][car_name].append(model_name + " " + self.clear(model_var))
                        except KeyError:
                            item["using"][car_name] = [model_name + " " + self.clear(model_var)]
            # OE cross references, stored under an empty brand key.
            oe = item_info_tree.xpath(
                '//div[@class="row vi-prim-auto"]//div[@class="car-oe"]//dd[@class="content"]'
            )[0]
            oe_codes = oe.xpath("./a/text()")
            for oe_code in oe_codes:
                try:
                    item["cross"][""].append(self.clear(oe_code))
                except KeyError:
                    item["cross"][""] = [self.clear(oe_code)]
            # Analog items listed in the lower table of the page.
            analogs_table = item_info_tree.xpath('//table[@class="products-list vi-table vi-responsive"]')[0]
            analogs = analogs_table.xpath('.//tr[@class="even" or @class="odd"]')
            for analog in analogs:
                analogs_name_list = analog.xpath('.//a[@class="name"]/span/text()')
                try:
                    analog_brand = self.clear(
                        analog.xpath('.//div[@class="vendor"]/span[@class="value"]/text()')[0]
                    )
                except IndexError:
                    analog_brand = ""
                analog_item = ResponseItem(
                    article=self.clear(analogs_name_list[-1]),
                    brand=analog_brand,
                    name=self.clear("".join(analogs_name_list[:-1]))
                )
                analog_stocks = analog.xpath(
                    './/td[@class="storage"]//td[not(contains(@class, "title_sklad"))]/text()'
                )
                stock_name = ""
                for number, stock in enumerate(analog_stocks[1:]):
                    if number % 2:
                        analog_item["stocks"].append(
                            {"name": stock_name, "quantity": self.clear(stock), "term": None}
                        )
                    else:
                        stock_name = self.clear(stock).replace(":", "")
                image_link = self.Link.main[:-1] + analog.xpath('.//td[@data-label="Фото"]//img/@src')[0]  # "Photo" column
                analog_item["image"] = self.get_image(image_link, image_id=image_link.split("/")[-1].split(".")[0])
                item.set_analog(analog_item)
            return self.response_brand(0, "OK", item)
        elif self.connected:
            return self.response_brand(3, "Помилка авторизації")  # "Authorization error"
        else:
            return self.response_brand(2, "Помилка з'єднання")  # "Connection error"

    def sign_in(self):
        main_page_request = self.session.get(self.Link.main)
        if main_page_request.status_code != 200:
            return 0, 0  # not connected, not logged in
        login_request = self.session.post(
            self.Link.login,
            data={"login": self.username, "password": self.password},
            headers={"Content-Type": "application/x-www-form-urlencoded"}
        )
        if login_request.status_code == 200:
            return 1, 1  # connected and logged in
        else:
            return 1, 0  # connected, login failed
43.648485
120
0.509303
791
7,202
4.461441
0.217446
0.045905
0.030604
0.038538
0.250213
0.181921
0.115897
0.098895
0.087844
0.087844
0
0.011882
0.345598
7,202
164
121
43.914634
0.736898
0.001666
0
0.231884
0
0.014493
0.168197
0.090568
0
0
0
0
0
1
0.021739
false
0.028986
0.014493
0
0.130435
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c7a9d270039cb319b1e7bd45460f8d2badbcbfe0
1,562
py
Python
Tic-Tac-Pi/gameObjects/TextObject.py
mstubinis/Tic-Tac-Pi
b96db58332be4975f4a5b18b6dd45a0eac859528
[ "MIT" ]
2
2016-04-13T02:52:46.000Z
2017-11-20T22:41:36.000Z
Tic-Tac-Pi/gameObjects/TextObject.py
mstubinis/Tic-Tac-Pi
b96db58332be4975f4a5b18b6dd45a0eac859528
[ "MIT" ]
null
null
null
Tic-Tac-Pi/gameObjects/TextObject.py
mstubinis/Tic-Tac-Pi
b96db58332be4975f4a5b18b6dd45a0eac859528
[ "MIT" ]
3
2016-04-14T02:29:32.000Z
2020-04-27T06:08:07.000Z
import pygame
from pygame.locals import *
import resourceManager


class TextObject(pygame.sprite.Sprite):
    """A clickable text label rendered with pygame's default font."""

    def __init__(self, pos, fontSize, fontcolor, textstring):
        pygame.sprite.Sprite.__init__(self)  # call Sprite initializer
        self.position = pos
        self.message = textstring
        self.color = fontcolor
        self.font = pygame.font.Font(None, fontSize)
        self.text = self.font.render(self.message, True, fontcolor)
        self.rect = pygame.Rect((0, 0), self.font.size(self.message))
        self.rect.midtop = pos

    def is_clicked(self, events):
        # True when the left mouse button goes down while hovering the label.
        if self.is_mouse_over():
            for event in events:
                if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                    return True
        return False

    def is_mouse_over(self):
        # Equivalent to self.rect.collidepoint(pygame.mouse.get_pos()).
        mousePos = pygame.mouse.get_pos()
        if mousePos[0] < self.rect.x:
            return False
        if mousePos[0] > self.rect.x + self.rect.w:
            return False
        if mousePos[1] < self.rect.y:
            return False
        if mousePos[1] > self.rect.y + self.rect.h:
            return False
        return True

    def update_message(self, message):
        # Re-render the text and resize the rect, keeping the same midtop anchor.
        self.message = message
        self.text = self.font.render(message, True, self.color)
        self.rect.w = self.font.size(message)[0]
        self.rect.h = self.font.size(message)[1]
        self.rect.midtop = self.position

    def update(self):
        pass

    def draw(self, screen):
        screen.blit(self.text, self.rect)
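A short, self-contained event loop showing how this sprite is meant to be used; the window size, colors, and label text are arbitrary choices, and the import path follows the file location recorded above.

# Hypothetical demo, not part of the recorded file.
import pygame
from gameObjects.TextObject import TextObject  # path as recorded above

pygame.init()
screen = pygame.display.set_mode((320, 240))
label = TextObject((160, 100), 36, (255, 255, 255), "Play")

running = True
while running:
    events = pygame.event.get()
    for event in events:
        if event.type == pygame.QUIT:
            running = False
    if label.is_clicked(events):          # left click while hovering
        label.update_message("Clicked!")
    screen.fill((0, 0, 0))
    label.draw(screen)
    pygame.display.flip()
pygame.quit()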
31.24
68
0.596031
198
1,562
4.626263
0.277778
0.104803
0.039301
0.068777
0.159389
0.111354
0.067686
0.067686
0
0
0
0.010018
0.297055
1,562
49
69
31.877551
0.824226
0.014725
0
0.170732
0
0
0
0
0
0
0
0
0
1
0.146341
false
0.02439
0.073171
0
0.414634
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0